diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..46782a8
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,54 @@
+name: release
+
+on:
+  release:
+    types: [published]
+
+permissions:
+  contents: write
+
+jobs:
+  bump-version:
+    name: Bump version to match release tag
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          # target_commitish is normally the branch the release was cut from;
+          # the final push requires a branch checkout (a bare SHA would be detached).
+          ref: ${{ github.event.release.target_commitish }}
+
+      - name: Extract version from tag
+        id: version
+        # For release events GITHUB_REF_NAME is the release tag. Reading it from
+        # the environment instead of interpolating ${{ github.event.release.tag_name }}
+        # into the script avoids shell injection via crafted tag names.
+        run: |
+          TAG="$GITHUB_REF_NAME"
+          # Strip leading 'v' if present (v0.2.0 -> 0.2.0)
+          VERSION="${TAG#v}"
+          echo "version=$VERSION" >> "$GITHUB_OUTPUT"
+
+      - name: Update version in pyproject.toml
+        env:
+          NEW_VERSION: ${{ steps.version.outputs.version }}
+        run: |
+          sed -i "s/^version = \".*\"/version = \"${NEW_VERSION}\"/" pyproject.toml
+
+      - name: Verify version
+        env:
+          NEW_VERSION: ${{ steps.version.outputs.version }}
+        run: |
+          grep "^version = \"${NEW_VERSION}\"" pyproject.toml
+
+      - name: Commit and push version bump
+        env:
+          NEW_VERSION: ${{ steps.version.outputs.version }}
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git add pyproject.toml
+          git diff --cached --quiet && echo "No changes to commit" && exit 0
+          git commit -m "bump version to ${NEW_VERSION}"
+          git push
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 9919f39..828d6e7 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -1,6 +1,7 @@
name: tests
on:
+ workflow_dispatch:  # allow manual runs from the Actions tab
push:
paths:
- '**'
@@ -10,7 +11,7 @@ on:
jobs:
lint:
- name: Lint (ruff + mypy)
+ name: Lint (pre-commit)
runs-on: ubuntu-latest
steps:
- name: Checkout repository
@@ -19,18 +20,17 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: '3.11'
- cache: pip
- cache-dependency-path: pyproject.toml
- - name: Install dev dependencies
+ - name: Install dependencies
+ shell: bash  # run block relies on POSIX ${VAR:-fallback} expansion
run: |
python -m pip install --upgrade pip
+ BRANCH="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"  # PR head branch, else pushed branch/tag
+ if git ls-remote --heads https://github.com/Jepson2k/waldoctl.git "$BRANCH" 2>/dev/null | grep -q .; then
+ pip install "waldoctl @ git+https://github.com/Jepson2k/waldoctl.git@${BRANCH}"
+ fi
pip install -e ".[dev]"
- - name: Ruff (lint)
- run: ruff check parol6
- - name: Ruff (format check)
- run: ruff format --check parol6
- - name: MyPy
- run: mypy parol6
+ - name: Run pre-commit
+ uses: pre-commit/action@v3.0.1
test:
name: ${{ matrix.os }} / Python ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
@@ -39,7 +39,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
- python-version: ['3.10', '3.11']
+ python-version: ['3.11', '3.12', '3.13', '3.14']  # matches package requirement: Python >= 3.11
steps:
- name: Checkout repository (with submodules)
@@ -52,9 +52,18 @@
cache: pip
cache-dependency-path: pyproject.toml
- - name: Upgrade pip and install dependencies
+ - name: Install cross-repo dependencies
+ shell: bash  # Windows runners default to pwsh; this run block needs a POSIX shell
run: |
python -m pip install --upgrade pip
+ BRANCH="${GITHUB_HEAD_REF:-${GITHUB_REF_NAME}}"  # PR head branch, else pushed branch/tag
+ # waldoctl: try matching branch, fall back to tagged version
+ if git ls-remote --heads https://github.com/Jepson2k/waldoctl.git "$BRANCH" 2>/dev/null | grep -q .; then
+ pip install "waldoctl @ git+https://github.com/Jepson2k/waldoctl.git@${BRANCH}"
+ fi
+
+ - name: Install package
+ run: |
pip install -e ".[dev]" pytest-timeout
- name: Show environment
@@ -62,7 +71,7 @@
python -V
pip list
- - name: Run tests (skip hardware)
+ - name: Run tests
env:
PYTHONUNBUFFERED: '1'
PYTHONUTF8: '1'
diff --git a/.gitignore b/.gitignore
index 26ef302..94457d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -124,4 +124,5 @@ cython_debug/
# VS Code
.vscode/
-serial_port.txt
+serial_port.txt
+test-results.xml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index aec62c7..27af936 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,33 +1,25 @@
-default_language_version:
- python: python3.11
-
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.6.0
+ rev: v6.0.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- id: check-merge-conflict
- - id: check-added-large-files
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.6.9
+ rev: v0.14.10
hooks:
- id: ruff
args: ["--fix"]
- id: ruff-format
- - repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.11.2
+ - repo: local  # ty hook runs from the dev environment, not a pre-commit-managed env
hooks:
- - id: mypy
- name: mypy (parol6)
+ - id: ty
+ name: ty (parol6)
+ entry: ty check parol6
+ language: system  # expects `ty` on PATH (installed with the project dev deps)
files: ^parol6/
pass_filenames: false
- args: ["parol6"]
- stages: [commit, push]
- additional_dependencies:
- - numpy==1.26.4
- - spatialmath-python==1.1.14
- - scipy==1.11.4
+ stages: [pre-commit, pre-push]  # stage names used by pre-commit >= 3.2
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000..43282a3
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,154 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+PAROL6 Python API is a lightweight client and controller for PAROL6 6-DOF robot arms. It provides:
+- **AsyncRobotClient**: Async UDP client for robot control
+- **RobotClient**: Synchronous wrapper for imperative scripts
+- **Controller**: Fixed-rate control loop with serial/simulator transport
+
+## Architecture
+
+```
+┌─────────────────────────────────────────┐
+│ Client Application │
+│ (AsyncRobotClient / RobotClient) │
+└──────────────────┬──────────────────────┘
+ UDP commands (port 5001)
+ Multicast status (239.255.0.101:50510)
+┌──────────────────▼──────────────────────┐
+│ Controller │
+│ (parol6/server/controller.py) │
+│ - 100 Hz control loop (configurable) │
+│ - Command queue & execution │
+│ - Status multicast broadcasting │
+└──────────────────┬──────────────────────┘
+ Serial (3 Mbaud) or MockSerial
+┌──────────────────▼──────────────────────┐
+│ Robot Hardware / Simulator │
+└─────────────────────────────────────────┘
+```
+
+## Build & Test Commands
+
+```bash
+# Development setup
+pip install -e .[dev]
+pre-commit install
+
+# Linting & formatting
+ruff check .
+ruff format .
+ty check parol6/
+
+# Run all pre-commit hooks
+pre-commit run -a
+
+# Testing (simulator used by default via conftest.py — do NOT prefix with env vars)
+pytest
+
+# Run specific test file
+pytest tests/unit/test_wire.py -v
+```
+
+**IMPORTANT: Do NOT prefix `pytest` commands with environment variables like `PAROL6_FAKE_SERIAL=1 pytest ...`. The conftest.py already configures `PAROL6_FAKE_SERIAL=1`. Just run `pytest` directly.**
+
+## Controller CLI
+
+```bash
+# Start controller
+parol6-server --log-level=INFO
+
+# With explicit serial port
+parol6-server --serial=/dev/ttyUSB0 --log-level=DEBUG
+
+# Verbosity shortcuts: -v (INFO), -vv (DEBUG), -vvv (TRACE)
+```
+
+## Key Modules
+
+- **`parol6/client/async_client.py`**: Primary API - async UDP client with motion commands, queries, and status streaming
+- **`parol6/server/controller.py`**: Controller with fixed-rate loop and command execution
+- **`parol6/commands/`**: Polymorphic command classes using `@register_command(CmdType.XXX)` decorator
+- **`parol6/protocol/wire.py`**: Msgpack message encoding/decoding, command structs (tagged union)
+- **`parol6/PAROL6_ROBOT.py`**: Robot kinematics config, DH parameters, joint limits, tool transforms
+
+## Adding a New Command
+
+1. Create a class in `parol6/commands/` and decorate with `@register_command(CmdType.YOUR_CMD)`
+2. Define a `PARAMS_TYPE` msgspec Struct for wire validation
+3. Implement `do_setup(state)` and `execute_step(state)` lifecycle methods
+4. For motion commands, set `streamable=True` if supporting high-rate streaming
+
+## Environment Variables
+
+| Variable | Default | Purpose |
+|----------|---------|---------|
+| `PAROL6_CONTROL_RATE_HZ` | 100 | Control loop frequency |
+| `PAROL6_STATUS_RATE_HZ` | 50 | Status broadcast rate (tests use 20 Hz) |
+| `PAROL6_FAKE_SERIAL` | 0 | Enable simulator (no hardware) |
+| `PAROL6_COM_PORT` | auto | Serial port override |
+| `PAROL_TRACE` | 0 | Enable TRACE logging |
+
+## Simulator Mode
+
+- Uses `MockSerialTransport` to emulate robot dynamics without hardware
+- Toggle via client: `simulator(True)` / `simulator(False)`
+- Controller updates `PAROL6_FAKE_SERIAL` and reinitializes transport seamlessly
+- **Important**: Simulator cannot guarantee hardware success—motor/current limits may cause failures on real robot
+
+## Kinematics Notes
+
+- Uses numerical IK via pinokin (C++/Pinocchio with nanobind Python bindings)
+- J4 is particularly sensitive—some cartesian targets may fail to solve
+- Update `PAROL6_ROBOT.py` for modified hardware (gear ratios, limits, DH params)
+
+## Test Markers
+
+- `@pytest.mark.unit`: Isolated component tests
+- `@pytest.mark.integration`: Component interaction tests (uses simulator)
+
+## Performance Warnings
+
+If you see `Control loop avg period degraded by +XX%`:
+- Reduce `PAROL6_CONTROL_RATE_HZ`
+- Disable TRACE logging (significant overhead)
+- Avoid heavy background tasks during motion
+
+## Hot Path Rules
+
+`execute_step()` and `tick()` run at 100 Hz. **No heap allocations** in these methods.
+
+For streamable commands (`streamable = True`), `do_setup()` also runs at high frequency (50 Hz from UI) via `assign_params()` + `do_setup()` fast-path. Treat it as a hot path too.
+
+- No `list(...)`, `[x for x in ...]`, `dict(...)`, or other container construction
+- No string formatting or f-strings (except in error/stop paths that run once)
+- Pre-allocate all buffers in `__init__`
+- `ndarray[:] = list` is fine — numpy writes into existing buffer in-place
+- Use `dest[:] = src` for array copies. Only use `np.copyto(dest, src, casting=...)` when casting is needed — it's slower otherwise
+
+## Code Style
+
+- **Comments**: Describe the final code state, not what changed. Avoid "changed X to Y" or "added this because..." comments.
+- **Git commits/PRs**: No emoji, no "Generated by..." footers, no co-author boilerplate.
+- **Type annotations**: Fix type errors properly instead of using `# type: ignore`. Prefer:
+ - `@overload` decorators for functions with different return types based on input
+ - `assert` statements to narrow types after None checks
+ - `cast()` from typing when the type is known but the checker can't infer it
+ - `np.atleast_1d()` or similar to guarantee array returns from numpy functions
+ - Only use `# type: ignore` as a last resort for genuine type checker limitations (e.g., numpy's `ArrayLike` being too broad)
+
+## Shell Command Style
+
+- **Do NOT use compound `cd` commands** like `cd foo && git status` or `cd foo && pytest`. Compound commands that cross directories require explicit user approval and slow things down. Instead, issue a standalone `cd foo` command first, then run the action as a separate command. The Bash tool's working directory persists between calls.
+
+## Testing Guidelines
+
+**When CI tests fail, fix them.** Don't waste time analyzing whether failures are "related to your changes" — just fix all failing tests. The goal is a green CI, not attribution.
+
+Prefer fewer, comprehensive integration tests that mimic manual testing over a large number of unit tests. We have no code coverage requirements—the goal is working features, not metrics.
+- **Test results are in `test-results.xml`.** Pytest writes JUnit XML to `test-results.xml` automatically. When diagnosing failures, read this file — it contains test names, durations, failure messages, and captured output. This is more reliable than parsing console output.
+- **NEVER run parol6 and web commander test suites in parallel** — no proper isolation, they share resources and have timing issues when resource-constrained. Always run sequentially.
+- **NEVER allow subagents to run tests.** Many tests are timing-sensitive and the system doesn't have enough resources for agents and tests to run simultaneously. Only the main conversation should run tests, and only after all agents have completed.
diff --git a/README.md b/README.md
index 90d17c5..56744d1 100644
--- a/README.md
+++ b/README.md
@@ -1,220 +1,371 @@
# PAROL6 Python API
-Lightweight Python client and headless controller manager for PAROL6 robot arms.
+Python client and controller for PAROL6 robot arms, implementing the [waldoctl](https://github.com/Jepson2k/waldoctl) robot interface. Any application built against the waldoctl ABCs (e.g., [PAROL Web Commander](https://github.com/PCrnjak/PAROL-Web-Commander)) can use this package as a drop-in backend.
This package provides:
-- AsyncRobotClient (async UDP client)
-- RobotClient (sync wrapper around the async client)
-- ServerManager utilities (manage_server and CLI parol6-server)
+- **Robot** — unified entry point: lifecycle, kinematics (FK/IK), client factories (`waldoctl.Robot`)
+- **AsyncRobotClient** — async UDP client for motion commands and status streaming (`waldoctl.RobotClient`)
+- **RobotClient** — sync wrapper around the async client
+- **DryRunRobotClient** — offline trajectory simulation
+- **`parol6-server`** CLI for standalone controller operation
-It supports a headless controller process speaking a simple text-based UDP protocol. The client can run on the same machine or remotely.
+The controller speaks a msgpack-based UDP protocol and can run on the same machine or remotely.
+---
-## Installation (users)
-Run from this repository directory (external/PAROL6-python-API):
+## Table of contents
+
+- [Installation](#installation)
+- [Quickstart](#quickstart)
+- [Architecture overview](#architecture-overview)
+- [Control loop internals](#control-loop-internals)
+- [Hot path rules](#hot-path-rules)
+- [Motion profiles](#motion-profiles)
+- [Command system](#command-system)
+- [Kinematics and tools](#kinematics-and-tools)
+- [Environment variables](#environment-variables)
+- [Development setup](#development-setup)
+- [FAQ / Troubleshooting](#faq--troubleshooting)
+- [Safety notes](#safety-notes)
+
+---
+
+## Installation
+
+**Requirements:** Python >= 3.11 · Supported platforms: macOS (ARM64), Windows (AMD64), Linux (x86_64, aarch64)
```bash
pip install .
```
-Notes on robotics-toolbox-python:
-- This project uses a custom robotics-toolbox-python build to fix upstream issues needed by PAROL6. Wheels are pinned in pyproject and will be selected automatically by pip based on platform and Python version.
-- Supported prebuilt wheels (from pyproject):
- - Linux x86_64 (cp310, cp311)
- - Linux aarch64 (cp310, cp311) — Raspberry Pi 5 supported; ~1000 Hz cartesian jogging has been achieved on RPi 5
- - macOS arm64 (cp310, cp311)
- - macOS x86_64 (cp310, cp311)
- - Windows AMD64 (cp310, cp311)
-- Primary development platform: Raspberry Pi 5. CI/CD runners are configured to help ensure a similar experience on Windows and macOS.
-
-To launch the headless controller after installation:
+To launch the controller after installation:
```bash
parol6-server --log-level=INFO
```
+## Quickstart
-## Development setup
-For contributors working on this repository:
+### Using the Robot class
+```python
+from parol6 import Robot, RobotClient
-```bash
-# From external/PAROL6-python-API/
-pip install -e .[dev]
-pre-commit install
+robot = Robot(host="127.0.0.1", port=5001)
+robot.start() # starts controller subprocess, blocks until ready
+try:
+ with RobotClient(host="127.0.0.1", port=5001) as client:
+ print("ping:", client.ping())
+ print("pose:", client.get_pose())
+finally:
+ robot.stop()
```
-- Run all pre-commit hooks locally: `pre-commit run -a`
-- Run tests with pytest:
- - `pytest -m "unit or integration"`
- - Simulator is used by default (PAROL6_FAKE_SERIAL=1). Hardware tests are marked and require explicit opt-in.
+The `Robot` class can also be used as a context manager:
+```python
+with Robot() as robot:
+ with RobotClient() as client:
+ client.home(wait=True)
+```
+
+### Async client
+```python
+import asyncio
+from parol6 import AsyncRobotClient
+
+async def main():
+ async with AsyncRobotClient(host="127.0.0.1", port=5001) as client:
+ ready = await client.wait_ready(timeout=3)
+ print("server ready:", ready)
+ print("ping:", await client.ping())
+ status = await client.get_status()
+ print("status keys:", list(status.keys()) if status else None)
+
+asyncio.run(main())
+```
+
+### Sync client (convenience wrapper)
+```python
+from parol6 import RobotClient
+
+with RobotClient(host="127.0.0.1", port=5001) as client:
+ print("ping:", client.ping())
+ print("pose:", client.get_pose())
+```
-Adding a command (overview):
-- Create a class under `parol6/commands/` and decorate it with `@register_command("NAME")` (see `parol6/server/command_registry.py`).
-- Implement `match(parts)`, `setup(state)`, and `tick(state)`; set `streamable=True` on motion commands that support streaming.
-- Use helpers from `parol6/commands/base.py` (parsers, motion profiles). The wire name should match your decorator name.
+### Examples
+See the [`examples/`](examples/) directory for runnable scripts:
+- `sync_client_quickstart.py` -- basic sync client usage (ping, query)
+- `async_client_quickstart.py` -- async client with status streaming and motion
+- `manage_server_demo.py` -- starting/stopping the controller programmatically
+- `pick_and_place.py` -- pick-and-place cycle with electric gripper
+- `draw_circle.py` -- curved motion commands (moveC, moveS, moveP)
+- `zigzag_scan.py` -- raster scan pattern with blend radius for smooth corners
+- `speed_comparison.py` -- timing different speeds and motion profiles
## Architecture overview
-PAROL6 is split into a headless controller and a lightweight client.
-- Headless Controller (`parol6.server.controller`)
- - Binds a UDP server for commands (default 0.0.0.0:5001)
- - Runs a fixed-rate control loop translating high-level commands into low-level joint commands
- - Publishes push-based STATUS frames at a configurable rate using multicast (with unicast fallback)
- - Talks to robot hardware via a SerialTransport, or to a software simulator via MockSerialTransport
+```mermaid
+flowchart TB
+ subgraph Client["Client Application"]
+ ROB["Robot
(lifecycle, kinematics, factories)"]
+ ARC["AsyncRobotClient / RobotClient"]
+ DRC["DryRunRobotClient
(offline simulation)"]
+ end
+
+ subgraph Controller["Controller Process"]
+ direction TB
+
+ subgraph UDP["UDP Layer"]
+ UDP_RX["UDPTransport
recv (port 5001)"]
+ UDP_TX["ACK/Response"]
+ end
+
+ subgraph CmdProc["Command Processing"]
+ REG["Command Registry
(auto-discover)"]
+ QUEUE["Command Queue
(max 100)"]
+ end
+
+ subgraph Planner["MotionPlanner (subprocess)"]
+ direction TB
+ PLAN_IN["command_queue"]
+ PLAN_WORK["TrajectoryPlanner
(path gen → IK chain → TOPPRA)"]
+ PLAN_OUT["segment_queue"]
+
+ PLAN_IN --> PLAN_WORK --> PLAN_OUT
+ end
+
+ subgraph MainLoop["Main Control Loop (100 Hz)"]
+ direction TB
+ RX_SERIAL["1. Read Serial Frame"]
+ POLL["2. Poll UDP Commands"]
+ STATUS["3. Broadcast Status
(change-detection cache)"]
+ ESTOP["4. E-Stop Check"]
+ EXEC["5. Execute
(SegmentPlayer or StreamingExecutor)"]
+ TX_SERIAL["6. Write Serial Frame"]
+ TIMING["7. Deadline Wait"]
+
+ RX_SERIAL --> POLL --> STATUS --> ESTOP --> EXEC --> TX_SERIAL --> TIMING
+ end
+ end
+
+ subgraph Transports["Transport Layer"]
+ FACTORY["TransportFactory"]
+ SERIAL["SerialTransport
(3 Mbaud)"]
+ MOCK["MockSerialTransport
(shared memory IPC)"]
+ end
+
+ subgraph HW["Hardware / Simulator"]
+ BOARD["PAROL6 Board"]
+ SIM["Simulated Dynamics
(subprocess)"]
+ end
+
+ %% Client to Controller
+ ROB -->|"start / stop"| Controller
+ ARC -->|"UDP commands"| UDP_RX
+ UDP_TX -->|"ACK/response"| ARC
+ STATUS -->|"STATUS multicast
239.255.0.101:50510"| ARC
+
+ %% Command flow
+ UDP_RX --> REG --> QUEUE
+
+ %% Planned path: queue → planner subprocess → segment player
+ QUEUE -->|"planned moves
(MoveJ, MoveL, etc.)"| PLAN_IN
+ PLAN_OUT -->|"TrajectorySegment"| EXEC
+
+ %% Streaming path: queue → executor directly (bypasses planner)
+ QUEUE -->|"streaming cmds
(JogJ, ServoJ, etc.)"| EXEC
+
+ %% Transport to hardware
+ TX_SERIAL --> FACTORY
+ FACTORY --> SERIAL --> BOARD
+ FACTORY --> MOCK --> SIM
+```
-- Client (`parol6.client`)
- - AsyncRobotClient: async UDP client implementing commands, queries, waits and status streaming helpers
- - RobotClient: synchronous facade over AsyncRobotClient for imperative scripts
- - ServerManager: spawns the controller in a subprocess and manages its lifecycle
+### Component summary
-- Simulator
- - Simulator mode uses `MockSerialTransport` to emulate the serial protocol and robot dynamics (no hardware required)
- - Toggle via the `SIMULATOR` system command (client methods `simulator_on()` / `simulator_off()`). The controller updates `PAROL6_FAKE_SERIAL` and seamlessly reinitializes the transport; when enabling, it syncs the simulator to the current controller state
- - Important: the simulator helps validate sequences and visualize motion, but it cannot guarantee success on hardware. For example, multi‑joint jogging while fully extended with a heavy payload (e.g., the electric gripper) may succeed in simulation but fail on the real robot due to motor/current limits.
+- **Robot** (`parol6.robot`): Unified entry point — server lifecycle, kinematics (FK/IK), client factories, configuration
+- **Client** (`parol6.client`): `AsyncRobotClient` (async UDP with built-in multicast status listener), `RobotClient` (sync wrapper), `DryRunRobotClient` (offline simulation)
+- **Controller** (`parol6.server.controller`): Main loop with phase-based execution at 100 Hz, UDP command server, status broadcasting
+- **MotionPlanner** (`parol6.server.motion_planner`): Separate subprocess for trajectory computation (TOPPRA, IK chains) — keeps the 100 Hz loop free. Only planned moves (MoveJ, MoveL, MoveC, MoveS, MoveP) go through the planner; streaming commands (JogJ, ServoJ, etc.) execute directly in the main loop
+- **SegmentPlayer** (`parol6.server.segment_player`): Consumes computed trajectory segments in the control loop — indexes one waypoint per tick with zero allocation
+- **StreamingExecutor** (`parol6.motion.streaming_executors`): Joint-space and Cartesian Ruckig-based executors for real-time jog/servo commands
+- **Motion pipeline** (`parol6.motion`): Offline trajectory generation (TOPPRA, Ruckig, Quintic, Trapezoid, Linear) and online streaming executors
+- **Transports** (`parol6.server.transports`): `SerialTransport` (hardware, 3 Mbaud), `MockSerialTransport` (simulator via shared memory IPC)
+- **StatusCache** (`parol6.server.status_cache`): Change-detection cache with async IK worker for cartesian/joint enablement computation
-Why multicast status?
-- The controller pushes status via UDP multicast to avoid client-side polling and reduce command-channel contention
-- Multicast works well for loopback and multiple observers; when multicast isn’t available, it falls back to unicast automatically
+### Why multicast status?
+The controller pushes status via UDP multicast to avoid client-side polling, reduce command-channel contention, and support multiple observers (GUI, logging). Falls back to unicast when multicast is unavailable (`PAROL6_STATUS_TRANSPORT=UNICAST`).
-## Security and multiple senders
-Important: The controller has no authentication or authorization mechanism. It will accept any correctly parsed command on its UDP port. Deploy only on a trusted local network and avoid exposing the controller to untrusted networks.
+### Simulator mode
-Multiple senders: The controller intentionally allows commands from multiple senders concurrently. This enables a GUI or higher-level orchestrator to run sub‑programs while another sender can issue commands like STOP. While useful, this design increases the attack surface. Combine it with network isolation (trusted LAN), firewall rules, or host‑level ACLs.
+Uses `MockSerialTransport` with shared memory IPC for subprocess isolation. Toggle via `simulator_on()` / `simulator_off()`. The simulator syncs to controller state on enable for pose continuity. **Note**: Simulation cannot guarantee hardware success—motor/current limits may cause failures on the real robot.
+---
-## Control rate and performance
-- Default control loop: `PAROL6_CONTROL_RATE_HZ=250` (chosen for consistent cross-platform behavior)
-- Higher control rates require stronger hardware and generally produce smoother motion, but there are diminishing returns—the motion will not become infinitely smooth just by increasing the rate
-- If the controller is falling behind you will see warnings like:
+## Control loop internals
- `Control loop avg period degraded by +XX% (avg=Ys target=Zs); latest overrun=Ws`
+The main loop (`controller.py`) runs a fixed sequence of phases every tick:
- If you see this, or motion feels inconsistent, reduce `PAROL6_CONTROL_RATE_HZ`
-- TRACE logging impacts performance; enable only when necessary (set `PAROL_TRACE=1` or use `--log-level=TRACE`)
+1. **Read serial frame** — poll transport for incoming telemetry (position, I/O, gripper)
+2. **Poll UDP commands** — non-blocking receive of up to 25 messages per tick
+3. **Broadcast status** — multicast at `STATUS_RATE_HZ` (default 50 Hz), skipped if status cache is stale
+4. **E-Stop check** — hardware pin polling; on activation: cancel all motion, clear queue, send DISABLE to firmware. Auto-recovers on release
+5. **Execute** — run SegmentPlayer (planned moves) or StreamingExecutor (jog/servo)
+6. **Write serial frame** — pack output into 58-byte frame and transmit
+7. **Deadline wait** — hybrid sleep + busy-loop to hit exact tick boundary
+### Timing: hybrid sleep + busy-loop
-## Streaming mode
-- Streaming (`STREAM|ON` / `STREAM|OFF`; `client.stream_on()` / `client.stream_off()`) is intended for high-rate jogging and continuous updates
-- In stream mode the server de-duplicates stale inputs, reduces ACK chatter, and can reuse the active streamable command fast-path to minimize overhead
-- Use streaming for UI-driven jog or live teleoperation; use non-streaming for discrete motions and queued programs
+The loop timer (`loop_timer.py`) uses a two-phase strategy for precise tick timing:
+```
+deadline = now + interval (10ms at 100Hz)
-## Kinematics, IK, and singularities
-Numerical IK vs. analytical:
-- This project uses numerical IK (via robotics-toolbox-python) for flexibility: it adapts to tool changes and hardware modifications without deriving new closed forms
-- Trade-offs: numerical IK can be slower and less robust near singularities compared to an ideal analytical solver
+if time_remaining > busy_threshold (1ms):
+ time.sleep(time_remaining - busy_threshold) # OS sleep for bulk of wait
-Known behaviors and limitations:
-- A slight stutter may occur when jogging near singularities; this is a known limitation of the current kinematics implementation
-- The current robotics-toolbox-python backend used here does not expose null-space manipulation in C. As a result, some cartesian targets can fail to solve—joint 4 (J4) is particularly sensitive. Future work in the ported backend may add null-space features
+while time.perf_counter() < deadline:
+ pass # Busy-loop for final 1ms
+```
-Adapting to modified hardware:
-- Update `parol6/PAROL6_ROBOT.py` (gear ratios, joint limits, speed/acc/jerk limits)
-- Update tool transforms in `parol6/tools.py` (4×4 SE3 matrices)
-- Optionally update the URDF in `parol6/urdf_model/` for visualization or geometry changes
-- Advanced: you can also add or replace DH joints programmatically without modifying the URDF. For example:
+OS `time.sleep()` has ~1-4ms jitter depending on platform and load. The busy-loop absorbs this jitter to hit the deadline with sub-millisecond precision. The `PAROL6_BUSY_THRESHOLD_MS` env var (default 1.0) controls the crossover point.
- ```python
- # Example: replace the 3rd joint with a custom DH link without editing the URDF
- from roboticstoolbox import Link, ET
- from parol6.PAROL6_ROBOT import _cached_urdf
- import roboticstoolbox as rtb
+### Planned vs streaming command paths
- base_links = list(_cached_urdf.elinks)
+**Planned moves** (MoveJ, MoveL, MoveC, MoveS, MoveP, Home):
+1. Command arrives via UDP → decoded → queued
+2. Submitted to MotionPlanner subprocess via `command_queue`
+3. Planner runs TrajectoryBuilder (TOPPRA, IK) — can take 10-500ms
+4. Result sent back as `TrajectorySegment` via `segment_queue`
+5. SegmentPlayer indexes one waypoint per tick: `Position_out[:] = trajectory_steps[step]`
- # Create a new DH link (Revolute) – customize a, d, alpha, and any fixed offset
- j3_custom = Link(ET.DH(a=0.045, d=0.0, alpha=0.0), name="J3_custom", parent=base_links[1])
+**Streaming commands** (JogJ, JogL, ServoJ, ServoL):
+1. Command arrives via UDP → **stream fast-path** (no full decode if type matches active command)
+2. `assign_params()` updates target on existing command instance
+3. `do_setup()` re-runs (also a hot path at ~50Hz for UI-driven jog)
+4. StreamingExecutor ticks Ruckig for smooth interpolation
+5. Per-tick IK solve for Cartesian commands (JogL, ServoL)
- # Rebuild link chain: keep upstream joints as-is, insert replacement, then reuse downstream links
- all_links = [
- base_links[0], # J1
- base_links[1], # J2
- j3_custom, # new J3
- *base_links[3:], # J4..end reuse
- ]
+The fast-path avoids command object creation entirely — it reuses the active command instance and re-assigns parameters. This is critical for 50Hz jog streams.
- robot = rtb.Robot(all_links, name=_cached_urdf.name)
- # Apply a tool afterward if needed (see parol6.tools)
- ```
+## Hot path rules
-- Note: The bundled URDF and STL assets have been adjusted from the originals to make robotics‑toolbox‑python Cartesian transforms behave correctly. Programmatic DH edits let you experiment without modifying those files.
+The control loop and streaming command paths are latency-critical. GC pauses are the primary cause of loop timing degradation. The codebase enforces strict allocation discipline:
+### Zero-allocation zones
-## Tools
-Currently supported tools (see `parol6/tools.py`):
-- `NONE` (bare flange)
-- `PNEUMATIC` (pneumatic gripper)
+`execute_step()` and `tick()` run at 100Hz. `do_setup()` for streamable commands runs at ~50Hz (UI sends at status rate). These are **zero heap allocation** zones:
+
+- **No container construction**: No `list(...)`, `[x for x in ...]`, `dict(...)`, `set(...)`, or comprehensions
+- **No string formatting**: No f-strings or `%` formatting (except error paths that run once)
+- **No object creation**: No `dataclass()`, `namedtuple()`, or class instantiation
+- **Pre-allocate all buffers in `__init__`**: numpy arrays, lists, memoryviews
+- **In-place array ops**: `dest[:] = src` (numpy writes into existing buffer)
+- **`np.copyto(dest, src, casting=...)` only when casting is needed** — it's slower than `dest[:] = src` otherwise
+
+Pre-allocate all buffers in `__init__` and reuse them every tick via `dest[:] = src`. StreamingExecutor's `tick()` returns reused `list[float]` — callers must copy if they need values across ticks.
+
+Performance-critical functions (unit conversions, serial frame packing, IK checks, SE3 ops, statistics) are Numba JIT-compiled with `@numba.njit(cache=True)`. First run takes 3-10s for compilation; `warmup_jit()` pre-compiles at startup. Subsequent runs use the cache (~100ms).
+
+---
+
+## Motion profiles
+
+Set the motion profile for all moves:
-Set tool at runtime from the client:
```python
-from parol6 import RobotClient
-with RobotClient() as c:
- c.set_tool("PNEUMATIC")
+client.set_profile("TOPPRA") # Default: time-optimal path-following
```
-Add a new tool by extending `TOOL_CONFIGS` with a name, description, and `transform` (SE3 → 4×4 matrix).
+**Available profiles** (`SETPROFILE`):
+| Profile | Description |
+|---------|-------------|
+| **TOPPRA** | Time-optimal path-following (default) |
+| **RUCKIG** | Jerk-limited point-to-point motion (joint moves only) |
+| **QUINTIC** | C² smooth polynomial trajectories |
+| **TRAPEZOID** | Linear segments with parabolic blends |
+| **LINEAR** | Direct interpolation (no smoothing) |
-## Quickstart
+Note: RUCKIG is point-to-point only and cannot follow Cartesian paths. When RUCKIG is set, Cartesian moves automatically use TOPPRA instead.
+
+### Speed and acceleration
-### Async client (recommended API)
```python
-import asyncio
-from parol6 import AsyncRobotClient
+client.moveJ(target, speed=0.5, accel=0.5) # 50% of joint limits
+client.moveL(target, speed=0.25, accel=1.0) # 25% cart speed, full accel
+client.moveL(target, duration=2.0) # Fixed duration (uses TOPPRA)
+```
-async def main():
- async with AsyncRobotClient(host="127.0.0.1", port=5001) as client:
- ready = await client.wait_for_server_ready(timeout=3)
- print("server ready:", ready)
- print("ping:", await client.ping())
- status = await client.get_status()
- print("status keys:", list(status.keys()) if status else None)
+Speed and accel are fractions of maximum (0.0–1.0), not percentages.
-asyncio.run(main())
-```
+For Cartesian moves, joint limits stay at 100% as hard bounds—the speed fraction only affects the Cartesian velocity constraint.
-### Sync client (convenience wrapper)
-```python
-from parol6 import RobotClient
+## Command system
-with RobotClient(host="127.0.0.1", port=5001) as client:
- print("ping:", client.ping())
- print("pose:", client.get_pose())
-```
+Jog and servo commands (JogJ, JogL, ServoJ, ServoL) automatically use the streaming fast-path — the server de-duplicates stale inputs, reduces ACK chatter, and reuses the active command. Use jog/servo for UI-driven motion or teleoperation; use planned moves (MoveJ, MoveL, etc.) for discrete motions and queued programs.
-### Starting/stopping the controller from Python
-```python
-from parol6 import manage_server, RobotClient
+### Command categories
-mgr = manage_server(host="127.0.0.1", port=5001) # blocks until PING works
-try:
- with RobotClient() as client:
- print("ready:", client.wait_for_server_ready(timeout=3))
- print("ping:", client.ping())
-finally:
- mgr.stop_controller()
-```
+| Category | Examples | Queue | ACK | Execution |
+|----------|----------|-------|-----|-----------|
+| **Query** | PING, GET_STATUS, GET_ANGLES | No | Request/response | Immediate |
+| **System** | RESUME, HALT, SET_IO, SIMULATOR | No | Always | Immediate (even when disabled) |
+| **Planned motion** | MOVEJ, MOVEL, MOVEC, MOVES, MOVEP, HOME | Yes | With command_index | MotionPlanner subprocess → SegmentPlayer |
+| **Streaming motion** | JOGJ, JOGL, SERVOJ, SERVOL | Yes | Fire-and-forget | StreamingExecutor in main loop |
+| **Utility** | DELAY, CHECKPOINT, SET_TOOL | Yes | With command_index | Inline via MotionPlanner (preserves ordering) |
+| **Tool action** | TOOL_ACTION | Yes | With command_index | Inline via MotionPlanner |
+### Command lifecycle
-## Examples
-Three runnable examples are provided under examples/ demonstrating typical usage. They intentionally mix simulator usage:
-- `examples/async_client_quickstart.py` — managed server + `simulator_on()`; demonstrates `status_stream` and a small relative TRF move
-- `examples/sync_client_quickstart.py` — assumes a running controller; no simulator calls; simple ping and status query
-- `examples/manage_server_demo.py` — demonstrates `manage_server()` lifecycle; toggles simulator on/off
+All commands implement the `CommandBase` protocol:
+- `setup(state)` → calls `do_setup(state)`: one-time preparation (trajectory computation, target resolution)
+- `tick(state)` → calls `execute_step(state)`: per-tick execution in control loop, returns `EXECUTING`, `COMPLETED`, or `FAILED`
+- `assign_params(params)`: for streamable commands, updates target without recreating the command
-Run an example from the repository root:
+### Adding a new command
-```bash
-python external/PAROL6-python-API/examples/manage_server_demo.py
+1. Create a class under `parol6/commands/` and decorate with `@register_command(CmdType.YOUR_CMD)`
+2. Define a `PARAMS_TYPE` msgspec Struct for wire validation
+3. Implement `do_setup(state)` and `execute_step(state)` — obey hot path rules
+4. Set `streamable = True` if the command supports high-rate streaming
+5. Add client method to `async_client.py` and `sync_client.py`
+
+## Kinematics and tools
+
+Uses numerical IK via pinokin (C++/Pinocchio bindings). Some Cartesian targets may fail to solve — J4 is particularly sensitive. To adapt to modified hardware, update `parol6/PAROL6_ROBOT.py` (gear ratios, limits) and `parol6/tools.py` (tool transforms).
+
+Currently supported tools (see `parol6/tools.py`):
+- `NONE` (bare flange)
+- `PNEUMATIC` (pneumatic gripper — vertical/horizontal variants)
+- `SSG-48` (adaptive electric gripper — finger/pinch variants)
+- `MSG` (compliant AI stepper gripper — 100mm/150mm/200mm rail variants)
+- `VACUUM` (vacuum gripper)
+
+Set tool at runtime from the client:
+```python
+from parol6 import RobotClient
+with RobotClient() as c:
+ c.set_tool("PNEUMATIC") # default variant
+ c.set_tool("PNEUMATIC", variant_key="horizontal") # horizontal pneumatic
+ c.set_tool("SSG-48", variant_key="pinch") # pinch grip
+ c.set_tool("MSG", variant_key="150mm") # 150mm rail
```
+Add a new tool by creating a `ToolConfig` subclass (or using `ToolConfig` directly) and calling `register_tool("KEY", config)` in `parol6/tools.py`.
+
+
+**Security note:** The controller has no authentication — it accepts any correctly parsed command on its UDP port. Multiple senders are supported by design (e.g., GUI + orchestrator), but deploy only on trusted networks.
## Environment variables
-- `PAROL6_CONTROL_RATE_HZ` — control loop frequency in Hz (default 250)
-- `PAROL6_STATUS_RATE_HZ` — STATUS broadcast rate in Hz (default 50)
-- `PAROL6_STATUS_STALE_S` — skip broadcast if cache is older than this (default 0.2)
+- `PAROL6_CONTROL_RATE_HZ` — control loop frequency in Hz (default 100)
+- `PAROL6_STATUS_RATE_HZ` — STATUS broadcast rate in Hz (default 50; tests use 20 Hz to reduce CI load)
+- `PAROL6_STATUS_STALE_S` — skip broadcast if cache is older than this (default 0.5)
+- `PAROL6_BUSY_THRESHOLD_MS` — busy-loop threshold for loop timing in ms (default 1.0)
+- `PAROL6_PATH_SAMPLES` — trajectory path sampling points (default 50)
+- `PAROL6_MAX_BLEND_LOOKAHEAD` — command blending lookahead count (default 100)
- `PAROL6_MCAST_GROUP` — multicast group for status (default 239.255.0.101)
- `PAROL6_MCAST_PORT` — multicast port for status (default 50510)
- `PAROL6_MCAST_TTL` — multicast TTL (default 1)
@@ -224,38 +375,52 @@ python external/PAROL6-python-API/examples/manage_server_demo.py
- `PAROL6_CONTROLLER_IP` / `PAROL6_CONTROLLER_PORT` — bind host/port for controller
- `PAROL6_FORCE_ACK` — force ACK for motion commands regardless of policy
- `PAROL6_FAKE_SERIAL` — enable simulator ("1"/"true"/"on"); used internally by simulator_on/off
-- `PAROL6_TX_KEEPALIVE_S` — TX keepalive period seconds for serial writes (default 0.2)
-- `PAROL6_HOME_ANGLES_DEG` — CSV of 6 joint angles for home pose in simulation/tests
- `PAROL6_COM_FILE` — path to persistent COM port file (default `~/.parol6/com_port.txt`)
- `PAROL6_COM_PORT` / `PAROL6_SERIAL` — explicit serial port override (e.g., `/dev/ttyUSB0` or `COM3`)
- `PAROL_TRACE` — `1` enables TRACE logging level unless overridden by CLI
-## Controller CLI
-From `parol6-server` (or `python -m parol6.server.controller`):
+## Development setup
+
+For contributors working on this repository:
+
+```bash
+pip install -e .[dev]
+pre-commit install
+```
+
+- Run all pre-commit hooks locally: `pre-commit run -a`
+- Run tests with pytest:
+ - `pytest`
+ - Simulator is used by default (PAROL6_FAKE_SERIAL=1).
+
+### Control rate and performance
+
+The default control loop rate is **100 Hz** (`PAROL6_CONTROL_RATE_HZ=100`). Higher rates up to 250 Hz and even 500 Hz are achievable, but there are diminishing returns in motion smoothness as you go higher.
+
+Even under complete IK failure (worst-case computation), the control loop typically completes in **under 2ms**. However, consistent high-rate performance requires consideration of OS scheduling—the operating system commonly interrupts user-space processes, which can cause jitter at higher rates.
+
+**Note:** Rates above 250 Hz may require increasing IK solving tolerance, as the distance moved per tick becomes smaller and numerical precision becomes a factor.
+
+### Tuning for higher rates
-- `--host` — UDP host address (default 0.0.0.0)
-- `--port` — UDP port (default 5001)
-- `--serial` — Serial port (e.g., `/dev/ttyUSB0` or `COM3`)
-- `--baudrate` — Serial baudrate (default 3000000)
-- `--auto-home` — Queue HOME on startup (default off)
-- `-v` / `-vv` / `-vvv` — Increase verbosity (INFO/DEBUG/TRACE)
-- `-q` / `--quiet` — Set WARNING level
-- `--log-level` — TRACE/DEBUG/INFO/WARNING/ERROR/CRITICAL (overrides -v/-q)
+For consistent high-rate performance:
+1. **Elevate process priority**: On Linux, use `nice -n -20` or `chrt -f 50` for real-time scheduling
+2. **Disable logging**: TRACE and DEBUG logging add significant overhead
+3. **Reduce background load**: Heavy background tasks compete for CPU time
+4. **Consider CPU isolation**: Pin the controller to dedicated cores with `taskset`
## FAQ / Troubleshooting
- I see `Control loop avg period degraded by …` warnings
- - The loop is falling behind. Reduce `PAROL6_CONTROL_RATE_HZ` and ensure TRACE logging is disabled.
+ - The loop is falling behind. Reduce `PAROL6_CONTROL_RATE_HZ` and ensure TRACE and DEBUG logging is disabled.
- Motion feels inconsistent or jittery on my machine
- - Lower the control rate; avoid heavy background tasks; disable TRACE logging.
-- There is a slight stutter when jogging near singularities
- - Known limitation of the current numerical kinematics; try different approach angles or reduce speed.
+ - Lower the control rate; avoid heavy background tasks; disable TRACE and DEBUG logging.
- Some cartesian targets fail to solve (especially around J4)
- Without null-space control in the backend, some poses are hard to reach. Re-plan the path, adjust the target, or change the starting posture. Future backend updates may add null-space manipulation.
## Safety notes
- Keep physical E‑Stop accessible at all times when connected to hardware
-- The headless controller can halt motion via `disable()/stop()` and reacts to E‑Stop inputs when on real hardware
+- The controller can halt motion via `halt()` and reacts to E‑Stop inputs when on real hardware
- Prefer `simulator_on()` for development without hardware and validate motions before switching to real serial
diff --git a/examples/async_client_quickstart.py b/examples/async_client_quickstart.py
index 61f5ea1..6bc7c0f 100644
--- a/examples/async_client_quickstart.py
+++ b/examples/async_client_quickstart.py
@@ -5,12 +5,11 @@
- Shows basic queries and status streaming
Run from the repository root:
- python external/PAROL6-python-API/examples/async_client_quickstart.py
+ python examples/async_client_quickstart.py
"""
import asyncio
-from parol6 import AsyncRobotClient
-from parol6.client.manager import managed_server
+from parol6 import AsyncRobotClient, Robot
HOST = "127.0.0.1"
PORT = 5001
@@ -18,36 +17,36 @@
async def run_client() -> int:
async with AsyncRobotClient(host=HOST, port=PORT, timeout=2.0) as client:
- ready = await client.wait_for_server_ready(timeout=5.0)
+ ready = await client.wait_ready(timeout=5.0)
print(f"server ready: {ready}")
if not ready:
return 1
# Safety: enable simulator for this demo
- ok = await client.simulator_on()
- print(f"simulator_on: {ok}")
+ ok = await client.simulator(True)
+ print(f"simulator(True): {ok}")
print("ping:", await client.ping())
- pose_xyz = await client.get_pose_xyz()
+ pose_xyz = (await client.pose())[:3]
print("pose xyz:", pose_xyz)
# Consume one status broadcast
print("one status frame speeds:")
- async for status in client.status_stream():
- print(status.get("speeds"))
+ async for status in client.stream_status():
+ print(status.speeds)
break
- # Small relative TRF move (safe in simulator)
+ # Small relative move (safe in simulator)
# Move +5mm in Z over 1.0s
- moved = await client.move_cartesian_rel_trf([0, 0, 5, 0, 0, 0], duration=1.0)
- print("move_cartesian_rel_trf ->", moved)
+ moved = await client.move_l([0, 0, 5, 0, 0, 0], rel=True, duration=1.0)
+ print("move_l ->", moved)
return 0
def main() -> None:
# Auto-start and stop controller for this example
- with managed_server(host=HOST, port=PORT, normalize_logs=True):
+ with Robot(host=HOST, port=PORT, normalize_logs=True):
code = asyncio.run(run_client())
raise SystemExit(code)
diff --git a/examples/demo_showcase.py b/examples/demo_showcase.py
new file mode 100644
index 0000000..9d6d763
--- /dev/null
+++ b/examples/demo_showcase.py
@@ -0,0 +1,158 @@
+"""Showcase script demonstrating all motion types.
+
+Exercises move_j, move_l, move_c, move_p, move_s, blended zig-zag,
+tool actions, TCP offset, and precision TRF rotations.
+
+Run:
+ python examples/demo_showcase.py
+"""
+
+import math
+
+from parol6 import Robot
+
+HOST = "127.0.0.1"
+PORT = 5001
+
+with Robot(host=HOST, port=PORT, normalize_logs=True) as robot:
+ rbt = robot.create_sync_client(timeout=2.0)
+ rbt.wait_ready(timeout=5.0)
+ rbt.simulator(True)
+
+ # Select tool and home
+ rbt.select_tool("SSG-48")
+ rbt.tool.calibrate()
+ rbt.home(wait=True)
+
+ # move_j vs move_l (joint-space then linear-cartesian to nearby pose)
+ rbt.move_j(pose=[100, 240, 334, 90, 0, 90], speed=0.5, wait=True)
+ rbt.move_l([-50, 240, 334, 90, 0, 90], speed=0.5, wait=True)
+
+ # ── Curved motion: three vertical circles + sine-wave spline ──────────
+ RADIUS = 30
+ SPEED = 0.4
+ CIRCLE_Y = 340
+ ORIENTATION = [90, 0, 90]
+ CENTERS = [(0, CIRCLE_Y, 280), (0, CIRCLE_Y, 210), (0, CIRCLE_Y, 140)]
+
+ def circle_pt(cx, cz, angle_deg):
+ """Circle in the XZ plane (vertical) at fixed Y."""
+ a = math.radians(angle_deg)
+ return [
+ cx + RADIUS * math.cos(a),
+ CIRCLE_Y,
+ cz + RADIUS * math.sin(a),
+ ] + ORIENTATION
+
+ # Circle 1: full circle with a single move_c (start = end)
+ cx, _, cz = CENTERS[0]
+ rbt.move_j(pose=circle_pt(cx, cz, 0), speed=0.5, wait=True)
+ rbt.move_c(
+ via=circle_pt(cx, cz, 180), end=circle_pt(cx, cz, 0), speed=SPEED, wait=True
+ )
+
+ # Circle 2: two half-circle move_c arcs
+ cx, _, cz = CENTERS[1]
+ rbt.move_l(circle_pt(cx, cz, 0), speed=SPEED, wait=True)
+ rbt.move_c(
+ via=circle_pt(cx, cz, 90), end=circle_pt(cx, cz, 180), speed=SPEED, wait=True
+ )
+ rbt.move_c(
+ via=circle_pt(cx, cz, 270), end=circle_pt(cx, cz, 0), speed=SPEED, wait=True
+ )
+
+ # Circle 3: computed waypoints with move_p
+ cx, _, cz = CENTERS[2]
+ waypoints = [circle_pt(cx, cz, i * 30) for i in range(12)]
+ waypoints.append(waypoints[0])
+ rbt.move_l(waypoints[0], speed=SPEED, wait=True)
+ rbt.move_p(waypoints, speed=SPEED, wait=True)
+
+ # Sine wave through all three circle centers (bottom to top) using move_s
+ SINE_POINTS = 36
+ z_min, z_max = CENTERS[2][2], CENTERS[0][2]
+ spline = []
+ for i in range(SINE_POINTS + 1):
+ t = i / SINE_POINTS
+ z = z_min + t * (z_max - z_min)
+ x = RADIUS * math.cos(t * 3 * 2 * math.pi)
+ spline.append([x, CIRCLE_Y, z] + ORIENTATION)
+ rbt.move_s(spline, speed=SPEED, wait=True)
+
+ # ── Zig-zag scan ─────────────────────────────────────────────────────
+ ZZ_ORI = [-180, -90, -180]
+ ROWS = 6
+ Y_MIN, Y_MAX = 0, 160
+ Z_MIN, Z_MAX = 200, 300
+ X = 280
+ BLEND = 15
+
+ rbt.move_j(pose=[X, 0, 334] + ZZ_ORI, speed=0.5, wait=True)
+ rbt.move_l([X, Y_MIN, Z_MAX + 30] + ZZ_ORI, speed=0.5, wait=True)
+ z_step = (Z_MAX - Z_MIN) / (ROWS - 1)
+ for row in range(ROWS):
+ z = Z_MAX - row * z_step
+ is_last = row == ROWS - 1
+ y_start, y_end = (Y_MIN, Y_MAX) if row % 2 == 0 else (Y_MAX, Y_MIN)
+ rbt.move_l([X, y_start, z] + ZZ_ORI, speed=0.5, r=BLEND, wait=False)
+ rbt.move_l(
+ [X, y_end, z] + ZZ_ORI, speed=0.5, r=0 if is_last else BLEND, wait=False
+ )
+ rbt.wait_motion()
+
+ # ── Precision demo: pencil pick-up and TCP-offset rotations ──────────
+ # Home first — orientation flip from zigzag end requires fresh joint config.
+ rbt.home(wait=True)
+ PRECISION_POSE = [0, -250, 350, -90, 0, -90]
+ rbt.move_j(pose=PRECISION_POSE, speed=0.5, wait=True)
+
+ # Test gripper: two quick close/open cycles
+ rbt.tool.close(speed=1.0)
+ rbt.tool.open(speed=1.0)
+ rbt.tool.close(speed=1.0)
+ rbt.tool.open(speed=1.0)
+
+ # Approach pencil: move_j to 100mm above, descend linearly, grab, retract
+ PENCIL_ABOVE = [-90, -81.6, 161.8, 0, -69.4, 180]
+ rbt.move_j(angles=PENCIL_ABOVE, speed=0.3, wait=True)
+ rbt.move_l([0, 0, -100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+ rbt.tool.close(wait=True)
+ rbt.move_l([0, 0, 100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+ rbt.move_j(pose=PRECISION_POSE, speed=0.3, wait=True)
+
+ # Offset TCP to pencil tip (~100mm exposed below gripper)
+ rbt.set_tcp_offset(-100, 0, 0)
+
+ # Pencil tip traces straight lines (linear precision demo)
+ # Forward/back (tool Z = world -Y at this pose)
+ rbt.move_l([0, 0, 100, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, 0, -200, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, 0, 100, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ # Side to side (tool Y = world -X at this pose)
+ rbt.move_l([0, 60, 0, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, -120, 0, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, 60, 0, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+
+ # Precision TRF rotations — pencil tip stays stationary while wrist rotates
+ SWEEP = 20
+ for axis in range(3):
+ delta = [0, 0, 0, 0, 0, 0]
+ delta[3 + axis] = -SWEEP
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+ delta[3 + axis] = SWEEP
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+ delta[3 + axis] = -SWEEP
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+
+ # Place pencil back: descend linearly, release, retract
+ rbt.set_tcp_offset(0, 0, 0)
+ rbt.move_j(angles=PENCIL_ABOVE, speed=0.3, wait=True)
+ rbt.move_l([0, 0, -100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+ rbt.tool.open(wait=True)
+ rbt.move_l([0, 0, 100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+
+ # Return and finish
+ rbt.move_j(pose=PRECISION_POSE, speed=0.3, wait=True)
+ rbt.home(wait=True)
+ print("Done!")
diff --git a/examples/draw_circle.py b/examples/draw_circle.py
new file mode 100644
index 0000000..34ceb01
--- /dev/null
+++ b/examples/draw_circle.py
@@ -0,0 +1,81 @@
+"""Curved motion commands: move_c, move_p, and move_s.
+
+Draws three circles using progressively more flexible curve commands,
+then connects their centers with a sine-wave spline:
+ 1. Full circle via single move_c (start = end)
+ 2. Two half-circle move_c arcs
+ 3. Computed 12-point waypoints via move_p
+ 4. Sine-wave spline (move_s) through all three centers
+
+Run:
+ python examples/draw_circle.py
+"""
+
+import math
+
+from parol6 import Robot, RobotClient
+
+HOST = "127.0.0.1"
+PORT = 5001
+
+RADIUS = 30
+SPEED = 0.4
+CIRCLE_Y = 340
+ORIENTATION = [90, 0, 90]
+CENTERS = [(0, CIRCLE_Y, 280), (0, CIRCLE_Y, 210), (0, CIRCLE_Y, 140)]
+
+
+def circle_pt(cx, cz, angle_deg):
+ """Circle in the XZ plane (vertical) at fixed Y."""
+ a = math.radians(angle_deg)
+ return [
+ cx + RADIUS * math.cos(a),
+ CIRCLE_Y,
+ cz + RADIUS * math.sin(a),
+ ] + ORIENTATION
+
+
+with Robot(host=HOST, port=PORT, normalize_logs=True):
+ rbt = RobotClient(host=HOST, port=PORT, timeout=2.0)
+ rbt.wait_ready(timeout=5.0)
+ rbt.simulator(True)
+
+ rbt.home(wait=True)
+
+ # Circle 1: full circle with a single move_c (start = end)
+ cx, _, cz = CENTERS[0]
+ rbt.move_j(pose=circle_pt(cx, cz, 0), speed=0.5, wait=True)
+ rbt.move_c(
+ via=circle_pt(cx, cz, 180), end=circle_pt(cx, cz, 0), speed=SPEED, wait=True
+ )
+
+ # Circle 2: two half-circle move_c arcs
+ cx, _, cz = CENTERS[1]
+ rbt.move_l(circle_pt(cx, cz, 0), speed=SPEED, wait=True)
+ rbt.move_c(
+ via=circle_pt(cx, cz, 90), end=circle_pt(cx, cz, 180), speed=SPEED, wait=True
+ )
+ rbt.move_c(
+ via=circle_pt(cx, cz, 270), end=circle_pt(cx, cz, 0), speed=SPEED, wait=True
+ )
+
+ # Circle 3: computed 12-point waypoints with move_p
+ cx, _, cz = CENTERS[2]
+ waypoints = [circle_pt(cx, cz, i * 30) for i in range(12)]
+ waypoints.append(waypoints[0])
+ rbt.move_l(waypoints[0], speed=SPEED, wait=True)
+ rbt.move_p(waypoints, speed=SPEED, wait=True)
+
+ # Sine-wave spline through all three circle centers (bottom to top)
+ SINE_POINTS = 36
+ z_min, z_max = CENTERS[2][2], CENTERS[0][2]
+ spline = []
+ for i in range(SINE_POINTS + 1):
+ t = i / SINE_POINTS
+ z = z_min + t * (z_max - z_min)
+ x = RADIUS * math.cos(t * 3 * 2 * math.pi)
+ spline.append([x, CIRCLE_Y, z] + ORIENTATION)
+ rbt.move_s(spline, speed=SPEED, wait=True)
+
+ rbt.home(wait=True)
+ print("Done!")
diff --git a/examples/manage_server_demo.py b/examples/manage_server_demo.py
index 882b6c2..f5192eb 100644
--- a/examples/manage_server_demo.py
+++ b/examples/manage_server_demo.py
@@ -1,25 +1,26 @@
"""
Manage server lifecycle demonstration for PAROL6.
-- Starts a local headless controller at 127.0.0.1:5001
+- Starts a local controller at 127.0.0.1:5001
- Waits until ready, then connects with RobotClient
- Toggles simulator ON for a safe demo motion, then OFF
- Stops the controller on exit
Run from the repository root:
- python external/PAROL6-python-API/examples/manage_server_demo.py
+ python examples/manage_server_demo.py
"""
-from parol6 import manage_server, RobotClient
+from parol6 import Robot, RobotClient
HOST = "127.0.0.1"
PORT = 5001
def main() -> None:
- mgr = manage_server(host=HOST, port=PORT, normalize_logs=True)
+ robot = Robot(host=HOST, port=PORT, normalize_logs=True)
+ robot.start()
try:
with RobotClient(host=HOST, port=PORT, timeout=2.0) as client:
- ready = client.wait_for_server_ready(timeout=5.0)
+ ready = client.wait_ready(timeout=5.0)
print(f"server ready: {ready}")
if not ready:
raise SystemExit(1)
@@ -27,21 +28,21 @@ def main() -> None:
print("ping:", client.ping())
# Enable simulator for a safe motion
- sim_on = client.simulator_on()
- print("simulator_on:", sim_on)
+ sim_on = client.simulator(True)
+ print("simulator(True):", sim_on)
if sim_on:
- # Small relative TRF move: +3mm in Z over 0.8s
- moved = client.move_cartesian_rel_trf([0, 0, 3, 0, 0, 0], duration=0.8)
- print("move_cartesian_rel_trf ->", moved)
+ # Small relative move: +3mm in Z over 0.8s
+ moved = client.move_l([0, 0, 3, 0, 0, 0], rel=True, duration=0.8)
+ print("move_l ->", moved)
# Demonstrate toggling simulator off again (no motion follows)
- sim_off = client.simulator_off()
- print("simulator_off:", sim_off)
+ sim_off = client.simulator(False)
+ print("simulator(False):", sim_off)
raise SystemExit(0)
finally:
- mgr.stop_controller()
+ robot.stop()
if __name__ == "__main__":
diff --git a/examples/precision.py b/examples/precision.py
new file mode 100644
index 0000000..836845f
--- /dev/null
+++ b/examples/precision.py
@@ -0,0 +1,76 @@
+"""Precision demo: TCP offset, TRF moves, and orientation rotations.
+
+Picks up a pencil, offsets the TCP to the pencil tip, then demonstrates
+linear and rotational moves in the Tool Reference Frame (TRF). The pencil
+tip stays at a fixed point while the wrist rotates around it.
+
+Run:
+ python examples/precision.py
+"""
+
+from parol6 import Robot
+
+HOST = "127.0.0.1"
+PORT = 5001
+
+with Robot(host=HOST, port=PORT, normalize_logs=True) as robot:
+ rbt = robot.create_sync_client(timeout=2.0)
+ rbt.wait_ready(timeout=5.0)
+ rbt.simulator(True)
+
+ rbt.select_tool("SSG-48")
+ rbt.tool.calibrate()
+ rbt.home(wait=True)
+
+ PRECISION_POSE = [0, -250, 350, -90, 0, -90]
+ rbt.move_j(pose=PRECISION_POSE, speed=0.5, wait=True)
+
+ # Test gripper: two quick close/open cycles
+ rbt.tool.close(speed=1.0)
+ rbt.tool.open(speed=1.0)
+ rbt.tool.close(speed=1.0)
+ rbt.tool.open(speed=1.0)
+
+ # Approach pencil: move_j to 100mm above, descend linearly, grab, retract
+ PENCIL_ABOVE = [-90, -81.6, 161.8, 0, -69.4, 180]
+ rbt.move_j(angles=PENCIL_ABOVE, speed=0.3, wait=True)
+ rbt.move_l([0, 0, -100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+ rbt.tool.close(wait=True)
+ rbt.move_l([0, 0, 100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+ rbt.move_j(pose=PRECISION_POSE, speed=0.3, wait=True)
+
+ # Offset TCP to pencil tip (~100mm exposed below gripper)
+ rbt.set_tcp_offset(-100, 0, 0)
+
+ # Pencil tip traces straight lines (linear precision demo)
+ # Forward/back (tool Z = world -Y at this pose)
+ rbt.move_l([0, 0, 100, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, 0, -200, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, 0, 100, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ # Side to side (tool Y = world -X at this pose)
+ rbt.move_l([0, 60, 0, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, -120, 0, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+ rbt.move_l([0, 60, 0, 0, 0, 0], speed=0.3, frame="TRF", rel=True, wait=True)
+
+ # Precision TRF rotations — pencil tip stays stationary while wrist rotates
+ SWEEP = 20
+ for axis in range(3):
+ delta = [0, 0, 0, 0, 0, 0]
+ delta[3 + axis] = -SWEEP
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+ delta[3 + axis] = SWEEP
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+ delta[3 + axis] = -SWEEP
+ rbt.move_l(delta, speed=0.5, frame="TRF", rel=True, wait=True)
+
+ # Place pencil back: descend linearly, release, retract
+ rbt.set_tcp_offset(0, 0, 0)
+ rbt.move_j(angles=PENCIL_ABOVE, speed=0.3, wait=True)
+ rbt.move_l([0, 0, -100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+ rbt.tool.open(wait=True)
+ rbt.move_l([0, 0, 100, 0, 0, 0], rel=True, speed=0.2, wait=True)
+
+ rbt.move_j(pose=PRECISION_POSE, speed=0.3, wait=True)
+ rbt.home(wait=True)
+ print("Done!")
diff --git a/examples/speed_comparison.py b/examples/speed_comparison.py
new file mode 100644
index 0000000..032c936
--- /dev/null
+++ b/examples/speed_comparison.py
@@ -0,0 +1,74 @@
+"""Speed and motion profile comparison.
+
+Runs the same L-shaped path at different speeds and with different
+trajectory profiles, timing each run. Useful for tuning speed and
+profile selection for your application. Runs in the built-in simulator.
+
+Run:
+ python examples/speed_comparison.py
+"""
+
+import time
+
+from parol6 import Robot, RobotClient
+
+HOST = "127.0.0.1"
+PORT = 5001
+
+ORIENTATION = [90, 0, 90]
+START = [0, 280, 300] + ORIENTATION
+PATH = [
+ [80, 280, 300] + ORIENTATION,
+ [80, 280, 150] + ORIENTATION,
+ [-80, 280, 150] + ORIENTATION,
+]
+
+
+def run_path(rbt: RobotClient, speed: float) -> float:
+ rbt.move_l(START, speed=1.0, wait=True)
+ t0 = time.time()
+ for wp in PATH:
+ rbt.move_l(wp, speed=speed, wait=True)
+ return time.time() - t0
+
+
+with Robot(host=HOST, port=PORT, normalize_logs=True):
+ rbt = RobotClient(host=HOST, port=PORT, timeout=2.0)
+ rbt.wait_ready(timeout=5.0)
+ rbt.simulator(True)
+
+ print("Homing...")
+ rbt.home(wait=True)
+
+ # -- Speed comparison --
+ print("\n--- Speed Comparison (TOPPRA) ---\n")
+ rbt.select_profile("TOPPRA")
+
+ speed_results = []
+ for speed in [0.3, 0.6, 1.0]:
+ elapsed = run_path(rbt, speed)
+ speed_results.append((speed, elapsed))
+ print(f" speed={speed:.1f} time={elapsed:.2f}s")
+
+ # -- Profile comparison --
+ print("\n--- Profile Comparison (speed=0.5) ---\n")
+
+ profile_results = []
+ for profile in ["TOPPRA", "TRAPEZOID"]:
+ rbt.select_profile(profile)
+ elapsed = run_path(rbt, 0.5)
+ profile_results.append((profile, elapsed))
+ print(f" {profile:12s} time={elapsed:.2f}s")
+
+ rbt.select_profile("TOPPRA")
+
+ print("\n--- Summary ---")
+ print(
+ f" Speed 0.3 vs 1.0: {speed_results[0][1] / speed_results[2][1]:.1f}x slower"
+ )
+ print(
+ f" TOPPRA vs TRAPEZOID: {profile_results[0][1] / profile_results[1][1]:.2f}x ratio"
+ )
+
+ rbt.home(wait=True)
+ print("Done!")
diff --git a/examples/sync_client_quickstart.py b/examples/sync_client_quickstart.py
index 686e6f5..8492b0c 100644
--- a/examples/sync_client_quickstart.py
+++ b/examples/sync_client_quickstart.py
@@ -1,28 +1,35 @@
"""
Sync client quickstart for PAROL6.
-- Assumes a controller is already running at 127.0.0.1:5001
-- Does not enable simulator in this example
-- Performs ping and basic queries
+- Starts a local headless controller
+- Enables simulator mode for safety
+- Performs ping, queries, and a small move
Run from the repository root:
- python external/PAROL6-python-API/examples/sync_client_quickstart.py
+ python examples/sync_client_quickstart.py
"""
-from parol6 import RobotClient
+from parol6 import Robot, RobotClient
HOST = "127.0.0.1"
PORT = 5001
def main() -> None:
- with RobotClient(host=HOST, port=PORT, timeout=2.0) as client:
- ready = client.wait_for_server_ready(timeout=3.0)
- print(f"server ready: {ready}")
- print("ping:", client.ping())
- print("pose xyz:", client.get_pose_xyz())
- print("angles:", client.get_angles())
- code = 0 if ready else 1
- raise SystemExit(code)
+ with Robot(host=HOST, port=PORT, normalize_logs=True):
+ with RobotClient(host=HOST, port=PORT, timeout=2.0) as client:
+ ready = client.wait_ready(timeout=5.0)
+ print(f"server ready: {ready}")
+ if not ready:
+ raise SystemExit(1)
+
+ client.simulator(True)
+ print("ping:", client.ping())
+ print("pose xyz:", client.pose()[:3])
+ print("angles:", client.angles())
+
+ # Small relative move (safe in simulator)
+ moved = client.move_l([0, 0, 5, 0, 0, 0], rel=True, duration=1.0)
+ print("move_l ->", moved)
if __name__ == "__main__":
diff --git a/examples/zigzag_scan.py b/examples/zigzag_scan.py
new file mode 100644
index 0000000..37c0f6a
--- /dev/null
+++ b/examples/zigzag_scan.py
@@ -0,0 +1,47 @@
+"""Zig-zag raster scan with blended corners.
+
+Queues linear moves with blend radius `r` so the controller rounds
+the corners into one smooth, continuous path. Each row alternates
+sweep direction to minimise repositioning.
+
+Run:
+ python examples/zigzag_scan.py
+"""
+
+from parol6 import Robot, RobotClient
+
+HOST = "127.0.0.1"
+PORT = 5001
+
+ZZ_ORI = [-180, -90, -180]
+ROWS = 6
+Y_MIN, Y_MAX = 0, 160
+Z_MIN, Z_MAX = 200, 300
+X = 280
+BLEND = 15
+
+with Robot(host=HOST, port=PORT, normalize_logs=True):
+ rbt = RobotClient(host=HOST, port=PORT, timeout=2.0)
+ rbt.wait_ready(timeout=5.0)
+ rbt.simulator(True)
+
+ rbt.home(wait=True)
+
+ # Approach the scan area
+ rbt.move_j(pose=[X, 0, 334] + ZZ_ORI, speed=0.5, wait=True)
+ rbt.move_l([X, Y_MIN, Z_MAX + 30] + ZZ_ORI, speed=0.5, wait=True)
+
+ # Raster scan — queue every move without waiting so the controller
+ # can blend adjacent segments through the blend radius.
+ z_step = (Z_MAX - Z_MIN) / (ROWS - 1)
+ for row in range(ROWS):
+ z = Z_MAX - row * z_step
+ is_last = row == ROWS - 1
+ y_start, y_end = (Y_MIN, Y_MAX) if row % 2 == 0 else (Y_MAX, Y_MIN)
+ rbt.move_l([X, y_start, z] + ZZ_ORI, speed=0.5, r=BLEND, wait=False)
+ rbt.move_l(
+ [X, y_end, z] + ZZ_ORI, speed=0.5, r=0 if is_last else BLEND, wait=False
+ )
+
+ rbt.wait_motion()
+ print("Done!")
diff --git a/parol6/PAROL6_ROBOT.py b/parol6/PAROL6_ROBOT.py
index baa490e..28b1ede 100644
--- a/parol6/PAROL6_ROBOT.py
+++ b/parol6/PAROL6_ROBOT.py
@@ -1,16 +1,14 @@
-# Clean, hierarchical, vectorized, and typed robot configuration and helpers
+"""PAROL6 robot kinematics, limits, and configuration."""
+
+import atexit
import logging
-from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path
-from typing import Final, Union
+from typing import Final
import numpy as np
-import roboticstoolbox as rtb
-from numpy.typing import ArrayLike, NDArray
-from roboticstoolbox import ET, Link
-from roboticstoolbox.tools.urdf import URDF
-from spatialmath import SE3
+from numpy.typing import NDArray
+from pinokin import Robot
from parol6.tools import get_tool_transform
@@ -19,8 +17,6 @@
# -----------------------------
# Typing aliases
# -----------------------------
-IndexArg = Union[int, NDArray[np.int_], None]
-
Vec6f = NDArray[np.float64]
Vec6i = NDArray[np.int32]
Limits2f = NDArray[np.float64] # shape (6,2)
@@ -28,7 +24,6 @@
# -----------------------------
# Kinematics and conversion constants
# -----------------------------
-Joint_num = 6
Microstep = 32
steps_per_revolution = 200
@@ -54,71 +49,63 @@
dtype=np.float64,
)
-_joint_limits_radian: Limits2f = np.deg2rad(_joint_limits_degree).astype(np.float64)
-
+_joint_limits_radian: Limits2f = np.deg2rad(_joint_limits_degree)
-# URDF-based robot model (frames/limits aligned with controller)
-def _load_urdf() -> URDF:
- """Load and cache the URDF object for robot reconstruction."""
- base_path = Path(__file__).resolve().parent / "urdf_model"
- urdf_path = base_path / "urdf" / "PAROL6.urdf"
- urdf_string = urdf_path.read_text(encoding="utf-8")
- return URDF.loadstr(urdf_string, str(urdf_path), base_path=base_path)
+# URDF path for pinokin Robot
+_urdf_path = str(
+ Path(__file__).resolve().parent / "urdf_model" / "urdf" / "PAROL6.urdf"
+)
-# Cache the URDF object (parsed once, reused for robot reconstruction)
-_cached_urdf = _load_urdf()
-
-# Current robot instance (rebuilt when tool changes)
-robot = None
+# Current robot instance (tool transform applied in-place)
+robot: Robot = Robot(_urdf_path)
-def apply_tool(tool_name: str) -> None:
+def apply_tool(
+ tool_name: str,
+ variant_key: str = "",
+ tcp_offset_m: tuple[float, float, float] | None = None,
+) -> None:
"""
- Rebuild the robot with the specified tool as an additional link.
- This ensures the tool transform is properly integrated into the kinematic chain
- and affects forward kinematics calculations.
+ Apply tool transform to the robot model.
Parameters
----------
tool_name : str
- Name of the tool from tools.TOOL_CONFIGS
+ Name of the tool from the tool registry
+ variant_key : str
+ Optional variant key for the tool
+ tcp_offset_m : tuple, optional
+ Additional (x, y, z) offset in meters, composed in the tool's local frame.
"""
- global robot
-
- # Get tool transform
- T_tool = get_tool_transform(tool_name)
-
- # Get the base elinks from cached URDF
- base_links = list(_cached_urdf.elinks)
-
- # Create a tool link if there's a non-identity transform
- if tool_name != "NONE" and not np.allclose(T_tool, np.eye(4)):
- # Create an ELink for the tool
- # The tool is a fixed transform from the last joint
- tool_link = Link(
- ET.SE3(SE3(T_tool)),
- name=f"tool_{tool_name}",
- parent=base_links[-1], # Attach to the last link
- )
-
- # Add tool link to the chain
- all_links = base_links + [tool_link]
- logger.info(f"Applied tool '{tool_name}' to robot model as link")
+ T_tool = get_tool_transform(tool_name, variant_key=variant_key or None)
+
+ if tcp_offset_m is not None and any(v != 0 for v in tcp_offset_m):
+ T_offset = np.eye(4, dtype=np.float64)
+ T_offset[0, 3] = tcp_offset_m[0]
+ T_offset[1, 3] = tcp_offset_m[1]
+ T_offset[2, 3] = tcp_offset_m[2]
+ T_tool = T_tool @ T_offset
+
+ label = f"'{tool_name}:{variant_key}'" if variant_key else f"'{tool_name}'"
+ if not np.allclose(T_tool, np.eye(4)):
+ robot.set_tool_transform(T_tool)
+ logger.info(f"Applied tool {label} to robot model")
else:
- all_links = base_links
- logger.info(f"Applied tool '{tool_name}' (no additional link needed)")
-
- # Create robot with the complete link chain
- robot = rtb.Robot(
- all_links,
- name=_cached_urdf.name,
- )
+ robot.clear_tool_transform()
+ logger.info(f"Applied tool {label} (identity)")
# Initialize with no tool
apply_tool("NONE")
+
+@atexit.register
+def _cleanup_robot() -> None:
+ global robot
+ del robot
+
+
# -----------------------------
# Additional raw parameter arrays
# -----------------------------
@@ -128,231 +115,108 @@ def apply_tool(tool_name: str) -> None:
)
# Joint speeds (steps/s)
-_joint_max_speed: Vec6i = np.array(
- [6500, 18000, 20000, 20000, 22000, 22000], dtype=np.int32
+_joint_max_speed_hw: Vec6i = np.array(
+ [15000, 25000, 32000, 10000, 10000, 27000], dtype=np.int32
)
_joint_min_speed: Vec6i = np.array([100, 100, 100, 100, 100, 100], dtype=np.int32)
-# Jog speeds (steps/s)
-_joint_max_jog_speed: Vec6i = np.array(
- [1500, 3000, 3600, 7000, 7000, 18000], dtype=np.int32
-)
+# Effective max speeds with scaling applied
+_joint_max_speed: Vec6i = _joint_max_speed_hw.copy()
+
+# Jog speeds (steps/s) - 80% of scaled max for safety margin during jogging
+_joint_max_jog_speed: Vec6i = (_joint_max_speed * 0.8).astype(np.int32)
_joint_min_jog_speed: Vec6i = np.array([100, 100, 100, 100, 100, 100], dtype=np.int32)
-# Joint accelerations (rad/s^2) - scalar limits applied per joint
-_joint_max_acc_rad: float = float(32000)
-_joint_min_acc_rad: float = float(100)
+# Joint accelerations (steps/s^2) per joint
+# Derived: a_max = v_max * 3 (reach max speed in ~0.33s)
+_joint_max_acc: Vec6i = (_joint_max_speed * 3).astype(np.int32)
# Maximum jerk limits (steps/s^3) per joint
-_joint_max_jerk: Vec6i = np.array([1600, 1000, 1100, 3000, 3000, 2000], dtype=np.int32)
+# Derived: j_max = a_max * 10 (reach max accel in ~0.1s)
+_joint_max_jerk: Vec6i = (_joint_max_acc * 10).astype(np.int32)
-# Cartesian limits
-_cart_linear_velocity_min_JOG: float = 0.002
-_cart_linear_velocity_max_JOG: float = 0.06
+# Compute joint angular velocities/accelerations in rad/s
+_joint_speed_rad = (
+ _joint_max_speed.astype(float) * radian_per_step_constant / _joint_ratio
+)
+_joint_acc_rad = _joint_max_acc.astype(float) * radian_per_step_constant / _joint_ratio
+_joint_jerk_rad = (
+ _joint_max_jerk.astype(float) * radian_per_step_constant / _joint_ratio
+)
-_cart_linear_velocity_min: float = 0.002
-_cart_linear_velocity_max: float = 0.06
-_cart_linear_acc_min: float = 0.002
-_cart_linear_acc_max: float = 0.06
+# Pre-computed Cartesian limits from Jacobian pseudoinverse workspace sampling.
+# Derived from _compute_tcp_velocity_at_config() over 500/200/200 random configs
+# with seeds 42/43/44, using median velocity and mean angular rates from wrist joints.
+# Values are floored to reasonable precision to avoid false precision.
+#
+# Linear units: mm/s, mm/s^2, mm/s^3
+# Angular units: deg/s, deg/s^2, deg/s^3
+_cart_linear_velocity_max: float = 200
+_cart_angular_velocity_max: float = 100
+_cart_linear_acc_max: float = 550
+_cart_angular_acc_max: float = 275
+_cart_linear_jerk_max: float = 5500
+_cart_angular_jerk_max: float = 2750
+
+# Min values as 1% of max
+_cart_linear_velocity_min: float = _cart_linear_velocity_max * 0.01
+_cart_angular_velocity_min: float = _cart_angular_velocity_max * 0.01
+_cart_linear_acc_min: float = _cart_linear_acc_max * 0.01
+_cart_angular_acc_min: float = _cart_angular_acc_max * 0.01
+_cart_linear_jerk_min: float = _cart_linear_jerk_max * 0.01
+_cart_angular_jerk_min: float = _cart_angular_jerk_max * 0.01
+
+# Jog limits (80% of max for safety margin)
+_cart_linear_velocity_max_JOG: float = _cart_linear_velocity_max * 0.8
+_cart_linear_velocity_min_JOG: float = _cart_linear_velocity_min
+
+
+def log_derived_limits() -> None:
+ """Log the derived Cartesian limits. Call at controller startup."""
+ logger.info("=== Derived Kinematic Limits ===")
+ logger.info("Joint velocity (rad/s): %s", np.round(_joint_speed_rad, 3))
+ logger.info("Joint accel (rad/s²): %s", np.round(_joint_acc_rad, 2))
+ logger.info("Joint jerk (rad/s³): %s", np.round(_joint_jerk_rad, 1))
+ logger.info(
+ "Cartesian linear velocity: %.1f mm/s (jog: %.1f mm/s)",
+ _cart_linear_velocity_max,
+ _cart_linear_velocity_max_JOG,
+ )
+ logger.info("Cartesian angular velocity: %.2f deg/s", _cart_angular_velocity_max)
+ logger.info(
+ "Cartesian linear accel: %.1f mm/s², angular: %.2f deg/s²",
+ _cart_linear_acc_max,
+ _cart_angular_acc_max,
+ )
+ logger.info(
+ "Cartesian linear jerk: %.1f mm/s³, angular: %.2f deg/s³",
+ _cart_linear_jerk_max,
+ _cart_angular_jerk_max,
+ )
+ logger.info("================================")
-_cart_angular_velocity_min: float = 0.7 # deg/s
-_cart_angular_velocity_max: float = 25.0 # deg/s
# Standby positions
_standby_deg: Vec6f = np.array([90.0, -90.0, 180.0, 0.0, 0.0, 180.0], dtype=np.float64)
-_standby_rad: Vec6f = np.deg2rad(_standby_deg).astype(np.float64)
-
-
-# -----------------------------
-# Vectorized helpers (ops)
-# -----------------------------
-def _apply_ratio(values: NDArray, idx: IndexArg) -> NDArray:
- """
- Apply per-joint gear ratio.
- If idx is None, broadcast ratio across last dimension (length 6).
- If idx is an int or ndarray of ints, select ratios accordingly.
- """
- if idx is None:
- return values * _joint_ratio
- idx_arr = np.asarray(idx)
- return values * _joint_ratio[idx_arr]
-
-
-def deg_to_steps(deg: ArrayLike, idx: IndexArg = None) -> np.int32 | NDArray[np.int32]:
- """Degrees to steps (gear ratio aware). Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(deg):
- return np.int32((deg / degree_per_step_constant) * _joint_ratio[idx]) # type: ignore
- deg_arr = np.asarray(deg, dtype=np.float64)
- steps_f = _apply_ratio(deg_arr / degree_per_step_constant, idx)
- return steps_f.astype(np.int32, copy=False)
-
-
-def steps_to_deg(
- steps: ArrayLike, idx: IndexArg = None
-) -> np.float64 | NDArray[np.float64]:
- """Steps to degrees (gear ratio aware). Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(steps):
- return np.float64((steps * degree_per_step_constant) / _joint_ratio[idx]) # type: ignore
- steps_arr = np.asarray(steps, dtype=np.float64)
- ratio = _joint_ratio if idx is None else _joint_ratio[np.asarray(idx)]
- return (steps_arr * degree_per_step_constant) / ratio
-
-
-def rad_to_steps(rad: ArrayLike, idx: IndexArg = None) -> np.int32 | NDArray[np.int32]:
- """Radians to steps. Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(rad):
- return np.int32((rad / radian_per_step_constant) * _joint_ratio[idx]) # type: ignore
- rad_arr = np.asarray(rad, dtype=np.float64)
- deg_arr = np.rad2deg(rad_arr)
- return deg_to_steps(deg_arr, idx)
-
-
-def steps_to_rad(
- steps: ArrayLike, idx: IndexArg = None
-) -> np.float64 | NDArray[np.float64]:
- """Steps to radians. Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(steps):
- return np.float64((steps * radian_per_step_constant) / _joint_ratio[idx]) # type: ignore
- deg_arr = steps_to_deg(steps, idx)
- return np.deg2rad(deg_arr)
-
-
-def speed_steps_to_deg(
- sps: ArrayLike, idx: IndexArg = None
-) -> np.float64 | NDArray[np.float64]:
- """Speed: steps/s to deg/s. Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(sps):
- return np.float64((sps * degree_per_step_constant) / _joint_ratio[idx]) # type: ignore
- sps_arr = np.asarray(sps, dtype=np.float64)
- ratio = _joint_ratio if idx is None else _joint_ratio[np.asarray(idx)]
- return (sps_arr * degree_per_step_constant) / ratio
-
-
-def speed_deg_to_steps(
- dps: ArrayLike, idx: IndexArg = None
-) -> np.int32 | NDArray[np.int32]:
- """Speed: deg/s to steps/s. Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(dps):
- return np.int32((dps / degree_per_step_constant) * _joint_ratio[idx]) # type: ignore
- dps_arr = np.asarray(dps, dtype=np.float64)
- stepsps = _apply_ratio(dps_arr / degree_per_step_constant, idx)
- return stepsps.astype(np.int32, copy=False)
-
-
-def speed_steps_to_rad(
- sps: ArrayLike, idx: IndexArg = None
-) -> np.float64 | NDArray[np.float64]:
- """Speed: steps/s to rad/s. Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(sps):
- return np.float64((sps * radian_per_step_constant) / _joint_ratio[idx]) # type: ignore
- sps_arr = np.asarray(sps, dtype=np.float64)
- ratio = _joint_ratio if idx is None else _joint_ratio[np.asarray(idx)]
- return (sps_arr * radian_per_step_constant) / ratio
-
-
-def speed_rad_to_steps(
- rps: ArrayLike, idx: IndexArg = None
-) -> np.int32 | NDArray[np.int32]:
- """Speed: rad/s to steps/s. Fast scalar path when idx is int."""
- if isinstance(idx, (int, np.integer)) and np.isscalar(rps):
- return np.int32((rps / radian_per_step_constant) * _joint_ratio[idx]) # type: ignore
- rps_arr = np.asarray(rps, dtype=np.float64)
- stepsps = _apply_ratio(rps_arr / radian_per_step_constant, idx)
- return stepsps.astype(np.int32, copy=False)
-
-
-def clip_speed_to_joint_limits(sps: ArrayLike) -> NDArray[np.int32]:
- """Clip steps/s vector to per-joint limits (int32)."""
- sps_arr = np.asarray(sps, dtype=np.int32)
- return np.clip(sps_arr, -_joint_max_speed, _joint_max_speed).astype(
- np.int32, copy=False
- )
-
-
-def clamp_steps_delta(
- prev_steps: ArrayLike, target_steps: ArrayLike, dt: float, safety: float = 1.2
-) -> NDArray[np.int32]:
- """
- Clamp per-tick step change to max allowed based on joint.max_speed and dt.
- Returns int32 array.
- """
- prev_arr = np.asarray(prev_steps, dtype=np.int32)
- tgt_arr = np.asarray(target_steps, dtype=np.int32)
- step_diff = tgt_arr - prev_arr
- max_step_diff = (_joint_max_speed * dt * safety).astype(np.int32)
- sign = np.sign(step_diff).astype(np.int32)
- over = np.abs(step_diff) > max_step_diff
- clamped = tgt_arr.copy()
- clamped[over] = prev_arr[over] + sign[over] * max_step_diff[over]
- return clamped.astype(np.int32, copy=False)
-
-
-# -----------------------------
-# Limits (steps) derived from deg
-# -----------------------------
-_joint_limits_steps_list: list[list[int]] = []
-for j in range(6):
- mn_deg, mx_deg = (
- float(_joint_limits_degree[j, 0]),
- float(_joint_limits_degree[j, 1]),
- )
- mn_steps = int(deg_to_steps(mn_deg, idx=j))
- mx_steps = int(deg_to_steps(mx_deg, idx=j))
- _joint_limits_steps_list.append([mn_steps, mx_steps])
-_joint_limits_steps: NDArray[np.int32] = np.array(
- _joint_limits_steps_list, dtype=np.int32
-) # (6,2)
# -----------------------------
# Typed hierarchical API
# -----------------------------
-@dataclass(frozen=True)
-class JointLimits:
- deg: Limits2f
- rad: Limits2f
- steps: NDArray[np.int32]
-
-
-@dataclass(frozen=True)
-class JointJogSpeed:
- max: Vec6i
- min: Vec6i
-
-
-@dataclass(frozen=True)
-class JointSpeed:
- max: Vec6i
- min: Vec6i
- jog: JointJogSpeed
-
-
-@dataclass(frozen=True)
-class JointAcc:
- max_rad: float
- min_rad: float
-
-
-@dataclass(frozen=True)
-class JointJerk:
- max: Vec6i
-
-
-@dataclass(frozen=True)
-class Standby:
- deg: Vec6f
- rad: Vec6f
-
-
@dataclass(frozen=True)
class Joint:
- limits: JointLimits
- speed: JointSpeed
- acc: JointAcc
- jerk: JointJerk
- ratio: NDArray[np.float64]
- standby: Standby
+ """Minimal joint configuration - all values in native units (deg for position, steps/s for speed)."""
+
+ limits_deg: Limits2f # Position limits in degrees [6, 2]
+ speed_max: Vec6i # Max speed in steps/s
+ speed_min: Vec6i # Min speed in steps/s
+ jog_speed_max: Vec6i # Max jog speed in steps/s
+ jog_speed_min: Vec6i # Min jog speed in steps/s
+ acc_max: Vec6i # Max acceleration in steps/s²
+ jerk_max: Vec6i # Max jerk in steps/s³
+ ratio: Vec6f # Gear ratio per joint
+ standby_deg: Vec6f # Standby position in degrees
@dataclass(frozen=True)
@@ -371,12 +235,20 @@ class CartVel:
@dataclass(frozen=True)
class CartAcc:
linear: RangeF
+ angular: RangeF
+
+
+@dataclass(frozen=True)
+class CartJerk:
+ linear: RangeF
+ angular: RangeF
@dataclass(frozen=True)
class Cart:
vel: CartVel
acc: CartAcc
+ jerk: CartJerk
@dataclass(frozen=True)
@@ -387,47 +259,16 @@ class Conv:
deg_sec_to_rad_sec: float
-@dataclass(frozen=True)
-class Ops:
- # Use Callable[..., T] to allow optional idx parameter without arity errors in type checkers
- deg_to_steps: Callable[..., np.int32 | NDArray[np.int32]]
- steps_to_deg: Callable[..., np.float64 | NDArray[np.float64]]
- rad_to_steps: Callable[..., np.int32 | NDArray[np.int32]]
- steps_to_rad: Callable[..., np.float64 | NDArray[np.float64]]
- speed_deg_to_steps: Callable[..., np.int32 | NDArray[np.int32]]
- speed_steps_to_deg: Callable[..., np.float64 | NDArray[np.float64]]
- speed_rad_to_steps: Callable[..., np.int32 | NDArray[np.int32]]
- speed_steps_to_rad: Callable[..., np.float64 | NDArray[np.float64]]
- clip_speed_to_joint_limits: Callable[[ArrayLike], NDArray[np.int32]]
- clamp_steps_delta: Callable[[ArrayLike, ArrayLike, float, float], NDArray[np.int32]]
-
-
joint: Final[Joint] = Joint(
- limits=JointLimits(
- deg=_joint_limits_degree,
- rad=_joint_limits_radian,
- steps=_joint_limits_steps,
- ),
- speed=JointSpeed(
- max=_joint_max_speed,
- min=_joint_min_speed,
- jog=JointJogSpeed(
- max=_joint_max_jog_speed,
- min=_joint_min_jog_speed,
- ),
- ),
- acc=JointAcc(
- max_rad=_joint_max_acc_rad,
- min_rad=_joint_min_acc_rad,
- ),
- jerk=JointJerk(
- max=_joint_max_jerk,
- ),
+ limits_deg=_joint_limits_degree,
+ speed_max=_joint_max_speed,
+ speed_min=_joint_min_speed,
+ jog_speed_max=_joint_max_jog_speed,
+ jog_speed_min=_joint_min_jog_speed,
+ acc_max=_joint_max_acc,
+ jerk_max=_joint_max_jerk,
ratio=_joint_ratio,
- standby=Standby(
- deg=_standby_deg,
- rad=_standby_rad,
- ),
+ standby_deg=_standby_deg,
)
cart: Final[Cart] = Cart(
@@ -440,6 +281,11 @@ class Ops:
),
acc=CartAcc(
linear=RangeF(min=_cart_linear_acc_min, max=_cart_linear_acc_max),
+ angular=RangeF(min=_cart_angular_acc_min, max=_cart_angular_acc_max),
+ ),
+ jerk=CartJerk(
+ linear=RangeF(min=_cart_linear_jerk_min, max=_cart_linear_jerk_max),
+ angular=RangeF(min=_cart_angular_jerk_min, max=_cart_angular_jerk_max),
),
)
@@ -450,124 +296,6 @@ class Ops:
deg_sec_to_rad_sec=deg_per_sec_2_radian_per_sec_const,
)
-ops: Final[Ops] = Ops(
- deg_to_steps=deg_to_steps,
- steps_to_deg=steps_to_deg,
- rad_to_steps=rad_to_steps,
- steps_to_rad=steps_to_rad,
- speed_deg_to_steps=speed_deg_to_steps,
- speed_steps_to_deg=speed_steps_to_deg,
- speed_rad_to_steps=speed_rad_to_steps,
- speed_steps_to_rad=speed_steps_to_rad,
- clip_speed_to_joint_limits=clip_speed_to_joint_limits,
- clamp_steps_delta=clamp_steps_delta,
-)
-
-# -----------------------------
-# Fast, vectorized limit checking with edge-triggered logging
-# -----------------------------
-_last_violation_mask = np.zeros(6, dtype=bool)
-_last_any_violation = False
-
-
-# TODO: confirm whether this is actually faster than the previous loop based approach
-def check_limits(
- q: ArrayLike,
- target_q: ArrayLike | None = None,
- allow_recovery: bool = True,
- *,
- log: bool = True,
-) -> bool:
- """
- Vectorized limits check in radians.
- - q: current joint angles in radians (array-like)
- - target_q: optional target joint angles in radians (array-like)
- - allow_recovery: allow movement that heads back toward valid range if currently violating
- - log: emit edge-triggered warning/info logs on violation state changes
-
- Returns True if move is allowed (within limits or valid recovery), False otherwise.
- """
- global _last_violation_mask, _last_any_violation
-
- q_arr = np.asarray(q, dtype=np.float64).reshape(-1)
- mn = joint.limits.rad[:, 0]
- mx = joint.limits.rad[:, 1]
-
- below = q_arr < mn
- above = q_arr > mx
- cur_viol = below | above
-
- if target_q is None:
- ok_mask = ~cur_viol
- t_below = t_above = None
- else:
- t = np.asarray(target_q, dtype=np.float64).reshape(-1)
- t_below = t < mn
- t_above = t > mx
- t_viol = t_below | t_above
- if allow_recovery:
- rec_ok = (above & (t <= q_arr)) | (below & (t >= q_arr))
- else:
- rec_ok = np.zeros(6, dtype=bool)
- ok_mask = (~cur_viol & ~t_viol) | (cur_viol & rec_ok)
-
- all_ok = bool(np.all(ok_mask))
-
- if log:
- viol = ~ok_mask
- any_viol = bool(np.any(viol))
-
- # Edge-triggered violation logs
- if any_viol and (
- np.any(viol != _last_violation_mask) or not _last_any_violation
- ):
- idxs = np.where(viol)[0]
- tokens = []
- for i in idxs:
- if cur_viol[i]:
- tokens.append(f"J{i + 1}:" + ("curmax"))
- else:
- # target violates
- if t_below is not None and t_below[i]:
- tokens.append(f"J{i + 1}:targetmax")
- else:
- tokens.append(f"J{i + 1}:violation")
- logger.warning("LIMIT VIOLATION: %s", " ".join(tokens))
- elif (not any_viol) and _last_any_violation:
- logger.info("Limits back in range")
-
- _last_violation_mask[:] = viol
- _last_any_violation = any_viol
-
- return all_ok
-
-
-def check_limits_mask(
- q: ArrayLike, target_q: ArrayLike | None = None, allow_recovery: bool = True
-) -> NDArray[np.bool_]:
- """Return per-joint boolean mask (True if OK for that joint)."""
- q_arr = np.asarray(q, dtype=np.float64).reshape(-1)
- mn = joint.limits.rad[:, 0]
- mx = joint.limits.rad[:, 1]
- below = q_arr < mn
- above = q_arr > mx
- cur_viol = below | above
-
- if target_q is None:
- return ~cur_viol
- t = np.asarray(target_q, dtype=np.float64).reshape(-1)
- t_below = t < mn
- t_above = t > mx
- t_viol = t_below | t_above
- if allow_recovery:
- rec_ok = (above & (t <= q_arr)) | (below & (t >= q_arr))
- else:
- rec_ok = np.zeros(6, dtype=bool)
- ok_mask = (~cur_viol & ~t_viol) | (cur_viol & rec_ok)
- return ok_mask
-
# -----------------------------
# CAN helpers and bitfield utils (used by transports/gripper)
@@ -599,7 +327,83 @@ def split_2_bitfield(var_in: int) -> list[int]:
if __name__ == "__main__":
- # Simple sanity prints
- j_step_rad = steps_to_rad(np.array([1, 1, 1, 1, 1, 1], dtype=np.int32))
+ # Recalculate Cartesian limits from current joint parameters.
+ # Run: python -m parol6.PAROL6_ROBOT
+ #
+ # Uses Jacobian pseudoinverse workspace sampling to derive achievable
+ # TCP velocity/acceleration/jerk while maintaining tool orientation.
+ # Copy the printed values into the pre-computed constants above.
+
+ from parol6.config import steps_to_rad
+
+ def _compute_tcp_velocity_at_config(
+ q: NDArray, direction: int, v_max_joint: NDArray
+ ) -> float | None:
+ """Max TCP velocity in one Cartesian direction.
+
+ For linear directions (0-2), rejects samples that cause orientation change.
+ For angular directions (3-5), rejects samples that cause linear translation.
+ """
+ try:
+ J = robot.jacob0(q)
+ if np.linalg.cond(J) > 1e6:
+ return None
+ desired = np.zeros(6)
+ desired[direction] = 1.0
+ q_dot = np.linalg.pinv(J) @ desired
+ if direction < 3:
+ if np.linalg.norm(J[3:, :] @ q_dot) > 0.01:
+ return None
+ else:
+ if np.linalg.norm(J[:3, :] @ q_dot) > 0.01:
+ return None
+ return float(np.min(v_max_joint / (np.abs(q_dot) + 1e-10)))
+ except (np.linalg.LinAlgError, ValueError):
+ return None
+
+ _home_rad = np.deg2rad(_standby_deg)
+
+ def _sample_limit(
+ n_samples: int, seed: int, v_max: NDArray, spread_deg: float = 30.0
+ ) -> tuple[float, float]:
+ """Sample around home position and return (median_linear_m, median_angular_rad).
+
+ Samples joint configurations from a Gaussian centered on home with
+ std dev of ``spread_deg`` degrees, clamped to joint limits.
+ """
+ rng = np.random.default_rng(seed)
+ spread_rad = np.deg2rad(spread_deg)
+ lin_results = []
+ ang_results = []
+ for _ in range(n_samples):
+ q = _home_rad + rng.normal(0, spread_rad, size=6)
+ q = np.clip(q, _joint_limits_radian[:, 0], _joint_limits_radian[:, 1])
+ for d in range(3):
+ v = _compute_tcp_velocity_at_config(q, d, v_max)
+ if v is not None and v > 0.001:
+ lin_results.append(v)
+ for d in range(3, 6):
+ v = _compute_tcp_velocity_at_config(q, d, v_max)
+ if v is not None and v > 0.001:
+ ang_results.append(v)
+ linear = float(np.median(lin_results)) if lin_results else 0.1
+ angular = float(np.median(ang_results)) if ang_results else 0.1
+ return linear, angular
+
+ vel_lin, vel_ang = _sample_limit(500, 42, _joint_speed_rad)
+ acc_lin, acc_ang = _sample_limit(200, 43, _joint_acc_rad)
+ jerk_lin, jerk_ang = _sample_limit(200, 44, _joint_jerk_rad)
+
+ print("=== Recalculated Cartesian Limits ===")
+ print(f"_cart_linear_velocity_max: float = {vel_lin * 1000:.0f}")
+ print(f"_cart_angular_velocity_max: float = {np.degrees(vel_ang):.0f}")
+ print(f"_cart_linear_acc_max: float = {acc_lin * 1000:.0f}")
+ print(f"_cart_angular_acc_max: float = {np.degrees(acc_ang):.0f}")
+ print(f"_cart_linear_jerk_max: float = {jerk_lin * 1000:.0f}")
+ print(f"_cart_angular_jerk_max: float = {np.degrees(jerk_ang):.0f}")
+
+ print("\n=== Joint Info ===")
+ j_step_rad = np.zeros(6, dtype=np.float64)
+ steps_to_rad(np.array([1, 1, 1, 1, 1, 1], dtype=np.int32), j_step_rad)
print("Smallest step (deg):", np.rad2deg(j_step_rad))
- print("Standby rad:", joint.standby.rad)
+ print("Standby deg:", joint.standby_deg)
diff --git a/parol6/__init__.py b/parol6/__init__.py
index 6997a08..e539744 100644
--- a/parol6/__init__.py
+++ b/parol6/__init__.py
@@ -5,25 +5,60 @@
optional sync wrapper, and server management capabilities.
Key components:
+- Robot: Unified entry point — lifecycle, configuration, kinematics, factories
- AsyncRobotClient: Async UDP client for robot operations
- RobotClient: Sync wrapper with automatic event loop handling
-- ServerManager: Manages headless controller process lifecycle
-- manage_server: Convenience function to start a controller process
-- is_server_running: Helper to probe for an existing controller
"""
+from importlib.metadata import version as _pkg_version
+
from . import PAROL6_ROBOT
-from ._version import __version__
+
+__version__: str = _pkg_version("parol6")
from .client.async_client import AsyncRobotClient
-from .client.manager import ServerManager, is_server_running, manage_server
+from .client.dry_run_client import DryRunRobotClient
from .client.sync_client import RobotClient
+from .protocol.wire import (
+ CurrentActionResultStruct,
+ LoopStatsResultStruct,
+ StatusResultStruct,
+ ToolResultStruct,
+)
+from waldoctl.status import PingResult
+from .robot import Robot
+from .utils.error_catalog import RobotError, extract_robot_error, make_error
+from .utils.error_codes import ErrorCode
+from .utils.errors import MotionError
+
+# Type aliases for backward compatibility
+CurrentActionResult = CurrentActionResultStruct
+LoopStatsResult = LoopStatsResultStruct
+StatusResult = StatusResultStruct
+ToolResult = ToolResultStruct
__all__ = [
"__version__",
+ "Robot",
"AsyncRobotClient",
+ "DryRunRobotClient",
"RobotClient",
- "ServerManager",
- "manage_server",
- "is_server_running",
"PAROL6_ROBOT",
+ # Result types (msgspec structs)
+ "CurrentActionResultStruct",
+ "LoopStatsResultStruct",
+ "StatusResultStruct",
+ "ToolResultStruct",
+ # Backward-compatible aliases
+ "CurrentActionResult",
+ "LoopStatsResult",
+ "StatusResult",
+ "ToolResult",
+ # Other types
+ "PingResult",
+ # Errors
+ "ErrorCode",
+ "RobotError",
+ "extract_robot_error",
+ "make_error",
+ "MotionError",
]
diff --git a/parol6/_version.py b/parol6/_version.py
deleted file mode 100644
index 8545f72..0000000
--- a/parol6/_version.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""Version information for parol6 package."""
-
-__version__ = "0.1.0"
diff --git a/parol6/ack_policy.py b/parol6/ack_policy.py
index 16b1754..e8512a7 100644
--- a/parol6/ack_policy.py
+++ b/parol6/ack_policy.py
@@ -1,33 +1,65 @@
import os
-from collections.abc import Callable
-
-SYSTEM_COMMANDS: set[str] = {
- "STOP",
- "ENABLE",
- "DISABLE",
- "SET_PORT",
- "STREAM",
- "SIMULATOR",
+
+from parol6.protocol.wire import CmdType
+
+# System command types (always require ACK)
+SYSTEM_CMD_TYPES: set[CmdType] = {
+ CmdType.RESUME,
+ CmdType.HALT,
+ CmdType.CONNECT_HARDWARE,
+ CmdType.SIMULATOR,
+ CmdType.SELECT_PROFILE,
+ CmdType.RESET,
+ CmdType.WRITE_IO,
+ CmdType.SET_TCP_OFFSET,
}
-QUERY_COMMANDS: set[str] = {
- "GET_POSE",
- "GET_ANGLES",
- "GET_IO",
- "GET_GRIPPER",
- "GET_SPEEDS",
- "GET_STATUS",
- "GET_GCODE_STATUS",
- "GET_LOOP_STATS",
- "GET_CURRENT_ACTION",
- "GET_QUEUE",
- "PING",
+# Query command types (use request/response, not ACK)
+QUERY_CMD_TYPES: set[CmdType] = {
+ CmdType.POSE,
+ CmdType.ANGLES,
+ CmdType.IO,
+ CmdType.JOINT_SPEEDS,
+ CmdType.STATUS,
+ CmdType.LOOP_STATS,
+ CmdType.ACTIVITY,
+ CmdType.QUEUE,
+ CmdType.TOOLS,
+ CmdType.TOOL_STATUS,
+ CmdType.PROFILE,
+ CmdType.REACHABLE,
+ CmdType.ERROR,
+ CmdType.TCP_SPEED,
+ CmdType.PING,
+ CmdType.IS_SIMULATOR,
+ CmdType.TCP_OFFSET,
}
+# Streaming commands are fire-and-forget (no ACK needed)
+FIRE_AND_FORGET: set[CmdType] = {
+ CmdType.SERVOJ,
+ CmdType.SERVOJ_POSE,
+ CmdType.SERVOL,
+ CmdType.JOGJ,
+ CmdType.JOGL,
+ CmdType.TELEPORT,
+ CmdType.RESET_LOOP_STATS,
+}
-def is_localhost(host: str) -> bool:
- h = (host or "").strip().lower()
- return h in {"127.0.0.1", "localhost", "::1"}
+# Queued motion commands that return a command index in their ACK
+QUEUED_CMD_TYPES: set[CmdType] = {
+ CmdType.HOME,
+ CmdType.MOVEJ,
+ CmdType.MOVEJ_POSE,
+ CmdType.MOVEL,
+ CmdType.MOVEC,
+ CmdType.MOVES,
+ CmdType.MOVEP,
+ CmdType.SELECT_TOOL,
+ CmdType.DELAY,
+ CmdType.CHECKPOINT,
+ CmdType.TOOL_ACTION,
+}
class AckPolicy:
@@ -36,46 +68,47 @@ class AckPolicy:
Rules:
- If force_ack is set, it overrides everything.
- - Safety-critical commands always require ack.
- - If running on localhost/loopback, default to no-ack (low drop risk).
- - If stream mode is ON, default to no-ack (high-rate streaming traffic).
- - Otherwise default to no-ack.
+ - System commands always require ack.
+ - Query commands use request/response, not ACKs.
+ - Streaming commands (servo/jog) are fire-and-forget.
+ - Queued motion commands require ack (returns command index).
+
+ When force_ack is not provided, the PAROL6_FORCE_ACK env var is checked.
"""
def __init__(
self,
- get_stream_mode: Callable[[], bool],
force_ack: bool | None = None,
) -> None:
- self._get_stream_mode = get_stream_mode
+ if force_ack is None:
+ raw = os.getenv("PAROL6_FORCE_ACK", "").strip().lower()
+ if raw in {"1", "true", "yes", "on"}:
+ force_ack = True
+ elif raw in {"0", "false", "no", "off"}:
+ force_ack = False
self._force_ack = force_ack
- @staticmethod
- def from_env(get_stream_mode: Callable[[], bool]) -> "AckPolicy":
- raw = os.getenv("PAROL6_FORCE_ACK", "").strip().lower()
- if raw in {"1", "true", "yes", "on"}:
- force = True
- elif raw in {"0", "false", "no", "off"}:
- force = False
- else:
- force = None
- return AckPolicy(get_stream_mode=get_stream_mode, force_ack=force)
-
- def requires_ack(self, message: str) -> bool:
+ def requires_ack(self, cmd_type: CmdType) -> bool:
+ """Check if a command type requires an ACK response."""
# Forced override (e.g., diagnostics)
if self._force_ack is not None:
return bool(self._force_ack)
- name = (message or "").split("|", 1)[0].strip().upper()
-
# System commands always require ACKs
- if name in SYSTEM_COMMANDS:
+ if cmd_type in SYSTEM_CMD_TYPES:
return True
# Query commands use request/response, not ACKs
- if name in QUERY_COMMANDS:
+ if cmd_type in QUERY_CMD_TYPES:
+ return False
+
+ # Streaming commands are fire-and-forget
+ if cmd_type in FIRE_AND_FORGET:
return False
- # Motion and other commands: ACKs only when forced
- # Localhost and stream mode both favor no-ack by default
+ # Queued motion commands require ACK (returns command index)
+ if cmd_type in QUEUED_CMD_TYPES:
+ return True
+
+ # Default: no ACK
return False
diff --git a/parol6/cli/server.py b/parol6/cli/server.py
index a4933d8..0a7fd65 100644
--- a/parol6/cli/server.py
+++ b/parol6/cli/server.py
@@ -4,12 +4,12 @@
This module provides the command-line interface for starting the PAROL6 headless controller.
"""
-from parol6.server.controller import main
+from parol6.server.cli import main
def main_entry():
"""Entry point for the parol6-server command."""
- main()
+ return main()
if __name__ == "__main__":
diff --git a/parol6/client/async_client.py b/parol6/client/async_client.py
index c431b61..ec66551 100644
--- a/parol6/client/async_client.py
+++ b/parol6/client/async_client.py
@@ -4,26 +4,108 @@
import asyncio
import contextlib
-import json
import logging
import random
+import socket
+import struct
import time
-from collections.abc import AsyncIterator, Callable, Iterable
-from typing import Literal, cast
+from collections.abc import AsyncIterator, Callable
+from typing import TYPE_CHECKING, Any, cast
+import msgspec
import numpy as np
-from spatialmath import SO3
+from waldoctl import RobotClient as _RobotClientABC, ToolStatus
+from waldoctl.status import ActionState, ActivityResult, ToolResult
+from waldoctl.tools import ToolSpec
from .. import config as cfg
-from ..ack_policy import QUERY_COMMANDS, SYSTEM_COMMANDS, AckPolicy
-from ..client.status_subscriber import subscribe_status
-from ..protocol import wire
-from ..protocol.types import Axis, Frame, StatusAggregate
+from ..ack_policy import QUERY_CMD_TYPES, SYSTEM_CMD_TYPES, AckPolicy
+from ..utils.error_catalog import RobotError
+from ..utils.errors import MotionError
+from ..protocol.wire import (
+ STRUCT_TO_CMDTYPE,
+ ActivityCmd,
+ AnglesCmd,
+ AnglesResultStruct,
+ decode_status_bin_into,
+ CheckpointCmd,
+ ConnectHardwareCmd,
+ CurrentActionResultStruct,
+ DelayCmd,
+ EnablementResultStruct,
+ ErrorCmd,
+ ErrorResultStruct,
+ ErrorMsg,
+ IOCmd,
+ IOResultStruct,
+ JointSpeedsCmd,
+ LoopStatsCmd,
+ HaltCmd,
+ HomeCmd,
+ JogJCmd,
+ JogLCmd,
+ LoopStatsResultStruct,
+ MoveCCmd,
+ MoveJCmd,
+ MoveJPoseCmd,
+ MoveLCmd,
+ MovePCmd,
+ MoveSCmd,
+ OkMsg,
+ PingCmd,
+ PingResultStruct,
+ PoseCmd,
+ PoseResultStruct,
+ ProfileCmd,
+ ProfileResultStruct,
+ QueueCmd,
+ QueueResultStruct,
+ ReachableCmd,
+ ResetCmd,
+ ResetLoopStatsCmd,
+ ResumeCmd,
+ Response,
+ ResponseMsg,
+ SelectProfileCmd,
+ SelectToolCmd,
+ SetTcpOffsetCmd,
+ ServoJCmd,
+ ServoJPoseCmd,
+ ServoLCmd,
+ SimulatorCmd,
+ IsSimulatorCmd,
+ IsSimulatorResultStruct,
+ StatusCmd,
+ TcpOffsetCmd,
+ TcpOffsetResultStruct,
+ TcpSpeedCmd,
+ TeleportCmd,
+ SpeedsResultStruct,
+ StatusBuffer,
+ StatusResultStruct,
+ TcpSpeedResultStruct,
+ ToolActionCmd,
+ ToolResultStruct,
+ ToolStatusCmd,
+ ToolStatusResultStruct,
+ ToolsCmd,
+ WriteIOCmd,
+ decode_message,
+ encode_command,
+ encode_command_into,
+)
+from waldoctl.types import Axis, Frame
+from waldoctl import PingResult
+from pinokin import so3_rpy
logger = logging.getLogger(__name__)
-# Sentinel used to signal status_stream termination
-_STATUS_SENTINEL = cast(StatusAggregate, object())
+_AXIS_MAP: dict[str, int] = {"X": 0, "Y": 1, "Z": 2, "RX": 3, "RY": 4, "RZ": 5}
+_ACTION_STATE_MAP: dict[str, ActionState] = {
+ "idle": ActionState.IDLE,
+ "executing": ActionState.EXECUTING,
+ "error": ActionState.ERROR,
+}
class _UDPClientProtocol(asyncio.DatagramProtocol):
@@ -37,8 +119,110 @@ def connection_made(self, transport: asyncio.BaseTransport) -> None:
def datagram_received(self, data: bytes, addr: tuple[str, int]) -> None:
try:
self.rx_queue.put_nowait((data, addr))
+ except asyncio.QueueFull:
+ pass # Drop packet when queue is full (expected under load)
+
+ def error_received(self, exc: Exception) -> None:
+ pass
+
+ def connection_lost(self, exc: Exception | None) -> None:
+ pass
+
+
+def _create_multicast_socket(group: str, port: int, iface_ip: str) -> socket.socket:
+ """Create and configure a multicast socket with loopback-first semantics."""
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+
+ # Allow multiple listeners on same port
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except AttributeError:
+ pass
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1 << 20)
+
+ # Bind to port
+ try:
+ sock.bind(("", port))
+ except OSError:
+ sock.bind((iface_ip, port))
+
+ # Helper to detect primary NIC IP
+ def _detect_primary_ip() -> str:
+ tmp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ try:
+ tmp.connect(("1.1.1.1", 80))
+ return tmp.getsockname()[0]
except Exception:
- pass
+ return "127.0.0.1"
+ finally:
+ with contextlib.suppress(Exception):
+ tmp.close()
+
+ # Join multicast group with fallbacks
+ try:
+ mreq = struct.pack("=4s4s", socket.inet_aton(group), socket.inet_aton(iface_ip))
+ sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+ except Exception:
+ try:
+ primary_ip = _detect_primary_ip()
+ mreq = struct.pack(
+ "=4s4s", socket.inet_aton(group), socket.inet_aton(primary_ip)
+ )
+ sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+ except Exception:
+ mreq_any = struct.pack("=4sl", socket.inet_aton(group), socket.INADDR_ANY)
+ sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq_any)
+
+ sock.setblocking(False)
+ return sock
+
+
+def _create_unicast_socket(port: int, host: str) -> socket.socket:
+ """Create and configure a plain UDP socket for unicast reception."""
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except AttributeError:
+ pass
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1 << 20)
+ try:
+ sock.bind((host, port))
+ except OSError:
+ sock.bind(("", port))
+ sock.setblocking(False)
+ return sock
+
+
+if TYPE_CHECKING:
+ from typing import Protocol
+
+ class _StatusNotifier(Protocol):
+ _shared_status: StatusBuffer
+ _status_generation: int
+ _status_event: asyncio.Event
+ _closed: bool
+
+
+class _StatusProtocol(asyncio.DatagramProtocol):
+ """Protocol handler for status datagrams - decodes directly into shared buffer."""
+
+ def __init__(self, client: "_StatusNotifier"):
+ self._client = client
+ self._transport: asyncio.DatagramTransport | None = None
+
+ def connection_made(self, transport: asyncio.BaseTransport) -> None:
+ self._transport = cast(asyncio.DatagramTransport, transport)
+
+ def datagram_received(self, data: bytes, addr: tuple[str, int]) -> None:
+ if self._client._closed:
+ return
+ # Zero-allocation decode directly into shared buffer
+ if decode_status_bin_into(data, self._client._shared_status):
+ self._client._status_generation += 1
+ # Event.set() is synchronous and wakes all waiters
+ self._client._status_event.set()
def error_received(self, exc: Exception) -> None:
pass
@@ -47,7 +231,7 @@ def connection_lost(self, exc: Exception | None) -> None:
pass
-class AsyncRobotClient:
+class AsyncRobotClient(_RobotClientABC):
"""
Async UDP client for the PAROL6 headless controller.
@@ -68,6 +252,13 @@ def __init__(
self.timeout = timeout
self.retries = retries
+ # Pre-allocated buffers for pose() RPY conversion
+ self._R_buf = np.zeros((3, 3), dtype=np.float64)
+ self._rpy_buf = np.zeros(3, dtype=np.float64)
+
+ # Pre-allocated TX buffer for fire-and-forget command encoding
+ self._tx_buf = bytearray(256)
+
# Persistent asyncio datagram endpoint
self._transport: asyncio.DatagramTransport | None = None
self._protocol: _UDPClientProtocol | None = None
@@ -79,18 +270,61 @@ def __init__(
# Serialize request/response
self._req_lock = asyncio.Lock()
- # Stream flag for UI convenience
- self._stream_mode = False
- # ACK policy
- self._ack_policy = AckPolicy.from_env(lambda: self._stream_mode)
+ # ACK policy (category-based, no stream_mode dependency)
+ self._ack_policy = AckPolicy()
+
+ # Shared status state (single buffer, event-based notification)
+ self._status_transport: asyncio.DatagramTransport | None = None
+ self._status_sock: socket.socket | None = None
+ self._shared_status: StatusBuffer = StatusBuffer()
+ self._status_generation: int = 0
+ self._status_event: asyncio.Event = asyncio.Event()
- # Multicast listener using subscribe_status
- self._multicast_task: asyncio.Task | None = None
- self._status_queue: asyncio.Queue[StatusAggregate] = asyncio.Queue(maxsize=100)
+ # Last command index returned by server for queued commands
+ self._last_command_index: int | None = None
+
+ # Active tool key (set by select_tool)
+ self._active_tool_key: str | None = None
+ self._active_variant_key: str = ""
+
+ # Bound tool specs. Populated from the parol6 tool registry so that
+ # `from parol6 import AsyncRobotClient; c = AsyncRobotClient(...)` works
+ # without going through Robot.create_async_client(). The Robot factory
+ # rebinds these from its own tools.available, which is the same source.
+ self._bound_tools: dict[str, ToolSpec] = {}
+ self._bind_default_tools()
# Lifecycle flag
self._closed: bool = False
+ def _bind_default_tools(self) -> None:
+ """Populate self._bound_tools from the parol6 tool registry.
+
+ Lazy import of parol6.robot avoids the import cycle (parol6.robot
+ imports AsyncRobotClient at module level).
+ """
+ import copy
+
+ from parol6.robot import _build_tools
+
+ bound: dict[str, ToolSpec] = {}
+ for spec in _build_tools().available:
+ bound_spec = copy.copy(spec)
+ bound_spec._execute = self.tool_action # type: ignore[attr-defined, ty:unresolved-attribute]
+ bound_spec._get_status = self._tool_status # type: ignore[attr-defined, ty:unresolved-attribute]
+ bound[spec.key] = bound_spec
+ self._bound_tools = bound
+
+ # --------------- Tool access ---------------
+
+ @property
+ def tool(self) -> ToolSpec:
+ """Active bound tool. Raises if no tool has been set."""
+ key = (self._active_tool_key or "").upper()
+ if not key:
+ raise RuntimeError("No tool set. Call select_tool() first.")
+ return self._bound_tools[key]
+
# --------------- Endpoint configuration properties ---------------
@property
@@ -146,49 +380,47 @@ async def _ensure_endpoint(self) -> None:
await self._start_multicast_listener()
async def _start_multicast_listener(self) -> None:
- """Start listening for multicast status broadcasts using subscribe_status."""
- if self._multicast_task is not None and not self._multicast_task.done():
+ """Start listening for multicast/unicast status broadcasts.
+
+ Creates a UDP socket and protocol that decodes status datagrams directly
+ into the shared buffer, notifying consumers via condition variable.
+ """
+ if self._status_transport is not None:
return
logger.info(
- f"Status subscriber config: group={cfg.MCAST_GROUP} port={cfg.MCAST_PORT} iface={cfg.MCAST_IF}"
+ f"Status subscriber config: transport={cfg.STATUS_TRANSPORT} group={cfg.MCAST_GROUP} port={cfg.MCAST_PORT} iface={cfg.MCAST_IF}"
)
# Quick readiness check (no blind wait): bounded by client's own timeout
try:
- await self.wait_for_server_ready(
+ await self.wait_ready(
timeout=min(1.0, float(self.timeout or 0.3)), interval=0.5
)
except Exception:
pass
- async def _listener():
- """Consume status broadcasts and queue them."""
+ # Create the socket based on configured transport
+ if cfg.STATUS_TRANSPORT == "UNICAST":
+ self._status_sock = _create_unicast_socket(
+ cfg.MCAST_PORT, cfg.STATUS_UNICAST_HOST
+ )
+ else:
try:
- async for status in subscribe_status():
- # Exit promptly if the client is closing
- if self._closed:
- break
-
- # Put in queue, drop old if full
- if self._status_queue.full():
- try:
- self._status_queue.get_nowait()
- except asyncio.QueueEmpty:
- pass
- try:
- self._status_queue.put_nowait(status)
- except asyncio.QueueFull:
- pass
- except asyncio.CancelledError:
- # Normal shutdown path; propagate cancellation so the task
- # is marked as cancelled and can be awaited cleanly.
- raise
- except Exception:
- # Subscriber ended unexpectedly, could retry but for now just exit
- pass
+ self._status_sock = _create_multicast_socket(
+ cfg.MCAST_GROUP, cfg.MCAST_PORT, cfg.MCAST_IF
+ )
+ except OSError:
+ logging.warning("Multicast socket failed, falling back to unicast")
+ self._status_sock = _create_unicast_socket(
+ cfg.MCAST_PORT, cfg.STATUS_UNICAST_HOST
+ )
- # Start listener task
- self._multicast_task = asyncio.create_task(_listener())
+ # Create the datagram endpoint with the status protocol
+ loop = asyncio.get_running_loop()
+ self._status_transport, _ = await loop.create_datagram_endpoint(
+ lambda: _StatusProtocol(self),
+ sock=self._status_sock,
+ )
# --------------- Lifecycle / context management ---------------
@@ -202,31 +434,29 @@ async def close(self) -> None:
logging.debug("Closing Client...")
self._closed = True
- # status_stream consumers will be signaled via sentinel after stopping the multicast listener.
+ # Wake all stream_status consumers
+ self._status_event.set()
- # Stop multicast listener
- if self._multicast_task is not None:
- self._multicast_task.cancel()
- with contextlib.suppress(asyncio.CancelledError, Exception):
- await self._multicast_task
- self._multicast_task = None
-
- # Wake status_stream consumer(s) promptly: clear queue then enqueue sentinel so it's next
- with contextlib.suppress(Exception):
- while not self._status_queue.empty():
- self._status_queue.get_nowait()
- with contextlib.suppress(asyncio.QueueFull):
- self._status_queue.put_nowait(_STATUS_SENTINEL)
+ # Close status transport - yield first to let pending I/O complete
+ if self._status_transport is not None:
+ with contextlib.suppress(Exception):
+ await asyncio.sleep(0)
+ self._status_transport.close()
+ self._status_transport = None
+ if self._status_sock is not None:
+ with contextlib.suppress(Exception):
+ self._status_sock.close()
+ self._status_sock = None
- # Close UDP transport
+ # Close UDP command transport
if self._transport is not None:
with contextlib.suppress(Exception):
+ await asyncio.sleep(0)
self._transport.close()
self._transport = None
self._protocol = None
- # Best-effort drain for RX queue to free memory.
- # Do not drain status_queue here to ensure sentinel reaches consumers.
+ # Best-effort drain for RX queue to free memory
with contextlib.suppress(Exception):
while not self._rx_queue.empty():
self._rx_queue.get_nowait()
@@ -239,311 +469,631 @@ async def __aenter__(self) -> "AsyncRobotClient":
async def __aexit__(self, exc_type, exc, tb) -> None:
await self.close()
- async def status_stream(self) -> AsyncIterator[StatusAggregate]:
+ async def stream_status(self) -> AsyncIterator[StatusBuffer]:
"""Async generator that yields status updates from multicast broadcasts.
Usage:
- async for status in client.status_stream():
- print(f"Speeds: {status.get('speeds')}")
+ async for status in client.stream_status():
+ print(f"Angles: {status.angles}")
This generator terminates automatically when :meth:`close` is
called on the client, so callers do not need to manually cancel
their consumer tasks.
+
+ Each yielded StatusBuffer is a copy - safe to store or process async.
+ For zero-copy hot paths, use :meth:`stream_status_shared` instead.
+
+ Slow consumers automatically skip to the latest state (desired for real-time).
+ """
+ async for status in self.stream_status_shared():
+ yield status.copy()
+
+ async def stream_status_shared(self) -> AsyncIterator[StatusBuffer]:
+ """Zero-copy async generator that yields the shared status buffer.
+
+ Usage:
+ async for status in client.stream_status_shared():
+ # Process immediately - don't store references
+ print(f"Angles: {status.angles}")
+
+ WARNING: The same buffer instance is yielded on every iteration.
+ Do not store references to the yielded object - data will be
+ overwritten on the next iteration. For safe storage, use
+ :meth:`stream_status` or call status.copy().
+
+ This generator terminates automatically when :meth:`close` is
+ called on the client.
+
+ Slow consumers automatically skip to the latest state (desired for real-time).
"""
await self._ensure_endpoint()
- while True:
- item = await self._status_queue.get()
- if item is _STATUS_SENTINEL:
+ last_gen = 0
+
+ while not self._closed:
+ # Clear before waiting - only affects future waits, not current waiters
+ self._status_event.clear()
+
+ # Check if we already have new data (arrived between yield and clear)
+ if self._status_generation != last_gen:
+ last_gen = self._status_generation
+ yield self._shared_status
+ continue
+
+ # Wait for next update
+ await self._status_event.wait()
+
+ if self._closed:
break
- yield item
+ if self._status_generation != last_gen:
+ last_gen = self._status_generation
+ yield self._shared_status
- async def _send(self, message: str) -> bool:
+ async def _send(self, cmd: msgspec.Struct) -> int:
"""
- Send a command based on AckPolicy:
- - System commands: wait for server OK/ERROR, return True on OK
- - Motion commands: wait iff policy requires ACK; otherwise fire-and-forget (return True on send)
- - Query commands: should use _request path; if invoked here, just fire-and-forget
+ Send a binary command based on AckPolicy.
+
+ Returns:
+ int: command index (≥ 0) for ACK'd queued commands, -1 on ACK timeout;
+ 1 for system/fire-and-forget/query success, 0 on failure.
"""
await self._ensure_endpoint()
assert self._transport is not None
- name = (message or "").split("|", 1)[0].strip().upper()
+ # Get command type from struct's tag
+ cmd_type = STRUCT_TO_CMDTYPE.get(type(cmd))
+ if cmd_type is None:
+ return 0
- # System commands: wait for OK/ERROR
- if name in SYSTEM_COMMANDS:
- return await self._request_ok(message, self.timeout)
+ # System commands: need stable bytes across await
+ if cmd_type in SYSTEM_CMD_TYPES:
+ try:
+ await self._request_ok_raw(encode_command(cmd), self.timeout)
+ return 1
+ except TimeoutError:
+ return 0
# Motion and other non-query commands
- if name not in QUERY_COMMANDS:
- if self._ack_policy.requires_ack(message):
- return await self._request_ok(message, self.timeout)
- # Fire-and-forget
- self._transport.sendto(message.encode("ascii"))
- return True
-
- # Queries: fire-and-forget here (query methods use _request())
- self._transport.sendto(message.encode("ascii"))
- return True
-
- async def _request(self, message: str, bufsize: int = 2048) -> str | None:
- """Send a request and wait for a UDP response."""
+ if cmd_type not in QUERY_CMD_TYPES:
+ if self._ack_policy.requires_ack(cmd_type):
+ try:
+ ok = await self._request_ok_raw(encode_command(cmd), self.timeout)
+ self._last_command_index = ok.index
+ return ok.index if ok.index is not None else 0
+ except TimeoutError:
+ return -1
+ # Fire-and-forget: reuse pre-allocated buffer (sendto copies)
+ encode_command_into(cmd, self._tx_buf)
+ self._transport.sendto(self._tx_buf)
+ return 1
+
+ # Queries via _send: fire-and-forget
+ encode_command_into(cmd, self._tx_buf)
+ self._transport.sendto(self._tx_buf)
+ return 1
+
+ async def _request(self, cmd: msgspec.Struct) -> Response | None:
+ """Send a query command and wait for a typed response.
+
+ Drains the receive queue until a ResponseMsg is found or timeout.
+ Non-ResponseMsg datagrams (e.g. status broadcasts) are discarded.
+
+ Args:
+ cmd: Typed command struct
+
+ Returns:
+ Typed Response struct, or None on timeout.
+
+ Raises:
+ MotionError: If the server responds with an error.
+ """
await self._ensure_endpoint()
assert self._transport is not None
+ data = encode_command(cmd)
for attempt in range(self.retries + 1):
try:
async with self._req_lock:
- self._transport.sendto(message.encode("ascii"))
- data, _addr = await asyncio.wait_for(
- self._rx_queue.get(), timeout=self.timeout
- )
- return data.decode("ascii", errors="ignore")
+ self._transport.sendto(data)
+ end_time = time.monotonic() + self.timeout
+ while time.monotonic() < end_time:
+ try:
+ resp_data, _ = await asyncio.wait_for(
+ self._rx_queue.get(),
+ timeout=max(0.0, end_time - time.monotonic()),
+ )
+ try:
+ parsed = decode_message(resp_data)
+ if isinstance(parsed, ResponseMsg):
+ return parsed.result
+ if isinstance(parsed, ErrorMsg):
+ raise MotionError(
+ RobotError.from_wire(parsed.message)
+ )
+ except MotionError:
+ raise
+ except Exception:
+ pass # Ignore non-matching datagrams
+ except (asyncio.TimeoutError, TimeoutError):
+ break
+ except MotionError:
+ raise
except (asyncio.TimeoutError, TimeoutError):
- if attempt < self.retries:
- backoff = min(0.5, 0.05 * (2**attempt)) + random.uniform(0, 0.05)
- await asyncio.sleep(backoff)
- continue
+ pass
except Exception:
break
+ if attempt < self.retries:
+ backoff = min(0.5, 0.05 * (2**attempt)) + random.uniform(0, 0.05)
+ await asyncio.sleep(backoff)
return None
- async def _request_ok(self, message: str, timeout: float) -> bool:
+ async def _request_ok_raw(self, data: bytes, timeout: float) -> OkMsg:
"""
- Send a command and wait for a simple 'OK' or 'ERROR|...' reply.
- Returns True on OK; raises on ERROR or timeout.
+ Send pre-encoded binary command and wait for 'OK' or 'ERROR' reply.
+
+ Args:
+ data: Pre-encoded msgpack bytes
+ timeout: Timeout in seconds.
+
+ Returns OkMsg on OK; raises MotionError on ERROR, TimeoutError on timeout.
"""
await self._ensure_endpoint()
assert self._transport is not None
- end_time = time.time() + timeout
+ end_time = time.monotonic() + timeout
async with self._req_lock:
- self._transport.sendto(message.encode("ascii"))
- while time.time() < end_time:
+ self._transport.sendto(data)
+ while time.monotonic() < end_time:
try:
- data, _addr = await asyncio.wait_for(
- self._rx_queue.get(), timeout=max(0.0, end_time - time.time())
+ resp_data, _addr = await asyncio.wait_for(
+ self._rx_queue.get(),
+ timeout=max(0.0, end_time - time.monotonic()),
)
- text = data.decode("ascii", errors="ignore").strip()
- if text == "OK":
- return True
- if text.startswith("ERROR|"):
- raise RuntimeError(text)
- # Ignore unrelated datagrams
+ try:
+ match decode_message(resp_data):
+ case OkMsg() as ok:
+ return ok
+ case ErrorMsg(message):
+ raise MotionError(RobotError.from_wire(message))
+ except msgspec.ValidationError:
+ pass # Ignore non-matching datagrams
except (asyncio.TimeoutError, TimeoutError):
break
- except Exception:
- break
raise TimeoutError("Timeout waiting for OK")
# --------------- Motion / Control ---------------
- async def home(self) -> bool:
- return await self._send("HOME")
+ async def home(
+ self, wait: bool = False, timeout: float = 60.0, **wait_kwargs: Any
+ ) -> int:
+ """Home the robot to its home position.
+
+ Returns the command index (≥ 0) on success, -1 on failure.
+
+ Category: Motion
+
+ Example:
+ rbt.home()
+
+ Args:
+ wait: If True, block until motion completes
+ timeout: Maximum time to wait in seconds (only used when wait=True)
+ """
+ index = await self._send(HomeCmd())
+ assert isinstance(index, int)
+ if wait and index >= 0:
+ ok = await self.wait_command(index, timeout=timeout)
+ if not ok:
+ raise TimeoutError(f"home() timed out after {timeout}s")
+ return index
+
+ async def resume(self) -> int:
+ """Re-enable the robot controller, allowing motion commands.
+
+ Category: Control
+
+ Example:
+ rbt.resume()
+
+ Returns:
+ 1 if acknowledged, 0 on failure.
+ """
+ return await self._send(ResumeCmd())
+
+ async def halt(self) -> int:
+ """Halt the robot — stop all motion and disable.
+
+ Category: Control
+
+ Example:
+ rbt.halt()
+
+ Returns:
+ 1 if acknowledged, 0 on failure.
+ """
+ return await self._send(HaltCmd())
+
+ async def simulator(self, enabled: bool) -> int:
+ """Enable or disable simulator mode.
+
+ Category: Control
+
+ Example:
+ rbt.simulator(True)
+ """
+ return await self._send(SimulatorCmd(on=enabled))
+
+ async def is_simulator(self) -> bool:
+ """Query whether simulator mode is active.
- async def enable(self) -> bool:
- return await self._send("ENABLE")
+ Category: Query
+
+ Example:
+ active = rbt.is_simulator()
+ """
+ resp = await self._request(IsSimulatorCmd())
+ return resp.active if isinstance(resp, IsSimulatorResultStruct) else False
- async def disable(self) -> bool:
- return await self._send("DISABLE")
+ async def teleport(
+ self,
+ angles_deg: list[float],
+ tool_positions: list[float] | None = None,
+ ) -> int:
+ """Instantly set joint angles and optional tool positions (simulator only).
- async def stop(self) -> bool:
- """Alias for disable() - stops motion and disables controller."""
- return await self.disable()
+ Category: Control
- async def start(self) -> bool:
- """Alias for enable() - enables controller."""
- return await self.enable()
+ Example:
+ rbt.teleport([0, -90, 0, 0, 0, 0])
+ rbt.teleport([0, -90, 0, 0, 0, 0], tool_positions=[1.0])
+ """
+ return await self._send(
+ TeleportCmd(angles=angles_deg, tool_positions=tool_positions)
+ )
- async def stream_on(self) -> bool:
- self._stream_mode = True
- return await self._send("STREAM|ON")
+ async def connect_hardware(self, port_str: str) -> int:
+ """Connect to robot hardware via serial port.
- async def stream_off(self) -> bool:
- self._stream_mode = False
- return await self._send("STREAM|OFF")
+ Category: Configuration
- async def simulator_on(self) -> bool:
- return await self._send("SIMULATOR|ON")
+ Example:
+ rbt.connect_hardware("/dev/ttyUSB0")
- async def simulator_off(self) -> bool:
- return await self._send("SIMULATOR|OFF")
+ Args:
+ port_str: Serial port path (e.g., '/dev/ttyUSB0' or 'COM3').
- async def set_serial_port(self, port_str: str) -> bool:
+ Returns:
+ 1 if acknowledged, 0 on failure.
+
+ Raises:
+ ValueError: If port_str is empty.
+ """
if not port_str:
raise ValueError("No port provided")
- return await self._send(f"SET_PORT|{port_str}")
+ return await self._send(ConnectHardwareCmd(port_str=port_str))
+
+ async def reset(self) -> int:
+ """Reset controller state to initial values.
+
+ Instantly resets positions to home, clears queues, resets tool/errors.
+ Preserves serial connection. Useful for fast test isolation.
+
+ Category: Control
+
+ Example:
+ rbt.reset()
+ """
+ return await self._send(ResetCmd())
# --------------- Status / Queries ---------------
- async def ping(self) -> str | None:
- """Return raw 'PONG|...' text (e.g., 'PONG|SERIAL=1') or None on timeout."""
- return await self._request("PING", bufsize=256)
+ async def ping(self) -> PingResult | None:
+ """Return parsed ping result with hardware_connected status.
- async def get_angles(self) -> list[float] | None:
- resp = await self._request("GET_ANGLES", bufsize=1024)
- if not resp:
- return None
- vals = wire.decode_simple(resp, "ANGLES")
- return cast(list[float] | None, vals)
+ Category: Query
- async def get_io(self) -> list[int] | None:
- resp = await self._request("GET_IO", bufsize=1024)
- if not resp:
+ Example:
+ rbt.ping()
+ """
+ resp = await self._request(PingCmd())
+ if not isinstance(resp, PingResultStruct):
return None
- vals = wire.decode_simple(resp, "IO")
- return cast(list[int] | None, vals)
+ return PingResult(hardware_connected=bool(resp.hardware_connected))
- async def get_gripper_status(self) -> list[int] | None:
- resp = await self._request("GET_GRIPPER", bufsize=1024)
- if not resp:
- return None
- vals = wire.decode_simple(resp, "GRIPPER")
- return cast(list[int] | None, vals)
+ async def angles(self) -> list[float] | None:
+ """Current joint angles in degrees [J1, J2, J3, J4, J5, J6].
- async def get_speeds(self) -> list[float] | None:
- resp = await self._request("GET_SPEEDS", bufsize=1024)
- if not resp:
- return None
- vals = wire.decode_simple(resp, "SPEEDS")
- return cast(list[float] | None, vals)
+ Category: Query
- async def get_pose(
- self, frame: Literal["WRF", "TRF"] = "WRF"
- ) -> list[float] | None:
+ Example:
+ angles = rbt.angles()
"""
- Returns 16-element transformation matrix (flattened) with translation in mm.
+ resp = await self._request(AnglesCmd())
+ return resp.angles if isinstance(resp, AnglesResultStruct) else None
- Args:
- frame: Reference frame - "WRF" for World Reference Frame (default),
- "TRF" for Tool Reference Frame
+ async def io(self) -> list[int] | None:
+ """Digital I/O status [in1, in2, out1, out2, estop].
- Expected wire format: "POSE|p0,p1,p2,...,p15"
+ Category: Query
+
+ Example:
+ io = rbt.io()
"""
- command = f"GET_POSE {frame}" if frame != "WRF" else "GET_POSE"
- resp = await self._request(command, bufsize=2048)
- if not resp:
- return None
- vals = wire.decode_simple(resp, "POSE")
- return cast(list[float] | None, vals)
+ resp = await self._request(IOCmd())
+ return resp.io if isinstance(resp, IOResultStruct) else None
+
+ async def joint_speeds(self) -> list[float] | None:
+ """Current joint speeds in steps/sec [J1, J2, J3, J4, J5, J6].
- async def get_gripper(self) -> list[int] | None:
- """Alias for get_gripper_status for compatibility."""
- return await self.get_gripper_status()
+ Category: Query
- async def get_status(self) -> dict | None:
+ Example:
+ speeds = rbt.joint_speeds()
"""
- Aggregate status.
- Expected format:
- STATUS|POSE=p0,p1,...,p15|ANGLES=a0,...,a5|IO=in1,in2,out1,out2,estop|GRIPPER=id,pos,spd,cur,status,obj
- Returns dict with keys: pose (list[float] len=16 with translation in mm), angles (list[float] len=6),
- io (list[int] len=5), gripper (list[int] len>=6)
+ resp = await self._request(JointSpeedsCmd())
+ return resp.speeds if isinstance(resp, SpeedsResultStruct) else None
+
+ async def _pose_matrix(self, frame: Frame = "WRF") -> list[float] | None:
+ """Raw 16-element transformation matrix (flattened) with translation in mm."""
+ resp = await self._request(PoseCmd(frame=frame))
+ return resp.pose if isinstance(resp, PoseResultStruct) else None
+
+ async def pose(self, frame: Frame = "WRF") -> list[float] | None:
+ """Current TCP pose as [x, y, z, rx, ry, rz] in mm and degrees.
+
+ Category: Query
+
+ Example:
+ pose = rbt.pose()
"""
- resp = await self._request("GET_STATUS", bufsize=4096)
- if not resp:
+ pose_matrix = await self._pose_matrix(frame=frame)
+ if not pose_matrix or len(pose_matrix) != 16:
+ return None
+ try:
+ x, y, z = pose_matrix[3], pose_matrix[7], pose_matrix[11]
+ R = self._R_buf
+ R[0, 0] = pose_matrix[0]
+ R[0, 1] = pose_matrix[1]
+ R[0, 2] = pose_matrix[2]
+ R[1, 0] = pose_matrix[4]
+ R[1, 1] = pose_matrix[5]
+ R[1, 2] = pose_matrix[6]
+ R[2, 0] = pose_matrix[8]
+ R[2, 1] = pose_matrix[9]
+ R[2, 2] = pose_matrix[10]
+ so3_rpy(R, self._rpy_buf)
+ rpy_deg = np.degrees(self._rpy_buf)
+ return [x, y, z, rpy_deg[0], rpy_deg[1], rpy_deg[2]]
+ except (ValueError, IndexError):
return None
- return cast(dict | None, wire.decode_status(resp))
- async def get_loop_stats(self) -> dict | None:
+ async def status(self) -> StatusResultStruct | None:
+ """Aggregate status snapshot (pose, angles, speeds, io, tool_status).
+
+ Category: Query
+
+ Example:
+ status = rbt.status()
"""
- Fetch control-loop runtime metrics.
- Expected wire format: "LOOP_STATS|{json}"
+ resp = await self._request(StatusCmd())
+ return resp if isinstance(resp, StatusResultStruct) else None
+
+ async def loop_stats(self) -> LoopStatsResultStruct | None:
+ """Fetch control-loop runtime metrics.
+
+ Category: Query
+
+ Example:
+ stats = rbt.loop_stats()
"""
- resp = await self._request("GET_LOOP_STATS", bufsize=1024)
- if not resp or not resp.startswith("LOOP_STATS|"):
- return None
- return cast(dict, json.loads(resp.split("|", 1)[1]))
+ resp = await self._request(LoopStatsCmd())
+ return resp if isinstance(resp, LoopStatsResultStruct) else None
+
+ async def reset_loop_stats(self) -> int:
+ """Reset control-loop min/max metrics and overrun count.
- async def set_tool(self, tool_name: str) -> bool:
+ Category: Control
+
+ Example:
+ rbt.reset_loop_stats()
"""
- Set the current end-effector tool configuration.
+ return await self._send(ResetLoopStatsCmd())
+
+ async def select_tool(self, tool_name: str, variant_key: str = "") -> int:
+ """Set the active end-effector tool on the controller.
+
+ Category: Configuration
+
+ Example:
+ rbt.select_tool("PNEUMATIC")
Args:
- tool_name: Name of the tool ('NONE', 'PNEUMATIC', 'ELECTRIC')
+ tool_name: Name of the tool ('NONE', 'PNEUMATIC', 'SSG-48', 'MSG', 'VACUUM')
+ variant_key: Optional variant within the tool type.
Returns:
- True if successful
+ Command index (>= 0) if queued, 0 on failure.
"""
- return await self._send(f"SET_TOOL|{tool_name.upper()}")
+ self._active_tool_key = tool_name.upper()
+ self._active_variant_key = variant_key
+ return await self._send(
+ SelectToolCmd(tool_name=self._active_tool_key, variant_key=variant_key)
+ )
+
+ async def set_tcp_offset(self, x: float = 0, y: float = 0, z: float = 0) -> int:
+ """Set TCP offset in mm, composed on top of the current tool transform.
+
+ The offset shifts the effective TCP point in the tool's local frame.
+ Subsequent motion (especially TRF relative moves) will use the new TCP.
+ Call with (0, 0, 0) to reset. Changing tools resets the offset.
+
+ Category: Configuration
+
+ Example:
+ rbt.set_tcp_offset(0, 0, -190) # pencil tip 190mm from gripper
+ """
+ return await self._send(SetTcpOffsetCmd(x=x, y=y, z=z))
+
+ async def tcp_offset(self) -> list[float]:
+ """Query current TCP offset in mm [x, y, z].
- async def get_tool(self) -> dict | None:
+ Category: Configuration
+
+ Example:
+ offset = rbt.tcp_offset()
"""
- Get the current tool configuration and available tools.
+ resp = await self._request(TcpOffsetCmd())
+ if isinstance(resp, TcpOffsetResultStruct):
+ return [resp.x, resp.y, resp.z]
+ return [0.0, 0.0, 0.0]
+
+ async def select_profile(self, profile: str) -> int:
+ """Set the motion profile (e.g. ``"TOPPRA"``).
+
+ Category: Configuration
+
+ Example:
+ rbt.select_profile("TOPPRA")
+
+ Args:
+ profile: Motion profile type ('TOPPRA', 'RUCKIG', 'QUINTIC', 'TRAPEZOID', 'LINEAR')
+ Note: RUCKIG is point-to-point only; Cartesian moves will use TOPPRA.
Returns:
- Dict with keys: 'tool' (current tool name), 'available' (list of available tools)
- Expected wire format: "TOOL|{json}"
+            Command index (>= 0) if queued, -1 on failure.
"""
- resp = await self._request("GET_TOOL", bufsize=1024)
- if not resp or not resp.startswith("TOOL|"):
- return None
- return cast(dict, json.loads(resp.split("|", 1)[1]))
+ return await self._send(SelectProfileCmd(profile=profile.upper()))
+
+ async def profile(self) -> str | None:
+ """Current motion profile name.
+
+ Category: Query
- async def get_current_action(self) -> dict | None:
+ Example:
+ profile = rbt.profile()
"""
- Get the current executing action/command and its state.
+ resp = await self._request(ProfileCmd())
+ return resp.profile.upper() if isinstance(resp, ProfileResultStruct) else None
- Returns:
- Dict with keys: 'current' (current action name), 'state' (action state),
- 'next' (next action if any)
- Expected wire format: "ACTION|{json}"
+ async def tools(self) -> ToolResult | None:
+ """Current tool and available tools.
+
+ Category: Query
+
+ Example:
+ tools = rbt.tools()
"""
- resp = await self._request("GET_CURRENT_ACTION", bufsize=1024)
- if not resp or not resp.startswith("ACTION|"):
+ resp = await self._request(ToolsCmd())
+ if not isinstance(resp, ToolResultStruct):
return None
- return cast(dict, json.loads(resp.split("|", 1)[1]))
+ return ToolResult(tool=resp.tool, available=resp.available)
+
+ async def activity(self) -> ActivityResult | None:
+ """What the robot is currently doing.
+
+ Returns state (idle/executing/error), current command name,
+ parameters, and error description if applicable.
+
+ Category: Query
- async def get_queue(self) -> dict | None:
+ Example:
+ act = rbt.activity()
"""
- Get the list of queued non-streamable commands.
+ resp = await self._request(ActivityCmd())
+ if not isinstance(resp, CurrentActionResultStruct):
+ return None
+ state = _ACTION_STATE_MAP.get(resp.state.lower(), ActionState.IDLE)
+ return ActivityResult(
+ state=state,
+ command=resp.current,
+ params=resp.params,
+ error="" if state != ActionState.ERROR else resp.current,
+ )
- Returns:
- Dict with keys: 'non_streamable' (list of queued commands), 'size' (queue size)
- Expected wire format: "QUEUE|{json}"
+ async def queue(self) -> list[str] | None:
+ """Queued command list.
+
+ Category: Query
+
+ Example:
+ queue = rbt.queue()
"""
- resp = await self._request("GET_QUEUE", bufsize=2048)
- if not resp or not resp.startswith("QUEUE|"):
+ resp = await self._request(QueueCmd())
+ return resp.queue if isinstance(resp, QueueResultStruct) else None
+
+ async def _tool_status(self) -> ToolStatus | None:
+ """Query tool status from the controller (internal — use ``rbt.tool.status()``)."""
+ resp = await self._request(ToolStatusCmd())
+ if not isinstance(resp, ToolStatusResultStruct):
return None
- return cast(dict, json.loads(resp.split("|", 1)[1]))
+ return ToolStatus(
+ key=resp.tool_key,
+ state=resp.state,
+ engaged=resp.engaged,
+ part_detected=resp.part_detected,
+ fault_code=resp.fault_code,
+ positions=tuple(resp.positions),
+ channels=tuple(resp.channels),
+ )
- # --------------- Helper methods ---------------
+ async def reachable(self) -> EnablementResultStruct | None:
+ """Remaining freedom of movement per joint/axis before hitting limits.
+
+ Category: Query
- async def get_pose_rpy(self) -> list[float] | None:
+ Example:
+ en = rbt.reachable()
"""
- Get robot pose as [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg] using RPY order='xyz'.
+ resp = await self._request(ReachableCmd())
+ return resp if isinstance(resp, EnablementResultStruct) else None
+
+ async def error(self) -> RobotError | None:
+ """Current error state, or None if no error.
+
+ Category: Query
+
+ Example:
+ err = rbt.error()
"""
- pose_matrix = await self.get_pose()
- if not pose_matrix or len(pose_matrix) != 16:
+ resp = await self._request(ErrorCmd())
+ if not isinstance(resp, ErrorResultStruct) or resp.error is None:
return None
+ return RobotError.from_wire(resp.error)
- try:
- x, y, z = pose_matrix[3], pose_matrix[7], pose_matrix[11]
- # Rotation matrix rows (row-major layout)
- R = np.array(
- [
- [pose_matrix[0], pose_matrix[1], pose_matrix[2]],
- [pose_matrix[4], pose_matrix[5], pose_matrix[6]],
- [pose_matrix[8], pose_matrix[9], pose_matrix[10]],
- ]
- )
- # Use xyz convention (rx, ry, rz) - Roll-Pitch-Yaw
- rpy_deg = SO3(R).rpy(order="xyz", unit="deg")
- return [x, y, z, rpy_deg[0], rpy_deg[1], rpy_deg[2]]
- except (ValueError, IndexError, ImportError):
- return None
+ async def tcp_speed(self) -> float | None:
+ """TCP linear velocity in mm/s.
+
+ Category: Query
- async def get_pose_xyz(self) -> list[float] | None:
- """Get robot position as [x, y, z] in mm."""
- pose_rpy = await self.get_pose_rpy()
- return pose_rpy[:3] if pose_rpy else None
+ Example:
+ speed = rbt.tcp_speed()
+ """
+ resp = await self._request(TcpSpeedCmd())
+ return resp.speed if isinstance(resp, TcpSpeedResultStruct) else None
+
+ # --------------- Helper methods ---------------
async def is_estop_pressed(self) -> bool:
- """Check if E-stop is pressed. Returns True if pressed."""
- io_status = await self.get_io()
+ """Check if E-stop is pressed. Returns True if pressed.
+
+ Category: Query
+
+ Example:
+ pressed = rbt.is_estop_pressed()
+ """
+ io_status = await self.io()
if io_status and len(io_status) >= 5:
return io_status[4] == 0 # E-stop at index 4, 0 means pressed
return False
async def is_robot_stopped(self, threshold_speed: float = 2.0) -> bool:
- """
- Check if robot has stopped moving.
+ """Check if robot has stopped moving.
+
+ Category: Query
+
+ Prefer ``wait_command()`` for waiting on specific commands.
+ This method polls raw joint speeds and is mainly useful for
+ diagnostics or manual stopping logic.
+
+ Example:
+ stopped = rbt.is_robot_stopped()
Args:
threshold_speed: Speed threshold in steps/sec
@@ -551,802 +1101,657 @@ async def is_robot_stopped(self, threshold_speed: float = 2.0) -> bool:
Returns:
True if all joints below threshold
"""
- speeds = await self.get_speeds()
+ speeds = await self.joint_speeds()
if not speeds:
return False
return max(abs(s) for s in speeds) < threshold_speed
- async def wait_until_stopped(
+ async def wait_motion(
self,
- timeout: float = 90.0,
- settle_window: float = 1.0,
- speed_threshold: float = 2.0,
+ timeout: float = 10.0,
+ settle_window: float = 0.25,
+ speed_threshold: float = 0.01,
angle_threshold: float = 0.5,
+ motion_start_timeout: float = 1.0,
+ **kwargs: Any,
) -> bool:
- """
- Wait for robot to stop moving using multicast status broadcasts.
+ """Wait for robot to stop moving using multicast status broadcasts.
+
+ This method first waits for motion to START (speeds above threshold),
+ then waits for motion to COMPLETE (speeds below threshold for settle_window).
+ This avoids a race condition where the method returns immediately if
+ called before motion has begun.
+
+ Category: Synchronization
+
+ Example:
+ rbt.wait_motion()
Args:
timeout: Maximum time to wait in seconds
settle_window: How long robot must be stable to be considered stopped
- speed_threshold: Max joint speed to be considered stopped (steps/sec)
+ speed_threshold: Max joint speed to be considered stopped (rad/s)
angle_threshold: Max angle change to be considered stopped (degrees)
+ motion_start_timeout: Max time to wait for motion to start (seconds)
Returns:
True if robot stopped, False if timeout
"""
await self._ensure_endpoint()
- last_angles = None
- settle_start = None
- timeout_task = asyncio.create_task(asyncio.sleep(timeout))
+ last_angles: np.ndarray | None = None
+ settle_start: float | None = None
+ motion_started = False
+ start_time = time.monotonic()
try:
- async for status in self.status_stream():
- if timeout_task.done():
- return False
-
- # Check speeds from status
- speeds = status.get("speeds")
- if speeds and isinstance(speeds, Iterable):
- if max(abs(s) for s in speeds) < speed_threshold:
- if settle_start is None:
- settle_start = time.time()
- elif time.time() - settle_start > settle_window:
- return True
- else:
- settle_start = None
+ async with asyncio.timeout(timeout):
+ async for status in self.stream_status_shared():
+ speeds = status.speeds
+ angles = status.angles
- # Also check angles as fallback
- angles = status.get("angles")
- if angles and not speeds:
+ max_speed = float(np.abs(speeds).max())
+
+ max_angle_change = 0.0
if last_angles is not None:
- max_change = max(
- abs(a - b)
- for a, b in zip(angles, last_angles, strict=False)
- )
- if max_change < angle_threshold:
+ max_angle_change = float(np.abs(angles - last_angles).max())
+ last_angles[:] = angles
+ else:
+ last_angles = angles.copy()
+
+ now = time.monotonic()
+
+ # Phase 1: Wait for motion to start
+ if not motion_started:
+ if (
+ max_speed >= speed_threshold
+ or max_angle_change >= angle_threshold
+ ):
+ motion_started = True
+ settle_start = None
+ elif now - start_time > motion_start_timeout:
+ motion_started = True
+
+ # Phase 2: Wait for motion to complete
+ if motion_started:
+ if (
+ max_speed < speed_threshold
+ and max_angle_change < angle_threshold
+ ):
if settle_start is None:
- settle_start = time.time()
- elif time.time() - settle_start > settle_window:
+ settle_start = now
+ elif now - settle_start > settle_window:
return True
else:
settle_start = None
- last_angles = angles
- finally:
- timeout_task.cancel()
- try:
- await timeout_task
- except asyncio.CancelledError:
- pass
+ except TimeoutError:
+ return False
return False
# --------------- Additional waits and utilities ---------------
- async def wait_for_server_ready(
- self, timeout: float = 5.0, interval: float = 0.05
- ) -> bool:
- """Poll ping() until server responds or timeout."""
- end_time = time.time() + timeout
- while time.time() < end_time:
- ok = await self.ping()
- if ok:
+ async def wait_ready(self, timeout: float = 5.0, interval: float = 0.05) -> bool:
+ """Poll ping() until server responds or timeout.
+
+ Args:
+ timeout: Maximum time to wait for server to respond
+ interval: Polling interval between ping attempts
+
+ Returns:
+ True if server responded to PING, False on timeout
+ """
+ end_time = time.monotonic() + timeout
+ while time.monotonic() < end_time:
+ result = await self.ping()
+ if result:
return True
await asyncio.sleep(interval)
return False
- async def wait_for_status(
- self, predicate: Callable[[StatusAggregate], bool], timeout: float = 5.0
+ async def wait_status(
+ self, predicate: Callable[[StatusBuffer], bool], timeout: float = 5.0
) -> bool:
"""Wait until a multicast status satisfies predicate(status) within timeout."""
await self._ensure_endpoint()
- end_time = time.time() + timeout
- while time.time() < end_time:
- remaining = max(0.0, end_time - time.time())
+ last_gen = 0
+ end_time = time.monotonic() + timeout
+
+ while time.monotonic() < end_time and not self._closed:
+ self._status_event.clear()
+
+ # Check if we already have new data
+ if self._status_generation != last_gen:
+ last_gen = self._status_generation
+ try:
+ if predicate(self._shared_status):
+ return True
+ except Exception:
+ logger.debug("Status predicate raised", exc_info=True)
+ continue
+
+ # Wait for next update with timeout
+ remaining = max(0.0, end_time - time.monotonic())
+ if remaining <= 0:
+ return False
try:
- status = await asyncio.wait_for(
- self._status_queue.get(), timeout=remaining
+ await asyncio.wait_for(
+ self._status_event.wait(),
+ timeout=min(remaining, 0.5),
)
- except (asyncio.TimeoutError, TimeoutError):
- break
- try:
- if predicate(status):
- return True
- except Exception:
- # Ignore predicate exceptions from tests
- pass
+ except asyncio.TimeoutError:
+ continue
+
+ if self._closed:
+ return False
+ if self._status_generation != last_gen:
+ last_gen = self._status_generation
+ try:
+ if predicate(self._shared_status):
+ return True
+ except Exception:
+ logger.debug("Status predicate raised", exc_info=True)
return False
- async def send_raw(
- self, message: str, await_reply: bool = False, timeout: float = 2.0
- ) -> bool | str | None:
- """Send a raw UDP message; optionally await a single reply."""
- await self._ensure_endpoint()
- assert self._transport is not None
- try:
- if not await_reply:
- self._transport.sendto(message.encode("ascii"))
+ async def wait_command(self, command_index: int, timeout: float = 10.0) -> bool:
+ """Wait until a specific command index has been completed.
+
+ Uses status broadcasts to monitor the server's completed_command_index.
+ Raises MotionError if the pipeline reports a planning/execution failure
+ at or before the awaited command index.
+
+ Args:
+ command_index: The command index to wait for (returned by motion commands).
+ timeout: Maximum time to wait in seconds.
+
+ Returns:
+ True if the command completed within timeout, False otherwise.
+
+ Raises:
+ MotionError: If the pipeline errored at or before command_index.
+ """
+
+ def _done(s: StatusBuffer) -> bool:
+ if s.completed_index >= command_index:
return True
- async with self._req_lock:
- self._transport.sendto(message.encode("ascii"))
- data, _addr = await asyncio.wait_for(
- self._rx_queue.get(), timeout=timeout
- )
- return data.decode("ascii", errors="ignore")
- except (asyncio.TimeoutError, TimeoutError):
- return None
- except Exception:
- return None
+ if s.error is not None and s.error.command_index <= command_index:
+ return True
+ return False
- # --------------- Motion encoders ---------------
+ ok = await self.wait_status(_done, timeout=timeout)
+ if ok:
+ s = self._shared_status
+ if s.error is not None and s.error.command_index <= command_index:
+ raise MotionError(s.error)
+ return ok
- async def move_joints(
- self,
- joint_angles: list[float],
- duration: float | None = None,
- speed_percentage: int | None = None,
- accel_percentage: int | None = None, # accepted but not sent
- profile: str | None = None, # accepted but not sent
- tracking: str | None = None, # accepted but not sent
- ) -> bool:
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "You must provide either a duration or a speed_percentage."
- )
- message = wire.encode_move_joint(joint_angles, duration, speed_percentage)
- return await self._send(message)
+ # --------------- Move commands (queued, pre-computed trajectory) ---------------
- async def move_pose(
+ async def move_j(
self,
- pose: list[float],
- duration: float | None = None,
- speed_percentage: int | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "You must provide either a duration or a speed_percentage."
+ angles: list[float] | None = None,
+ *,
+ pose: list[float] | None = None,
+ duration: float = 0.0,
+ speed: float = 0.0,
+ accel: float = 1.0,
+ r: float = 0.0,
+ rel: bool = False,
+ wait: bool = False,
+ timeout: float = 10.0,
+ **wait_kwargs: Any,
+ ) -> int:
+ """Joint-space move. Positional arg = joint angles; pose= kwarg = Cartesian target with IK.
+
+ Returns the command index (≥ 0) on success, -1 on failure.
+
+ Category: Motion
+
+ Example:
+ rbt.move_j([90, -90, 180, 0, 0, 180], speed=0.5)
+
+ Args:
+ angles: 6 joint angles in degrees (ignored if pose= is set)
+ pose: If set, Cartesian target [x,y,z,rx,ry,rz] — dispatches to MOVEJ_POSE
+ duration: Motion duration in seconds (mutually exclusive with speed)
+ speed: Speed fraction 0-1 (mutually exclusive with duration)
+ accel: Acceleration fraction 0-1
+ r: Blend radius in mm (0 = stop at target)
+ rel: If True, angles are relative to current position
+ wait: If True, block until motion completes
+ """
+ if pose is not None:
+ index = await self._send(
+ MoveJPoseCmd(
+ pose=pose, duration=duration, speed=speed, accel=accel, r=r
+ )
+ )
+ else:
+ index = await self._send(
+ MoveJCmd(
+ angles=angles or [],
+ duration=duration,
+ speed=speed,
+ accel=accel,
+ r=r,
+ rel=rel,
+ )
)
- message = wire.encode_move_pose(pose, duration, speed_percentage)
- return await self._send(message)
+ if wait and index >= 0:
+ await self.wait_command(index, timeout=timeout)
+ return index
- async def move_cartesian(
+ async def move_l(
self,
pose: list[float],
- duration: float | None = None,
- speed_percentage: float | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "Error: You must provide either a duration or a speed_percentage."
- )
- message = wire.encode_move_cartesian(pose, duration, speed_percentage)
- return await self._send(message)
+ *,
+ frame: Frame = "WRF",
+ duration: float = 0.0,
+ speed: float = 0.0,
+ accel: float = 1.0,
+ r: float = 0.0,
+ rel: bool = False,
+ wait: bool = False,
+ timeout: float = 10.0,
+ **wait_kwargs: Any,
+ ) -> int:
+ """Linear Cartesian move to target pose.
+
+ Returns the command index (≥ 0) on success, -1 on failure.
+
+ Category: Motion
- async def move_cartesian_rel_trf(
- self,
- deltas: list[float], # [dx, dy, dz, rx, ry, rz]
- duration: float | None = None,
- speed_percentage: float | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- """
- Send a MOVECARTRELTRF (relative straight-line in TRF) command.
- Provide either duration or speed_percentage (1..100).
- """
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "Error: You must provide either a duration or a speed_percentage."
- )
- message = wire.encode_move_cartesian_rel_trf(
- deltas, duration, speed_percentage, accel_percentage, profile, tracking
+ Example:
+ rbt.move_l([0, 263, 242, 90, 0, 90], speed=0.5)
+
+ Args:
+ pose: Target [x,y,z,rx,ry,rz] in mm and degrees
+ frame: Reference frame ("WRF" or "TRF")
+ duration: Motion duration in seconds
+ speed: Speed fraction 0-1
+ accel: Acceleration fraction 0-1
+ r: Blend radius in mm
+ rel: If True, pose is relative delta
+ wait: If True, block until motion completes
+ """
+ cmd = MoveLCmd(
+ pose=pose,
+ frame=frame,
+ duration=duration,
+ speed=speed,
+ accel=accel,
+ r=r,
+ rel=rel,
)
- return await self._send(message)
+ index = await self._send(cmd)
+ if wait and index >= 0:
+ await self.wait_command(index, timeout=timeout)
+ return index
- async def jog_joint(
+ async def move_c(
self,
- joint_index: int,
- speed_percentage: int,
+ via: list[float],
+ end: list[float],
+ *,
+ frame: Frame = "WRF",
duration: float | None = None,
- distance_deg: float | None = None,
- ) -> bool:
- """
- Send a JOG command for a single joint (0..5 positive, 6..11 negative for reverse).
- duration and distance_deg are optional; at least one should be provided for one-shot jog.
- """
- if duration is None and distance_deg is None:
- raise RuntimeError(
- "Error: You must provide either a duration or a distance_deg."
- )
- message = wire.encode_jog_joint(
- joint_index, speed_percentage, duration, distance_deg
+ speed: float | None = None,
+ accel: float = 1.0,
+ r: float = 0.0,
+ wait: bool = False,
+ timeout: float = 10.0,
+ **wait_kwargs: Any,
+ ) -> int:
+ """Circular arc through current position -> via -> end.
+
+ Returns the command index (≥ 0) on success, -1 on failure.
+
+ Category: Motion
+
+ Example:
+ rbt.move_c(via=[0, 250, 250, 90, 0, 90], end=[0, 263, 242, 90, 0, 90], speed=0.5)
+
+ Args:
+ via: Via-point pose [x,y,z,rx,ry,rz]
+ end: End-point pose [x,y,z,rx,ry,rz]
+ frame: Reference frame
+ duration: Motion duration in seconds
+ speed: Speed fraction 0-1
+ accel: Acceleration fraction 0-1
+ r: Blend radius in mm
+ wait: If True, block until motion completes
+ """
+ cmd = MoveCCmd(
+ via=via,
+ end=end,
+ frame=frame,
+ duration=duration,
+ speed=speed,
+ accel=accel,
+ r=r,
)
- return await self._send(message)
+ index = await self._send(cmd)
+ if wait and index >= 0:
+ await self.wait_command(index, timeout=timeout)
+ return index
- async def jog_cartesian(
+ async def move_s(
self,
- frame: Frame,
- axis: Axis,
- speed_percentage: int,
- duration: float,
- ) -> bool:
- """
- Send a CARTJOG command (frame 'TRF' or 'WRF', axis in {X+/X-/Y+/.../RZ-}).
- """
- message = wire.encode_cart_jog(frame, axis, speed_percentage, duration)
- return await self._send(message)
+ waypoints: list[list[float]],
+ *,
+ frame: Frame = "WRF",
+ duration: float | None = None,
+ speed: float | None = None,
+ accel: float = 1.0,
+ wait: bool = False,
+ timeout: float = 10.0,
+ **wait_kwargs: Any,
+ ) -> int:
+ """Cubic spline through waypoints.
- async def jog_multiple(
- self,
- joints: list[int],
- speeds: list[float],
- duration: float,
- ) -> bool:
- """
- Send a MULTIJOG command to jog multiple joints simultaneously for 'duration' seconds.
- """
- if len(joints) != len(speeds):
- raise ValueError(
- "Error: The number of joints must match the number of speeds."
- )
- joints_str = ",".join(str(j) for j in joints)
- speeds_str = ",".join(str(s) for s in speeds)
- message = f"MULTIJOG|{joints_str}|{speeds_str}|{duration}"
- return await self._send(message)
+ Returns the command index (≥ 0) on success, -1 on failure.
- async def set_io(self, index: int, value: int) -> bool:
- """
- Set digital I/O bit.
- index: 0..7, value: 0 or 1
- """
- if index < 0 or index > 7:
- raise ValueError("I/O index must be 0..7")
- if value not in (0, 1):
- raise ValueError("I/O value must be 0 or 1")
- return await self._send(f"SET_IO|{index}|{value}")
+ Category: Motion
- async def delay(self, seconds: float) -> bool:
- """
- Insert a non-blocking delay in the motion queue.
- """
- if seconds <= 0:
- raise ValueError("Delay must be positive")
- return await self._send(f"DELAY|{seconds}")
+ Example:
+ rbt.move_s([[0, 263, 242, 90, 0, 90], [50, 250, 200, 90, 0, 90]], speed=0.5)
- # --------------- IO / Gripper ---------------
+ Args:
+ waypoints: List of poses [[x,y,z,rx,ry,rz], ...]
+ frame: Reference frame
+ duration: Motion duration in seconds
+ speed: Speed fraction 0-1
+ accel: Acceleration fraction 0-1
+ wait: If True, block until motion completes
+ """
+ cmd = MoveSCmd(
+ waypoints=waypoints,
+ frame=frame,
+ duration=duration,
+ speed=speed,
+ accel=accel,
+ )
+ index = await self._send(cmd)
+ if wait and index >= 0:
+ await self.wait_command(index, timeout=timeout)
+ return index
- async def control_pneumatic_gripper(self, action: str, port: int) -> bool:
- """
- Control pneumatic gripper via digital outputs.
- action: 'open' or 'close'
- port: 1 or 2
- """
- action = action.lower()
- if action not in ("open", "close"):
- raise ValueError("Invalid pneumatic action")
- if port not in (1, 2):
- raise ValueError("Invalid pneumatic port")
- message = f"PNEUMATICGRIPPER|{action}|{port}"
- return await self._send(message)
-
- async def control_electric_gripper(
+ async def move_p(
self,
- action: str,
- position: int | None = 255,
- speed: int | None = 150,
- current: int | None = 500,
- ) -> bool:
- """
- Control electric gripper.
- action: 'move' or 'calibrate'
- position: 0..255
- speed: 0..255
- current: 100..1000 (mA)
- """
- action = action.lower()
- if action not in ("move", "calibrate"):
- raise ValueError("Invalid electric gripper action")
- pos = 0 if position is None else int(position)
- spd = 0 if speed is None else int(speed)
- cur = 100 if current is None else int(current)
- message = f"ELECTRICGRIPPER|{action}|{pos}|{spd}|{cur}"
- return await self._send(message)
-
- # --------------- GCODE ---------------
-
- async def execute_gcode(self, gcode_line: str) -> bool:
- """
- Execute a single GCODE line.
- """
- message = wire.encode_gcode(gcode_line)
- return await self._send(message)
+ waypoints: list[list[float]],
+ *,
+ frame: Frame = "WRF",
+ duration: float | None = None,
+ speed: float | None = None,
+ accel: float = 1.0,
+ wait: bool = False,
+ timeout: float = 10.0,
+ **wait_kwargs: Any,
+ ) -> int:
+ """Process move — constant TCP speed with auto-blending at corners.
- async def execute_gcode_program(self, program_lines: list[str]) -> bool:
- """
- Execute a GCODE program from a list of lines.
- """
- for i, line in enumerate(program_lines):
- if "|" in line:
- raise SyntaxError(f"Line {i + 1} contains invalid '|'")
- message = wire.encode_gcode_program_inline(program_lines)
- return await self._send(message)
+ Returns the command index (≥ 0) on success, -1 on failure.
- async def load_gcode_file(self, filepath: str) -> bool:
- """
- Load and execute a GCODE program from a file.
- """
- message = f"GCODE_PROGRAM|FILE|{filepath}"
- return await self._send(message)
+ Category: Motion
- async def get_gcode_status(self) -> dict | None:
- """
- Get the current status of the GCODE interpreter.
- """
- resp = await self._request("GET_GCODE_STATUS", bufsize=2048)
- if not resp or not resp.startswith("GCODE_STATUS|"):
- return None
+ Example:
+ rbt.move_p([[0, 263, 242, 90, 0, 90], [50, 250, 200, 90, 0, 90]], speed=0.5)
- status_json = resp.split("|", 1)[1]
- return json.loads(status_json)
+ Args:
+ waypoints: List of poses [[x,y,z,rx,ry,rz], ...]
+ frame: Reference frame
+ duration: Motion duration in seconds
+ speed: Speed fraction 0-1
+ accel: Acceleration fraction 0-1
+ wait: If True, block until motion completes
+ """
+ cmd = MovePCmd(
+ waypoints=waypoints,
+ frame=frame,
+ duration=duration,
+ speed=speed,
+ accel=accel,
+ )
+ index = await self._send(cmd)
+ if wait and index >= 0:
+ await self.wait_command(index, timeout=timeout)
+ return index
- async def pause_gcode_program(self) -> bool:
- """Pause the currently running GCODE program."""
- return await self._send("GCODE_PAUSE")
+ async def checkpoint(self, label: str) -> int:
+ """Insert a checkpoint marker in the motion queue.
- async def resume_gcode_program(self) -> bool:
- """Resume a paused GCODE program."""
- return await self._send("GCODE_RESUME")
+ Returns the command index (≥ 0) on success, -1 on failure.
- async def stop_gcode_program(self) -> bool:
- """Stop the currently running GCODE program."""
- return await self._send("GCODE_STOP")
+ Category: Synchronization
- # --------------- Smooth motion ---------------
+ Example:
+ rbt.checkpoint("pick_done")
- async def smooth_circle(
- self,
- center: list[float],
- radius: float,
- plane: Literal["XY", "XZ", "YZ"] = "XY",
- frame: Literal["WRF", "TRF"] = "WRF",
- center_mode: Literal["ABSOLUTE", "TOOL", "RELATIVE"] = "ABSOLUTE",
- entry_mode: Literal["AUTO", "TANGENT", "DIRECT", "NONE"] = "NONE",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- clockwise: bool = False,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- ) -> bool:
+ Args:
+ label: Checkpoint label for progress tracking
"""
- Execute a smooth circular motion.
+ index = await self._send(CheckpointCmd(label=label))
+ return index
+
+ async def wait_checkpoint(self, label: str, timeout: float = 30.0) -> bool:
+ """Wait until a checkpoint with the given label has been reached.
+
+ Category: Synchronization
+
+ Example:
+ rbt.wait_checkpoint("pick_done")
Args:
- center: [x, y, z] center point in mm
- radius: Circle radius in mm
- plane: Plane of the circle ('XY', 'XZ', or 'YZ')
- frame: Reference frame ('WRF' for World, 'TRF' for Tool)
- center_mode: How to interpret center point
- entry_mode: How to approach circle if not on perimeter
- start_pose: Optional [x, y, z, rx, ry, rz] start pose
- duration: Time to complete the circle in seconds
- speed_percentage: Speed as percentage (1-100)
- clockwise: Direction of motion
- trajectory_type: Type of trajectory
- jerk_limit: Optional jerk limit for s_curve trajectory
+ label: Checkpoint label to wait for
+ timeout: Maximum wait time in seconds
"""
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "Error: You must provide either duration or speed_percentage."
- )
- center_str = ",".join(map(str, center))
- start_str = ",".join(map(str, start_pose)) if start_pose else "CURRENT"
- clockwise_str = "1" if clockwise else "0"
- timing_str = (
- f"DURATION|{duration}"
- if duration is not None
- else f"SPEED|{speed_percentage}"
- )
- traj_params = f"|{trajectory_type}"
- if trajectory_type == "s_curve" and jerk_limit is not None:
- traj_params += f"|{jerk_limit}"
- elif trajectory_type != "cubic":
- traj_params += "|DEFAULT"
- mode_params = f"|{center_mode}|{entry_mode}"
- command = (
- f"SMOOTH_CIRCLE|{center_str}|{radius}|{plane}|{frame}|{start_str}|"
- f"{timing_str}|{clockwise_str}{traj_params}{mode_params}"
+ return await self.wait_status(
+ lambda s: s.last_checkpoint == label,
+ timeout=timeout,
)
- return await self._send(command)
- async def smooth_arc_center(
+ # --------------- Servo commands (streaming position) ---------------
+
+ async def servo_j(
self,
- end_pose: list[float],
- center: list[float],
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- clockwise: bool = False,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- ) -> bool:
- """
- Execute a smooth arc motion defined by center point.
+ angles: list[float],
+ *,
+ pose: list[float] | None = None,
+ speed: float = 1.0,
+ accel: float = 1.0,
+ ) -> int:
+ """Streaming joint position target. Fire-and-forget.
+
+ Category: Streaming
+
+ Example:
+ rbt.servo_j([90, -90, 180, 0, 0, 180])
Args:
- end_pose: [x, y, z, rx, ry, rz] end pose (mm and degrees)
- center: [x, y, z] arc center point in mm
- frame: Reference frame ('WRF' for World, 'TRF' for Tool)
- start_pose: Optional [x, y, z, rx, ry, rz] start pose
- duration: Time to complete the arc in seconds
- speed_percentage: Speed as percentage (1-100)
- clockwise: Direction of motion
- trajectory_type: Type of trajectory
- jerk_limit: Optional jerk limit for s_curve trajectory
+ angles: 6 joint angles in degrees (ignored if pose= is set)
+ pose: If set, Cartesian target — dispatches to SERVOJ_POSE
+ speed: Speed fraction 0-1
+ accel: Acceleration fraction 0-1
"""
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "Error: You must provide either a duration or a speed_percentage."
- )
- end_str = ",".join(map(str, end_pose))
- center_str = ",".join(map(str, center))
- start_str = ",".join(map(str, start_pose)) if start_pose else "CURRENT"
- clockwise_str = "1" if clockwise else "0"
- timing_str = (
- f"DURATION|{duration}"
- if duration is not None
- else f"SPEED|{speed_percentage}"
- )
- traj_params = f"|{trajectory_type}"
- if trajectory_type == "s_curve" and jerk_limit is not None:
- traj_params += f"|{jerk_limit}"
- elif trajectory_type != "cubic":
- traj_params += "|DEFAULT"
- command = (
- f"SMOOTH_ARC_CENTER|{end_str}|{center_str}|{frame}|{start_str}|"
- f"{timing_str}|{clockwise_str}{traj_params}"
- )
- return await self._send(command)
+ if pose is not None:
+ return await self._send(ServoJPoseCmd(pose=pose, speed=speed, accel=accel))
+ return await self._send(ServoJCmd(angles=angles, speed=speed, accel=accel))
- async def smooth_arc_param(
+ async def servo_l(
self,
- end_pose: list[float],
- radius: float,
- arc_angle: float,
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- clockwise: bool = False,
- ) -> bool:
- """
- Execute a smooth arc motion defined parametrically (radius and angle).
- """
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "You must provide either a duration or a speed_percentage."
- )
- end_str = ",".join(map(str, end_pose))
- start_str = ",".join(map(str, start_pose)) if start_pose else "CURRENT"
- timing_str = (
- f"DURATION|{duration}"
- if duration is not None
- else f"SPEED|{speed_percentage}"
- )
- parts = [
- "SMOOTH_ARC_PARAM",
- end_str,
- str(radius),
- str(arc_angle),
- frame,
- start_str,
- timing_str,
- trajectory_type,
- ]
- if trajectory_type == "s_curve" and jerk_limit is not None:
- parts.append(str(jerk_limit))
- if clockwise:
- parts.append("CW")
- return await self._send("|".join(parts))
-
- async def smooth_spline(
- self,
- waypoints: list[list[float]],
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- ) -> bool:
- """
- Execute a smooth spline motion through waypoints.
+ pose: list[float],
+ *,
+ speed: float = 1.0,
+ accel: float = 1.0,
+ ) -> int:
+ """Streaming linear Cartesian position target. Fire-and-forget.
+
+ Category: Streaming
+
+ Example:
+ rbt.servo_l([0, 263, 242, 90, 0, 90])
Args:
- waypoints: List of [x, y, z, rx, ry, rz] poses (mm and degrees)
- frame: Reference frame ('WRF' for World, 'TRF' for Tool)
- start_pose: Optional [x, y, z, rx, ry, rz] start pose
- duration: Total time for the motion in seconds
- speed_percentage: Speed as percentage (1-100)
- trajectory_type: Type of trajectory
- jerk_limit: Optional jerk limit for s_curve trajectory
- wait_for_ack: Enable command tracking
- timeout: Timeout for acknowledgment
- non_blocking: Return immediately with command ID
+ pose: Target [x,y,z,rx,ry,rz] in mm and degrees
+ speed: Speed fraction 0-1
+ accel: Acceleration fraction 0-1
"""
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "Error: You must provide either duration or speed_percentage."
- )
- num_waypoints = len(waypoints)
- start_str = ",".join(map(str, start_pose)) if start_pose else "CURRENT"
- timing_str = (
- f"DURATION|{duration}"
- if duration is not None
- else f"SPEED|{speed_percentage}"
- )
- waypoint_strs: list[str] = []
- for wp in waypoints:
- waypoint_strs.extend(map(str, wp))
- parts = [
- "SMOOTH_SPLINE",
- str(num_waypoints),
- frame,
- start_str,
- timing_str,
- trajectory_type,
- ]
- if trajectory_type == "s_curve" and jerk_limit is not None:
- parts.append(str(jerk_limit))
- elif trajectory_type == "s_curve":
- parts.append("DEFAULT")
- parts.extend(waypoint_strs)
- return await self._send("|".join(parts))
-
- async def smooth_helix(
+ return await self._send(ServoLCmd(pose=pose, speed=speed, accel=accel))
+
+ # --------------- Jog commands (streaming velocity) ---------------
+
+ async def jog_j(
self,
- center: list[float],
- radius: float,
- pitch: float,
- height: float,
- frame: Literal["WRF", "TRF"] = "WRF",
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- clockwise: bool = False,
- ) -> bool:
- """
- Execute a smooth helical motion.
+ joint: int = -1,
+ speed: float = 0.0,
+ duration: float = 0.1,
+ *,
+ joints: list[int] | None = None,
+ speeds: list[float] | None = None,
+ accel: float = 1.0,
+ ) -> int:
+ """Joint velocity jog. Single-joint or multi-joint.
+
+ Single joint: jog_j(0, 0.5, 1.0)
+ Multi joint: jog_j(joints=[0,1], speeds=[0.5, -0.3], duration=1.0)
+
+ Category: Jog
+
+ Example:
+ rbt.jog_j(0, speed=0.5, duration=1.0)
Args:
- center: [x, y, z] helix center point in mm
- radius: Helix radius in mm
- pitch: Vertical distance per revolution in mm
- height: Total height of helix in mm
- frame: Reference frame ('WRF' for World, 'TRF' for Tool)
- trajectory_type: Type of trajectory
- jerk_limit: Optional jerk limit for s_curve trajectory
- start_pose: Optional [x, y, z, rx, ry, rz] start pose
- duration: Time to complete the helix in seconds
- speed_percentage: Speed as percentage (1-100)
- clockwise: Direction of motion
- """
- if duration is None and speed_percentage is None:
- raise RuntimeError(
- "Error: You must provide either duration or speed_percentage."
- )
- center_str = ",".join(map(str, center))
- start_str = ",".join(map(str, start_pose)) if start_pose else "CURRENT"
- clockwise_str = "1" if clockwise else "0"
- timing_str = (
- f"DURATION|{duration}"
- if duration is not None
- else f"SPEED|{speed_percentage}"
- )
- traj_params = f"|{trajectory_type}"
- if trajectory_type == "s_curve" and jerk_limit is not None:
- traj_params += f"|{jerk_limit}"
- elif trajectory_type != "cubic":
- traj_params += "|DEFAULT"
- command = (
- f"SMOOTH_HELIX|{center_str}|{radius}|{pitch}|{height}|{frame}|{start_str}|"
- f"{timing_str}|{clockwise_str}{traj_params}"
+ joint: Joint index (0-5) for single-joint jog
+ speed: Signed speed fraction for single-joint jog
+ duration: Jog duration in seconds
+ joints: List of joint indices for multi-joint jog
+ speeds: List of signed speed fractions for multi-joint jog
+ accel: Acceleration fraction 0-1
+ """
+ speed_arr = [0.0] * 6
+ if joints is not None and speeds is not None:
+ for j, s in zip(joints, speeds):
+ speed_arr[j] = s
+ elif joint >= 0:
+ speed_arr[joint] = speed
+ else:
+ raise ValueError("jog_j requires either joint= or joints=/speeds=")
+ return await self._send(
+ JogJCmd(speeds=speed_arr, duration=duration, accel=accel)
)
- return await self._send(command)
- async def smooth_blend(
+ async def jog_l(
self,
- segments: list[dict],
- blend_time: float = 0.5,
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- ) -> bool:
- """
- Execute a blended motion through multiple segments.
+ frame: Frame,
+ axis: Axis | None = None,
+ speed: float = 0.0,
+ duration: float = 0.1,
+ *,
+ axes: list[Axis] | None = None,
+ speeds_list: list[float] | None = None,
+ accel: float = 1.0,
+ ) -> int:
+ """Cartesian velocity jog. Single-axis or multi-axis.
+
+ Single axis: jog_l("WRF", "X", 0.5, 1.0)
+ Multi axis: jog_l("WRF", axes=["X","Y"], speeds_list=[0.5, -0.3], duration=1.0)
+
+ Category: Jog
+
+ Example:
+ rbt.jog_l("WRF", "X", speed=0.5, duration=1.0)
Args:
- segments: List of segment dictionaries
- blend_time: Time to blend between segments in seconds
- frame: Reference frame ('WRF' for World, 'TRF' for Tool)
- start_pose: Optional [x, y, z, rx, ry, rz] start pose
- duration: Total time for entire motion
- speed_percentage: Speed as percentage (1-100) for entire motion
- wait_for_ack: Enable command tracking
- timeout: Timeout for acknowledgment
- non_blocking: Return immediately with command ID
- """
- num_segments = len(segments)
- start_str = ",".join(map(str, start_pose)) if start_pose else "CURRENT"
- if duration is None and speed_percentage is None:
- timing_str = "DEFAULT"
- elif duration is not None:
- timing_str = f"DURATION|{duration}"
+ frame: Reference frame ("WRF" or "TRF")
+ axis: Axis name for single-axis jog
+ speed: Signed speed fraction for single-axis jog
+ duration: Jog duration in seconds
+ axes: List of axis names for multi-axis jog
+ speeds_list: List of signed speed fractions for multi-axis jog
+ accel: Acceleration fraction 0-1
+ """
+ vel = [0.0] * 6
+ if axes is not None and speeds_list is not None:
+ for a, s in zip(axes, speeds_list):
+ vel[_AXIS_MAP[a]] = s
+ elif axis is not None:
+ vel[_AXIS_MAP[axis]] = speed
else:
- timing_str = f"SPEED|{speed_percentage}"
-
- segment_strs = []
- for seg in segments:
- seg_type = seg["type"]
- if seg_type == "LINE":
- end_str = ",".join(map(str, seg["end"]))
- seg_str = f"LINE|{end_str}|{seg.get('duration', 2.0)}"
- elif seg_type == "CIRCLE":
- center_str = ",".join(map(str, seg["center"]))
- clockwise_str = "1" if seg.get("clockwise", False) else "0"
- seg_str = f"CIRCLE|{center_str}|{seg['radius']}|{seg['plane']}|{seg.get('duration', 3.0)}|{clockwise_str}"
- elif seg_type == "ARC":
- end_str = ",".join(map(str, seg["end"]))
- center_str = ",".join(map(str, seg["center"]))
- clockwise_str = "1" if seg.get("clockwise", False) else "0"
- seg_str = f"ARC|{end_str}|{center_str}|{seg.get('duration', 2.0)}|{clockwise_str}"
- elif seg_type == "SPLINE":
- waypoints_str = ";".join(
- [",".join(map(str, wp)) for wp in seg["waypoints"]]
- )
- seg_str = f"SPLINE|{len(seg['waypoints'])}|{waypoints_str}|{seg.get('duration', 3.0)}"
- else:
- continue
- segment_strs.append(seg_str)
-
- command = (
- f"SMOOTH_BLEND|{num_segments}|{blend_time}|{frame}|{start_str}|{timing_str}|"
- + "||".join(segment_strs)
+ raise ValueError("jog_l requires either axis= or axes=/speeds_list=")
+ return await self._send(
+ JogLCmd(frame=frame, velocities=vel, duration=duration, accel=accel)
)
- return await self._send(command)
- async def smooth_waypoints(
- self,
- waypoints: list[list[float]],
- blend_radii: Literal["AUTO"] | list[float] = "AUTO",
- blend_mode: Literal["parabolic", "circular", "none"] = "parabolic",
- via_modes: list[str] | None = None,
- max_velocity: float = 100.0,
- max_acceleration: float = 500.0,
- frame: Literal["WRF", "TRF"] = "WRF",
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "quintic",
- duration: float | None = None,
- ) -> bool:
- """
- Execute a waypoint trajectory with blending.
- """
- if not waypoints or any(len(wp) != 6 for wp in waypoints):
- raise ValueError("Waypoints must be a non-empty list of [x,y,z,rx,ry,rz]")
- wp_str = ";".join(",".join(map(str, wp)) for wp in waypoints)
- if blend_radii == "AUTO":
- radii_str = "AUTO"
- else:
- if len(blend_radii) != max(0, len(waypoints) - 2):
- raise ValueError(
- f"Blend radii count must be {max(0, len(waypoints) - 2)}"
- )
- radii_str = ",".join(map(str, blend_radii))
- if via_modes is None:
- via_modes_list: list[str] = ["via"] * len(waypoints)
- else:
- via_modes_list = list(via_modes)
- if len(via_modes_list) != len(waypoints):
- raise ValueError("via_modes length must match waypoints length")
- if any(vm not in ("via", "stop") for vm in via_modes_list):
- raise ValueError("via_modes entries must be 'via' or 'stop'")
- via_str = ",".join(via_modes_list)
- parts = [
- "SMOOTH_WAYPOINTS",
- wp_str,
- radii_str,
- blend_mode,
- via_str,
- str(max_velocity),
- str(max_acceleration),
- frame,
- trajectory_type,
- ]
- if duration is not None:
- parts.append(str(duration))
- return await self._send("|".join(parts))
-
- # --------------- Work coordinate helpers ---------------
-
- async def set_work_coordinate_offset(
- self,
- coordinate_system: str,
- x: float | None = None,
- y: float | None = None,
- z: float | None = None,
- ) -> bool:
- """
- Set work coordinate system offsets (G54-G59).
+ # --------------- IO / Gripper / Utility ---------------
- Args:
- coordinate_system: Work coordinate system to set ('G54' through 'G59')
- x: X axis offset in mm (None to keep current)
- y: Y axis offset in mm (None to keep current)
- z: Z axis offset in mm (None to keep current)
+ async def write_io(self, index: int, value: int) -> int:
+ """Set digital output by logical index (0 = first output pin).
- Returns:
- Success message, command ID, or dict with status details
+ The firmware I/O byte layout is ``[in0, in1, out0, out1, estop, ...]``
+ so logical output index 0 maps to bit position 2.
- Example:
- # Set G54 origin to current position
- await client.set_work_coordinate_offset('G54', x=0, y=0, z=0)
+ Returns the command index (≥ 0) on success, -1 on failure.
- # Offset G55 by 100mm in X
- await client.set_work_coordinate_offset('G55', x=100)
+ Category: I/O
+
+ Example:
+ rbt.write_io(0, 1) # Set first output HIGH
"""
- valid_systems = ["G54", "G55", "G56", "G57", "G58", "G59"]
- if coordinate_system not in valid_systems:
- raise RuntimeError(
- f"Invalid coordinate system: {coordinate_system}. Must be one of {valid_systems}"
- )
+ if index < 0 or index > 1:
+ raise ValueError("Output index must be 0 or 1")
+ if value not in (0, 1):
+ raise ValueError("I/O value must be 0 or 1")
+ # Firmware bit layout: [in0, in1, out0, out1, estop, ...]
+ firmware_index = index + 2
+ result = await self._send(WriteIOCmd(port_index=firmware_index, value=value))
+ return result
- coord_num = int(coordinate_system[1:]) - 53 # G54=1, G55=2, etc.
- offset_params = []
- if x is not None:
- offset_params.append(f"X{x}")
- if y is not None:
- offset_params.append(f"Y{y}")
- if z is not None:
- offset_params.append(f"Z{z}")
-
- # Always select CS first, then apply offset if any
- ok = await self.execute_gcode(coordinate_system)
- if not ok:
- return False
- if offset_params:
- offset_cmd = f"G10 L2 P{coord_num} {' '.join(offset_params)}"
- return await self.execute_gcode(offset_cmd)
- return True
+ async def delay(self, seconds: float) -> int:
+ """Insert a non-blocking delay in the motion queue.
- async def zero_work_coordinates(
- self,
- coordinate_system: str = "G54",
- ) -> bool:
+ Returns the command index (≥ 0) on success, -1 on failure.
+
+ Category: Synchronization
+
+ Example:
+ rbt.delay(1.0)
"""
- Set the current position as zero in the specified work coordinate system.
+ if seconds <= 0:
+ raise ValueError("Delay must be positive")
+ result = await self._send(DelayCmd(seconds=seconds))
+ return result
- Args:
- coordinate_system: Work coordinate system to zero ('G54' through 'G59')
+ async def tool_action(
+ self,
+ tool_key: str,
+ action: str,
+ params: list | None = None,
+ *,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
+ """Send a generic tool action command.
- Returns:
- Success message, command ID, or dict with status details
+ Returns the command index (>= 0) on success, -1 on failure.
+
+ Category: I/O
Example:
- # Set current position as origin in G54
- await client.zero_work_coordinates('G54')
- """
- # This sets the current position as 0,0,0 in the work coordinate system
- return await self.set_work_coordinate_offset(coordinate_system, x=0, y=0, z=0)
+ rbt.tool_action("PNEUMATIC", "open")
+
+ Args:
+ tool_key: Tool registry key (e.g. "PNEUMATIC", "SSG-48", "MSG")
+ action: Action name (e.g. "open", "close", "move", "calibrate")
+ params: Positional parameters (meaning defined by tool config)
+ wait: If True, block until action completes
+ timeout: Maximum time to wait in seconds (only used when wait=True)
+ """
+ cmd = ToolActionCmd(
+ tool_key=tool_key.strip().upper(),
+ action=action.strip().lower(),
+ params=params or [],
+ )
+ result = await self._send(cmd)
+ if wait and result >= 0:
+ await self.wait_command(result, timeout=timeout)
+ return result
diff --git a/parol6/client/dry_run_client.py b/parol6/client/dry_run_client.py
new file mode 100644
index 0000000..b32ec71
--- /dev/null
+++ b/parol6/client/dry_run_client.py
@@ -0,0 +1,555 @@
+"""
+Dry-run client that executes commands through the trajectory planner locally.
+
+Delegates trajectory planning to TrajectoryPlanner (diagnostic=True) —
+the same logic used by the real PlannerWorker subprocess. Jog commands
+are simulated separately since the planner doesn't handle streaming.
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from typing import Any
+
+import numpy as np
+
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from ..commands.base import MotionCommand
+from ..commands.cartesian_commands import (
+ JogLCommand,
+ _CART_ANG_JOG_MAX_RAD,
+ _CART_ANG_JOG_MIN_RAD,
+ _CART_LIN_JOG_MAX_MS,
+ _CART_LIN_JOG_MIN_MS,
+ _linmap_frac,
+)
+from ..commands.basic_commands import JogJCommand
+from ..config import (
+ CONTROL_RATE_HZ,
+ HOME_ANGLES_DEG,
+ deg_to_steps,
+ rad_to_steps,
+ steps_to_rad,
+)
+from ..motion.geometry import joint_path_to_tcp_poses
+from ..utils.ik import solve_ik
+from pinokin import se3_from_rpy, se3_rpy
+import re as _re
+
+import parol6.protocol.wire as _wire
+from ..protocol.wire import (
+ HomeCmd,
+ SelectToolCmd,
+ SetTcpOffsetCmd,
+ TeleportCmd,
+ ToolActionCmd,
+)
+from ..server.command_registry import CommandRegistry
+from ..server.motion_planner import (
+ ErrorSegment,
+ InlineSegment,
+ Segment,
+ TrajectoryPlanner,
+ TrajectorySegment,
+)
+from ..server.state import ControllerState, get_fkine_se3
+from ..utils.error_catalog import RobotError, make_error
+from ..utils.error_codes import ErrorCode
+from parol6.tools import get_registry
+
+
+def _pascal_to_snake(name: str) -> str:
+ """Convert PascalCase to snake_case: MoveJPose → move_j_pose"""
+ s = _re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
+ s = _re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s)
+ return s.lower()
+
+
+# Auto-derive method_name → struct_class from wire module.
+# E.g. MoveJCmd → move_j, IsSimulatorCmd → is_simulator
+_CMD_STRUCTS: dict[str, type] = {}
+for _attr in dir(_wire):
+ if _attr.endswith("Cmd") and isinstance(getattr(_wire, _attr), type):
+ _CMD_STRUCTS[_pascal_to_snake(_attr.removesuffix("Cmd"))] = getattr(
+ _wire, _attr
+ )
+
+_UPPER_FIELDS: frozenset[str] = frozenset({"tool_name", "tool_key", "profile"})
+
+
+def build_cmd(name: str, *args: Any, **kwargs: Any) -> Any:
+ """Build a command struct by method name."""
+ struct_cls = _CMD_STRUCTS.get(name)
+ if struct_cls is None:
+ raise ValueError(f"Unknown command: {name}")
+ struct_fields: tuple[str, ...] = getattr(struct_cls, "__struct_fields__", ())
+ filtered = {}
+ for k, v in kwargs.items():
+ if v is None or k not in struct_fields:
+ continue
+ if k in _UPPER_FIELDS and isinstance(v, str):
+ v = v.upper()
+ filtered[k] = v
+ return struct_cls(*args, **filtered)
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class DryRunResult:
+ """Result from a dry-run motion command."""
+
+ tcp_poses: np.ndarray # (N, 6) [x_m, y_m, z_m, rx_rad, ry_rad, rz_rad]
+ end_joints_rad: np.ndarray # (6,) final joint angles
+ duration: float # trajectory duration in seconds
+ error: RobotError | None = None
+ valid: np.ndarray | None = None # (N,) per-pose bool; None = all valid
+ joint_trajectory_rad: np.ndarray | None = None # (N, 6) full joint trajectory
+
+
+def _error_result(error: RobotError) -> DryRunResult:
+ """Build a DryRunResult for an error (empty trajectory)."""
+ return DryRunResult(
+ tcp_poses=np.empty((0, 6)),
+ end_joints_rad=np.empty(6),
+ duration=0.0,
+ error=error,
+ joint_trajectory_rad=None,
+ )
+
+
+def _build_result(radians: np.ndarray, duration: float) -> DryRunResult:
+ """Build a DryRunResult from joint radians (N, 6) and duration.
+
+ Converts joint radians → TCP poses in meters + radians.
+ """
+ tcp_poses = joint_path_to_tcp_poses(radians)
+ tcp_poses[:, :3] /= 1000.0 # mm → m
+ np.deg2rad(tcp_poses[:, 3:], out=tcp_poses[:, 3:]) # deg → rad
+ return DryRunResult(
+ tcp_poses=tcp_poses,
+ end_joints_rad=radians[-1].copy(),
+ duration=duration,
+ joint_trajectory_rad=radians.copy(),
+ )
+
+
+class _DryRunTool:
+ """Tool proxy for dry-run. Routes actions through the planner."""
+
+ def __init__(self, client: DryRunRobotClient) -> None:
+ self._client = client
+
+ def __getattr__(self, name: str) -> Any:
+ def method(*args: Any, **kwargs: Any) -> DryRunResult | None:
+ return self._client.tool_action(
+ self._client._active_tool_key, name, list(args), **kwargs
+ )
+
+ return method
+
+
+class DryRunRobotClient:
+ """Runs commands through the trajectory planner without UDP/serial.
+
+ Trajectory dispatch (including blend buffering and error handling) is
+ delegated to TrajectoryPlanner in diagnostic mode. Jog commands are
+ simulated separately since the planner doesn't handle streaming.
+
+ Most methods are auto-dispatched via __getattr__ using CMD_MAP.
+ Explicit methods exist only for angles/pose (read from state)
+ and delay (no-op).
+ """
+
+ def __init__(
+ self,
+ initial_joints_deg: list[float] | None = None,
+ max_snapshot_points: int = 200,
+ ) -> None:
+ # Reset tool transform — process pool workers persist across
+ # invocations, so a previous run's select_tool() leaves a stale
+ # TCP offset on the module-level robot singleton.
+ PAROL6_ROBOT.apply_tool("NONE")
+
+ self._state = ControllerState()
+ init_deg = np.asarray(
+ initial_joints_deg if initial_joints_deg is not None else HOME_ANGLES_DEG,
+ dtype=np.float64,
+ )
+ deg_to_steps(init_deg, self._state.Position_in)
+
+ self._planner = TrajectoryPlanner(diagnostic=True)
+ self._planner.state.Position_in[:] = self._state.Position_in
+
+ self._registry = CommandRegistry()
+ self._q_rad_buf = np.zeros(6, dtype=np.float64)
+ self._rpy_buf = np.zeros(3, dtype=np.float64)
+ self._max_snapshot_points = max_snapshot_points
+ self._active_tool_key: str = ""
+ self._active_variant_key: str = ""
+ self._tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0)
+ self._tool_proxy = _DryRunTool(self)
+
+ @property
+ def state(self) -> ControllerState:
+ """Access the simulated controller state."""
+ return self._state
+
+ @property
+ def tool(self) -> _DryRunTool:
+ """Tool proxy that routes actions through the planner."""
+ return self._tool_proxy
+
+ def tcp_offset(self) -> list[float]:
+ """Return current TCP offset in mm."""
+ return [
+ self._tcp_offset_m[0] * 1000.0,
+ self._tcp_offset_m[1] * 1000.0,
+ self._tcp_offset_m[2] * 1000.0,
+ ]
+
+ def flush(self) -> list[DryRunResult]:
+ """Flush pending blend buffer. Call after script completion."""
+ segments = self._planner.flush()
+ self._state.Position_in[:] = self._planner.state.Position_in
+ results: list[DryRunResult] = []
+ for seg in segments:
+ r = self._segment_to_result(seg)
+ if r is not None:
+ results.append(r)
+ return results
+
+ def _snap_to_angles(self, angles_deg: list[float]) -> DryRunResult:
+ """Snap to angles instantly (no trajectory) — used by Home and Teleport."""
+ self._planner.flush()
+ deg = np.asarray(angles_deg, dtype=np.float64)
+ deg_to_steps(deg, self._state.Position_in)
+ self._planner.state.Position_in[:] = self._state.Position_in
+ rad = np.radians(deg).reshape(1, -1)
+ return _build_result(rad, duration=0.0)
+
+ def _dispatch(self, params: Any) -> DryRunResult | None:
+ """Route a command struct through the trajectory planner."""
+ if isinstance(params, HomeCmd):
+ return self._snap_to_angles(HOME_ANGLES_DEG)
+ if isinstance(params, TeleportCmd):
+ return self._snap_to_angles(params.angles)
+ if isinstance(params, SelectToolCmd):
+ self._active_tool_key = params.tool_name.strip().upper()
+ self._active_variant_key = params.variant_key
+ self._tcp_offset_m = (0.0, 0.0, 0.0)
+ if isinstance(params, SetTcpOffsetCmd):
+ self._tcp_offset_m = (
+ params.x / 1000.0,
+ params.y / 1000.0,
+ params.z / 1000.0,
+ )
+ self._state._tcp_offset_m = self._tcp_offset_m
+ PAROL6_ROBOT.apply_tool(
+ self._active_tool_key or "NONE",
+ variant_key=self._active_variant_key,
+ tcp_offset_m=self._tcp_offset_m,
+ )
+ # Detect jog/servo commands — planner doesn't handle streaming.
+ # Other non-trajectory MotionCommands (SelectTool, Home) fall through
+ # to the planner which handles them as inline segments.
+ cmd_cls = self._registry.get_command_for_struct(type(params))
+ if cmd_cls is not None and issubclass(cmd_cls, (JogJCommand, JogLCommand)):
+ # Flush blend buffer, sync state, simulate jog
+ self._planner.flush()
+ self._state.Position_in[:] = self._planner.state.Position_in
+ cmd = cmd_cls(params)
+ assert isinstance(cmd, MotionCommand)
+ result = self._simulate_jog(cmd)
+ self._planner.state.Position_in[:] = self._state.Position_in
+ return result
+
+ # Everything else → planner
+ segments = self._planner.process(params)
+ self._state.Position_in[:] = self._planner.state.Position_in
+
+ results: list[DryRunResult] = []
+ for seg in segments:
+ r = self._segment_to_result(seg)
+ if r is not None:
+ results.append(r)
+
+ if not results:
+ return None
+ if len(results) == 1:
+ return results[0]
+ return self._merge_results(results)
+
+ def _segment_to_result(self, seg: Segment) -> DryRunResult | None:
+ """Convert a planner segment to a DryRunResult."""
+ if isinstance(seg, TrajectorySegment):
+ return self._trajectory_segment_to_result(seg)
+ if isinstance(seg, ErrorSegment):
+ return self._error_segment_to_result(seg)
+ if isinstance(seg, InlineSegment) and isinstance(seg.params, ToolActionCmd):
+ return self._tool_action_segment_to_result(seg.params)
+ # Other InlineSegments (SelectTool, Home, etc.) — no visualization
+ return None
+
+ def _trajectory_segment_to_result(self, seg: TrajectorySegment) -> DryRunResult:
+ """Convert a TrajectorySegment to a DryRunResult."""
+ steps = seg.trajectory_steps
+ stride = max(1, len(steps) // self._max_snapshot_points)
+ sampled = steps[::stride]
+ if not np.array_equal(sampled[-1], steps[-1]):
+ sampled = np.vstack([sampled, steps[-1:]])
+
+ radians = np.empty((len(sampled), 6), dtype=np.float64)
+ for i in range(len(sampled)):
+ steps_to_rad(sampled[i], radians[i])
+
+ return _build_result(radians, seg.duration)
+
+ def _error_segment_to_result(self, seg: ErrorSegment) -> DryRunResult:
+ """Convert an ErrorSegment to a DryRunResult with per-pose validity."""
+ if seg.cartesian_path is not None and seg.ik_valid is not None:
+ return DryRunResult(
+ tcp_poses=seg.cartesian_path,
+ end_joints_rad=np.zeros(6, dtype=np.float64),
+ duration=0.0,
+ error=seg.error,
+ valid=seg.ik_valid,
+ joint_trajectory_rad=None,
+ )
+ return _error_result(seg.error)
+
+ def _tool_action_segment_to_result(self, cmd: ToolActionCmd) -> DryRunResult:
+ """Return a single-point DryRunResult at the current TCP pose."""
+ steps_to_rad(self._state.Position_in, self._q_rad_buf)
+ duration = 0.0
+ cfg = get_registry().get(cmd.tool_key.strip().upper())
+ if cfg is not None:
+ duration = cfg.estimate_duration(cmd.action, cmd.params)
+ return _build_result(self._q_rad_buf[np.newaxis], duration)
+
+ def _merge_results(self, results: list[DryRunResult]) -> DryRunResult:
+ """Merge multiple DryRunResults into one (for multi-segment blends)."""
+ non_empty = [r for r in results if r.tcp_poses.shape[0] > 0]
+ first_error = next((r.error for r in results if r.error is not None), None)
+ if not non_empty:
+ if first_error is not None:
+ return _error_result(first_error)
+ return _error_result(make_error(ErrorCode.TRAJ_EMPTY_RESULT, detail=""))
+
+ tcp_all = np.vstack([r.tcp_poses for r in non_empty])
+ total_duration = sum(r.duration for r in results)
+ last = non_empty[-1]
+
+ has_any_valid = any(r.valid is not None for r in non_empty)
+ if has_any_valid:
+ valids = [
+ r.valid
+ if r.valid is not None
+ else np.ones(r.tcp_poses.shape[0], dtype=np.bool_)
+ for r in non_empty
+ ]
+ merged_valid = np.concatenate(valids)
+ else:
+ merged_valid = None
+
+ has_any_joints = any(r.joint_trajectory_rad is not None for r in non_empty)
+ if has_any_joints:
+ joint_parts = [
+ r.joint_trajectory_rad
+ if r.joint_trajectory_rad is not None
+ else np.broadcast_to(
+ r.end_joints_rad[np.newaxis, :],
+ (r.tcp_poses.shape[0], r.end_joints_rad.shape[0]),
+ ).copy()
+ for r in non_empty
+ ]
+ merged_joints = np.vstack(joint_parts)
+ else:
+ merged_joints = None
+
+ return DryRunResult(
+ tcp_poses=tcp_all,
+ end_joints_rad=last.end_joints_rad,
+ duration=total_duration,
+ error=first_error,
+ valid=merged_valid,
+ joint_trajectory_rad=merged_joints,
+ )
+
+ # ---- Jog simulation (planner doesn't handle streaming) ----
+
+ def _simulate_jog(self, cmd: MotionCommand) -> DryRunResult | None:
+ """Simulate jog commands by computing linear displacement."""
+ # Run do_setup so speeds_out / _axis_index / etc. are computed
+ cmd.setup(self._state)
+
+ if isinstance(cmd, JogJCommand):
+ return self._simulate_joint_jog(cmd)
+ if isinstance(cmd, JogLCommand):
+ return self._simulate_cartesian_jog(cmd)
+ return None
+
+ def _simulate_joint_jog(self, cmd: JogJCommand) -> DryRunResult:
+ """Simulate joint jog by computing linear displacement in joint space."""
+ duration = cmd.p.duration
+ n_points = min(
+ self._max_snapshot_points,
+ max(2, int(duration * CONTROL_RATE_HZ)),
+ )
+
+ # Compute total displacement (steps/tick * ticks_in_duration)
+ ticks = duration * CONTROL_RATE_HZ
+ displacements = cmd.speeds_out.astype(np.int64) * int(ticks)
+
+ start_pos = self._state.Position_in.copy()
+ fracs = np.arange(1, n_points + 1, dtype=np.float64) / n_points
+ # trajectory shape (n_points, 6): start + fraction * displacement
+ trajectory = start_pos[np.newaxis, :] + (
+ fracs[:, np.newaxis] * displacements[np.newaxis, :]
+ ).astype(np.int64)
+
+ self._state.Position_in[:] = start_pos + displacements
+
+ radians = np.empty((n_points, 6), dtype=np.float64)
+ for i in range(n_points):
+ steps_to_rad(trajectory[i], radians[i])
+
+ return _build_result(radians, duration)
+
+ def _simulate_cartesian_jog(self, cmd: JogLCommand) -> DryRunResult | None:
+ """Simulate cartesian jog by displacing along a Cartesian axis and solving IK."""
+ duration = cmd.p.duration
+ n_points = min(
+ self._max_snapshot_points,
+ max(2, int(duration * CONTROL_RATE_HZ)),
+ )
+
+ # Current pose via FK
+ current_se3 = get_fkine_se3(self._state)
+ se3_rpy(current_se3, self._rpy_buf)
+ # pose = [x_m, y_m, z_m, rx_rad, ry_rad, rz_rad]
+ pose = np.array(
+ [
+ current_se3[0, 3],
+ current_se3[1, 3],
+ current_se3[2, 3],
+ self._rpy_buf[0],
+ self._rpy_buf[1],
+ self._rpy_buf[2],
+ ],
+ dtype=np.float64,
+ )
+
+ # Compute velocity along dominant axis using same mapping as production
+ vels = cmd.p.velocities
+ speed_mag = abs(vels[cmd._axis_index + (3 if cmd.is_rotation else 0)])
+ if cmd.is_rotation:
+ vel = _linmap_frac(speed_mag, _CART_ANG_JOG_MIN_RAD, _CART_ANG_JOG_MAX_RAD)
+ total_disp = vel * cmd._axis_sign * duration
+ else:
+ vel = _linmap_frac(speed_mag, _CART_LIN_JOG_MIN_MS, _CART_LIN_JOG_MAX_MS)
+ total_disp = vel * cmd._axis_sign * duration
+
+ # Determine which component of the 6-element pose to displace
+ pose_index = (3 + cmd._axis_index) if cmd.is_rotation else cmd._axis_index
+
+ # Get current joint angles for IK seed
+ steps_to_rad(self._state.Position_in, self._q_rad_buf)
+ q_current = self._q_rad_buf.copy()
+ steps_buf = np.zeros_like(self._state.Position_in)
+
+ # Generate trajectory by interpolating and solving IK at each point
+ radians = np.empty((n_points, 6), dtype=np.float64)
+ last_valid_q = q_current.copy()
+ target_se3 = np.zeros((4, 4), dtype=np.float64)
+
+ for i in range(n_points):
+ t = (i + 1) / n_points
+ target_pose = pose.copy()
+ target_pose[pose_index] += total_disp * t
+
+ se3_from_rpy(
+ target_pose[0],
+ target_pose[1],
+ target_pose[2],
+ target_pose[3],
+ target_pose[4],
+ target_pose[5],
+ target_se3,
+ )
+ ik_result = solve_ik(
+ PAROL6_ROBOT.robot, target_se3, last_valid_q, quiet_logging=True
+ )
+ if ik_result.success:
+ last_valid_q = ik_result.q.copy()
+
+ radians[i] = last_valid_q
+
+ # Update state to final position
+ rad_to_steps(last_valid_q, steps_buf)
+ self._state.Position_in[:] = steps_buf
+
+ return _build_result(radians, duration)
+
+ # ---- Explicit methods for state reads ----
+
+ def angles(self) -> list[float]:
+ steps_to_rad(self._state.Position_in, self._q_rad_buf)
+ return np.degrees(self._q_rad_buf).tolist()
+
+ def pose(self) -> list[float]:
+ """Return [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg]."""
+ se3 = get_fkine_se3(self._state)
+ se3_rpy(se3, self._rpy_buf)
+ return [
+ se3[0, 3] * 1000.0,
+ se3[1, 3] * 1000.0,
+ se3[2, 3] * 1000.0,
+ float(np.degrees(self._rpy_buf[0])),
+ float(np.degrees(self._rpy_buf[1])),
+ float(np.degrees(self._rpy_buf[2])),
+ ]
+
+ def move_j(
+ self,
+ angles: list[float] | None = None,
+ *,
+ pose: list[float] | None = None,
+ **kwargs: Any,
+ ) -> DryRunResult | None:
+ if pose is not None:
+ return self._dispatch(build_cmd("move_j_pose", pose, **kwargs))
+ return self._dispatch(build_cmd("move_j", angles or [], **kwargs))
+
+ def servo_j(
+ self,
+ angles: list[float] | None = None,
+ *,
+ pose: list[float] | None = None,
+ **kwargs: Any,
+ ) -> DryRunResult | None:
+ if pose is not None:
+ return self._dispatch(build_cmd("servo_j_pose", pose, **kwargs))
+ return self._dispatch(build_cmd("servo_j", angles or [], **kwargs))
+
+ def delay(self, seconds: float = 0.0) -> None:
+ pass
+
+ def wait_motion(self, **kwargs: Any) -> None:
+ self.flush()
+
+ # ---- Auto-dispatch for everything else ----
+
+ def __getattr__(self, name: str) -> Any:
+ if name.startswith("_"):
+ raise AttributeError(name)
+ if name not in _CMD_STRUCTS:
+ raise AttributeError(f"'{type(self).__name__}' has no attribute '{name}'")
+
+ def method(*args: Any, **kwargs: Any) -> DryRunResult | None:
+ cmd = build_cmd(name, *args, **kwargs)
+ return self._dispatch(cmd)
+
+ return method
diff --git a/parol6/client/manager.py b/parol6/client/manager.py
deleted file mode 100644
index d7ac6e7..0000000
--- a/parol6/client/manager.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Server management for PAROL6 controller.
-
-Provides lifecycle management and automatic spawning of the controller process.
-"""
-
-import asyncio
-import contextlib
-import logging
-import os
-import re
-import socket
-import subprocess
-import sys
-import threading
-import time
-from typing import Tuple
-from dataclasses import dataclass
-from pathlib import Path
-
-# Precompiled regex patterns for server log normalization
-_SIMPLE_FORMAT_RE = re.compile(
- r"^\s*(\d{2}:\d{2}:\d{2})\s+(DEBUG|INFO|WARNING|ERROR|CRITICAL|TRACE)\s+([A-Za-z0-9_.-]+):\s+(.*)$"
-)
-
-
-@dataclass
-class ServerOptions:
- """Options for launching the controller."""
-
- com_port: str | None = None
- no_autohome: bool = True # Set PAROL6_NOAUTOHOME=1 by default
- extra_env: dict | None = None
-
-
-class ServerManager:
- """
- Manages the lifecycle of the PAROL6 controller.
-
- - Writes com_port.txt in the controller working directory to preselect the port.
- - Spawns the controller as a subprocess using sys.executable.
- - Provides stop and liveness checks.
- - Can be used with a custom controller script path or defaults to the package's bundled controller.
- """
-
- def __init__(
- self, controller_path: str | None = None, normalize_logs: bool = False
- ) -> None:
- """
- Initialize the ServerManager.
-
- Args:
- controller_path: Optional path to controller script. If None, uses bundled controller.
- normalize_logs: If True, parse and normalize controller log output to avoid duplicate
- timestamp/level/module info. Set to True when used from web GUI.
- """
- if controller_path:
- self.controller_path = Path(controller_path).resolve()
- if not self.controller_path.exists():
- raise FileNotFoundError(
- f"Controller script not found: {self.controller_path}"
- )
- else:
- # Use the package's bundled commander
- self.controller_path = Path(__file__).parent / "controller.py"
-
- self._proc: subprocess.Popen | None = None
- self._reader_thread: threading.Thread | None = None
- self._stop_reader = threading.Event()
- self.normalize_logs = normalize_logs
-
- @property
- def pid(self) -> int | None:
- return self._proc.pid if self._proc and self._proc.poll() is None else None
-
- def is_running(self) -> bool:
- return self._proc is not None and self._proc.poll() is None
-
- def start_controller(
- self,
- com_port: str | None = None,
- no_autohome: bool = True,
- extra_env: dict | None = None,
- server_host: str | None = None,
- server_port: int | None = None,
- ) -> None:
- """Start the controller if not already running."""
- if self.is_running():
- return
- # Working directory should be the PAROL6-python-API repo root to find dependencies
- # Use a more direct approach to find the package root
- cwd = (
- Path(__file__).resolve().parents[2]
- ) # parol6/server/manager.py -> repo root
-
- env = os.environ.copy()
- # Disable autohome unless explicitly overridden
- if no_autohome:
- env["PAROL6_NOAUTOHOME"] = "1"
- if extra_env:
- env.update(extra_env)
- # Explicitly bind controller (if provided)
- if server_host:
- env["PAROL6_CONTROLLER_IP"] = server_host
- if server_port is not None:
- env["PAROL6_CONTROLLER_PORT"] = str(server_port)
- # Ensure the subprocess can import the local 'parol6' package
- existing_py_path = env.get("PYTHONPATH", "")
- env["PYTHONPATH"] = (
- f"{cwd}{os.pathsep}{existing_py_path}" if existing_py_path else str(cwd)
- )
-
- # Launch the controller as a module to ensure package imports resolve
- args = [sys.executable, "-u", "-m", "parol6.server.controller"]
-
- # Decide controller log level:
- # - If PAROL_TRACE is set in the environment, prefer TRACE for the child
- # - Otherwise, inherit the current root logger level name
- root_logger = logging.getLogger()
- root_level = root_logger.level
-
- parol_trace_flag = str(env.get("PAROL_TRACE", "0")).strip().lower()
- if parol_trace_flag in ("1", "true", "yes", "on"):
- level_name = "TRACE"
- else:
- level_name = logging.getLevelName(root_level)
- # Normalize custom/unnamed levels (e.g. "Level 5")
- if isinstance(level_name, str) and level_name.upper().startswith("LEVEL"):
- if root_level == 5:
- level_name = "TRACE"
- else:
- level_name = "INFO"
-
- args.append(f"--log-level={level_name}")
- if com_port:
- args.append(f"--serial={com_port}")
-
- try:
- self._proc = subprocess.Popen(
- args,
- cwd=str(cwd),
- env=env,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True,
- bufsize=1, # line-buffered
- )
- except Exception as e:
- raise RuntimeError(f"Failed to start controller: {e}") from e
-
- # Start background reader thread to stream server stdout/stderr to logging
- if self._proc and self._proc.stdout is not None:
- self._stop_reader.clear()
- self._reader_thread = threading.Thread(
- target=self._stream_output,
- args=(self._proc,),
- name="ServerOutputReader",
- daemon=True,
- )
- self._reader_thread.start()
-
- def _stream_output(self, proc: subprocess.Popen) -> None:
- """Read controller stdout and forward to logging."""
- try:
- assert proc.stdout is not None
- # Maintain last logger/level for multi-line tracebacks
- last_logger = "parol6.server"
-
- for raw_line in iter(proc.stdout.readline, ""):
- if self._stop_reader.is_set():
- break
- line = raw_line.rstrip("\r\n")
- if not line:
- continue
-
- if self.normalize_logs:
- # Normalize child log line and route to embedded module logger
- level = logging.INFO
- logger_name: str | None = None
- msg = line
-
- s = _SIMPLE_FORMAT_RE.match(line)
- if s:
- _, level_name, logger_name, actual_message = s.groups()
- logger_name = (logger_name or "").strip()
- msg = actual_message
- level = getattr(
- logging, (level_name or "INFO").upper(), logging.INFO
- )
- elif line.startswith("Traceback"):
- # Traceback and continuation lines -> keep last context, escalate on Traceback
- level = logging.ERROR
-
- # Choose target logger
- target_logger_name = logger_name or last_logger or "parol6.server"
- target_logger = logging.getLogger(target_logger_name)
- target_logger.log(level, msg)
-
- # Update last context if we identified a module
- if logger_name:
- last_logger = logger_name
- else:
- # No normalization - forward line as-is to root logger
- print(line)
- except Exception as e:
- logging.warning("ServerManager: output reader stopped: %s", e)
-
- def stop_controller(self, timeout: float = 2.0) -> None:
- """Stop the controller process if running.
-
- This method attempts a graceful shutdown first using SIGTERM (or terminate() on Windows)
- and then escalates to a forced kill if the process does not exit within ``timeout``.
- After sending SIGKILL it will wait up to ``kill_timeout`` seconds for the process to
- actually exit before giving up.
- """
- self._stop_reader.set()
- if self._reader_thread and self._reader_thread.is_alive():
- self._reader_thread.join(timeout=timeout)
- self._reader_thread = None
- if self._proc and self._proc.poll() is None:
- logging.debug("Stopping Controller...")
- try:
- self._proc.terminate()
- self._proc.wait(timeout=timeout)
- except Exception as e:
- logging.warning("stop_controller: terminate/wait failed: %s", e)
-
- # Step 2: escalate to forced kill if still running
- if self._proc and self._proc.poll() is None:
- logging.warning(
- "Controller did not exit after SIGTERM within %.1fs, sending SIGKILL",
- timeout,
- )
- try:
- self._proc.kill()
- self._proc.wait(timeout=timeout)
- except Exception as e:
- logging.warning("stop_controller: kill/wait failed: %s", e)
- # Clear reference
- self._proc = None
-
- async def await_ready(
- self,
- host: str = "127.0.0.1",
- port: int = 5001,
- timeout: float = 10.0,
- poll_interval: float = 0.2,
- ) -> bool:
- """
- Wait until the controller responds to PING over UDP, asynchronously.
-
- Returns:
- True if the server becomes ready within timeout, else False.
- """
- loop = asyncio.get_running_loop()
- deadline = loop.time() + max(0.0, float(timeout))
- addr: Tuple[str, int] = (host, port)
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- sock.setblocking(False)
-
- try:
- while True:
- now = loop.time()
- if now >= deadline:
- return False
-
- # Try a PING and wait up to poll_interval (or remaining time)
- recv_timeout = min(poll_interval, deadline - now)
-
- try:
- # send PING
- await loop.sock_sendto(sock, b"PING", addr)
-
- # wait for PONG with a timeout
- data, _ = await asyncio.wait_for(
- loop.sock_recvfrom(sock, 256),
- timeout=recv_timeout,
- )
- if data.decode("ascii", errors="ignore").startswith("PONG"):
- return True
- except (asyncio.TimeoutError, OSError):
- # Timeout or send/recv error -> just try again until deadline
- pass
-
- # Optional extra delay to avoid tight loop; keep within deadline
- remain = deadline - loop.time()
- if remain <= 0:
- return False
- await asyncio.sleep(min(poll_interval, remain))
- finally:
- sock.close()
-
-
-def is_server_running(
- host: str = "127.0.0.1",
- port: int = 5001,
- timeout: float = 1.0,
-) -> bool:
- """Return True if a PAROL6 controller responds to UDP PING at host:port."""
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.settimeout(timeout)
- sock.sendto(b"PING", (host, port))
- data, _ = sock.recvfrom(256)
- return data.decode("ascii", errors="ignore").startswith("PONG")
- except Exception:
- return False
-
-
-def manage_server(
- host: str = "127.0.0.1",
- port: int = 5001,
- com_port: str | None = None,
- extra_env: dict | None = None,
- normalize_logs: bool = False,
-) -> ServerManager:
- """Synchronously start a PAROL6 controller at host:port and block until ready.
-
- Fast-fails if a server is already running there.
-
- Returns a ServerManager that owns the spawned controller.
- """
- if is_server_running(host=host, port=port):
- raise RuntimeError(f"Server already running at {host}:{port}")
-
- logging.info(f"Server not responding at {host}:{port}, starting controller...")
-
- # Prepare environment for child controller bind tuple
- env_to_pass = dict(extra_env) if extra_env else {}
- env_to_pass["PAROL6_CONTROLLER_IP"] = host
- env_to_pass["PAROL6_CONTROLLER_PORT"] = str(port)
-
- manager = ServerManager(normalize_logs=normalize_logs)
- manager.start_controller(
- com_port=com_port,
- no_autohome=True,
- extra_env=env_to_pass,
- server_host=host,
- server_port=port,
- )
-
- # Block until PING responds or timeout
- deadline = time.time() + 5.0
- while time.time() < deadline:
- try:
- if is_server_running(host=host, port=port, timeout=0.2):
- logging.info(f"Successfully started server at {host}:{port}")
- return manager
- except Exception:
- pass
- time.sleep(0.05)
-
- logging.error("Server spawn failed or not responding after startup")
- manager.stop_controller()
- raise RuntimeError("Failed to start PAROL6 controller")
-
-
-@contextlib.contextmanager
-def managed_server(
- host: str = "127.0.0.1",
- port: int = 5001,
- com_port: str | None = None,
- extra_env: dict | None = None,
- normalize_logs: bool = False,
-):
- """Synchronous context manager that ensures the controller is stopped on exit.
-
- Usage:
- with managed_server(host, port) as mgr:
- ...
- """
- mgr = manage_server(
- host=host,
- port=port,
- com_port=com_port,
- extra_env=extra_env,
- normalize_logs=normalize_logs,
- )
- try:
- yield mgr
- finally:
- mgr.stop_controller()
diff --git a/parol6/client/status_subscriber.py b/parol6/client/status_subscriber.py
deleted file mode 100644
index b6b3bef..0000000
--- a/parol6/client/status_subscriber.py
+++ /dev/null
@@ -1,208 +0,0 @@
-import asyncio
-import contextlib
-import logging
-import socket
-import struct
-import time
-from collections.abc import AsyncIterator
-
-from parol6 import config as cfg
-from parol6.protocol.types import StatusAggregate
-from parol6.protocol.wire import decode_status
-
-logger = logging.getLogger(__name__)
-
-
-class UDPProtocol(asyncio.DatagramProtocol):
- """Protocol handler for UDP datagrams (multicast or unicast)."""
-
- def __init__(self, queue: asyncio.Queue):
- self.queue = queue
- self.transport = None
- self.receive_count = 0
- self.last_log_time = time.time()
-
- # EMA rate tracking for RX
- self._rx_count = 0
- self._rx_last_time = time.monotonic()
- self._rx_ema_period = 0.05 # Initialize with ~20 Hz expected
- self._rx_last_log_time = time.monotonic()
-
- def connection_made(self, transport):
- self.transport = transport
-
- def datagram_received(self, data, addr):
- # Track RX rate with EMA
- now = time.monotonic()
- if self._rx_count > 0: # Skip first sample for period calculation
- period = now - self._rx_last_time
- if period > 0:
- # EMA update: 0.1 * new + 0.9 * old
- self._rx_ema_period = 0.1 * period + 0.9 * self._rx_ema_period
- self._rx_last_time = now
- self._rx_count += 1
-
- # Log rate every 3 seconds
- if now - self._rx_last_log_time >= 3.0 and self._rx_ema_period > 0:
- rx_hz = 1.0 / self._rx_ema_period
- logger.debug(f"Status RX: {rx_hz:.1f} Hz (count={self._rx_count})")
- self._rx_last_log_time = now
-
- try:
- self.queue.put_nowait((data, addr))
- except asyncio.QueueFull:
- # Drop oldest packet if queue is full
- try:
- self.queue.get_nowait()
- self.queue.put_nowait((data, addr))
- except Exception:
- pass
-
- def error_received(self, exc):
- logger.error(f"Error received: {exc}")
-
- def connection_lost(self, exc):
- logger.info(f"Connection lost: {exc}")
-
-
-def _create_multicast_socket(group: str, port: int, iface_ip: str) -> socket.socket:
- """Create and configure a multicast socket with loopback-first semantics and robust joins."""
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
-
- # Allow multiple listeners on same port; prefer SO_REUSEPORT where available
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except Exception:
- # Not available or not permitted on this platform; continue
- pass
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1 << 20)
- # Bind to port (try wildcard first, then iface_ip)
- try:
- sock.bind(("", port))
- except OSError:
- sock.bind((iface_ip, port))
-
- # Helper to determine active NIC IP (no packets sent)
- def _detect_primary_ip() -> str:
- tmp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- tmp.connect(("1.1.1.1", 80))
- return tmp.getsockname()[0]
- except Exception:
- return "127.0.0.1"
- finally:
- with contextlib.suppress(Exception):
- tmp.close()
-
- # Join multicast group on specified interface (loopback preferred), with fallbacks
- try:
- mreq = struct.pack("=4s4s", socket.inet_aton(group), socket.inet_aton(iface_ip))
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
- except Exception:
- # Retry using primary NIC IP
- try:
- primary_ip = _detect_primary_ip()
- mreq = struct.pack(
- "=4s4s", socket.inet_aton(group), socket.inet_aton(primary_ip)
- )
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
- except Exception:
- # Final fallback: INADDR_ANY variant
- mreq_any = struct.pack("=4sl", socket.inet_aton(group), socket.INADDR_ANY)
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq_any)
-
- # Non-blocking for asyncio
- sock.setblocking(False)
- return sock
-
-
-def _create_unicast_socket(port: int, host: str) -> socket.socket:
- """Create and configure a plain UDP socket for unicast reception.
-
- Binds to the provided host (default 127.0.0.1) and port with large RCVBUF.
- """
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except Exception:
- pass
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1 << 20)
- try:
- sock.bind((host, port))
- except OSError:
- # Fallback to wildcard
- sock.bind(("", port))
- sock.setblocking(False)
- return sock
-
-
-async def subscribe_status(
- group: str | None = None, port: int | None = None, iface_ip: str | None = None
-) -> AsyncIterator[StatusAggregate]:
- """
- Async generator that yields decoded STATUS dicts from the UDP broadcaster.
-
- Uses create_datagram_endpoint for uvloop compatibility.
-
- Notes:
- - Uses loopback multicast by default (cfg.MCAST_* values).
- - Yields only messages that decode successfully via decode_status; otherwise skips.
- """
- group = group or cfg.MCAST_GROUP
- port = port or cfg.MCAST_PORT
- iface_ip = iface_ip or cfg.MCAST_IF
-
- logger.info(
- f"subscribe_status starting: transport={cfg.STATUS_TRANSPORT} group={group}, port={port}, iface_ip={iface_ip}"
- )
-
- loop = asyncio.get_running_loop()
- queue = asyncio.Queue(maxsize=100) # type: ignore
-
- # Create the socket based on configured transport
- if cfg.STATUS_TRANSPORT == "UNICAST":
- sock = _create_unicast_socket(port, cfg.STATUS_UNICAST_HOST)
- else:
- # Multicast socket bound to ("", port) will also receive unicast datagrams to that port
- sock = _create_multicast_socket(group, port, iface_ip)
-
- # Create the datagram endpoint with our protocol
- transport = None
- try:
- transport, _ = await loop.create_datagram_endpoint(
- lambda: UDPProtocol(queue), sock=sock
- )
-
- while True:
- try:
- # Wait for data with timeout
- data, addr = await asyncio.wait_for(queue.get(), timeout=2.0)
- text = data.decode("ascii", errors="ignore")
-
- parsed = decode_status(text)
- if parsed is not None:
- yield parsed
-
- except (asyncio.TimeoutError, TimeoutError):
- logger.warning(
- f"No status received for 2s on {('unicast' if cfg.STATUS_TRANSPORT == 'UNICAST' else 'multicast')} {group}:{port} (iface={iface_ip})"
- )
- continue
-
- except asyncio.CancelledError:
- # Normal shutdown path when consumer task is cancelled
- logger.info("subscribe_status cancelled")
- raise
- except Exception as e:
- logger.error(f"Error in subscribe_status: {e}", exc_info=True)
- raise
- finally:
- if transport:
- logger.info("Closing transport...")
- transport.close()
- try:
- sock.close()
- except Exception:
- pass
diff --git a/parol6/client/sync_client.py b/parol6/client/sync_client.py
index 384a93f..fd94e13 100644
--- a/parol6/client/sync_client.py
+++ b/parol6/client/sync_client.py
@@ -9,9 +9,21 @@
import atexit
import threading
from collections.abc import Callable, Coroutine
-from typing import Any, Literal, TypeVar
+from typing import Any, TypeVar, overload
-from ..protocol.types import Axis, Frame, StatusAggregate
+from waldoctl.tools import ToolSpec
+
+from waldoctl import PingResult, ToolStatus
+from waldoctl.status import ActivityResult, ToolResult
+
+from waldoctl.types import Axis, Frame
+from ..protocol.wire import (
+ EnablementResultStruct,
+ LoopStatsResultStruct,
+ StatusBuffer,
+ StatusResultStruct,
+)
+from ..utils.error_catalog import RobotError
from .async_client import AsyncRobotClient
T = TypeVar("T")
@@ -31,13 +43,31 @@ def _loop_worker(loop: asyncio.AbstractEventLoop) -> None:
def _stop_sync_loop() -> None:
global _SYNC_LOOP, _SYNC_THREAD
- if _SYNC_LOOP is not None:
- try:
- _SYNC_LOOP.call_soon_threadsafe(_SYNC_LOOP.stop)
- except Exception:
- pass
- _SYNC_LOOP = None
- _SYNC_THREAD = None
+ if _SYNC_LOOP is None:
+ return
+
+ loop = _SYNC_LOOP
+
+ async def _shutdown():
+ # Cancel all pending tasks
+ tasks = [t for t in asyncio.all_tasks(loop) if t is not asyncio.current_task()]
+ for task in tasks:
+ task.cancel()
+ # Let cancelled tasks finalize
+ if tasks:
+ await asyncio.gather(*tasks, return_exceptions=True)
+ loop.stop()
+
+ try:
+ asyncio.run_coroutine_threadsafe(_shutdown(), loop)
+ if _SYNC_THREAD is not None:
+ _SYNC_THREAD.join(timeout=2.0)
+ except (RuntimeError, asyncio.InvalidStateError):
+ # Loop may already be stopped or thread may not be joinable
+ pass
+
+ _SYNC_LOOP = None
+ _SYNC_THREAD = None
def _ensure_sync_loop() -> None:
@@ -84,7 +114,7 @@ class RobotClient:
Can be used as a context manager to ensure proper cleanup:
with RobotClient() as client:
- client.enable()
+ client.resume()
...
"""
@@ -100,6 +130,31 @@ def __init__(
self._inner = AsyncRobotClient(
host=host, port=port, timeout=timeout, retries=retries
)
+ # Wrap the inner async client's bound tools with sync adapters so that
+ # `from parol6 import RobotClient; rbt = RobotClient(...)` works without
+ # going through Robot.create_sync_client(). The Robot factory rebinds
+ # these afterwards from the same registry.
+ self._bound_tools: dict[str, ToolSpec] = {}
+ self._bind_default_tools()
+
+ def _bind_default_tools(self) -> None:
+ """Wrap inner async client's bound tools with sync adapters."""
+ from waldoctl.sync_tools import make_sync_tool
+
+ self._bound_tools = {
+ key: make_sync_tool(async_tool, _run)
+ for key, async_tool in self._inner._bound_tools.items()
+ }
+
+ # ---------- tool access ----------
+
+ @property
+ def tool(self) -> ToolSpec:
+ """Active bound tool. Raises if no tool has been set."""
+ key = (self._inner._active_tool_key or "").upper()
+ if not key:
+ raise RuntimeError("No tool set. Call select_tool() first.")
+ return self._bound_tools[key]
def close(self) -> None:
"""Close underlying AsyncRobotClient and release resources."""
@@ -127,551 +182,631 @@ def port(self) -> int:
# ---------- motion / control ----------
- def home(self) -> bool:
- return _run(self._inner.home())
+ def home(self, wait: bool = False, timeout: float = 60.0) -> int:
+ """Home the robot to its home position.
- def enable(self) -> bool:
- return _run(self._inner.enable())
+ Returns the command index (≥ 0) on success, -1 on failure.
- def disable(self) -> bool:
- return _run(self._inner.disable())
+ Args:
+ wait: If True, block until motion completes.
+ timeout: Maximum time to wait in seconds (only used when wait=True).
+ """
+ return _run(self._inner.home(wait=wait, timeout=timeout))
- def stop(self) -> bool:
- """Alias for disable() - stops motion and disables controller."""
- return self.disable()
+ def teleport(
+ self,
+ angles_deg: list[float],
+ tool_positions: list[float] | None = None,
+ ) -> int:
+ """Instantly set joint angles and optional tool positions (simulator only)."""
+ return _run(self._inner.teleport(angles_deg, tool_positions=tool_positions))
- def start(self) -> bool:
- """Alias for enable() - enables controller."""
- return self.enable()
+ def resume(self) -> int:
+ """Re-enable the robot controller, allowing motion commands.
- def stream_on(self) -> bool:
- return _run(self._inner.stream_on())
+ Returns:
+ 1 if acknowledged, 0 on failure.
+ """
+ return _run(self._inner.resume())
- def stream_off(self) -> bool:
- return _run(self._inner.stream_off())
+ def halt(self) -> int:
+ """Halt the robot — stop all motion and disable.
- def simulator_on(self) -> bool:
- return _run(self._inner.simulator_on())
+ Returns:
+ 1 if acknowledged, 0 on failure.
+ """
+ return _run(self._inner.halt())
- def simulator_off(self) -> bool:
- return _run(self._inner.simulator_off())
+ def simulator(self, enabled: bool) -> int:
+ """Enable or disable simulator mode."""
+ return _run(self._inner.simulator(enabled))
- def set_serial_port(self, port_str: str) -> bool:
- return _run(self._inner.set_serial_port(port_str))
+ def is_simulator(self) -> bool:
+ """Query whether simulator mode is active."""
+ return _run(self._inner.is_simulator())
- # ---------- status / queries ----------
- def ping(self) -> str | None:
- return _run(self._inner.ping())
+ def connect_hardware(self, port_str: str) -> int:
+ """Connect to robot hardware via serial port.
- def get_angles(self) -> list[float] | None:
- return _run(self._inner.get_angles())
+ Args:
+ port_str: Serial port path (e.g., '/dev/ttyUSB0' or 'COM3').
- def get_io(self) -> list[int] | None:
- return _run(self._inner.get_io())
+ Returns:
+ 1 if acknowledged, 0 on failure.
+ """
+ return _run(self._inner.connect_hardware(port_str))
- def get_gripper_status(self) -> list[int] | None:
- return _run(self._inner.get_gripper_status())
+ def reset(self) -> int:
+ """Reset controller state to initial values."""
+ return _run(self._inner.reset())
- def get_speeds(self) -> list[float] | None:
- return _run(self._inner.get_speeds())
+ # ---------- status / queries ----------
+ def ping(self) -> PingResult | None:
+ """Ping the controller to check connectivity.
- def get_pose(self) -> list[float] | None:
- return _run(self._inner.get_pose())
+ Returns:
+ PingResult with hardware_connected status, or None on timeout.
+ """
+ return _run(self._inner.ping())
- def get_gripper(self) -> list[int] | None:
- return _run(self._inner.get_gripper())
+ def angles(self) -> list[float] | None:
+ """Current joint angles in degrees.
- def get_status(self) -> dict | None:
- return _run(self._inner.get_status())
+ Returns:
+ List of 6 joint angles [J1-J6] in degrees, or None on timeout.
+ """
+ return _run(self._inner.angles())
- def get_loop_stats(self) -> dict | None:
- return _run(self._inner.get_loop_stats())
+ def io(self) -> list[int] | None:
+ """Digital I/O status.
- def get_tool(self) -> dict | None:
+ Returns:
+ List of 5 integers [in1, in2, out1, out2, estop], or None on timeout.
"""
- Get the current tool configuration and available tools.
+ return _run(self._inner.io())
+
+ def joint_speeds(self) -> list[float] | None:
+ """Current joint speeds in steps per second.
Returns:
- Dict with keys: 'tool' (current tool name), 'available' (list of available tools)
+ List of 6 joint speeds [J1-J6] in steps/sec, or None on timeout.
"""
- return _run(self._inner.get_tool())
+ return _run(self._inner.joint_speeds())
- def set_tool(self, tool_name: str) -> bool:
- """
- Set the current end-effector tool configuration.
+ def pose(self, frame: str = "WRF") -> list[float] | None:
+ """Current TCP pose as [x, y, z, rx, ry, rz] in mm and degrees.
Args:
- tool_name: Name of the tool ('NONE', 'PNEUMATIC', 'ELECTRIC')
+ frame: Reference frame - "WRF" (world) or "TRF" (tool).
Returns:
- True if successful
+ [x, y, z, rx, ry, rz] in mm and degrees, or None on timeout.
"""
- return _run(self._inner.set_tool(tool_name))
+ return _run(self._inner.pose(frame=frame))
- def get_current_action(self) -> dict | None:
- """
- Get the current executing action/command and its state.
+ def status(self) -> StatusResultStruct | None:
+ """Aggregate status snapshot.
Returns:
- Dict with keys: 'current' (current action name), 'state' (action state),
- 'next' (next action if any)
+ StatusResultStruct with pose, angles, speeds, io, tool_status, or None on timeout.
"""
- return _run(self._inner.get_current_action())
+ return _run(self._inner.status())
+
+ def loop_stats(self) -> LoopStatsResultStruct | None:
+ """Control loop runtime statistics.
- def get_queue(self) -> dict | None:
+ Returns:
+ LoopStatsResultStruct with loop timing metrics, or None on timeout.
"""
- Get the list of queued non-streamable commands.
+ return _run(self._inner.loop_stats())
+
+ def reset_loop_stats(self) -> int:
+ """Reset control-loop min/max metrics and overrun count."""
+ return _run(self._inner.reset_loop_stats())
+
+ def tools(self) -> ToolResult | None:
+ """Current tool and available tools.
Returns:
- Dict with keys: 'non_streamable' (list of queued commands), 'size' (queue size)
+ ToolResult with tool (current) and available (list), or None.
"""
- return _run(self._inner.get_queue())
+ return _run(self._inner.tools())
- # ---------- helper methods ----------
+ def select_tool(self, tool_name: str, variant_key: str = "") -> int:
+ """Set the active end-effector tool on the controller.
- def get_pose_rpy(self) -> list[float] | None:
- return _run(self._inner.get_pose_rpy())
+ Args:
+ tool_name: Name of the tool ('NONE', 'PNEUMATIC', 'SSG-48', 'MSG', 'VACUUM')
+ variant_key: Optional variant within the tool type.
- def get_pose_xyz(self) -> list[float] | None:
- return _run(self._inner.get_pose_xyz())
+ Returns:
+ Command index (>= 0) if queued, 0 on failure.
+ """
+ return _run(self._inner.select_tool(tool_name, variant_key=variant_key))
- def is_estop_pressed(self) -> bool:
- return _run(self._inner.is_estop_pressed())
+ def set_tcp_offset(self, x: float = 0, y: float = 0, z: float = 0) -> int:
+ """Set TCP offset in mm, composed on top of the current tool transform."""
+ return _run(self._inner.set_tcp_offset(x=x, y=y, z=z))
- def is_robot_stopped(self, threshold_speed: float = 2.0) -> bool:
- return _run(self._inner.is_robot_stopped(threshold_speed))
+ def tcp_offset(self) -> list[float]:
+ """Query current TCP offset in mm [x, y, z]."""
+ return _run(self._inner.tcp_offset())
- def wait_until_stopped(
- self,
- timeout: float = 90.0,
- settle_window: float = 1.0,
- speed_threshold: float = 2.0,
- angle_threshold: float = 0.5,
- ) -> bool:
- return _run(
- self._inner.wait_until_stopped(
- timeout=timeout,
- settle_window=settle_window,
- speed_threshold=speed_threshold,
- angle_threshold=angle_threshold,
- )
- )
+ def select_profile(self, profile: str) -> int:
+ """Set the motion profile (e.g. ``"TOPPRA"``).
- # ---------- responsive waits / raw send ----------
+ Args:
+ profile: Motion profile type ('TOPPRA', 'RUCKIG', 'QUINTIC', 'TRAPEZOID', 'LINEAR')
+ Note: RUCKIG is point-to-point only; Cartesian moves will use TOPPRA.
- def wait_for_server_ready(
- self, timeout: float = 5.0, interval: float = 0.05
- ) -> bool:
- """Poll ping() until server responds or timeout."""
- return _run(
- self._inner.wait_for_server_ready(timeout=timeout, interval=interval)
- )
+ Returns:
+ 1 if acknowledged, 0 on failure.
+ """
+ return _run(self._inner.select_profile(profile))
- def wait_for_status(
- self, predicate: Callable[[StatusAggregate], bool], timeout: float = 5.0
- ) -> bool:
+ def profile(self) -> str | None:
+ """Current motion profile name.
+
+ Returns:
+ Current motion profile, or None on timeout.
"""
- Wait until a multicast status satisfies predicate(status) within timeout.
- Note: predicate is executed in the client's event loop thread.
+ return _run(self._inner.profile())
+
+ def activity(self) -> ActivityResult | None:
+ """What the robot is currently doing.
+
+ Returns:
+ ActivityResult with state, command, params, and error.
"""
- return _run(self._inner.wait_for_status(predicate, timeout=timeout))
+ return _run(self._inner.activity())
+
+ def queue(self) -> list[str] | None:
+ """Queued command list.
- def send_raw(
- self, message: str, await_reply: bool = False, timeout: float = 2.0
- ) -> bool | str | None:
+ Returns:
+ List of queued command names, or None on timeout.
"""
- Send a raw UDP message; optionally await a single reply and return its text.
- Returns True on fire-and-forget send, str on reply, or None on timeout/error when awaiting.
+ return _run(self._inner.queue())
+
+ def _tool_status(self) -> ToolStatus | None:
+ """Query tool status (internal — use ``rbt.tool.status()``)."""
+ return _run(self._inner._tool_status())
+
+ def reachable(self) -> EnablementResultStruct | None:
+ """Remaining freedom of movement per joint/axis before hitting limits.
+
+ Returns:
+ EnablementResultStruct with joint_en, cart_en_wrf, cart_en_trf.
"""
- return _run(
- self._inner.send_raw(message, await_reply=await_reply, timeout=timeout)
- )
+ return _run(self._inner.reachable())
- # ---------- extended controls / motion ----------
+ def error(self) -> RobotError | None:
+ """Current error state, or None if no error.
- def move_joints(
- self,
- joint_angles: list[float],
- duration: float | None = None,
- speed_percentage: int | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- return _run(
- self._inner.move_joints(
- joint_angles,
- duration,
- speed_percentage,
- accel_percentage,
- profile,
- tracking,
- )
- )
+ Returns:
+ RobotError if an error is active, None otherwise.
+ """
+ return _run(self._inner.error())
- def move_pose(
- self,
- pose: list[float],
- duration: float | None = None,
- speed_percentage: int | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- return _run(
- self._inner.move_pose(
- pose,
- duration,
- speed_percentage,
- accel_percentage,
- profile,
- tracking,
- )
- )
+ def tcp_speed(self) -> float | None:
+ """TCP linear velocity in mm/s.
- def move_cartesian(
- self,
- pose: list[float],
- duration: float | None = None,
- speed_percentage: float | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- return _run(
- self._inner.move_cartesian(
- pose,
- duration,
- speed_percentage,
- accel_percentage,
- profile,
- tracking,
- )
- )
+ Returns:
+ TCP speed as float, or None on timeout.
+ """
+ return _run(self._inner.tcp_speed())
- def move_cartesian_rel_trf(
- self,
- deltas: list[float],
- duration: float | None = None,
- speed_percentage: float | None = None,
- accel_percentage: int | None = None,
- profile: str | None = None,
- tracking: str | None = None,
- ) -> bool:
- return _run(
- self._inner.move_cartesian_rel_trf(
- deltas,
- duration,
- speed_percentage,
- accel_percentage,
- profile,
- tracking,
- )
- )
+ # ---------- helper methods ----------
- def jog_joint(
- self,
- joint_index: int,
- speed_percentage: int,
- duration: float | None = None,
- distance_deg: float | None = None,
- ) -> bool:
- return _run(
- self._inner.jog_joint(
- joint_index,
- speed_percentage,
- duration,
- distance_deg,
- )
- )
+ def is_estop_pressed(self) -> bool:
+ """Check if E-stop is pressed.
- def jog_cartesian(
- self,
- frame: Frame,
- axis: Axis,
- speed_percentage: int,
- duration: float,
- ) -> bool:
- return _run(self._inner.jog_cartesian(frame, axis, speed_percentage, duration))
+ Returns:
+ True if E-stop is pressed, False otherwise.
+ """
+ return _run(self._inner.is_estop_pressed())
- def jog_multiple(
- self,
- joints: list[int],
- speeds: list[float],
- duration: float,
- ) -> bool:
- return _run(self._inner.jog_multiple(joints, speeds, duration))
+ def is_robot_stopped(self, threshold_speed: float = 2.0) -> bool:
+ """Check if the robot has stopped moving.
- def set_io(self, index: int, value: int) -> bool:
- """Set digital I/O bit (0..7) to 0 or 1."""
- return _run(self._inner.set_io(index, value))
+ Prefer ``wait_command()`` for waiting on specific commands.
+ This method polls raw joint speeds and is mainly useful for
+ diagnostics or manual stopping logic.
- def delay(self, seconds: float) -> bool:
- """Insert a non-blocking delay in the motion queue."""
- return _run(self._inner.delay(seconds))
+ Args:
+ threshold_speed: Speed threshold in steps/sec.
- # ---------- IO / gripper ----------
+ Returns:
+ True if all joint speeds are below the threshold.
+ """
+ return _run(self._inner.is_robot_stopped(threshold_speed))
- def control_pneumatic_gripper(
+ def wait_motion(
self,
- action: str,
- port: int,
+ timeout: float = 10.0,
+ settle_window: float = 0.25,
+ speed_threshold: float = 0.01,
+ angle_threshold: float = 0.5,
+ motion_start_timeout: float = 1.0,
) -> bool:
- return _run(self._inner.control_pneumatic_gripper(action, port))
+ """Wait for robot to stop moving.
- def control_electric_gripper(
- self,
- action: str,
- position: int | None = 255,
- speed: int | None = 150,
- current: int | None = 500,
- ) -> bool:
+ Args:
+ timeout: Maximum time to wait in seconds.
+ settle_window: How long robot must be stable.
+ speed_threshold: Max joint speed to be considered stopped (rad/s).
+ angle_threshold: Max angle change to be considered stopped.
+ motion_start_timeout: Max time to wait for motion to start.
+
+ Returns:
+ True if the robot stopped, False on timeout.
+ """
return _run(
- self._inner.control_electric_gripper(action, position, speed, current)
+ self._inner.wait_motion(
+ timeout=timeout,
+ settle_window=settle_window,
+ speed_threshold=speed_threshold,
+ angle_threshold=angle_threshold,
+ motion_start_timeout=motion_start_timeout,
+ )
)
- # ---------- GCODE ----------
+ # ---------- responsive waits / raw send ----------
- def execute_gcode(
- self,
- gcode_line: str,
- ) -> bool:
- return _run(self._inner.execute_gcode(gcode_line))
+ def wait_ready(self, timeout: float = 5.0, interval: float = 0.05) -> bool:
+ """Poll ping() until the server responds or the timeout elapses."""
+ return _run(self._inner.wait_ready(timeout=timeout, interval=interval))
- def execute_gcode_program(
- self,
- program_lines: list[str],
+ def wait_status(
+ self, predicate: Callable[[StatusBuffer], bool], timeout: float = 5.0
) -> bool:
- return _run(self._inner.execute_gcode_program(program_lines))
+ """Wait until a multicast status satisfies predicate(status) within timeout.
- def load_gcode_file(
- self,
- filepath: str,
- ) -> bool:
- return _run(self._inner.load_gcode_file(filepath))
+ Note: predicate is executed in the client's event loop thread.
+ """
+ return _run(self._inner.wait_status(predicate, timeout=timeout))
- def get_gcode_status(self) -> dict | None:
- return _run(self._inner.get_gcode_status())
+ def wait_command(self, command_index: int, timeout: float = 10.0) -> bool:
+ """Wait until a specific command index has been completed.
- def pause_gcode_program(self) -> bool:
- return _run(self._inner.pause_gcode_program())
+ Args:
+ command_index: The command index to wait for (returned by motion commands).
+ timeout: Maximum time to wait in seconds.
- def resume_gcode_program(self) -> bool:
- return _run(self._inner.resume_gcode_program())
+ Returns:
+ True if the command completed within timeout, False otherwise.
- def stop_gcode_program(self) -> bool:
- return _run(self._inner.stop_gcode_program())
+ Raises:
+ MotionError: If the pipeline errored at or before command_index.
+ """
+ return _run(self._inner.wait_command(command_index, timeout=timeout))
- # ---------- smooth motion ----------
+ # ---------- motion ----------
- def smooth_circle(
+ @overload
+ def move_j(
self,
- center: list[float],
- radius: float,
- plane: Literal["XY", "XZ", "YZ"] = "XY",
- frame: Literal["WRF", "TRF"] = "WRF",
- center_mode: Literal["ABSOLUTE", "TOOL", "RELATIVE"] = "ABSOLUTE",
- entry_mode: Literal["AUTO", "TANGENT", "DIRECT", "NONE"] = "NONE",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- clockwise: bool = False,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- ) -> bool:
+ angles: list[float],
+ *,
+ duration: float = ...,
+ speed: float = ...,
+ accel: float = ...,
+ r: float = ...,
+ rel: bool = ...,
+ wait: bool = ...,
+ timeout: float = ...,
+ ) -> int: ...
+
+ @overload
+ def move_j(
+ self,
+ angles: list[float] | None = ...,
+ *,
+ pose: list[float],
+ duration: float = ...,
+ speed: float = ...,
+ accel: float = ...,
+ r: float = ...,
+ wait: bool = ...,
+ timeout: float = ...,
+ ) -> int: ...
+
+ def move_j(
+ self,
+ angles: list[float] | None = None,
+ *,
+ pose: list[float] | None = None,
+ duration: float = 0.0,
+ speed: float = 0.0,
+ accel: float = 1.0,
+ r: float = 0.0,
+ rel: bool = False,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
+ if pose is not None:
+ return _run(
+ self._inner.move_j(
+ pose=pose,
+ duration=duration,
+ speed=speed,
+ accel=accel,
+ r=r,
+ wait=wait,
+ timeout=timeout,
+ )
+ )
return _run(
- self._inner.smooth_circle(
- center=center,
- radius=radius,
- plane=plane,
- frame=frame,
- center_mode=center_mode,
- entry_mode=entry_mode,
- start_pose=start_pose,
+ self._inner.move_j(
+ angles or [],
duration=duration,
- speed_percentage=speed_percentage,
- clockwise=clockwise,
- trajectory_type=trajectory_type,
- jerk_limit=jerk_limit,
+ speed=speed,
+ accel=accel,
+ r=r,
+ rel=rel,
+ wait=wait,
+ timeout=timeout,
)
)
- def smooth_arc_center(
+ def move_l(
self,
- end_pose: list[float],
- center: list[float],
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- clockwise: bool = False,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- ) -> bool:
+ pose: list[float],
+ *,
+ frame: Frame = "WRF",
+ duration: float = 0.0,
+ speed: float = 0.0,
+ accel: float = 1.0,
+ r: float = 0.0,
+ rel: bool = False,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
return _run(
- self._inner.smooth_arc_center(
- end_pose=end_pose,
- center=center,
+ self._inner.move_l(
+ pose,
frame=frame,
- start_pose=start_pose,
duration=duration,
- speed_percentage=speed_percentage,
- clockwise=clockwise,
- trajectory_type=trajectory_type,
- jerk_limit=jerk_limit,
+ speed=speed,
+ accel=accel,
+ r=r,
+ rel=rel,
+ wait=wait,
+ timeout=timeout,
)
)
- def smooth_arc_param(
+ def move_c(
self,
- end_pose: list[float],
- radius: float,
- arc_angle: float,
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
+ via: list[float],
+ end: list[float],
+ *,
+ frame: Frame = "WRF",
duration: float | None = None,
- speed_percentage: float | None = None,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- clockwise: bool = False,
- ) -> bool:
+ speed: float | None = None,
+ accel: float = 1.0,
+ r: float = 0.0,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
return _run(
- self._inner.smooth_arc_param(
- end_pose=end_pose,
- radius=radius,
- arc_angle=arc_angle,
+ self._inner.move_c(
+ via,
+ end,
frame=frame,
- start_pose=start_pose,
duration=duration,
- speed_percentage=speed_percentage,
- trajectory_type=trajectory_type,
- jerk_limit=jerk_limit,
- clockwise=clockwise,
+ speed=speed,
+ accel=accel,
+ r=r,
+ wait=wait,
+ timeout=timeout,
)
)
- def smooth_spline(
+ def move_s(
self,
waypoints: list[list[float]],
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
+ *,
+ frame: Frame = "WRF",
duration: float | None = None,
- speed_percentage: float | None = None,
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- ) -> bool:
+ speed: float | None = None,
+ accel: float = 1.0,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
return _run(
- self._inner.smooth_spline(
- waypoints=waypoints,
+ self._inner.move_s(
+ waypoints,
frame=frame,
- start_pose=start_pose,
duration=duration,
- speed_percentage=speed_percentage,
- trajectory_type=trajectory_type,
- jerk_limit=jerk_limit,
+ speed=speed,
+ accel=accel,
+ wait=wait,
+ timeout=timeout,
)
)
- def smooth_helix(
+ def move_p(
self,
- center: list[float],
- radius: float,
- pitch: float,
- height: float,
- frame: Literal["WRF", "TRF"] = "WRF",
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "cubic",
- jerk_limit: float | None = None,
- start_pose: list[float] | None = None,
+ waypoints: list[list[float]],
+ *,
+ frame: Frame = "WRF",
duration: float | None = None,
- speed_percentage: float | None = None,
- clockwise: bool = False,
- ) -> bool:
+ speed: float | None = None,
+ accel: float = 1.0,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
return _run(
- self._inner.smooth_helix(
- center=center,
- radius=radius,
- pitch=pitch,
- height=height,
+ self._inner.move_p(
+ waypoints,
frame=frame,
- trajectory_type=trajectory_type,
- jerk_limit=jerk_limit,
- start_pose=start_pose,
duration=duration,
- speed_percentage=speed_percentage,
- clockwise=clockwise,
+ speed=speed,
+ accel=accel,
+ wait=wait,
+ timeout=timeout,
)
)
- def smooth_blend(
+ @overload
+ def servo_j(
self,
- segments: list[dict],
- blend_time: float = 0.5,
- frame: Literal["WRF", "TRF"] = "WRF",
- start_pose: list[float] | None = None,
- duration: float | None = None,
- speed_percentage: float | None = None,
- ) -> bool:
- return _run(
- self._inner.smooth_blend(
- segments=segments,
- blend_time=blend_time,
- frame=frame,
- start_pose=start_pose,
- duration=duration,
- speed_percentage=speed_percentage,
- )
- )
+ angles: list[float],
+ *,
+ speed: float = ...,
+ accel: float = ...,
+ ) -> int: ...
+
+ @overload
+ def servo_j(
+ self,
+ angles: list[float] | None = ...,
+ *,
+ pose: list[float],
+ speed: float = ...,
+ accel: float = ...,
+ ) -> int: ...
- def smooth_waypoints(
+ def servo_j(
self,
- waypoints: list[list[float]],
- blend_radii: Literal["AUTO"] | list[float] = "AUTO",
- blend_mode: Literal["parabolic", "circular", "none"] = "parabolic",
- via_modes: list[str] | None = None,
- max_velocity: float = 100.0,
- max_acceleration: float = 500.0,
- frame: Literal["WRF", "TRF"] = "WRF",
- trajectory_type: Literal["cubic", "quintic", "s_curve"] = "quintic",
- duration: float | None = None,
- ) -> bool:
- return _run(
- self._inner.smooth_waypoints(
- waypoints=waypoints,
- blend_radii=blend_radii,
- blend_mode=blend_mode,
- via_modes=via_modes,
- max_velocity=max_velocity,
- max_acceleration=max_acceleration,
- frame=frame,
- trajectory_type=trajectory_type,
- duration=duration,
+ angles: list[float] | None = None,
+ *,
+ pose: list[float] | None = None,
+ speed: float = 1.0,
+ accel: float = 1.0,
+ ) -> int:
+ if pose is not None:
+ return _run(
+ self._inner.servo_j(angles or [], pose=pose, speed=speed, accel=accel)
)
- )
+ if angles is None:
+ raise ValueError("servo_j requires angles or pose")
+ return _run(self._inner.servo_j(angles, speed=speed, accel=accel))
- # ---------- work coordinate helpers ----------
+ def servo_l(
+ self,
+ pose: list[float],
+ *,
+ speed: float = 1.0,
+ accel: float = 1.0,
+ ) -> int:
+ return _run(self._inner.servo_l(pose, speed=speed, accel=accel))
+
+ @overload
+ def jog_j(
+ self,
+ joint: int,
+ speed: float,
+ duration: float = ...,
+ *,
+ accel: float = ...,
+ ) -> int: ...
+
+ @overload
+ def jog_j(
+ self,
+ *,
+ joints: list[int],
+ speeds: list[float],
+ duration: float = ...,
+ accel: float = ...,
+ ) -> int: ...
- def set_work_coordinate_offset(
+ def jog_j(
self,
- coordinate_system: str,
- x: float | None = None,
- y: float | None = None,
- z: float | None = None,
- ) -> bool:
- return _run(
- self._inner.set_work_coordinate_offset(
- coordinate_system=coordinate_system,
- x=x,
- y=y,
- z=z,
+ joint: int | None = None,
+ speed: float = 0.0,
+ duration: float = 0.1,
+ *,
+ joints: list[int] | None = None,
+ speeds: list[float] | None = None,
+ accel: float = 1.0,
+ ) -> int:
+ if joints is not None and speeds is not None:
+ return _run(
+ self._inner.jog_j(
+ joints=joints, speeds=speeds, duration=duration, accel=accel
+ )
)
- )
+ if joint is not None:
+ return _run(self._inner.jog_j(joint, speed, duration, accel=accel))
+ raise ValueError("jog_j requires either joint or joints/speeds")
- def zero_work_coordinates(
+ @overload
+ def jog_l(
self,
- coordinate_system: str = "G54",
- ) -> bool:
+ frame: Frame,
+ axis: Axis,
+ speed: float,
+ duration: float = ...,
+ *,
+ accel: float = ...,
+ ) -> int: ...
+
+ @overload
+ def jog_l(
+ self,
+ frame: Frame,
+ *,
+ axes: list[Axis],
+ speeds_list: list[float],
+ duration: float = ...,
+ accel: float = ...,
+ ) -> int: ...
+
+ def jog_l(
+ self,
+ frame: Frame,
+ axis: Axis | None = None,
+ speed: float = 0.0,
+ duration: float = 0.1,
+ *,
+ axes: list[Axis] | None = None,
+ speeds_list: list[float] | None = None,
+ accel: float = 1.0,
+ ) -> int:
+ if axes is not None and speeds_list is not None:
+ return _run(
+ self._inner.jog_l(
+ frame,
+ axes=axes,
+ speeds_list=speeds_list,
+ duration=duration,
+ accel=accel,
+ )
+ )
+ if axis is not None:
+ return _run(self._inner.jog_l(frame, axis, speed, duration, accel=accel))
+ raise ValueError("jog_l requires either axis or axes/speeds_list")
+
+ def checkpoint(self, label: str) -> int:
+ return _run(self._inner.checkpoint(label))
+
+ def wait_checkpoint(self, label: str, timeout: float = 30.0) -> bool:
+ return _run(self._inner.wait_checkpoint(label, timeout=timeout))
+
+ def write_io(self, index: int, value: int) -> int:
+ """Set digital output by logical index (0 = first output pin)."""
+ return _run(self._inner.write_io(index, value))
+
+ def delay(self, seconds: float) -> int:
+ """Insert a non-blocking delay in the motion queue."""
+ return _run(self._inner.delay(seconds))
+
+ # ---------- IO / tool ----------
+
+ def tool_action(
+ self,
+ tool_key: str,
+ action: str,
+ params: list | None = None,
+ *,
+ wait: bool = True,
+ timeout: float = 10.0,
+ ) -> int:
return _run(
- self._inner.zero_work_coordinates(
- coordinate_system=coordinate_system,
+ self._inner.tool_action(
+ tool_key, action, params, wait=wait, timeout=timeout
)
)
diff --git a/parol6/commands/__init__.py b/parol6/commands/__init__.py
index 0910028..89fc340 100644
--- a/parol6/commands/__init__.py
+++ b/parol6/commands/__init__.py
@@ -4,15 +4,9 @@
# Re-export IK helpers for convenience
from parol6.utils.ik import (
- AXIS_MAP,
- quintic_scaling,
solve_ik,
- unwrap_angles,
)
__all__ = [
- "unwrap_angles",
"solve_ik",
- "quintic_scaling",
- "AXIS_MAP",
]
diff --git a/parol6/commands/base.py b/parol6/commands/base.py
index 2ba229c..25b89e8 100644
--- a/parol6/commands/base.py
+++ b/parol6/commands/base.py
@@ -2,22 +2,19 @@
Base abstractions and helpers for command implementations.
"""
-import json
import logging
import time
from abc import ABC, abstractmethod
-from dataclasses import dataclass
-from enum import Enum
-from typing import Any, ClassVar, cast
+from enum import Enum, auto
+from typing import Any, ClassVar, Generic, TypeVar
import numpy as np
-import roboticstoolbox as rp
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-from parol6.config import INTERVAL_S, TRACE
-from parol6.protocol.wire import CommandCode
+from parol6.config import TRACE
+from parol6.protocol.wire import CmdType, Command, CommandCode, QueryType
from parol6.server.state import ControllerState
-from parol6.utils.ik import AXIS_MAP
+from parol6.utils.error_catalog import RobotError, extract_robot_error
+from parol6.utils.error_codes import ErrorCode
logger = logging.getLogger(__name__)
@@ -25,143 +22,14 @@
class ExecutionStatusCode(Enum):
"""Enumeration for command execution status codes."""
- QUEUED = "QUEUED"
- EXECUTING = "EXECUTING"
- COMPLETED = "COMPLETED"
- FAILED = "FAILED"
- CANCELLED = "CANCELLED"
+ QUEUED = auto()
+ EXECUTING = auto()
+ COMPLETED = auto()
+ FAILED = auto()
+ CANCELLED = auto()
-@dataclass
-class ExecutionStatus:
- """
- Status returned from command execution steps.
- """
-
- code: ExecutionStatusCode
- message: str
- error: Exception | None = None
- details: dict[str, Any] | None = None
- error_type: str | None = None
-
- @classmethod
- def executing(
- cls, message: str = "Executing", details: dict[str, Any] | None = None
- ) -> "ExecutionStatus":
- return cls(ExecutionStatusCode.EXECUTING, message, error=None, details=details)
-
- @classmethod
- def completed(
- cls, message: str = "Completed", details: dict[str, Any] | None = None
- ) -> "ExecutionStatus":
- return cls(ExecutionStatusCode.COMPLETED, message, error=None, details=details)
-
- @classmethod
- def failed(
- cls,
- message: str,
- error: Exception | None = None,
- details: dict[str, Any] | None = None,
- ) -> "ExecutionStatus":
- et = type(error).__name__ if error is not None else None
- return cls(
- ExecutionStatusCode.FAILED,
- message,
- error=error,
- details=details,
- error_type=et,
- )
-
-
-# ----- Shared context and small utilities -----
-
-
-@dataclass
-class CommandContext:
- """Shared dynamic execution context for commands."""
-
- udp_transport: Any = None
- addr: tuple | None = None
- gcode_interpreter: Any = None
- dt: float = INTERVAL_S
-
-
-# Parsing utilities (lightweight, shared)
-def _noneify(token: Any) -> str | None:
- if token is None:
- return None
- t = str(token).strip()
- return None if t == "" or t.upper() in ("NONE", "NULL") else t
-
-
-def parse_int(token: Any) -> int | None:
- t = _noneify(token)
- return None if t is None else int(t)
-
-
-def parse_float(token: Any) -> float | None:
- t = _noneify(token)
- return None if t is None else float(t)
-
-
-def csv_ints(token: Any) -> list[int]:
- t = _noneify(token)
- return [] if t is None else [int(x) for x in t.split(",") if x != ""]
-
-
-def csv_floats(token: Any) -> list[float]:
- t = _noneify(token)
- return [] if t is None else [float(x) for x in t.split(",") if x != ""]
-
-
-def parse_bool(token: Any) -> bool:
- t = (str(token or "")).strip().lower()
- return t in ("1", "true", "yes", "on")
-
-
-def typed(token: Any, type_: type[Any] = float) -> Any | None:
- """Parse token with type, supporting None/Null/empty as None."""
- t = _noneify(token)
- if t is None:
- return None
- if type_ is bool:
- return parse_bool(t)
- return type_(t)
-
-
-def expect_len(parts: list[str], n: int, cmd: str) -> None:
- """Ensure parts list has exactly n elements."""
- if len(parts) != n:
- raise ValueError(f"{cmd} requires {n - 1} parameters, got {len(parts) - 1}")
-
-
-def at_least_len(parts: list[str], n: int, cmd: str) -> None:
- """Ensure parts list has at least n elements."""
- if len(parts) < n:
- raise ValueError(
- f"{cmd} requires at least {n - 1} parameters, got {len(parts) - 1}"
- )
-
-
-def parse_frame(token: Any) -> str:
- """Parse and validate frame token (WRF/TRF)."""
- t = (str(token or "")).strip().upper()
- if t not in ("WRF", "TRF"):
- raise ValueError(f"Invalid frame: {token}")
- return t
-
-
-def parse_axis(token: Any) -> str:
- """Parse and validate axis token against AXIS_MAP."""
- t = (str(token or "")).strip().upper()
- # Convert to match AXIS_MAP format (e.g., +X -> X+, -Y -> Y-)
- if len(t) == 2 and t[0] in "+-" and t[1] in "XYZ":
- t = t[1] + t[0] # Swap sign and axis
- elif len(t) == 3 and t[0] == "R" and t[2] in "+-":
- t = "R" + t[1] + t[2] # Keep RX+ format
- if t not in AXIS_MAP:
- raise ValueError(f"Invalid axis: {token}")
- return t
+# ----- Small utilities -----
class Countdown:
@@ -201,45 +69,53 @@ def tick(self, active: bool) -> bool:
return False
-class CommandBase(ABC):
+P = TypeVar("P")
+
+
+class CommandBase(ABC, Generic[P]):
"""
Reusable base for commands with shared lifecycle and safety helpers.
+
+ Commands use typed msgspec structs for parameters. The PARAMS_TYPE class
+ variable indicates which struct type this command expects.
"""
# Set by @register_command decorator; used by controller stream fast-path
- _registered_name: ClassVar[str] = ""
+ _cmd_type: ClassVar[CmdType | None] = None
+
+ # The params struct type this command expects (override in subclass)
+ PARAMS_TYPE: ClassVar[type[Command] | None] = None
__slots__ = (
- "is_valid",
+ "p",
"is_finished",
"error_state",
- "error_message",
- "udp_transport",
- "addr",
- "gcode_interpreter",
+ "robot_error",
"_t0",
"_t_end",
+ "_q_rad_buf",
+ "_steps_buf",
)
- def __init__(self) -> None:
- self.is_valid: bool = True
+ def __init__(self, p: P) -> None:
+ self.p = p
self.is_finished: bool = False
self.error_state: bool = False
- self.error_message: str = ""
- self.udp_transport: Any = None
- self.addr: Any = None
- self.gcode_interpreter: Any = None
+ self.robot_error: RobotError | None = None
self._t0: float | None = None
self._t_end: float | None = None
+ # Pre-allocated buffers for zero-allocation unit conversions
+ self._q_rad_buf: np.ndarray = np.zeros(6, dtype=np.float64)
+ self._steps_buf: np.ndarray = np.zeros(6, dtype=np.int32)
- # Ensure command objects are usable as dict keys (e.g., in server command_id_map)
+ # Ensure command objects are usable as dict keys (identity-based)
def __hash__(self) -> int:
# Identity-based hash is appropriate for ephemeral command instances
return id(self)
@property
def name(self) -> str:
- return self._registered_name or type(self).__name__
+ return self._cmd_type.name if self._cmd_type else type(self).__name__
# Logging helpers (uniform, include command identity)
def log_trace(self, msg: str, *args: Any) -> None:
@@ -262,39 +138,14 @@ def stop_and_idle(state: ControllerState) -> None:
state.Speed_out.fill(0)
state.Command_out = CommandCode.IDLE
- def bind(self, context: CommandContext) -> None:
- """
- Bind dynamic execution context. Controller should call this prior to setup().
- """
- self.udp_transport = context.udp_transport
- self.addr = context.addr
- self.gcode_interpreter = context.gcode_interpreter
-
- @abstractmethod
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Check if this command can handle the given message parts.
-
- Args:
- parts: Pre-split message parts (e.g., ['JOG', '0', '50', '2.0', 'None'])
-
- Returns:
- Tuple of (can_handle, error_message)
- - can_handle: True if this command can process the message
- - error_message: Optional error message if the message is invalid
+ def assign_params(self, params: Command) -> None:
"""
- raise NotImplementedError
+ Assign pre-validated params struct.
- def match(self, parts: list[str]) -> tuple[bool, str | None]:
+ Called AFTER msgspec has decoded and validated the struct
+ (via constraints and __post_init__). No validation here.
"""
- Wrapper that guards subclass do_match() to avoid propagating exceptions.
- Centralizes try/except so subclasses don't repeat it.
- """
- try:
- return self.do_match(parts)
- except Exception as e:
- # Do not log here to avoid duplicate noise; registry/controller provide lifecycle TRACE.
- return False, str(e)
+ self.p = params
def do_setup(self, state: ControllerState) -> None:
"""Subclass hook for preparation; override in subclasses."""
@@ -308,24 +159,43 @@ def setup(self, state: ControllerState) -> None:
self.log_trace("setup ok")
except Exception as e:
# Mark invalid and propagate for higher-level lifecycle logging
- self.fail(f"Setup error: {e}")
+ self.fail(
+ extract_robot_error(e, ErrorCode.MOTN_SETUP_FAILED, detail=str(e))
+ )
self.log_error("Setup error: %s", e)
raise
- @abstractmethod
- def tick(self, state: ControllerState) -> ExecutionStatus:
- """
- Template-method wrapper that centralizes lifecycle/error handling and calls do_execute().
- Controllers should prefer tick() over calling execute_step() directly.
- """
- raise NotImplementedError
+ def tick(self, state: ControllerState) -> ExecutionStatusCode:
+ """Template method: guards + execute_step + error handling."""
+ if self.is_finished:
+ return (
+ ExecutionStatusCode.FAILED
+ if self.error_state
+ else ExecutionStatusCode.COMPLETED
+ )
+ try:
+ return self.execute_step(state)
+ except Exception as e:
+ self._on_tick_error(state, e)
+ return ExecutionStatusCode.FAILED
+
+ def _on_tick_error(self, state: ControllerState, error: Exception) -> None:
+ """Error-path cleanup. Override in subclasses for specialized behavior."""
+ self.fail(
+ extract_robot_error(error, ErrorCode.MOTN_TICK_FAILED, detail=str(error))
+ )
+ self.log_error(str(error))
@abstractmethod
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
"""
- Execute one control-loop step and return an ExecutionStatus.
+ Execute one control-loop step.
- Commands MUST interact with state.* arrays/buffers directly (Position_in/out, Speed_out, Command_out, etc.).
+ Returns ExecutionStatusCode.EXECUTING while in progress,
+ COMPLETED when done, or FAILED on error.
+
+ Commands MUST interact with state.* arrays/buffers directly
+ (Position_in/out, Speed_out, Command_out, etc.).
"""
raise NotImplementedError
@@ -335,11 +205,10 @@ def finish(self) -> None:
"""Mark command as finished."""
self.is_finished = True
- def fail(self, message: str) -> None:
- """Mark command as invalid/failed with an error message."""
- self.is_valid = False
+ def fail(self, error: RobotError) -> None:
+ """Mark command as failed with a structured error."""
self.error_state = True
- self.error_message = message
+ self.robot_error = error
self.is_finished = True
# ---- timing helpers ----
@@ -361,63 +230,29 @@ def progress01(self, duration_s: float) -> float:
return 0.0 if p < 0.0 else (1.0 if p > 1.0 else p)
-class QueryCommand(CommandBase):
+class QueryCommand(CommandBase[P]):
"""
Base class for query commands that execute immediately and bypass the queue.
- Query commands are read-only operations that return information about the robot state.
- They execute immediately without waiting in the command queue.
+ Query commands compute a result, pack it as a wire response, and return
+ the bytes. The controller calls compute() and sends the result directly.
+ Subclasses set QUERY_TYPE and implement compute().
"""
- def reply_text(self, message: str) -> None:
- """Send an opaque ASCII message via UDP."""
- if self.udp_transport and self.addr:
- try:
- self.udp_transport.send_response(message, self.addr)
- except Exception as e:
- self.log_warning("Failed to send UDP reply: %s", e)
-
- def reply_ascii(self, prefix_or_message: str, payload: str | None = None) -> None:
- """
- Reply as 'PREFIX|payload' if payload provided; otherwise send prefix_or_message verbatim.
- """
- if payload is None:
- self.reply_text(prefix_or_message)
- else:
- self.reply_text(f"{prefix_or_message}|{payload}")
+ QUERY_TYPE: ClassVar[QueryType]
- def reply_json(self, prefix: str, obj: Any) -> None:
- """Reply with JSON payload."""
- try:
- s = json.dumps(obj)
- except Exception:
- s = "{}"
- self.reply_ascii(prefix, s)
+ @abstractmethod
+ def compute(self, state: ControllerState) -> bytes:
+ """Compute the query result, pack it, and return response bytes."""
+ ...
- def tick(self, state: ControllerState) -> ExecutionStatus:
- """
- Template-method wrapper that centralizes lifecycle/error handling and calls do_execute().
- Controllers should prefer tick() over calling execute_step() directly.
- """
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid command")
- )
- if not self.udp_transport or not self.addr:
- self.fail("Missing UDP transport or address")
- return ExecutionStatus.failed("Missing UDP transport or address")
- try:
- status = self.execute_step(state)
- except Exception as e:
- # Hard failure safeguards
- self.fail(str(e))
- return ExecutionStatus.failed("Execution error", error=e)
- return status
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ # Queries are dispatched via compute() by the controller.
+ # This exists only to satisfy the abstract method.
+ raise NotImplementedError("Queries use compute(), not execute_step()")
-class MotionCommand(CommandBase):
+class MotionCommand(CommandBase[P]):
"""
Base class for motion commands that require the controller to be enabled.
@@ -425,222 +260,97 @@ class MotionCommand(CommandBase):
Some motion commands (like jog commands) can be replaced in stream mode.
"""
- streamable: bool = False # Can be replaced in stream mode (only for jog commands)
-
- # Limits and kinematic constants
- LIMS_STEPS: ClassVar[np.ndarray] = PAROL6_ROBOT.joint.limits.steps
- J_MIN: ClassVar[np.ndarray] = PAROL6_ROBOT.joint.speed.min
- J_MAX: ClassVar[np.ndarray] = PAROL6_ROBOT.joint.speed.max
- JOG_MIN: ClassVar[np.ndarray] = PAROL6_ROBOT.joint.speed.jog.min
- JOG_MAX: ClassVar[np.ndarray] = PAROL6_ROBOT.joint.speed.jog.max
- ACC_MIN_RAD: ClassVar[float] = PAROL6_ROBOT.joint.acc.min_rad
- ACC_MAX_RAD: ClassVar[float] = PAROL6_ROBOT.joint.acc.max_rad
- CART_LIN_JOG_MIN: ClassVar[float] = PAROL6_ROBOT.cart.vel.jog.min
- CART_LIN_JOG_MAX: ClassVar[float] = PAROL6_ROBOT.cart.vel.jog.max
- CART_ANG_JOG_MIN: ClassVar[float] = PAROL6_ROBOT.cart.vel.angular.min # deg/s
- CART_ANG_JOG_MAX: ClassVar[float] = PAROL6_ROBOT.cart.vel.angular.max # deg/s
-
- def __init__(self) -> None:
- super().__init__()
-
- # ---- mapping ----
- @staticmethod
- def linmap_pct(pct: float, lo: float, hi: float) -> float:
- if pct < 0.0:
- pct = 0.0
- elif pct > 100.0:
- pct = 100.0
- return lo + (hi - lo) * (pct / 100.0)
-
- # ---- per-joint max speed/acc ----
- def joint_vmax(self, velocity_percent: float) -> np.ndarray:
- return self.J_MIN + (self.J_MAX - self.J_MIN) * (
- max(0.0, min(100.0, velocity_percent)) / 100.0
- )
-
- def joint_amax_steps(self, accel_percent: float) -> np.ndarray:
- a_rad = self.linmap_pct(accel_percent, self.ACC_MIN_RAD, self.ACC_MAX_RAD)
- return np.asarray(
- PAROL6_ROBOT.ops.speed_rad_to_steps(np.full(6, a_rad)), dtype=float
- )
-
- # ---- speed scaling & limits ----
- def scale_speeds_to_joint_max(self, speeds: np.ndarray) -> np.ndarray:
- denom = np.where(self.J_MAX != 0.0, self.J_MAX, 1.0)
- scale = float(np.max(np.abs(speeds) / denom))
- if scale > 1.0:
- return np.rint(speeds / scale).astype(np.int32)
- else:
- return np.asarray(speeds, dtype=np.int32)
-
- def limit_hit_mask(self, pos_steps: np.ndarray, speeds: np.ndarray) -> np.ndarray:
- return ((speeds > 0) & (pos_steps >= self.LIMS_STEPS[:, 1])) | (
- (speeds < 0) & (pos_steps <= self.LIMS_STEPS[:, 0])
- )
+ streamable: bool = False
- # ---- trapezoid batch planner for step-space ----
- @staticmethod
- def plan_trapezoids(
- start_steps: np.ndarray, target_steps: np.ndarray, tgrid: np.ndarray
- ) -> tuple[np.ndarray, np.ndarray]:
- n = int(tgrid.size)
- q = np.empty((n, 6), dtype=float)
- qd = np.empty((n, 6), dtype=float)
- stationary = target_steps == start_steps
- if np.any(stationary):
- q[:, stationary] = start_steps[stationary]
- qd[:, stationary] = 0.0
- for i in np.flatnonzero(~stationary):
- jt = rp.trapezoidal(float(start_steps[i]), float(target_steps[i]), tgrid)
- q[:, i] = jt.q
- qd[:, i] = jt.qd
- return q, qd
-
- def fail_and_idle(self, state: ControllerState, message: str) -> None:
- self.fail(message)
+ def fail_and_idle(self, state: ControllerState, error: RobotError) -> None:
+ self.fail(error)
self.stop_and_idle(state)
- # ---- Higher-level IO helpers for common patterns ----
def set_move_position(self, state: ControllerState, steps: np.ndarray) -> None:
"""Set position for MOVE command (zero speeds, Command=MOVE)."""
- np.copyto(state.Position_out, steps, casting="no")
+ state.Position_out[:] = steps
state.Speed_out.fill(0)
state.Command_out = CommandCode.MOVE
- def tick(self, state: ControllerState) -> ExecutionStatus:
- """
- Template-method wrapper that centralizes lifecycle/error handling and calls do_execute().
- Controllers should prefer tick() over calling execute_step() directly.
- """
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid command")
- )
- try:
- status = self.execute_step(state)
- except Exception as e:
- # Hard failure safeguards
- self.fail_and_idle(state, str(e))
- self.log_error(str(e))
- return ExecutionStatus.failed("Execution error", error=e)
- return status
+ def _on_tick_error(self, state: ControllerState, error: Exception) -> None:
+ """Zero speeds and set IDLE on error."""
+ self.fail_and_idle(
+ state,
+ extract_robot_error(error, ErrorCode.MOTN_TICK_FAILED, detail=str(error)),
+ )
+ self.log_error(str(error))
-# TODO: need to get and support the other motion profiles from the original program
-class MotionProfile:
- """
- Utilities to build motion profiles in step-space using consistent trapezoids.
+class TrajectoryMoveCommandBase(MotionCommand[P]):
"""
+ Base class for commands that execute pre-computed trajectories.
- @staticmethod
- def from_duration_steps(
- start_steps: np.ndarray,
- target_steps: np.ndarray,
- duration_s: float,
- dt: float = INTERVAL_S,
- ) -> np.ndarray:
- """
- Build per-joint trapezoids to reach target in given duration.
- Returns array of shape (N, 6) steps (int32).
- """
- dur = float(max(0.0, duration_s))
- if dur == 0.0:
- # Degenerate: single step
- return np.asarray(target_steps, dtype=np.int32).reshape(1, -1)
- n = max(2, int(np.ceil(dur / max(1e-9, dt))))
- tgrid = np.linspace(0.0, dur, n, dtype=float)
- q, _qd = MotionCommand.plan_trapezoids(
- np.asarray(start_steps, dtype=float),
- np.asarray(target_steps, dtype=float),
- tgrid,
- )
- return cast(np.ndarray, q.astype(np.int32, copy=False))
+ Subclasses pre-compute trajectory_steps in do_setup(). Velocity/acceleration
+ limits are enforced during trajectory building via local segment slowdown,
+ so execute_step() simply outputs waypoints tick-by-tick.
+ """
- @staticmethod
- def from_velocity_percent(
- start_steps: np.ndarray,
- target_steps: np.ndarray,
- velocity_percent: float,
- accel_percent: float,
- dt: float = INTERVAL_S,
- ) -> np.ndarray:
- """
- Build per-joint trapezoids sized by per-joint vmax and accel derived from percent settings.
- """
- start_steps = np.asarray(start_steps, dtype=float)
- target_steps = np.asarray(target_steps, dtype=float)
-
- # Per-joint vmax and amax (steps/s and steps/s^2)
- jmin = MotionCommand.J_MIN
- jmax = MotionCommand.J_MAX
- v_max_joint = jmin + (jmax - jmin) * (
- max(0.0, min(100.0, velocity_percent)) / 100.0
- )
+ __slots__ = ("trajectory_steps", "command_step", "_duration")
- # Compute accel steps without instantiating MotionCommand
- a_rad = MotionCommand.linmap_pct(
- accel_percent, MotionCommand.ACC_MIN_RAD, MotionCommand.ACC_MAX_RAD
- )
- a_steps_vec = np.asarray(
- PAROL6_ROBOT.ops.speed_rad_to_steps(np.full(6, a_rad)), dtype=float
- )
+ def __init__(self, p: P):
+ super().__init__(p)
+ self.trajectory_steps: np.ndarray = np.empty((0, 6), dtype=np.int32)
+ self.command_step = 0
+ self._duration: float = 0.0
- if np.any(v_max_joint <= 0) or np.any(a_steps_vec <= 0):
- raise ValueError("Invalid speed/acceleration (must be positive).")
+ @property
+ def blend_radius(self) -> float:
+ """Blend radius in mm. Default 0 (stop at target). Read from params.r if present."""
+ return float(getattr(self.p, "r", 0.0))
- path = np.abs(target_steps - start_steps)
- t_accel = v_max_joint / a_steps_vec # time to reach vmax per joint
- short_path = path < (v_max_joint * t_accel)
+ def do_setup_with_blend(
+ self,
+ state: ControllerState,
+ next_cmds: "list[TrajectoryMoveCommandBase]",
+ ) -> int:
+ """Set up trajectory with blend through N next commands.
- # Safe accel time for short paths
- t_accel_adj = t_accel.copy()
- mask = short_path
- t_accel_adj[mask] = 0.0
- mask_safe = mask & (a_steps_vec > 0)
- t_accel_adj[mask_safe] = np.sqrt(path[mask_safe] / a_steps_vec[mask_safe])
+ Subclasses that support blending (MoveLCommand, JointMoveCommandBase)
+ override this method. The default falls back to single-command setup.
- # Per-joint total time, then horizon
- joint_time = np.where(
- short_path, 2.0 * t_accel_adj, path / v_max_joint + t_accel
- )
- total_time = float(np.max(joint_time))
- if total_time <= 0.0:
- return cast(
- np.ndarray, np.asarray(start_steps, dtype=np.int32).reshape(1, -1)
- )
- if total_time < (2 * dt):
- total_time = 2 * dt
+ Returns:
+ Number of *next_cmds* consumed (0 = no blending).
+ """
+ self.do_setup(state)
+ return 0
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Execute trajectory by outputting pre-computed waypoints."""
+ if self.command_step >= len(self.trajectory_steps):
+ self.log_info("%s finished.", self.__class__.__name__)
+ self.finish()
+ self.stop_and_idle(state)
+ return ExecutionStatusCode.COMPLETED
+
+ target = self.trajectory_steps[self.command_step]
+ state.Position_out[:] = target
+ state.Command_out = CommandCode.MOVE
+ self.command_step += 1
- n = max(2, int(np.ceil(total_time / max(dt, 1e-9))))
- tgrid = np.linspace(0.0, total_time, n, dtype=float)
- q, _qd = MotionCommand.plan_trapezoids(start_steps, target_steps, tgrid)
- return cast(np.ndarray, q.astype(np.int32, copy=False))
+ return ExecutionStatusCode.EXECUTING
-class SystemCommand(CommandBase):
+class SystemCommand(CommandBase[P]):
"""
Base class for system control commands that can execute regardless of enable state.
System commands control the overall state of the robot controller (enable/disable, stop, etc.)
and can execute even when the controller is disabled.
+
+ Side-effect signaling: commands that need infrastructure changes (simulator toggle,
+ port switch, mock sync) set the corresponding attribute. The controller reads these
+ after tick() and orchestrates the actual change.
"""
- def tick(self, state: "ControllerState") -> ExecutionStatus:
- """
- Centralized lifecycle/error handling for system commands.
- """
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid command")
- )
- try:
- status = self.execute_step(state)
- except Exception as e:
- self.fail(str(e))
- self.log_error(str(e))
- return ExecutionStatus.failed("Execution error", error=e)
- return status
+ __slots__ = ("_switch_simulator", "_switch_port", "_sync_mock")
+
+ def __init__(self, p: P) -> None:
+ super().__init__(p)
+ self._switch_simulator: bool | None = None
+ self._switch_port: str | None = None
+ self._sync_mock: bool = False
diff --git a/parol6/commands/basic_commands.py b/parol6/commands/basic_commands.py
index c2e7d0e..59d0f52 100644
--- a/parol6/commands/basic_commands.py
+++ b/parol6/commands/basic_commands.py
@@ -1,509 +1,266 @@
"""
Basic Robot Commands
-Contains fundamental movement commands: Home, Jog, and MultiJog
+Contains fundamental movement commands: Home, Jog, SelectTool, and Teleport.
"""
import logging
-from math import ceil
-
+from enum import Enum, auto
import numpy as np
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-from parol6.config import INTERVAL_S
+from parol6.config import (
+ JOG_MIN_STEPS,
+ LIMITS,
+ rad_to_steps,
+ speed_steps_to_rad_scalar,
+ steps_to_rad,
+)
+from parol6.protocol.wire import (
+ CmdType,
+ HomeCmd,
+ JogJCmd,
+ SelectToolCmd,
+ TeleportCmd,
+)
from parol6.protocol.wire import CommandCode
from parol6.server.command_registry import register_command
from parol6.server.state import ControllerState
-from parol6.tools import TOOL_CONFIGS, list_tools
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+from parol6.config import deg_to_steps
+from parol6.server.transports.transport_factory import is_simulation_mode
from .base import (
- ExecutionStatus,
+ ExecutionStatusCode,
MotionCommand,
- csv_floats,
- csv_ints,
- parse_float,
- parse_int,
)
logger = logging.getLogger(__name__)
-@register_command("HOME")
-class HomeCommand(MotionCommand):
+def _limit_hit_mask(pos_steps: np.ndarray, speeds: np.ndarray) -> np.ndarray:
+ return ((speeds > 0) & (pos_steps >= LIMITS.joint.position.steps[:, 1])) | (
+ (speeds < 0) & (pos_steps <= LIMITS.joint.position.steps[:, 0])
+ )
+
+
+class HomeState(Enum):
+ """State machine states for the homing sequence."""
+
+ START = auto()
+ WAITING_FOR_UNHOMED = auto()
+ WAITING_FOR_HOMED = auto()
+
+
+@register_command(CmdType.HOME)
+class HomeCommand(MotionCommand[HomeCmd]):
"""
A non-blocking command that tells the robot to perform its internal homing sequence.
This version uses a state machine to allow re-homing even if the robot is already homed.
"""
+ PARAMS_TYPE = HomeCmd
+
__slots__ = (
"state",
"start_cmd_counter",
"timeout_counter",
)
- def __init__(self):
- super().__init__()
- # State machine: START -> WAIT_FOR_UNHOMED -> WAIT_FOR_HOMED -> FINISHED
- self.state = "START"
- # Counter to send the home command for multiple cycles
- self.start_cmd_counter = 10 # Send command 100 for 10 cycles (0.1s)
- # Safety timeout (20 seconds at 0.01s interval)
- self.timeout_counter = 2000
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse HOME command (no parameters).
-
- Format: HOME
- """
- if len(parts) != 1:
- return (False, "HOME command takes no parameters")
- self.log_trace("Parsed HOME command")
- return (True, None)
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """
- Manages the homing command and monitors for completion using a state machine.
- """
- # --- State: START ---
- # On the first few executions, continuously send the 'home' (100) command.
- if self.state == "START":
+ def __init__(self, p: HomeCmd):
+ super().__init__(p)
+ self.state = HomeState.START
+ self.start_cmd_counter = 10
+ self.timeout_counter = 4500
+
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Manages the homing command and monitors for completion using a state machine."""
+ if self.state == HomeState.START:
logger.debug(
- f" -> Sending home signal (100)... Countdown: {self.start_cmd_counter}"
+ " -> Sending home signal (100)... Countdown: %d",
+ self.start_cmd_counter,
)
state.Command_out = CommandCode.HOME
self.start_cmd_counter -= 1
if self.start_cmd_counter <= 0:
- # Once sent for enough cycles, move to the next state
- self.state = "WAITING_FOR_UNHOMED"
- return ExecutionStatus.executing("Homing: start")
-
- # --- State: WAITING_FOR_UNHOMED ---
- # The robot's firmware should reset the homed status. We wait to see that happen.
- # During this time, we send 'idle' (255) to let the robot's controller take over.
- if self.state == "WAITING_FOR_UNHOMED":
+ self.state = HomeState.WAITING_FOR_UNHOMED
+ return ExecutionStatusCode.EXECUTING
+
+ if self.state == HomeState.WAITING_FOR_UNHOMED:
state.Command_out = CommandCode.IDLE
- # Homing sequence initiated detection
if np.any(state.Homed_in[:6] == 0):
logger.info(" -> Homing sequence initiated by robot.")
- self.state = "WAITING_FOR_HOMED"
- # Homing timeout protection
+ self.state = HomeState.WAITING_FOR_HOMED
self.timeout_counter -= 1
if self.timeout_counter <= 0:
- raise TimeoutError(
- "Timeout waiting for robot to start homing sequence."
- )
- return ExecutionStatus.executing("Homing: waiting for unhomed")
+ self.fail(make_error(ErrorCode.MOTN_HOME_TIMEOUT))
+ self.stop_and_idle(state)
+ return ExecutionStatusCode.FAILED
+ return ExecutionStatusCode.EXECUTING
- # --- State: WAITING_FOR_HOMED ---
- # Now we wait for all joints to report that they are homed (all flags are 1).
- if self.state == "WAITING_FOR_HOMED":
+ if self.state == HomeState.WAITING_FOR_HOMED:
state.Command_out = CommandCode.IDLE
- # Homing completion verification
if np.all(state.Homed_in[:6] == 1):
self.log_info("Homing sequence complete. All joints reported home.")
- self.is_finished = True
+ self.finish()
self.stop_and_idle(state)
- return ExecutionStatus.completed("Homing complete")
-
- return ExecutionStatus.executing("Homing: waiting for homed")
+ return ExecutionStatusCode.COMPLETED
+ self.timeout_counter -= 1
+ if self.timeout_counter <= 0:
+ self.fail(make_error(ErrorCode.MOTN_HOME_TIMEOUT))
+ self.stop_and_idle(state)
+ return ExecutionStatusCode.FAILED
- return ExecutionStatus.executing("Homing")
+ return ExecutionStatusCode.EXECUTING
-@register_command("JOG")
-class JogCommand(MotionCommand):
+@register_command(CmdType.JOGJ)
+class JogJCommand(MotionCommand[JogJCmd]):
"""
- A non-blocking command to jog a joint for a specific duration or distance.
- It performs all safety and validity checks upon initialization.
+ A non-blocking command to jog joints for a specific duration.
+ Uses static 6-element speed array on the wire (all joints, zeros for inactive).
"""
- streamable = True # Can be replaced in stream mode
+ PARAMS_TYPE = JogJCmd
+ streamable = True
__slots__ = (
- "mode",
- "command_step",
- "joint",
- "speed_percentage",
- "duration",
- "distance_deg",
- "direction",
- "joint_index",
- "speed_out",
- "command_len",
- "target_position",
+ "speeds_out",
+ "_jog_initialized",
+ "_jog_vel_rad",
)
- def __init__(self):
- """
- Initializes the jog command.
- Parameters are parsed in do_match() method.
- """
- super().__init__()
- self.mode = None
- self.command_step = 0
-
- # Parameters (set in match())
- self.joint = None
- self.speed_percentage = None
- self.duration = None
- self.distance_deg = None
-
- # Calculated values
- self.direction = 1
- self.joint_index = 0
- self.speed_out = 0
- self.target_position = 0
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse JOG command parameters.
-
- Format: JOG|joint|speed_pct|duration|distance
- Example: JOG|0|50|2.0|None
-
- Args:
- parts: Pre-split message parts
-
- Returns:
- Tuple of (can_handle, error_message)
- """
- if len(parts) != 5:
- return (
- False,
- "JOG requires 4 parameters: joint, speed, duration, distance",
- )
-
- # Parse parameters using utilities
- self.joint = parse_int(parts[1])
- self.speed_percentage = parse_float(parts[2])
- self.duration = parse_float(parts[3])
- self.distance_deg = parse_float(parts[4])
-
- if self.joint is None or self.speed_percentage is None:
- return (False, "Joint and speed percentage are required")
-
- # Determine mode
- if self.duration and self.distance_deg:
- self.mode = "distance"
- self.log_trace(
- "Parsed Jog: Joint %s, Distance %s deg, Duration %ss.",
- self.joint,
- self.distance_deg,
- self.duration,
- )
- elif self.duration:
- self.mode = "time"
- self.log_trace(
- "Parsed Jog: Joint %s, Speed %s%%, Duration %ss.",
- self.joint,
- self.speed_percentage,
- self.duration,
- )
- elif self.distance_deg:
- self.mode = "distance"
- self.log_trace(
- "Parsed Jog: Joint %s, Speed %s%%, Distance %s deg.",
- self.joint,
- self.speed_percentage,
- self.distance_deg,
- )
- else:
- return (False, "JOG requires either duration or distance")
-
- self.is_valid = True
- return (True, None)
-
- def setup(self, state):
- """Pre-computes speeds and target positions using live data."""
- # Validate joint is set
- if self.joint is None:
- raise RuntimeError("Joint index not set")
-
- # Joint direction and index mapping
- self.direction = 1 if 0 <= self.joint <= 5 else -1
- self.joint_index = self.joint if self.direction == 1 else self.joint - 6
-
- lims = self.LIMS_STEPS[self.joint_index]
- min_limit, max_limit = lims[0], lims[1]
-
- distance_steps = 0
- if self.distance_deg is not None:
- distance_steps = int(
- PAROL6_ROBOT.ops.deg_to_steps(abs(self.distance_deg), self.joint_index)
- )
- self.target_position = state.Position_in[self.joint_index] + (
- distance_steps * self.direction
- )
-
- if not (min_limit <= self.target_position <= max_limit):
- # Convert to degrees for clearer error message
- target_deg = PAROL6_ROBOT.ops.steps_to_deg(
- self.target_position, self.joint_index
- )
- min_deg = PAROL6_ROBOT.ops.steps_to_deg(min_limit, self.joint_index)
- max_deg = PAROL6_ROBOT.ops.steps_to_deg(max_limit, self.joint_index)
- raise ValueError(
- f"Target position {target_deg:.2f}° is out of joint limits ({min_deg:.2f}°, {max_deg:.2f}°)."
- )
-
- # Motion timing calculations
- jog_min = self.JOG_MIN[self.joint_index]
- jog_max = self.JOG_MAX[self.joint_index]
-
- if self.mode == "distance" and self.duration:
- speed_steps_per_sec = (
- int(distance_steps / self.duration) if self.duration > 0 else 0
- )
- if speed_steps_per_sec > jog_max:
- raise ValueError(
- f"Required speed ({speed_steps_per_sec} steps/s) exceeds joint's max jog speed ({jog_max} steps/s)."
+ def __init__(self, p: JogJCmd):
+ super().__init__(p)
+ self.speeds_out = np.zeros(6, dtype=np.int32)
+ self._jog_initialized = False
+ self._jog_vel_rad = np.zeros(6, dtype=np.float64)
+
+ def do_setup(self, state: "ControllerState") -> None:
+ """Pre-compute step speeds and rad/s velocities for all 6 joints."""
+ for i in range(6):
+ s = self.p.speeds[i]
+ if s == 0.0:
+ self.speeds_out[i] = 0
+ self._jog_vel_rad[i] = 0.0
+ else:
+ frac = min(abs(s), 1.0)
+ step_speed = int(
+ JOG_MIN_STEPS
+ + (LIMITS.joint.jog.velocity_steps[i] - JOG_MIN_STEPS) * frac
)
- # Ensure speed is at least the minimum jog speed if not zero
- if speed_steps_per_sec > 0:
- speed_steps_per_sec = max(speed_steps_per_sec, jog_min)
- else:
- if self.speed_percentage is None:
- raise ValueError(
- "'speed_percentage' must be provided if not calculating automatically."
+ self.speeds_out[i] = step_speed if s > 0 else -step_speed
+ self._jog_vel_rad[i] = speed_steps_to_rad_scalar(step_speed, i) * (
+ 1 if s > 0 else -1
)
- speed_steps_per_sec = int(
- self.linmap_pct(abs(self.speed_percentage), jog_min, jog_max)
- )
-
- self.speed_out = speed_steps_per_sec * self.direction
-
- # Start timer for time-based mode
- if self.mode == "time" and self.duration and self.duration > 0:
- self.start_timer(self.duration)
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """This is the EXECUTION phase. It runs on every loop cycle."""
+ self.start_timer(self.p.duration)
+ self._jog_initialized = False
- # Type guard to ensure joint_index is valid
- if self.joint_index is None or not isinstance(self.joint_index, int):
- raise RuntimeError("Invalid joint index in execute_step")
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Execute one tick of joint jogging via StreamingExecutor."""
+ se = state.streaming_executor
- stop_reason = None
- cur = state.Position_in[self.joint_index]
+ # Sync position on first tick
+ if not self._jog_initialized:
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ se.sync_position(self._q_rad_buf)
+ self._jog_initialized = True
- if self.mode == "time" and self.timer_expired():
- stop_reason = "Timed jog finished."
- elif self.mode == "distance" and (
- (self.direction == 1 and cur >= self.target_position)
- or (self.direction == -1 and cur <= self.target_position)
- ):
- stop_reason = "Distance jog finished."
-
- if not stop_reason:
- # Use base class limit_hit_mask helper
- speeds_array = np.zeros(6)
- speeds_array[self.joint_index] = self.speed_out
- limit_mask = self.limit_hit_mask(state.Position_in, speeds_array)
- if limit_mask[self.joint_index]:
- stop_reason = f"Limit reached on joint {self.joint_index + 1}."
+ stop_reason = self._check_stop_conditions(state)
if stop_reason:
- if stop_reason.startswith("Limit"):
- logger.warning(stop_reason)
- else:
- self.log_trace(stop_reason)
-
- self.is_finished = True
- self.stop_and_idle(state)
- return ExecutionStatus.completed(stop_reason)
+ self._jog_vel_rad.fill(0.0)
+ se.set_jog_velocity(self._jog_vel_rad)
+ pos_rad, vel, finished = se.tick()
+ self._q_rad_buf[:] = pos_rad
+ rad_to_steps(self._q_rad_buf, self._steps_buf)
+ self.set_move_position(state, self._steps_buf)
+
+ if finished or np.dot(vel, vel) < 1e-6:
+ if stop_reason.startswith("Limit"):
+ logger.warning(stop_reason)
+ else:
+ self.log_trace(stop_reason)
+ se.active = False
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+ return ExecutionStatusCode.EXECUTING
+
+ se.set_jog_velocity(self._jog_vel_rad)
+ pos_rad, _vel, _finished = se.tick()
+ self._q_rad_buf[:] = pos_rad
+ rad_to_steps(self._q_rad_buf, self._steps_buf)
+ self.set_move_position(state, self._steps_buf)
+
+ return ExecutionStatusCode.EXECUTING
+
+ def _check_stop_conditions(self, state: "ControllerState") -> str | None:
+ """Check if jog should stop. Returns stop reason or None."""
+ if self.timer_expired():
+ return "Timed jog finished."
+
+ limit_mask = _limit_hit_mask(state.Position_in, self.speeds_out)
+ if np.any(limit_mask):
+ return f"Limit reached on joint {int(np.argmax(limit_mask)) + 1}."
- state.Speed_out.fill(0)
- state.Speed_out[self.joint_index] = self.speed_out
- state.Command_out = CommandCode.JOG
- self.command_step += 1
- return ExecutionStatus.executing("Jogging")
+ return None
-@register_command("MULTIJOG")
-class MultiJogCommand(MotionCommand):
+@register_command(CmdType.SELECT_TOOL)
+class SelectToolCommand(MotionCommand[SelectToolCmd]):
"""
- A non-blocking command to jog multiple joints simultaneously for a specific duration.
- It performs all safety and validity checks upon initialization.
+ Set the current end-effector tool configuration.
"""
- streamable = True # Can be replaced in stream mode
-
- __slots__ = (
- "command_step",
- "joints",
- "speed_percentages",
- "duration",
- "command_len",
- "speeds_out",
- "_lims_steps",
- )
-
- def __init__(self):
- """
- Initializes the multi-jog command.
- Parameters are parsed in do_match() method.
- """
- super().__init__()
- self.command_step = 0
-
- # Parameters (set in do_match())
- self.joints = None
- self.speed_percentages = None
- self.duration = None
- self.command_len = 0
-
- # Calculated values
- self.speeds_out = np.zeros(6, dtype=np.int32)
- self._lims_steps = PAROL6_ROBOT.joint.limits.steps
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse MULTIJOG command parameters.
-
- Format: MULTIJOG|joints_csv|speeds_csv|duration
- Example: MULTIJOG|0,1,2|50,75,100|3.0
-
- Args:
- parts: Pre-split message parts
-
- Returns:
- Tuple of (can_handle, error_message)
- """
- if len(parts) != 4:
- return (False, "MULTIJOG requires 3 parameters: joints, speeds, duration")
-
- # Parse parameters using utilities
- self.joints = csv_ints(parts[1])
- self.speed_percentages = csv_floats(parts[2])
- self.duration = parse_float(parts[3]) or 0.0
-
- # Validate
- if len(self.joints) != len(self.speed_percentages):
- return (False, "Number of joints must match number of speeds")
-
- if self.duration <= 0:
- return (False, "Duration must be positive")
-
- # Conflict detection on base joints
- base = set()
- for j in self.joints:
- b = j % 6
- if b in base:
- return (False, f"Conflicting commands for Joint {b + 1}")
- base.add(b)
-
- self.log_trace(
- "Parsed MultiJog for joints %s with speeds %s%% for %ss.",
- self.joints,
- self.speed_percentages,
- self.duration,
- )
-
- self.command_len = ceil(self.duration / INTERVAL_S)
- self.is_valid = True
- return (True, None)
-
- def setup(self, state):
- """Pre-computes the speeds for each joint."""
- # Validate joints and speed_percentages are set
- if self.joints is None or self.speed_percentages is None:
- raise ValueError("Joints or speed percentages not set")
-
- # Vectorized computation for all joints
- joints_arr = np.asarray(self.joints, dtype=int)
- speeds_pct = np.asarray(self.speed_percentages, dtype=float)
-
- # Map to base joint index (0-5) and direction (+/-)
- direction = np.where((joints_arr >= 0) & (joints_arr <= 5), 1, -1)
- joint_index = np.where(direction == 1, joints_arr, joints_arr - 6)
-
- # Validate indices
- invalid_mask = (joint_index < 0) | (joint_index >= 6)
- if np.any(invalid_mask):
- bad = joint_index[invalid_mask]
- raise ValueError(f"Invalid joint indices {bad.tolist()}")
-
- pct = np.clip(np.abs(speeds_pct) / 100.0, 0.0, 1.0)
- for i, idx in enumerate(joint_index):
- self.speeds_out[idx] = (
- int(
- self.linmap_pct(
- pct[i] * 100.0, self.JOG_MIN[idx], self.JOG_MAX[idx]
- )
- )
- * direction[i]
- )
-
- # Start timer if duration is specified
- if self.duration and self.duration > 0:
- self.start_timer(self.duration)
+ PARAMS_TYPE = SelectToolCmd
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """This is the EXECUTION phase. It runs on every loop cycle."""
- # Stop if the duration has elapsed (check both timer and step count)
- if self.timer_expired() or self.command_step >= self.command_len:
- self.is_finished = True
- self.stop_and_idle(state)
- return ExecutionStatus.completed("MultiJog")
-
- # Use base class helper for limit checks
- limit_mask = self.limit_hit_mask(state.Position_in, self.speeds_out)
- if np.any(limit_mask):
- i = np.argmax(limit_mask) # first violating joint
- logger.warning(f"Limit reached on joint {i + 1}. Stopping jog.")
- self.is_finished = True
- self.stop_and_idle(state)
- return ExecutionStatus.completed(f"Limit reached on J{i + 1}")
-
- # Apply self.speeds_out
- np.copyto(state.Speed_out, self.speeds_out, casting="no")
- state.Command_out = CommandCode.JOG
- self.command_step += 1
- return ExecutionStatus.executing("MultiJog")
+ __slots__ = ()
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Set the tool in state and update robot kinematics."""
+ tool_name = self.p.tool_name.strip().upper()
+ variant_key = self.p.variant_key
-@register_command("SET_TOOL")
-class SetToolCommand(MotionCommand):
- """
- Set the current end-effector tool configuration.
- Changes the tool transform used for forward/inverse kinematics.
- """
+ state.set_tool(tool_name, variant_key)
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
- __slots__ = ("tool_name",)
- def __init__(self):
- super().__init__()
- self.tool_name = None
+@register_command(CmdType.TELEPORT)
+class TeleportCommand(MotionCommand[TeleportCmd]):
+ """Instantly set joint angles (simulator only, no trajectory)."""
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SET_TOOL command parameters.
+ PARAMS_TYPE = TeleportCmd
+ streamable = True
- Format: SET_TOOL|tool_name
- Example: SET_TOOL|PNEUMATIC
- """
- if len(parts) != 2:
- return (False, "SET_TOOL requires 1 parameter: tool_name")
+ __slots__ = ("_target_steps", "_deg_buf", "_sim_mode")
- self.tool_name = parts[1].strip().upper()
+ def __init__(self, p: TeleportCmd):
+ super().__init__(p)
+ self._target_steps = np.empty(6, dtype=np.int32)
+ self._deg_buf = np.empty(6, dtype=np.float64)
+ self._sim_mode = False
- # Validate tool name during parsing
- if self.tool_name not in TOOL_CONFIGS:
- available = list_tools()
- return (False, f"Unknown tool '{self.tool_name}'. Available: {available}")
+ def do_setup(self, state: ControllerState) -> None:
+ self._sim_mode = is_simulation_mode()
+ self._deg_buf[:] = self.p.angles
+ deg_to_steps(self._deg_buf, self._target_steps)
- self.log_trace(f"Parsed SET_TOOL command: {self.tool_name}")
- return (True, None)
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ if not self._sim_mode:
+ logger.warning("TELEPORT rejected: only allowed in simulator mode")
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Set the tool in state and update robot kinematics."""
- # Type guard
- if self.tool_name is None:
- raise RuntimeError("Tool name not set")
+ state.Position_out[:] = self._target_steps
+ state.Speed_out.fill(0)
+ state.Command_out = CommandCode.TELEPORT
- # Update server state - property setter handles tool application and cache invalidation
- state.current_tool = self.tool_name
+ if self.p.tool_positions:
+ state.tool_teleport_pos = (
+ max(0.0, min(1.0, self.p.tool_positions[0])) * 255.0
+ )
+ # Clear gripper command bits so write_frame's JIT doesn't re-arm the ramp
+ state.Gripper_data_out[3] = 0
- self.log_info(f"Tool set to: {self.tool_name}")
- self.is_finished = True
- return ExecutionStatus.completed(f"Tool set: {self.tool_name}")
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
diff --git a/parol6/commands/cartesian_commands.py b/parol6/commands/cartesian_commands.py
index 2dfb8fe..54f4169 100644
--- a/parol6/commands/cartesian_commands.py
+++ b/parol6/commands/cartesian_commands.py
@@ -1,506 +1,490 @@
"""
Cartesian Movement Commands
-Contains commands for Cartesian space movements: CartesianJog, MovePose, and MoveCart
+Contains commands for Cartesian space movements: CartesianJog, MovePose, MoveCart, MoveCartRelTrf
"""
import logging
-import time
from typing import cast
import numpy as np
-from spatialmath import SE3
import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-from parol6.config import DEFAULT_ACCEL_PERCENT, INTERVAL_S, TRACE
-from parol6.protocol.wire import CommandCode
+from parol6.config import (
+ CART_ANG_JOG_MIN,
+ CART_LIN_JOG_MIN,
+ INTERVAL_S,
+ LIMITS,
+ PATH_SAMPLES,
+ rad_to_steps,
+ steps_to_rad,
+)
+from parol6.motion import JointPath, TrajectoryBuilder
+from parol6.protocol.wire import (
+ CmdType,
+ JogLCmd,
+ MoveLCmd,
+)
from parol6.server.command_registry import register_command
from parol6.server.state import ControllerState, get_fkine_se3
-from parol6.utils.errors import IKError
-from parol6.utils.ik import AXIS_MAP, quintic_scaling, solve_ik
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+from parol6.utils.ik import RateLimitedWarning, solve_ik
+from pinokin import se3_from_rpy, se3_interp, se3_rpy
-from .base import ExecutionStatus, MotionCommand, MotionProfile
+from parol6.commands.servo_commands import _max_vel_ratio_jit
+
+from .base import (
+ ExecutionStatusCode,
+ MotionCommand,
+ TrajectoryMoveCommandBase,
+)
logger = logging.getLogger(__name__)
+# Pre-computed Cartesian jog limit constants (avoid per-tick recomputation)
+_CART_ANG_JOG_MIN_RAD: float = float(np.deg2rad(CART_ANG_JOG_MIN))
+_CART_ANG_JOG_MAX_RAD: float = float(LIMITS.cart.jog.velocity.angular)
+_CART_LIN_JOG_MIN_MS: float = CART_LIN_JOG_MIN / 1000.0
+_CART_LIN_JOG_MAX_MS: float = float(LIMITS.cart.jog.velocity.linear)
+
+
+def _linmap_frac(frac: float, lo: float, hi: float) -> float:
+ if frac < 0.0:
+ frac = 0.0
+ elif frac > 1.0:
+ frac = 1.0
+ return lo + (hi - lo) * frac
+
-@register_command("CARTJOG")
-class CartesianJogCommand(MotionCommand):
+_ik_warn = RateLimitedWarning()
+
+
+@register_command(CmdType.JOGL)
+class JogLCommand(MotionCommand[JogLCmd]):
"""
A non-blocking command to jog the robot's end-effector in Cartesian space.
+
+ CSE drives Cartesian velocity (Ruckig-smoothed). IK converts each
+ smoothed pose to joint space. Velocity clamping and commanded-position
+ tracking match servo_l for smooth, deterministic joint trajectories.
"""
+ PARAMS_TYPE = JogLCmd
streamable = True
__slots__ = (
- "frame",
- "axis",
- "speed_percentage",
- "duration",
- "axis_vectors",
"is_rotation",
+ "_ik_stopping",
+ "_axis_index",
+ "_axis_sign",
+ "_dot_buf",
+ "_q_commanded",
+ "_q_ik_seed",
+ "_dq_buf",
+ "_pos_rad_buf",
+ "_vel_ratio",
)
- def __init__(self):
- """
- Initializes the Cartesian jog command.
- Parameters are parsed in do_match() method.
- """
- super().__init__()
-
- # Parameters (set in do_match())
- self.frame = None
- self.axis = None
- self.speed_percentage: float = 50.0
- self.duration: float = 1.5
-
- # Runtime state
- self.axis_vectors = None
+ def __init__(self, p: JogLCmd):
+ super().__init__(p)
self.is_rotation = False
+ self._ik_stopping = False
+ self._axis_index = 0
+ self._axis_sign = 1.0
+ self._vel_ratio = 1.0
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse CARTJOG command parameters.
-
- Format: CARTJOG|frame|axis|speed_pct|duration
- Example: CARTJOG|WRF|+X|50|2.0
-
- Args:
- parts: Pre-split message parts
-
- Returns:
- Tuple of (can_handle, error_message)
- """
- if len(parts) != 5:
- return (
- False,
- "CARTJOG requires 4 parameters: frame, axis, speed, duration",
- )
-
- # Parse parameters
- self.frame = parts[1].upper()
- self.axis = parts[2]
- self.speed_percentage = float(parts[3])
- self.duration = float(parts[4])
-
- # Validate frame
- if self.frame not in ["WRF", "TRF"]:
- return (False, f"Invalid frame: {self.frame}. Must be WRF or TRF")
-
- # Validate axis
- if self.axis not in AXIS_MAP:
- return (False, f"Invalid axis: {self.axis}")
-
- # Store axis vectors for execution
- self.axis_vectors = AXIS_MAP[self.axis]
- self.is_rotation = any(self.axis_vectors[1])
-
- self.is_valid = True
- return (True, None)
+ self._dot_buf = np.zeros((), dtype=np.float64)
+ self._q_commanded = np.zeros(6, dtype=np.float64)
+ self._q_ik_seed = np.zeros(6, dtype=np.float64)
+ self._dq_buf = np.zeros(6, dtype=np.float64)
+ self._pos_rad_buf = np.zeros(6, dtype=np.float64)
def do_setup(self, state: "ControllerState") -> None:
- """Set the end time when the command actually starts."""
- self.start_timer(float(self.duration))
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- # --- A. Check for completion ---
- if self._t_end is None:
- # Initialize timer if missing (stream update or late init)
- self.start_timer(
- max(0.1, self.duration if self.duration is not None else 0.1)
- )
+ """Find dominant axis and start timer."""
+ vels = self.p.velocities
+ max_idx = 0
+ max_abs = abs(vels[0])
+ for i in range(1, 6):
+ a = abs(vels[i])
+ if a > max_abs:
+ max_abs = a
+ max_idx = i
+ self.is_rotation = max_idx >= 3
+ self._axis_index = max_idx - 3 if max_idx >= 3 else max_idx
+ self._axis_sign = 1.0 if vels[max_idx] >= 0 else -1.0
+ self.start_timer(self.p.duration)
+ self._ik_stopping = False
+
+ def _track_and_send(self, state: "ControllerState", ik_q: np.ndarray) -> None:
+ """Velocity-clamp IK result, update tracked position, send MOVE."""
+ self._q_ik_seed[:] = ik_q
+ dq = self._dq_buf
+ for i in range(6):
+ dq[i] = float(ik_q[i]) - self._q_commanded[i]
+ ratio = _max_vel_ratio_jit(ik_q, self._q_commanded)
+ if ratio > 1.0:
+ for i in range(6):
+ self._q_commanded[i] += dq[i] / ratio
+ self._vel_ratio = ratio
+ else:
+ self._q_commanded[:] = ik_q
+ self._vel_ratio = 1.0
+ self._pos_rad_buf[:] = self._q_commanded
+ rad_to_steps(self._pos_rad_buf, self._steps_buf)
+ self.set_move_position(state, self._steps_buf)
+
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Execute one tick of Cartesian jogging."""
+ cse = state.cartesian_streaming_executor
+
+ # Initialize only if not already active (preserve velocity across streaming)
+ if not cse.active:
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ cse.sync_pose(get_fkine_se3(state))
+ cse.set_limits(1.0, self.p.accel)
+ self._q_commanded[:] = self._q_rad_buf
+ self._q_ik_seed[:] = self._q_rad_buf
+ self._vel_ratio = 1.0
+
+ # Handle timer expiry - stop smoothly
if self.timer_expired():
- self.is_finished = True
+ cse.set_jog_velocity_1dof(self._axis_index, 0.0, self.is_rotation)
+ smoothed_pose, smoothed_vel, finished = cse.tick()
+
+ np.dot(smoothed_vel, smoothed_vel, out=self._dot_buf)
+ if not finished and self._dot_buf > 1e-8:
+ ik_result = solve_ik(PAROL6_ROBOT.robot, smoothed_pose, self._q_ik_seed)
+ if ik_result.success and ik_result.q is not None:
+ self._track_and_send(state, ik_result.q)
+ return ExecutionStatusCode.EXECUTING
+
+ cse.active = False
+ self.finish()
self.stop_and_idle(state)
- return ExecutionStatus.completed("CARTJOG complete")
-
- # --- B. Calculate Target Pose using clean vector math ---
- state.Command_out = CommandCode.JOG
-
- q_current = np.asarray(
- PAROL6_ROBOT.ops.steps_to_rad(state.Position_in), dtype=float
- )
- T_current = get_fkine_se3()
-
- if not isinstance(T_current, SE3):
- return ExecutionStatus.executing("Waiting for valid pose")
- if self.axis_vectors is None:
- return ExecutionStatus.executing("Waiting for axis vectors")
-
- linear_speed_ms = self.linmap_pct(
- self.speed_percentage, self.CART_LIN_JOG_MIN, self.CART_LIN_JOG_MAX
- )
- angular_speed_degs = self.linmap_pct(
- self.speed_percentage, self.CART_ANG_JOG_MIN, self.CART_ANG_JOG_MAX
- )
-
- delta_linear = linear_speed_ms * INTERVAL_S
- delta_angular_rad = np.deg2rad(angular_speed_degs * INTERVAL_S)
-
- # Create the small incremental transformation (delta_pose)
- trans_vec = np.array(self.axis_vectors[0]) * delta_linear
- rot_vec = np.array(self.axis_vectors[1]) * delta_angular_rad
-
- # Build delta transformation
- if not self.is_rotation:
- target_pose = SE3.Rt(T_current.R, T_current.t)
-
- if self.frame == "WRF":
- target_pose.t = T_current.t + trans_vec
- else: # TRF
- target_pose.t = T_current.t + (T_current.R @ trans_vec)
- else:
- if rot_vec[0] != 0: # RX rotation
- delta_pose = SE3.Rx(rot_vec[0]) * SE3(trans_vec)
- elif rot_vec[1] != 0: # RY rotation
- delta_pose = SE3.Ry(rot_vec[1]) * SE3(trans_vec)
- elif rot_vec[2] != 0: # RZ rotation
- delta_pose = SE3.Rz(rot_vec[2]) * SE3(trans_vec)
- else:
- delta_pose = SE3(trans_vec)
- # Apply the transformation in the correct reference frame
- if self.frame == "WRF":
- # Pre-multiply to apply the change in the World Reference Frame
- target_pose = delta_pose * T_current
- else: # TRF
- # Post-multiply to apply the change in the Tool Reference Frame
- target_pose = T_current * delta_pose
-
- # --- C. Solve IK and Calculate Velocities ---
- var = solve_ik(PAROL6_ROBOT.robot, target_pose, q_current, jogging=True)
-
- if var.success and var.q is not None:
- q = np.asarray(var.q, dtype=float)
- q_velocities = (q - q_current) / INTERVAL_S
- sps = PAROL6_ROBOT.ops.speed_rad_to_steps(q_velocities)
- np.copyto(state.Speed_out, np.asarray(sps), casting="no")
+ return ExecutionStatusCode.COMPLETED
+
+ # Compute target velocity based on speed fraction from velocity vector
+ vels = self.p.velocities
+ speed_mag = abs(vels[self._axis_index + (3 if self.is_rotation else 0)])
+ if self.is_rotation:
+ velocity = (
+ _linmap_frac(speed_mag, _CART_ANG_JOG_MIN_RAD, _CART_ANG_JOG_MAX_RAD)
+ * self._axis_sign
+ )
else:
- raise IKError("IK Warning: Could not find solution for jog step. Stopping.")
-
- # --- D. Speed Scaling using base class helper ---
- scaled_speeds = self.scale_speeds_to_joint_max(state.Speed_out)
- np.copyto(state.Speed_out, scaled_speeds)
-
- return ExecutionStatus.executing("CARTJOG")
-
-
-@register_command("MOVEPOSE")
-class MovePoseCommand(MotionCommand):
- """
- A non-blocking command to move the robot to a specific Cartesian pose.
- The movement itself is a joint-space interpolation.
- """
-
- __slots__ = (
- "command_step",
- "trajectory_steps",
- "pose",
- "duration",
- "velocity_percent",
- "accel_percent",
- "trajectory_type",
- )
-
- def __init__(self, pose=None, duration=None):
- super().__init__()
- self.command_step = 0
- self.trajectory_steps: np.ndarray = np.empty((0, 6), dtype=np.int32)
-
- # Parameters (set in do_match())
- self.pose = pose
- self.duration = duration
- self.velocity_percent = None
- self.accel_percent = DEFAULT_ACCEL_PERCENT
- self.trajectory_type = "trapezoid"
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse MOVEPOSE command parameters.
-
- Format: MOVEPOSE|x|y|z|rx|ry|rz|duration|speed
- Example: MOVEPOSE|100|200|300|0|0|0|None|50
-
- Args:
- parts: Pre-split message parts
-
- Returns:
- Tuple of (can_handle, error_message)
- """
- if len(parts) != 9:
- return (
- False,
- "MOVEPOSE requires 8 parameters: x, y, z, rx, ry, rz, duration, speed",
+ velocity = (
+ _linmap_frac(speed_mag, _CART_LIN_JOG_MIN_MS, _CART_LIN_JOG_MAX_MS)
+ * self._axis_sign
)
- # Parse pose (6 values)
- self.pose = [float(parts[i]) for i in range(1, 7)]
-
- # Parse duration and speed
- self.duration = None if parts[7].upper() == "NONE" else float(parts[7])
- self.velocity_percent = None if parts[8].upper() == "NONE" else float(parts[8])
+ # Scale velocity by previous tick's clamping ratio to keep CSE
+ # in sync with joint-velocity-limited motion
+ if self._vel_ratio > 1.0:
+ velocity /= self._vel_ratio
- self.log_debug("Parsed MovePose: %s", self.pose)
- self.is_valid = True
- return (True, None)
+ # Set target velocity (WRF transforms to body frame, TRF uses body directly)
+ if self.p.frame == "WRF":
+ cse.set_jog_velocity_1dof_wrf(self._axis_index, velocity, self.is_rotation)
+ else:
+ cse.set_jog_velocity_1dof(self._axis_index, velocity, self.is_rotation)
- def do_setup(self, state: "ControllerState") -> None:
- """Calculates the full trajectory just-in-time before execution."""
- self.log_trace(" -> Preparing trajectory for MovePose to %s...", self.pose)
+ smoothed_pose, smoothed_vel, _finished = cse.tick()
- initial_pos_rad = np.asarray(
- PAROL6_ROBOT.ops.steps_to_rad(state.Position_in), dtype=float
+ ik_result = solve_ik(
+ PAROL6_ROBOT.robot,
+ smoothed_pose,
+ self._q_ik_seed,
)
- pose = cast(list[float], self.pose)
- target_pose = SE3.RPY(pose[3:6], unit="deg", order="xyz")
- target_pose.t = np.array(pose[:3]) / 1000.0
-
- ik_solution = solve_ik(PAROL6_ROBOT.robot, target_pose, initial_pos_rad)
-
- if not ik_solution.success:
- error_str = "An intermediate point on the path is unreachable."
- if ik_solution.violations:
- error_str += (
- f" Reason: Path violates joint limits: {ik_solution.violations}"
+ if not ik_result.success or ik_result.q is None:
+ if not self._ik_stopping:
+ _ik_warn(
+ logger,
+ "[CARTJOG] IK failed - initiating graceful stop: pos=%s",
+ smoothed_pose[:3, 3],
)
- raise IKError(error_str)
-
- target_pos_rad = ik_solution.q
-
- if self.duration and self.duration > 0:
- if self.velocity_percent is not None:
- self.log_trace(
- " -> INFO: Both duration and velocity were provided. Using duration."
+ cse.stop()
+ self._ik_stopping = True
+ else:
+ # Still failing, check if we've stopped decelerating
+ np.dot(smoothed_vel, smoothed_vel, out=self._dot_buf)
+ if self._dot_buf < 1e-8:
+ cse.sync_pose(get_fkine_se3(state))
+ cse.active = False
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+ return ExecutionStatusCode.EXECUTING
+
+ # IK succeeded - if we were stopping, recover by resuming jogging
+ if self._ik_stopping:
+ logger.info("[CARTJOG] IK recovered - resuming jog")
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ cse.sync_pose(get_fkine_se3(state))
+ self._q_commanded[:] = self._q_rad_buf
+ self._q_ik_seed[:] = self._q_rad_buf
+ self._vel_ratio = 1.0
+ self._ik_stopping = False
+ if self.p.frame == "WRF":
+ cse.set_jog_velocity_1dof_wrf(
+ self._axis_index, velocity, self.is_rotation
)
- initial_pos_steps = state.Position_in
- target_pos_steps = np.asarray(
- PAROL6_ROBOT.ops.rad_to_steps(target_pos_rad), dtype=np.int32
- )
- dur = float(self.duration)
- self.trajectory_steps = MotionProfile.from_duration_steps(
- initial_pos_steps, target_pos_steps, dur, dt=INTERVAL_S
- )
+ else:
+ cse.set_jog_velocity_1dof(self._axis_index, velocity, self.is_rotation)
- elif self.velocity_percent is not None:
- initial_pos_steps = state.Position_in
- target_pos_steps = np.asarray(
- PAROL6_ROBOT.ops.rad_to_steps(target_pos_rad), dtype=np.int32
- )
- accel_percent = (
- float(self.accel_percent)
- if self.accel_percent is not None
- else float(DEFAULT_ACCEL_PERCENT)
- )
- self.trajectory_steps = MotionProfile.from_velocity_percent(
- initial_pos_steps,
- target_pos_steps,
- float(self.velocity_percent),
- accel_percent,
- dt=INTERVAL_S,
- )
- self.log_trace(" -> Command is valid (velocity profile).")
- else:
- self.log_trace(" -> Using conservative values for MovePose.")
- command_len = 200
- initial_pos_steps = state.Position_in
- target_pos_steps = np.asarray(
- PAROL6_ROBOT.ops.rad_to_steps(target_pos_rad), dtype=np.int32
- )
- total_dur = float(command_len) * INTERVAL_S
- self.trajectory_steps = MotionProfile.from_duration_steps(
- initial_pos_steps, target_pos_steps, total_dur, dt=INTERVAL_S
- )
+ self._track_and_send(state, ik_result.q)
- if len(self.trajectory_steps) == 0:
- raise IKError(
- "Trajectory calculation resulted in no steps. Command is invalid."
- )
- logger.log(
- TRACE, " -> Trajectory prepared with %s steps.", len(self.trajectory_steps)
- )
+ return ExecutionStatusCode.EXECUTING
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- if self.command_step >= len(self.trajectory_steps):
- logger.info(f"{type(self).__name__} finished.")
- self.is_finished = True
- self.stop_and_idle(state)
- return ExecutionStatus.completed("MOVEPOSE complete")
- else:
- self.set_move_position(state, self.trajectory_steps[self.command_step])
- self.command_step += 1
- return ExecutionStatus.executing("MovePose")
+@register_command(CmdType.MOVEL)
+class MoveLCommand(TrajectoryMoveCommandBase[MoveLCmd]):
+ """Move the robot's end-effector in a straight line to a Cartesian pose.
-@register_command("MOVECART")
-class MoveCartCommand(MotionCommand):
+ Supports absolute and relative modes via the `rel` field, and WRF/TRF frames.
"""
- A non-blocking command to move the robot's end-effector in a straight line
- in Cartesian space, completing the move in an exact duration.
- It works by:
- 1. Pre-validating the final target pose.
- 2. Interpolating the pose in Cartesian space in real-time.
- 3. Solving Inverse Kinematics for each intermediate step to ensure path validity.
- """
+ PARAMS_TYPE = MoveLCmd
__slots__ = (
- "pose",
- "duration",
- "velocity_percent",
- "start_time",
"initial_pose",
"target_pose",
+ "cartesian_diagnostic",
+ "_cart_poses_buf",
)
- def __init__(self):
- super().__init__()
-
- # Parameters (set in do_match())
- self.pose = None
- self.duration = None
- self.velocity_percent = None
-
- # Runtime state
- self.start_time = None
- self.initial_pose = None
- self.target_pose = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse MOVECART command parameters.
-
- Format: MOVECART|x|y|z|rx|ry|rz|duration|speed
- Example: MOVECART|100|200|300|0|0|0|2.0|None
+ def __init__(self, p: MoveLCmd):
+ super().__init__(p)
+ self.initial_pose: np.ndarray | None = None
+ self.target_pose: np.ndarray | None = None
+ self.cartesian_diagnostic: dict | None = None
+ self._cart_poses_buf = np.empty((PATH_SAMPLES, 4, 4), dtype=np.float64)
- Args:
- parts: Pre-split message parts
-
- Returns:
- Tuple of (can_handle, error_message)
- """
- if len(parts) != 9:
- return (
- False,
- "MOVECART requires 8 parameters: x, y, z, rx, ry, rz, duration, speed",
- )
+ def do_setup(self, state: "ControllerState") -> None:
+ """Set up the move - compute target pose and pre-compute trajectory."""
+ self.initial_pose = get_fkine_se3(state)
+ self._compute_target_pose(state)
+ self._precompute_trajectory(state)
- # Parse pose (6 values)
- self.pose = [float(parts[i]) for i in range(1, 7)]
+ def _precompute_trajectory(self, state: "ControllerState") -> None:
+ """Pre-compute joint trajectory that follows straight-line Cartesian path."""
+ from parol6.utils.errors import IKError
- # Parse duration and speed
- self.duration = None if parts[7].upper() == "NONE" else float(parts[7])
- self.velocity_percent = None if parts[8].upper() == "NONE" else float(parts[8])
+ assert self.initial_pose is not None and self.target_pose is not None
- # Validate that at least one timing parameter is given
- if self.duration is None and self.velocity_percent is None:
- return (False, "MOVECART requires either duration or velocity_percent")
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ current_rad = self._q_rad_buf
- if self.duration is not None and self.velocity_percent is not None:
- logger.info(
- " -> INFO: Both duration and velocity_percent provided. Using duration."
- )
- self.velocity_percent = None # Prioritize duration
+ cart_poses = self._cart_poses_buf
+ for i in range(PATH_SAMPLES):
+ s = i / (PATH_SAMPLES - 1)
+ se3_interp(self.initial_pose, self.target_pose, s, cart_poses[i])
- self.log_debug("Parsed MoveCart: %s", self.pose)
- self.is_valid = True
- return (True, None)
+ stop_on_failure = state.stop_on_failure
+ joint_path = JointPath.from_poses(
+ cart_poses,
+ current_rad,
+ stop_on_failure=stop_on_failure,
+ )
- def do_setup(self, state: "ControllerState") -> None:
- """Captures the initial state and validates the path just before execution."""
- self.initial_pose = get_fkine_se3()
- pose = cast(list[float], self.pose)
-
- # Construct pose: rotation first, then set translation (xyz convention)
- self.target_pose = SE3.RPY(pose[3:6], unit="deg", order="xyz")
- self.target_pose.t = (
- np.array(pose[:3]) / 1000.0
- ) # Vectorized translation assignment
-
- if self.velocity_percent is not None:
- # Calculate the total distance for translation and rotation
- tp = cast(SE3, self.target_pose)
- ip = cast(SE3, self.initial_pose)
- linear_distance = np.linalg.norm(tp.t - ip.t)
- angular_distance_rad = ip.angdist(tp)
-
- target_linear_speed = self.linmap_pct(
- self.velocity_percent, self.CART_LIN_JOG_MIN, self.CART_LIN_JOG_MAX
- )
- target_angular_speed_rad = np.deg2rad(
- self.linmap_pct(
- self.velocity_percent, self.CART_ANG_JOG_MIN, self.CART_ANG_JOG_MAX
+ if joint_path.is_partial:
+ ik_valid = joint_path.valid
+ assert ik_valid is not None
+ # Extract TCP poses (x,y,z,rx,ry,rz) in meters+radians from SE3
+ n = len(cart_poses)
+ tcp_poses = np.empty((n, 6), dtype=np.float64)
+ _rpy_buf = np.empty(3, dtype=np.float64)
+ for i in range(n):
+ tcp_poses[i, :3] = cart_poses[i][:3, 3]
+ se3_rpy(cart_poses[i], _rpy_buf)
+ tcp_poses[i, 3:] = _rpy_buf
+ self.cartesian_diagnostic = {
+ "tcp_poses": tcp_poses,
+ "ik_valid": ik_valid,
+ }
+ raise IKError(
+ make_error(
+ ErrorCode.IK_PARTIAL_PATH,
+ valid=str(int(ik_valid.sum())),
+ total=str(len(ik_valid)),
)
)
- # Calculate time required for each component of the movement
- time_linear = (
- linear_distance / target_linear_speed if target_linear_speed > 0 else 0
- )
- time_angular = (
- angular_distance_rad / target_angular_speed_rad
- if target_angular_speed_rad > 0
- else 0
- )
-
- # The total duration is the longer of the two times to ensure synchronization
- calculated_duration = max(time_linear, time_angular)
-
- if calculated_duration <= 0:
- logger.info(
- " -> INFO: MoveCart has zero duration. Marking as finished."
- )
- self.is_finished = True
- self.is_valid = True # It's valid, just already done.
- return
-
- self.duration = calculated_duration
- self.log_debug(" -> Calculated MoveCart duration: %.2fs", self.duration)
-
- self.log_debug(" -> Command is valid and ready for execution.")
- if self.duration and float(self.duration) > 0.0:
- self.start_timer(float(self.duration))
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=state.motion_profile,
+ velocity_frac=self.p.resolved_speed,
+ accel_frac=self.p.accel,
+ duration=self.p.resolved_duration,
+ dt=INTERVAL_S,
+ cart_vel_limit=LIMITS.cart.hard.velocity.linear * self.p.resolved_speed,
+ cart_acc_limit=LIMITS.cart.hard.acceleration.linear * self.p.accel,
+ )
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- dur = float(self.duration or 0.0)
- if dur <= 0.0:
- self.is_finished = True
- self.stop_and_idle(state)
- return ExecutionStatus.completed("MOVECART complete")
- s = self.progress01(dur)
- s_scaled = quintic_scaling(float(s))
+ trajectory = builder.build()
+ self.trajectory_steps = trajectory.steps
+ self._duration = trajectory.duration
- assert self.initial_pose is not None and self.target_pose is not None
- _ctp = cast(SE3, self.initial_pose).interp(
- cast(SE3, self.target_pose), s_scaled
+ self.log_debug(
+ " -> Pre-computed Cartesian path: profile=%s, steps=%d, duration=%.3fs",
+ state.motion_profile,
+ len(self.trajectory_steps),
+ float(self._duration),
)
- if not isinstance(_ctp, SE3):
- return ExecutionStatus.executing("Waiting for pose interpolation")
- current_target_pose = cast(SE3, _ctp)
- current_q_rad = np.asarray(
- PAROL6_ROBOT.ops.steps_to_rad(state.Position_in), dtype=float
+ def _compute_target_pose(self, state: "ControllerState") -> None:
+ """Compute target pose — absolute or relative based on rel flag."""
+ pose = self.p.pose
+
+ if self.p.rel:
+ # Relative move: compute delta SE3, then apply in tool frame (TRF)
+ # or world frame (WRF) depending on self.p.frame
+ delta_se3 = np.zeros((4, 4), dtype=np.float64)
+ se3_from_rpy(
+ pose[0] / 1000.0,
+ pose[1] / 1000.0,
+ pose[2] / 1000.0,
+ np.radians(pose[3]),
+ np.radians(pose[4]),
+ np.radians(pose[5]),
+ delta_se3,
+ )
+ if self.p.frame == "TRF":
+ # Post-multiply for tool-relative motion
+ self.target_pose = cast(np.ndarray, self.initial_pose) @ delta_se3
+ else:
+ # Pre-multiply for world-relative motion
+ self.target_pose = delta_se3 @ cast(np.ndarray, self.initial_pose)
+ else:
+ # Absolute target pose
+ self.target_pose = np.zeros((4, 4), dtype=np.float64)
+ se3_from_rpy(
+ pose[0] / 1000.0,
+ pose[1] / 1000.0,
+ pose[2] / 1000.0,
+ np.radians(pose[3]),
+ np.radians(pose[4]),
+ np.radians(pose[5]),
+ self.target_pose,
+ )
+
+ def do_setup_with_blend(
+ self,
+ state: "ControllerState",
+ next_cmds: "list[TrajectoryMoveCommandBase]",
+ ) -> int:
+ """Build composite Cartesian trajectory with blend zones."""
+ if self.blend_radius <= 0 or not next_cmds:
+ self.do_setup(state)
+ return 0
+
+ chain: list[MoveLCommand] = [self]
+ for cmd in next_cmds:
+ if isinstance(cmd, MoveLCommand):
+ chain.append(cmd)
+ else:
+ break
+ if len(chain) < 2:
+ self.do_setup(state)
+ return 0
+
+ from parol6.motion.geometry import build_composite_cartesian_path
+
+ initial_pose = get_fkine_se3(state)
+ self.initial_pose = initial_pose
+
+ waypoints = [initial_pose]
+ blend_radii: list[float] = []
+ prev_pose = initial_pose
+
+ for i, movel in enumerate(chain):
+ movel.initial_pose = prev_pose
+ movel._compute_target_pose(state)
+ if movel.target_pose is None:
+ if i < 2:
+ self.do_setup(state)
+ return 0
+ chain = chain[:i]
+ break
+ waypoints.append(movel.target_pose)
+ prev_pose = movel.target_pose
+ if i < len(chain) - 1:
+ blend_radii.append(movel.blend_radius)
+
+ if len(waypoints) < 3:
+ self.do_setup(state)
+ return 0
+
+ composite_poses = build_composite_cartesian_path(
+ waypoints,
+ blend_radii,
+ samples_per_segment=PATH_SAMPLES,
)
- ik_solution = solve_ik(PAROL6_ROBOT.robot, current_target_pose, current_q_rad)
- if not ik_solution.success:
- error_str = "An intermediate point on the path is unreachable."
- if ik_solution.violations:
- error_str += (
- f" Reason: Path violates joint limits: {ik_solution.violations}"
- )
- raise IKError(error_str)
+ if len(composite_poses) == 0:
+ self.do_setup(state)
+ return 0
- current_pos_rad = ik_solution.q
+ steps_to_rad(state.Position_in, self._q_rad_buf)
- # Send only the target position and let the firmware's P-controller handle speed.
- # Set feed-forward velocity to zero for smooth P-control.
- steps = PAROL6_ROBOT.ops.rad_to_steps(current_pos_rad)
- self.set_move_position(state, np.asarray(steps))
+ try:
+ joint_path = JointPath.from_poses(composite_poses, self._q_rad_buf)
+ except Exception:
+ self.log_warning(
+ "Blend IK failed for %d-segment Cartesian path, falling back",
+ len(waypoints) - 1,
+ )
+ self.do_setup(state)
+ return 0
- if s >= 1.0:
- actual_elapsed = (
- (time.perf_counter() - self._t0) if self._t0 is not None else dur
+ if joint_path.is_partial:
+ self.log_warning(
+ "Blend IK partial for %d-segment Cartesian path, falling back",
+ len(waypoints) - 1,
)
- self.log_info("MoveCart finished in ~%.2fs.", actual_elapsed)
- self.is_finished = True
- return ExecutionStatus.completed("MOVECART complete")
+ self.do_setup(state)
+ return 0
+
+ # Use minimum speed/accel across chain, sum durations when all duration-based
+ min_speed = self.p.resolved_speed
+ min_accel = self.p.accel
+ total_duration = self.p.resolved_duration
+ all_have_duration = total_duration is not None
+
+ for i in range(1, len(chain)):
+ cmd = chain[i]
+ s = cmd.p.resolved_speed
+ a = cmd.p.accel
+ if s < min_speed:
+ min_speed = s
+ if a < min_accel:
+ min_accel = a
+ d = cmd.p.resolved_duration
+ if all_have_duration and d is not None:
+ assert total_duration is not None
+ total_duration += d
+ else:
+ all_have_duration = False
+ total_duration = None
+
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=state.motion_profile,
+ velocity_frac=min_speed,
+ accel_frac=min_accel,
+ duration=total_duration,
+ dt=INTERVAL_S,
+ cart_vel_limit=LIMITS.cart.hard.velocity.linear * min_speed,
+ cart_acc_limit=LIMITS.cart.hard.acceleration.linear * min_accel,
+ )
+
+ trajectory = builder.build()
+ self.trajectory_steps = trajectory.steps
+ self._duration = trajectory.duration
- return ExecutionStatus.executing("MoveCart")
+ consumed = len(chain) - 1
+ self.log_info(
+ " -> Blended Cartesian trajectory: %d segments, steps=%d, duration=%.3fs",
+ len(waypoints) - 1,
+ len(self.trajectory_steps),
+ trajectory.duration,
+ )
+ return consumed
diff --git a/parol6/commands/curved_commands.py b/parol6/commands/curved_commands.py
new file mode 100644
index 0000000..fd2daf2
--- /dev/null
+++ b/parol6/commands/curved_commands.py
@@ -0,0 +1,362 @@
+"""
+Smooth Geometry Commands
+
+Commands for generating smooth geometric paths: circles, arcs, and splines.
+These use the unified motion pipeline with TOPP-RA for time-optimal path parameterization.
+"""
+
+import logging
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, TypeVar
+
+import numpy as np
+
+from parol6.commands.base import TrajectoryMoveCommandBase
+from parol6.config import INTERVAL_S, LIMITS, steps_to_rad
+from parol6.motion import CircularMotion, JointPath, SplineMotion, TrajectoryBuilder
+from parol6.protocol.wire import (
+ CmdType,
+ MoveCCmd,
+ MotionParamsMixin,
+ MovePCmd,
+ MoveSCmd,
+)
+from parol6.motion.geometry import compute_circle_from_3_points
+from parol6.server.command_registry import register_command
+from parol6.server.state import get_fkine_se3
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+from parol6.utils.errors import IKError, TrajectoryPlanningError
+from pinokin import se3_from_rpy, se3_interp, se3_rpy
+
+_MP = TypeVar("_MP", bound=MotionParamsMixin)
+
+if TYPE_CHECKING:
+ from parol6.server.state import ControllerState
+
+logger = logging.getLogger(__name__)
+
+
+# =============================================================================
+# TRF/WRF Transformation Utilities
+# =============================================================================
+
+# Pre-allocated workspace buffers for TRF/WRF transformations (command setup phase)
+_pose_trf_buf: np.ndarray = np.zeros((4, 4), dtype=np.float64)
+_pose_wrf_buf: np.ndarray = np.zeros((4, 4), dtype=np.float64)
+_rpy_rad_buf: np.ndarray = np.zeros(3, dtype=np.float64)
+
+
+def _pose6_trf_to_wrf(
+ pose6_mm_deg: Sequence[float], tool_pose: np.ndarray, out: np.ndarray
+) -> None:
+ """Convert 6D pose [x,y,z,rx,ry,rz] from TRF to WRF (mm, degrees)."""
+ se3_from_rpy(
+ pose6_mm_deg[0] / 1000.0,
+ pose6_mm_deg[1] / 1000.0,
+ pose6_mm_deg[2] / 1000.0,
+ np.radians(pose6_mm_deg[3]),
+ np.radians(pose6_mm_deg[4]),
+ np.radians(pose6_mm_deg[5]),
+ _pose_trf_buf,
+ )
+ np.matmul(tool_pose, _pose_trf_buf, out=_pose_wrf_buf)
+ se3_rpy(_pose_wrf_buf, _rpy_rad_buf)
+ out[:3] = _pose_wrf_buf[:3, 3] * 1000.0
+ np.degrees(_rpy_rad_buf, out=out[3:])
+
+
+def _transform_waypoints_trf_to_wrf(
+ waypoints: Sequence[Sequence[float]], frame: str, state: "ControllerState"
+) -> np.ndarray:
+ """Transform 6D waypoint poses from TRF to WRF. Returns (N, 6) array."""
+ n = len(waypoints)
+ result = np.empty((n, 6), dtype=np.float64)
+ if frame == "WRF":
+ for i in range(n):
+ result[i] = waypoints[i]
+ return result
+ tool_pose = get_fkine_se3(state)
+ for i in range(n):
+ _pose6_trf_to_wrf(waypoints[i], tool_pose, out=result[i])
+ return result
+
+
+# =============================================================================
+# Smooth Motion Command Base
+# =============================================================================
+
+
+class BaseSmoothMotionCommand(TrajectoryMoveCommandBase[_MP]):
+ """Base class for smooth geometry commands (circle, arc, helix, spline).
+
+ Subclasses implement generate_main_trajectory() to create Cartesian geometry.
+ This base class handles IK conversion and trajectory building.
+ """
+
+ __slots__ = (
+ "_rpy_rad_buf",
+ "_pose6_buf",
+ )
+
+ def __init__(self, p: _MP) -> None:
+ super().__init__(p)
+ self._rpy_rad_buf = np.zeros(3, dtype=np.float64)
+ self._pose6_buf = np.zeros(6, dtype=np.float64)
+
+ def get_current_pose(self, state: "ControllerState") -> np.ndarray:
+ """Get current TCP pose as [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg]."""
+ current_se3 = get_fkine_se3(state)
+ se3_rpy(current_se3, self._rpy_rad_buf)
+ self._pose6_buf[:3] = current_se3[:3, 3] * 1000 # m -> mm
+ np.degrees(self._rpy_rad_buf, out=self._pose6_buf[3:])
+ return self._pose6_buf
+
+ def do_setup(self, state: "ControllerState") -> None:
+ """Pre-compute trajectory from current position."""
+ self.log_debug(" -> Preparing %s...", self.name)
+
+ current_pose = self.get_current_pose(state)
+ self.log_info(
+ " -> Generating %s from position: %s",
+ self.name,
+ [round(p, 1) for p in current_pose[:3]],
+ )
+
+ cartesian_trajectory = self.generate_main_trajectory(current_pose)
+ if cartesian_trajectory is None or len(cartesian_trajectory) == 0:
+ raise TrajectoryPlanningError(
+ make_error(
+ ErrorCode.TRAJ_EMPTY_RESULT, detail="empty cartesian trajectory"
+ )
+ )
+
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+
+ try:
+ joint_path = JointPath.from_poses(cartesian_trajectory, self._q_rad_buf)
+ except IKError as e:
+ self.log_error(" -> ERROR: IK failed during trajectory generation: %s", e)
+ raise
+
+ if joint_path.is_partial:
+ assert joint_path.valid is not None
+ n_valid = int(joint_path.valid.sum())
+ n_total = len(joint_path)
+ self.log_error(
+ " -> ERROR: Partial IK during trajectory generation (%d/%d valid)",
+ n_valid,
+ n_total,
+ )
+ raise TrajectoryPlanningError(
+ make_error(
+ ErrorCode.IK_PARTIAL_PATH, valid=str(n_valid), total=str(n_total)
+ )
+ )
+
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=state.motion_profile,
+ velocity_frac=self.p.resolved_speed,
+ accel_frac=self.p.accel,
+ duration=self.p.resolved_duration,
+ dt=INTERVAL_S,
+ cart_vel_limit=LIMITS.cart.hard.velocity.linear * self.p.resolved_speed,
+ cart_acc_limit=LIMITS.cart.hard.acceleration.linear * self.p.accel,
+ )
+
+ trajectory = builder.build()
+ self.trajectory_steps = trajectory.steps
+ self._duration = trajectory.duration
+
+ self.log_info(
+ " -> Trajectory prepared: %d steps, %.2fs duration",
+ len(self.trajectory_steps),
+ trajectory.duration,
+ )
+
+ def generate_main_trajectory(self, effective_start_pose) -> np.ndarray:
+ """Override this in subclasses to generate the specific motion trajectory."""
+ raise NotImplementedError("Subclasses must implement generate_main_trajectory")
+
+
+@register_command(CmdType.MOVEC)
+class MoveCCommand(BaseSmoothMotionCommand[MoveCCmd]):
+ """Execute circular arc motion through current → via → end (3-point arc).
+
+ Computes circle center and normal from the 3 points, then delegates to
+ CircularMotion.generate_arc().
+ """
+
+ PARAMS_TYPE = MoveCCmd
+
+ __slots__ = ("_via", "_end")
+
+ def __init__(self, p: MoveCCmd) -> None:
+ super().__init__(p)
+ self._via: np.ndarray = np.asarray(p.via, dtype=np.float64)
+ self._end: np.ndarray = np.asarray(p.end, dtype=np.float64)
+
+ def do_setup(self, state: "ControllerState") -> None:
+ """Transform via/end from TRF if needed, then compute arc."""
+ if self.p.frame == "TRF":
+ tool_pose = get_fkine_se3(state)
+ _pose6_trf_to_wrf(self.p.via, tool_pose, out=self._via)
+ _pose6_trf_to_wrf(self.p.end, tool_pose, out=self._end)
+ return super().do_setup(state)
+
+ def generate_main_trajectory(self, effective_start_pose) -> np.ndarray:
+ """Generate arc geometry from current position through via to end."""
+ start_xyz = effective_start_pose[:3]
+ via_xyz = self._via[:3]
+ end_xyz = self._end[:3]
+
+ center, _radius, normal = compute_circle_from_3_points(
+ start_xyz, via_xyz, end_xyz
+ )
+
+ return CircularMotion().generate_arc(
+ start_pose=effective_start_pose,
+ end_pose=self._end,
+ center=center,
+ normal=normal,
+ clockwise=False,
+ )
+
+
+@register_command(CmdType.MOVES)
+class MoveSCommand(BaseSmoothMotionCommand[MoveSCmd]):
+ """Execute smooth spline motion through waypoints."""
+
+ PARAMS_TYPE = MoveSCmd
+
+ __slots__ = ("_waypoints",)
+
+ def __init__(self, p: MoveSCmd) -> None:
+ super().__init__(p)
+ self._waypoints: np.ndarray | None = None
+
+ def do_setup(self, state: "ControllerState") -> None:
+ """Transform parameters if in TRF."""
+ self._waypoints = _transform_waypoints_trf_to_wrf(
+ self.p.waypoints, self.p.frame, state
+ )
+ return super().do_setup(state)
+
+ def generate_main_trajectory(self, effective_start_pose) -> np.ndarray:
+ """Generate spline starting from actual position."""
+ assert self._waypoints is not None
+
+ wps = self._waypoints
+ motion_gen = SplineMotion()
+
+ first_wp_error = float(np.linalg.norm(wps[0, :3] - effective_start_pose[:3]))
+
+ if first_wp_error > 5.0:
+ modified_waypoints = np.vstack([effective_start_pose[np.newaxis], wps])
+ logger.info(
+ f" Added start position as first waypoint (distance: {first_wp_error:.1f}mm)"
+ )
+ else:
+ modified_waypoints = np.vstack([effective_start_pose[np.newaxis], wps[1:]])
+ logger.info(" Replaced first waypoint with actual start position")
+
+ duration = self.p.resolved_duration
+ trajectory = motion_gen.generate_spline(
+ waypoints=modified_waypoints,
+ duration=duration,
+ )
+
+ logger.debug(f" Generated spline with {len(trajectory)} points")
+
+ return trajectory
+
+
+# Number of SE3 samples per linear segment for move_p
+_MOVEP_SAMPLES_PER_SEGMENT: int = 20
+
+
+@register_command(CmdType.MOVEP)
+class MovePCommand(BaseSmoothMotionCommand[MovePCmd]):
+ """Process move — constant TCP speed through waypoints with piecewise linear segments.
+
+ Phase 3 will add auto-blending at corners (Bézier blend zones).
+ Currently uses sharp piecewise-linear interpolation.
+ """
+
+ PARAMS_TYPE = MovePCmd
+
+ __slots__ = ("_waypoints", "_se3_buf_a", "_se3_buf_b")
+
+ def __init__(self, p: MovePCmd) -> None:
+ super().__init__(p)
+ self._waypoints: np.ndarray | None = None
+ self._se3_buf_a = np.zeros((4, 4), dtype=np.float64)
+ self._se3_buf_b = np.zeros((4, 4), dtype=np.float64)
+
+ def do_setup(self, state: "ControllerState") -> None:
+ """Transform parameters if TRF, build trajectory with constant TCP speed."""
+ self._waypoints = _transform_waypoints_trf_to_wrf(
+ self.p.waypoints, self.p.frame, state
+ )
+ return super().do_setup(state)
+
+ def generate_main_trajectory(self, effective_start_pose) -> np.ndarray:
+ """Generate piecewise-linear Cartesian path through waypoints.
+
+ Each segment is linearly interpolated in SE3 space.
+ Phase 3 adds Bézier blend zones at corner points.
+ """
+ assert self._waypoints is not None
+
+ wps = self._waypoints
+
+ first_wp_error = float(np.linalg.norm(wps[0, :3] - effective_start_pose[:3]))
+ if first_wp_error > 5.0:
+ all_waypoints = np.vstack([effective_start_pose[np.newaxis], wps])
+ else:
+ all_waypoints = np.vstack([effective_start_pose[np.newaxis], wps[1:]])
+
+ # Pre-compute total SE3 poses for single allocation
+ n = _MOVEP_SAMPLES_PER_SEGMENT
+ n_segs = len(all_waypoints) - 1
+ total = n_segs * n - (n_segs - 1) # first segment full, rest skip junction
+ cart_poses = np.empty((total, 4, 4), dtype=np.float64)
+ cursor = 0
+
+ for seg_idx in range(n_segs):
+ wp_a = all_waypoints[seg_idx]
+ wp_b = all_waypoints[seg_idx + 1]
+
+ se3_from_rpy(
+ wp_a[0] / 1000.0,
+ wp_a[1] / 1000.0,
+ wp_a[2] / 1000.0,
+ np.radians(wp_a[3]),
+ np.radians(wp_a[4]),
+ np.radians(wp_a[5]),
+ self._se3_buf_a,
+ )
+ se3_from_rpy(
+ wp_b[0] / 1000.0,
+ wp_b[1] / 1000.0,
+ wp_b[2] / 1000.0,
+ np.radians(wp_b[3]),
+ np.radians(wp_b[4]),
+ np.radians(wp_b[5]),
+ self._se3_buf_b,
+ )
+
+ start_i = 0 if seg_idx == 0 else 1
+ for i in range(start_i, n):
+ s = i / (n - 1)
+ se3_interp(self._se3_buf_a, self._se3_buf_b, s, cart_poses[cursor])
+ cursor += 1
+
+ logger.debug(
+ " Generated process move path with %d SE3 poses across %d segments",
+ cursor,
+ n_segs,
+ )
+
+ return cart_poses[:cursor]
diff --git a/parol6/commands/gcode_commands.py b/parol6/commands/gcode_commands.py
deleted file mode 100644
index cb37638..0000000
--- a/parol6/commands/gcode_commands.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""
-GCODE command wrappers for robot control.
-
-These commands integrate the GCODE interpreter with the robot command system.
-"""
-
-from typing import TYPE_CHECKING
-
-from parol6.commands.base import CommandBase, ExecutionStatus
-from parol6.gcode import GcodeInterpreter
-from parol6.server.command_registry import register_command
-from parol6.server.state import ControllerState, get_fkine_matrix
-
-if TYPE_CHECKING:
- from parol6.server.state import ControllerState
-
-
-@register_command("GCODE")
-class GcodeCommand(CommandBase):
- """Execute a single GCODE line."""
-
- __slots__ = (
- "gcode_line",
- "interpreter",
- "generated_commands",
- "current_command_index",
- )
- gcode_line: str
- interpreter: GcodeInterpreter | None
- generated_commands: list[str]
- current_command_index: int
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GCODE command."""
- if parts[0].upper() == "GCODE" and len(parts) >= 2:
- # Rejoin the GCODE line (it might contain | characters)
- self.gcode_line = "|".join(parts[1:])
- return True, None
- return False, None
-
- def do_setup(self, state: "ControllerState") -> None:
- """Set up GCODE interpreter and parse the line."""
- # Use injected interpreter or create one
- self.interpreter = (
- self.gcode_interpreter or self.interpreter or GcodeInterpreter()
- )
- assert self.interpreter is not None
- # Update interpreter position with current robot position
- current_pose_matrix = get_fkine_matrix()
- current_xyz = current_pose_matrix[:3, 3]
- self.interpreter.state.update_position(
- {
- "X": current_xyz[0] * 1000,
- "Y": current_xyz[1] * 1000,
- "Z": current_xyz[2] * 1000,
- }
- )
- # Parse and store generated robot commands (strings)
- self.generated_commands = self.interpreter.parse_line(self.gcode_line) or []
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Return generated commands for the controller to enqueue."""
- # Report back the list so controller can enqueue them
- details = {}
- if self.generated_commands:
- details["enqueue"] = self.generated_commands
- self.finish()
- return ExecutionStatus.completed("GCODE parsed", details=details)
-
-
-@register_command("GCODE_PROGRAM")
-class GcodeProgramCommand(CommandBase):
- """Load and execute a GCODE program."""
-
- __slots__ = ("program_type", "program_data", "interpreter")
- program_type: str
- program_data: str
- interpreter: GcodeInterpreter | None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GCODE_PROGRAM command."""
- if parts[0].upper() == "GCODE_PROGRAM" and len(parts) >= 3:
- self.program_type = parts[1].upper()
-
- if self.program_type == "FILE":
- self.program_data = parts[2]
- elif self.program_type == "INLINE":
- # Join remaining parts and split by semicolon for inline programs
- self.program_data = "|".join(parts[2:])
- else:
- return False, "Invalid GCODE_PROGRAM type (expected FILE or INLINE)"
-
- return True, None
- return False, None
-
- def do_setup(self, state: ControllerState) -> None:
- """Load the GCODE program using the interpreter."""
- # Use injected interpreter or create one
- self.interpreter = (
- self.gcode_interpreter or self.interpreter or GcodeInterpreter()
- )
- assert self.interpreter is not None
- if self.program_type == "FILE":
- if not self.interpreter.load_file(self.program_data):
- raise RuntimeError(f"Failed to load GCODE file: {self.program_data}")
- elif self.program_type == "INLINE":
- program_lines = self.program_data.split(";")
- if not self.interpreter.load_program(program_lines):
- raise RuntimeError("Failed to load inline GCODE program")
- else:
- raise ValueError("Invalid GCODE_PROGRAM type (expected FILE or INLINE)")
- # Start program execution
- self.interpreter.start_program()
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Signal that the program was loaded; controller will fetch commands."""
- self.finish()
- return ExecutionStatus.completed("GCODE program loaded")
-
-
-@register_command("GCODE_STOP")
-class GcodeStopCommand(CommandBase):
- """Stop GCODE program execution."""
-
- __slots__ = ()
- is_immediate: bool = True
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GCODE_STOP command."""
- if parts[0].upper() == "GCODE_STOP":
- return True, None
- return False, None
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Stop the GCODE program."""
- if self.gcode_interpreter:
- self.gcode_interpreter.stop_program()
- self.finish()
- return ExecutionStatus.completed("GCODE stopped")
-
-
-@register_command("GCODE_PAUSE")
-class GcodePauseCommand(CommandBase):
- """Pause GCODE program execution."""
-
- __slots__ = ()
- is_immediate: bool = True
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GCODE_PAUSE command."""
- if parts[0].upper() == "GCODE_PAUSE":
- return True, None
- return False, None
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Pause the GCODE program."""
- if self.gcode_interpreter:
- self.gcode_interpreter.pause_program()
- self.finish()
- return ExecutionStatus.completed("GCODE paused")
-
-
-@register_command("GCODE_RESUME")
-class GcodeResumeCommand(CommandBase):
- """Resume GCODE program execution."""
-
- __slots__ = ()
- is_immediate: bool = True
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GCODE_RESUME command."""
- if parts[0].upper() == "GCODE_RESUME":
- return True, None
- return False, None
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Resume the GCODE program."""
- if self.gcode_interpreter:
- self.gcode_interpreter.start_program() # resumes if already loaded
- self.finish()
- return ExecutionStatus.completed("GCODE resumed")
diff --git a/parol6/commands/gripper_commands.py b/parol6/commands/gripper_commands.py
index b8a2cf3..8c04193 100644
--- a/parol6/commands/gripper_commands.py
+++ b/parol6/commands/gripper_commands.py
@@ -1,259 +1,189 @@
"""
-Gripper Control Commands
-Contains commands for electric and pneumatic gripper control
+Gripper Control Commands — 100 Hz execution engines for tool actions.
+
+These are instantiated by ToolActionCommand, not directly from wire structs.
"""
+from __future__ import annotations
+
import logging
+from dataclasses import dataclass
from enum import Enum
-from parol6.commands.base import Debouncer, ExecutionStatus, MotionCommand
-from parol6.server.command_registry import register_command
+from parol6.commands.base import Debouncer, ExecutionStatusCode, MotionCommand
from parol6.server.state import ControllerState
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
logger = logging.getLogger(__name__)
-# Lifecycle TRACE is centralized in higher layers; keep semantic logs here only.
+class ElectricGripperState(Enum):
+ """State machine states for the electric gripper command engine."""
-class GripperState(Enum):
START = "START"
SEND_CALIBRATE = "SEND_CALIBRATE"
WAITING_CALIBRATION = "WAITING_CALIBRATION"
WAIT_FOR_POSITION = "WAIT_FOR_POSITION"
-@register_command("PNEUMATICGRIPPER")
-@register_command("ELECTRICGRIPPER")
-class GripperCommand(MotionCommand):
- """
- A single, unified, non-blocking command to control all gripper functions.
- It internally selects the correct logic (position-based waiting, timed delay,
- or instantaneous) based on the specified action.
- """
+@dataclass(frozen=True)
+class PneumaticGripperParams:
+ """Parameters for pneumatic gripper action."""
+
+ action: str # "open" or "close"
+ port: int # 1 or 2
+
+
+@dataclass(frozen=True)
+class ElectricGripperParams:
+ """Parameters for electric gripper action."""
+
+ action: str # "move" or "calibrate"
+ position: float
+ speed: float
+ current: int
+
+
+class PneumaticGripperCommand(MotionCommand[PneumaticGripperParams]):
+ """Control pneumatic gripper (open/close)."""
+
+ PARAMS_TYPE = None # Not wire-registered — instantiated by ToolActionCommand
+
+ __slots__ = (
+ "timeout_counter",
+ "_state_to_set",
+ "_port_index",
+ )
+
+ def __init__(self, p: PneumaticGripperParams):
+ super().__init__(p)
+ self.timeout_counter = 1000
+ self._state_to_set: int = 0
+ self._port_index: int = 0
+
+ @classmethod
+ def from_tool_action(cls, *, action: str, port: int) -> PneumaticGripperCommand:
+ return cls(PneumaticGripperParams(action=action, port=port))
+
+ def do_setup(self, state: ControllerState) -> None:
+ self._state_to_set = 1 if self.p.action == "open" else 0
+ # port 1 -> index 2, port 2 -> index 3
+ self._port_index = 2 if self.p.port == 1 else 3
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ self.timeout_counter -= 1
+ if self.timeout_counter <= 0:
+ self.fail(make_error(ErrorCode.MOTN_GRIPPER_TIMEOUT))
+ return ExecutionStatusCode.FAILED
+
+ state.InOut_out[self._port_index] = self._state_to_set
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+
+
+class ElectricGripperCommand(MotionCommand[ElectricGripperParams]):
+ """Control electric gripper (move/calibrate)."""
+
+ PARAMS_TYPE = None # Not wire-registered — instantiated by ToolActionCommand
__slots__ = (
"state",
"timeout_counter",
"object_debouncer",
"wait_counter",
- "gripper_type",
- "action",
- "target_position",
- "speed",
- "current",
- "state_to_set",
- "port_index",
+ "_hw_position",
+ "_hw_speed",
)
- def __init__(self):
- """
- Initializes the Gripper command.
- Parameters are parsed in do_match() method.
- """
- super().__init__()
- self.state = GripperState.START
- self.timeout_counter = 1000 # 10-second safety timeout for all waiting states
- self.object_debouncer = Debouncer(5) # 0.05s debounce for object detection
+ def __init__(self, p: ElectricGripperParams):
+ super().__init__(p)
+ self.state = ElectricGripperState.START
+ self.timeout_counter = 1000
+ self.object_debouncer = Debouncer(5)
self.wait_counter = 0
-
- # Parameters (set in do_match())
- self.gripper_type = None
- self.action = None
- self.target_position = None
- self.speed = None
- self.current = None
- self.state_to_set = None
- self.port_index = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse gripper command parameters.
-
- Formats:
- - PNEUMATICGRIPPER|action|port
- - ELECTRICGRIPPER|action|pos|spd|curr
-
- Args:
- parts: Pre-split message parts
-
- Returns:
- Tuple of (can_handle, error_message)
- """
- command_name = parts[0].upper()
-
- if command_name == "PNEUMATICGRIPPER":
- if len(parts) != 3:
- return (False, "PNEUMATICGRIPPER requires 2 parameters: action, port")
-
- self.gripper_type = "pneumatic"
- self.action = parts[1].lower()
- output_port = int(parts[2])
-
- # Validate action
- if self.action not in ["open", "close"]:
- return (False, f"Invalid pneumatic gripper action: {self.action}")
-
- # Configure pneumatic settings
- self.state_to_set = 1 if self.action == "open" else 0
- self.port_index = 2 if output_port == 1 else 3
-
- self.log_debug(
- "Parsed PNEUMATICGRIPPER: action=%s, port=%s", self.action, output_port
+ self._hw_position = 0
+ self._hw_speed = 1
+
+ @classmethod
+ def from_tool_action(
+ cls,
+ *,
+ action: str,
+ position: float = 0.0,
+ speed: float = 0.5,
+ current: int = 500,
+ ) -> ElectricGripperCommand:
+ return cls(
+ ElectricGripperParams(
+ action=action, position=position, speed=speed, current=current
)
- self.is_valid = True
- return (True, None)
-
- elif command_name == "ELECTRICGRIPPER":
- if len(parts) != 5:
- return (
- False,
- "ELECTRICGRIPPER requires 4 parameters: action, position, speed, current",
- )
-
- self.gripper_type = "electric"
-
- # Parse action
- action_token = parts[1].upper()
- self.action = (
- "move" if action_token in ("NONE", "MOVE") else parts[1].lower()
- )
-
- # Parse numeric parameters
- position = int(parts[2])
- speed = int(parts[3])
- current = int(parts[4])
-
- # Configure based on action
- if self.action == "move":
- self.target_position = position
- self.speed = speed
- self.current = current
-
- # Validate ranges
- if not (0 <= position <= 255):
- return (False, f"Position must be 0-255, got {position}")
- if not (0 <= speed <= 255):
- return (False, f"Speed must be 0-255, got {speed}")
- if not (100 <= current <= 1000):
- return (False, f"Current must be 100-1000, got {current}")
-
- elif self.action == "calibrate":
- self.wait_counter = 200 # 2-second fixed delay for calibration
- else:
- return (False, f"Invalid electric gripper action: {self.action}")
-
- self.log_debug(
- "Parsed ELECTRICGRIPPER: action=%s, pos=%s, speed=%s, current=%s",
- self.action,
- position,
- speed,
- current,
- )
- self.is_valid = True
- return (True, None)
+ )
- return (False, f"Unknown gripper command: {command_name}")
+ def do_setup(self, state: ControllerState) -> None:
+ self._hw_position = int(round(self.p.position * 255))
+ self._hw_speed = max(1, int(round(self.p.speed * 255)))
+ if self.p.action == "calibrate":
+ self.wait_counter = 200
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """State-based execution for pneumatic and electric grippers."""
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
self.timeout_counter -= 1
if self.timeout_counter <= 0:
- raise TimeoutError(f"Gripper command timed out in state {self.state}.")
-
- # --- Pneumatic Logic (Instantaneous) ---
- if self.gripper_type == "pneumatic":
- state.InOut_out[self.port_index] = self.state_to_set
- logger.info(" -> Pneumatic gripper command sent.")
- self.is_finished = True
- return ExecutionStatus.completed("Pneumatic gripper toggled")
-
- # --- Electric Gripper Logic ---
- if self.gripper_type == "electric":
- # On the first run, transition to the correct state for the action
- if self.state == GripperState.START:
- if self.action == "calibrate":
- self.state = GripperState.SEND_CALIBRATE
- else: # 'move'
- self.state = GripperState.WAIT_FOR_POSITION
- # --- Calibrate Logic (Timed Delay) ---
- if self.state == GripperState.SEND_CALIBRATE:
- logger.debug(" -> Sending one-shot calibrate command...")
- state.Gripper_data_out[4] = 1 # Set mode to calibrate
- self.state = GripperState.WAITING_CALIBRATION
- return ExecutionStatus.executing("Calibrating")
-
- if self.state == GripperState.WAITING_CALIBRATION:
- self.wait_counter -= 1
- if self.wait_counter <= 0:
- logger.info(" -> Calibration delay finished.")
- state.Gripper_data_out[4] = 0 # Reset to operation mode
- self.is_finished = True
- return ExecutionStatus.completed("Calibration complete")
- return ExecutionStatus.executing("Calibrating")
-
- # --- Move Logic (Position-Based) ---
- if self.state == GripperState.WAIT_FOR_POSITION:
- # Persistently send the move command
- state.Gripper_data_out[0] = self.target_position
- state.Gripper_data_out[1] = self.speed
- state.Gripper_data_out[2] = self.current
- state.Gripper_data_out[4] = 0 # Operation mode
-
- # Pack bitfield with direct bitwise operations (avoid bytearray/hex conversions)
- bits = [1, 1, int(not state.InOut_in[4]), 1, 0, 0, 0, 0]
- val = 0
- for b in bits:
- val = (val << 1) | int(b)
- state.Gripper_data_out[3] = val
-
- object_detection = (
- state.Gripper_data_in[5] if len(state.Gripper_data_in) > 5 else 0
- )
- logger.debug(
- f" -> Gripper moving to {self.target_position} (current: {state.Gripper_data_in[1]}), object detected: {object_detection}"
- )
-
- # Use Debouncer from base class for object detection
- object_detected = self.object_debouncer.tick(object_detection != 0)
-
- # Check for completion
- current_position = state.Gripper_data_in[1]
- if abs(current_position - self.target_position) <= 5:
- logger.info(" -> Gripper move complete.")
- self.is_finished = True
- # Set command back to idle
- bits = [1, 0, int(not state.InOut_in[4]), 1, 0, 0, 0, 0]
- val = 0
- for b in bits:
- val = (val << 1) | int(b)
- state.Gripper_data_out[3] = val
- return ExecutionStatus.completed("Gripper move complete")
-
- # Check for object detection after debouncing
- if object_detected:
- if (object_detection == 1) and (
- self.target_position > current_position
- ):
- logger.info(
- " -> Gripper move holding position due to object detection when closing."
- )
- self.is_finished = True
- return ExecutionStatus.completed(
- "Object detected while closing - hold"
- )
-
- if (object_detection == 2) and (
- self.target_position < current_position
- ):
- logger.info(
- " -> Gripper move holding position due to object detection when opening."
- )
- self.is_finished = True
- return ExecutionStatus.completed(
- "Object detected while opening - hold"
- )
-
- return ExecutionStatus.executing("Moving gripper")
-
- # Should not reach here for known gripper types
- return ExecutionStatus.failed("Unknown gripper type")
+ self.fail(make_error(ErrorCode.MOTN_GRIPPER_TIMEOUT, state=str(self.state)))
+ return ExecutionStatusCode.FAILED
+
+ hw = state.gripper_hw
+
+ if self.state == ElectricGripperState.START:
+ if self.p.action == "calibrate":
+ self.state = ElectricGripperState.SEND_CALIBRATE
+ else:
+ self.state = ElectricGripperState.WAIT_FOR_POSITION
+
+ if self.state == ElectricGripperState.SEND_CALIBRATE:
+ logger.debug(" -> Sending one-shot calibrate command...")
+ hw.mode = 1
+ self.state = ElectricGripperState.WAITING_CALIBRATION
+ return ExecutionStatusCode.EXECUTING
+
+ if self.state == ElectricGripperState.WAITING_CALIBRATION:
+ self.wait_counter -= 1
+ if self.wait_counter <= 0:
+ logger.info(" -> Calibration delay finished.")
+ hw.mode = 0
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+ return ExecutionStatusCode.EXECUTING
+
+ if self.state == ElectricGripperState.WAIT_FOR_POSITION:
+ hw.target_position = self._hw_position
+ hw.target_speed = self._hw_speed
+ hw.target_current = self.p.current
+ hw.mode = 0
+
+ estop = not state.InOut_in[4]
+ hw.set_command_bits(move_active=True, estop=estop)
+
+ object_detection = hw.object_detection
+
+ object_detected = self.object_debouncer.tick(object_detection != 0)
+
+ current_position = hw.feedback_position
+ if abs(current_position - self._hw_position) <= 5:
+ self.finish()
+ hw.set_command_bits(move_active=False, estop=estop)
+ return ExecutionStatusCode.COMPLETED
+
+ if object_detected:
+ if (object_detection == 1) and (self._hw_position > current_position):
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+
+ if (object_detection == 2) and (self._hw_position < current_position):
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+
+ return ExecutionStatusCode.EXECUTING
+
+ self.fail(make_error(ErrorCode.MOTN_GRIPPER_UNKNOWN))
+ return ExecutionStatusCode.FAILED
diff --git a/parol6/commands/joint_commands.py b/parol6/commands/joint_commands.py
index 792f985..2894127 100644
--- a/parol6/commands/joint_commands.py
+++ b/parol6/commands/joint_commands.py
@@ -1,167 +1,294 @@
"""
Joint Movement Commands
-Contains commands for direct joint angle movements
+Contains commands for joint-space movements with unified trajectory execution.
+
+Uses unified motion pipeline with TOPP-RA for time-optimal path parameterization.
+All commands inherit from JointMoveCommandBase which uses MotionExecutor for
+jerk-limited smoothing during execution.
"""
+from __future__ import annotations
+
import logging
+from abc import abstractmethod
+from typing import TYPE_CHECKING, TypeVar
import numpy as np
import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-from parol6.commands.base import ExecutionStatus, MotionCommand, MotionProfile
-from parol6.config import DEFAULT_ACCEL_PERCENT, INTERVAL_S, TRACE
+from parol6.commands.base import TrajectoryMoveCommandBase
+from parol6.config import (
+ INTERVAL_S,
+ MAX_BLEND_LOOKAHEAD,
+ steps_to_rad,
+)
+from parol6.motion import JointPath, TrajectoryBuilder
+from parol6.protocol.wire import CmdType, MoveJCmd, MoveJPoseCmd, MotionParamsMixin
from parol6.server.command_registry import register_command
-from parol6.server.state import ControllerState
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+from parol6.utils.errors import IKError, TrajectoryPlanningError
+from parol6.utils.ik import solve_ik
+from pinokin import se3_from_rpy
+
+_MP = TypeVar("_MP", bound=MotionParamsMixin)
+
+if TYPE_CHECKING:
+ from parol6.server.state import ControllerState
logger = logging.getLogger(__name__)
-@register_command("MOVEJOINT")
-class MoveJointCommand(MotionCommand):
- """
- A non-blocking command to move the robot's joints to a specific configuration.
- It pre-calculates the entire trajectory upon initialization.
+class JointMoveCommandBase(TrajectoryMoveCommandBase[_MP]):
+ """Base class for joint-space trajectory commands.
+
+ Subclasses must implement:
+ - _get_target_rad(): Return target joint positions in radians
+
+ This base class provides:
+ - do_setup(): Builds trajectory via JointPath.interpolate + TrajectoryBuilder
+ - execute_step(): Inherited from TrajectoryMoveCommandBase (uses MotionExecutor)
"""
__slots__ = (
- "command_step",
- "trajectory_steps",
- "target_angles",
- "target_radians",
- "duration",
- "velocity_percent",
- "accel_percent",
- "trajectory_type",
+ "_T_buf",
+ "_q_full_buf",
+ "_diff_buf",
+ "_current_rad_buf",
+ "_tcp_mm_buf",
)
- def __init__(self):
- super().__init__()
- self.command_step = 0
- self.trajectory_steps: np.ndarray = np.empty((0, 6), dtype=np.int32)
+ def __init__(self, p: _MP) -> None:
+ super().__init__(p)
+ nq = PAROL6_ROBOT.robot.nq
+ self._T_buf = np.zeros((4, 4), dtype=np.float64, order="F")
+ self._q_full_buf = np.zeros(nq, dtype=np.float64)
+ self._diff_buf = np.empty(3, dtype=np.float64)
+ self._current_rad_buf = np.zeros(6, dtype=np.float64)
+ self._tcp_mm_buf = np.empty((MAX_BLEND_LOOKAHEAD + 2, 3), dtype=np.float64)
- # Parameters (set in do_match())
- self.target_angles = None
- self.target_radians = None
- self.duration = None
- self.velocity_percent = None
- self.accel_percent = DEFAULT_ACCEL_PERCENT
- self.trajectory_type = "trapezoid"
+ @abstractmethod
+ def _get_target_rad(
+ self, state: ControllerState, current_rad: np.ndarray
+ ) -> np.ndarray:
+ """Return target joint positions in radians.
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
+ Args:
+ state: Controller state
+ current_rad: Current joint positions in radians (for IK seed if needed)
"""
- Parse MOVEJOINT command parameters.
+ ...
- Format: MOVEJOINT|j1|j2|j3|j4|j5|j6|duration|speed
- Example: MOVEJOINT|0|45|90|-45|30|0|None|50
+ def do_setup(self, state: ControllerState) -> None:
+ """Build trajectory from current position to target using unified motion pipeline."""
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ target_rad = self._get_target_rad(state, self._q_rad_buf)
+ current_rad = self._q_rad_buf
- Args:
- parts: Pre-split message parts
+ joint_path = JointPath.interpolate(current_rad, target_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=state.motion_profile,
+ velocity_frac=self.p.resolved_speed,
+ accel_frac=self.p.accel,
+ duration=self.p.resolved_duration,
+ dt=INTERVAL_S,
+ )
- Returns:
- Tuple of (can_handle, error_message)
- """
- if len(parts) != 9:
- return (
- False,
- "MOVEJOINT requires 8 parameters: 6 joint angles, duration, speed",
- )
+ trajectory = builder.build()
+ self.trajectory_steps = trajectory.steps
+ self._duration = trajectory.duration
- # Parse joint angles
- self.target_angles = np.asarray(
- [float(parts[i]) for i in range(1, 7)], dtype=float
- )
+ if len(self.trajectory_steps) == 0:
+ raise TrajectoryPlanningError(
+ make_error(ErrorCode.TRAJ_NO_STEPS, detail="")
+ )
- # Parse duration and speed
- self.duration = None if parts[7].upper() == "NONE" else float(parts[7])
- self.velocity_percent = None if parts[8].upper() == "NONE" else float(parts[8])
-
- # Validate joint limits
- self.target_radians = np.deg2rad(self.target_angles)
- for i in range(6):
- min_rad, max_rad = PAROL6_ROBOT.joint.limits.rad[i]
- if not (min_rad <= self.target_radians[i] <= max_rad):
- return (
- False,
- f"Joint {i + 1} target ({self.target_angles[i]} deg) is out of range",
- )
-
- self.log_debug("Parsed MoveJoint: %s", self.target_angles)
- self.is_valid = True
- return (True, None)
-
- def do_setup(self, state: "ControllerState") -> None:
- """Calculates the trajectory just before execution begins."""
self.log_trace(
- "Preparing trajectory for MoveJoint to %s...", self.target_angles
+ " -> Using profile: %s, duration: %.3fs, steps: %d",
+ state.motion_profile,
+ trajectory.duration,
+ len(self.trajectory_steps),
)
- if self.duration and self.duration > 0:
- if self.velocity_percent is not None:
- self.log_trace(
- " -> INFO: Both duration and velocity were provided. Using duration."
- )
- initial_pos_steps = state.Position_in
- target_pos_steps = np.asarray(
- PAROL6_ROBOT.ops.rad_to_steps(self.target_radians), dtype=np.int32
- )
- dur = float(self.duration)
- self.trajectory_steps = MotionProfile.from_duration_steps(
- initial_pos_steps, target_pos_steps, dur, dt=INTERVAL_S
- )
+ def do_setup_with_blend(
+ self,
+ state: ControllerState,
+ next_cmds: "list[TrajectoryMoveCommandBase]",
+ ) -> int:
+ """Build composite joint-space trajectory with blend zones."""
+ if self.blend_radius <= 0 or not next_cmds:
+ self.do_setup(state)
+ return 0
- elif self.velocity_percent is not None:
- initial_pos_steps = state.Position_in
- target_pos_steps = np.asarray(
- PAROL6_ROBOT.ops.rad_to_steps(self.target_radians), dtype=np.int32
- )
- accel_percent = (
- float(self.accel_percent)
- if self.accel_percent is not None
- else float(DEFAULT_ACCEL_PERCENT)
- )
- self.trajectory_steps = MotionProfile.from_velocity_percent(
- initial_pos_steps,
- target_pos_steps,
- float(self.velocity_percent),
- accel_percent,
- dt=INTERVAL_S,
- )
- self.log_trace(" -> Command is valid (duration calculated from speed).")
-
- else:
- logger.log(TRACE, " -> Using conservative values for MoveJoint.")
- command_len = 200
- initial_pos_steps = state.Position_in
- target_pos_steps = np.asarray(
- PAROL6_ROBOT.ops.rad_to_steps(self.target_radians), dtype=np.int32
- )
- total_dur = float(command_len) * INTERVAL_S
- self.trajectory_steps = MotionProfile.from_duration_steps(
- initial_pos_steps, target_pos_steps, total_dur, dt=INTERVAL_S
- )
+ chain: list[JointMoveCommandBase] = [self]
+ for cmd in next_cmds:
+ if isinstance(cmd, JointMoveCommandBase):
+ chain.append(cmd)
+ else:
+ break
+ if len(chain) < 2:
+ self.do_setup(state)
+ return 0
- if len(self.trajectory_steps) == 0:
- raise ValueError(
- "Trajectory calculation resulted in no steps. Command is invalid."
+ from parol6.motion.geometry import build_composite_joint_path
+
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ self._current_rad_buf[:] = self._q_rad_buf
+ current_rad = self._current_rad_buf
+
+ waypoints_rad: list[np.ndarray] = [current_rad]
+ blend_radii_mm: list[float] = []
+
+ for i, cmd in enumerate(chain):
+ target_rad = cmd._get_target_rad(state, current_rad)
+ waypoints_rad.append(target_rad)
+ if i < len(chain) - 1:
+ blend_radii_mm.append(cmd.blend_radius)
+ current_rad = target_rad
+
+ if len(waypoints_rad) < 3:
+ self.do_setup(state)
+ return 0
+
+ # FK at each waypoint for TCP positions (zone sizing)
+ nq = PAROL6_ROBOT.robot.nq
+ T_buf = self._T_buf
+ T_buf.fill(0)
+ q_full = self._q_full_buf
+ q_full.fill(0)
+ n_wp = len(waypoints_rad)
+ tcp_mm = self._tcp_mm_buf[:n_wp]
+ for wi, q in enumerate(waypoints_rad):
+ nj = min(len(q), nq)
+ q_full[:nj] = q[:nj]
+ q_full[nj:] = 0.0
+ PAROL6_ROBOT.robot.fkine_into(q_full, T_buf)
+ tcp_mm[wi, 0] = T_buf[0, 3] * 1000.0
+ tcp_mm[wi, 1] = T_buf[1, 3] * 1000.0
+ tcp_mm[wi, 2] = T_buf[2, 3] * 1000.0
+
+ # Convert mm blend radii to segment fractions via TCP distances
+ blend_fracs: list[tuple[float, float]] = []
+ diff_buf = self._diff_buf
+ for i in range(len(blend_radii_mm)):
+ wp_idx = i + 1
+ np.subtract(tcp_mm[wp_idx], tcp_mm[wp_idx - 1], diff_buf)
+ seg_before = float(np.linalg.norm(diff_buf))
+ np.subtract(tcp_mm[wp_idx + 1], tcp_mm[wp_idx], diff_buf)
+ seg_after = float(np.linalg.norm(diff_buf))
+ r = blend_radii_mm[i]
+ frac_before = min(r / seg_before, 0.5) if seg_before > 1e-6 else 0.0
+ frac_after = min(r / seg_after, 0.5) if seg_after > 1e-6 else 0.0
+ blend_fracs.append((frac_before, frac_after))
+
+ try:
+ positions = build_composite_joint_path(
+ waypoints_rad,
+ blend_fracs,
+ samples_per_segment=50,
)
- self.log_trace(
- " -> Trajectory prepared with %s steps.", len(self.trajectory_steps)
+ except Exception as e:
+ self.log_warning("Joint blend path failed: %s, falling back", e)
+ self.do_setup(state)
+ return 0
+
+ joint_path = JointPath(positions=positions)
+
+ # Use minimum speed/accel across chain, sum durations when all duration-based
+ min_speed = self.p.resolved_speed
+ min_accel = self.p.accel
+ total_duration = self.p.resolved_duration
+ all_have_duration = total_duration is not None
+
+ for i in range(1, len(chain)):
+ cmd = chain[i]
+ s = cmd.p.resolved_speed
+ a = cmd.p.accel
+ if s < min_speed:
+ min_speed = s
+ if a < min_accel:
+ min_accel = a
+ d = cmd.p.resolved_duration
+ if all_have_duration and d is not None:
+ total_duration += d
+ else:
+ all_have_duration = False
+ total_duration = None
+
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=state.motion_profile,
+ velocity_frac=min_speed,
+ accel_frac=min_accel,
+ duration=total_duration,
+ dt=INTERVAL_S,
)
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid command")
- )
+ trajectory = builder.build()
+ self.trajectory_steps = trajectory.steps
+ self._duration = trajectory.duration
+
+ consumed = len(chain) - 1
+ self.log_info(
+ " -> Blended joint trajectory: %d segments, steps=%d, duration=%.3fs",
+ len(waypoints_rad) - 1,
+ len(self.trajectory_steps),
+ trajectory.duration,
+ )
+ return consumed
+
+
+@register_command(CmdType.MOVEJ)
+class MoveJCommand(JointMoveCommandBase[MoveJCmd]):
+ """Move the robot's joints to a specific configuration."""
+
+ PARAMS_TYPE = MoveJCmd
+
+ __slots__ = ()
+
+ def _get_target_rad(
+ self, state: ControllerState, current_rad: np.ndarray
+ ) -> np.ndarray:
+ """Return target joint positions in radians."""
+ target = np.deg2rad(self.p.angles)
+ if self.p.rel:
+ target += current_rad
+ return target
+
+
+@register_command(CmdType.MOVEJ_POSE)
+class MoveJPoseCommand(JointMoveCommandBase[MoveJPoseCmd]):
+ """Move the robot to a specific Cartesian pose via joint-space interpolation.
+
+ Uses IK to find the target joint configuration, then interpolates in joint space.
+ This is different from MoveL which follows a straight-line Cartesian path.
+ """
+
+ PARAMS_TYPE = MoveJPoseCmd
+
+ __slots__ = ()
+
+ def _get_target_rad(
+ self, state: ControllerState, current_rad: np.ndarray
+ ) -> np.ndarray:
+ """Solve IK for target pose and return joint positions in radians."""
+ pose = self.p.pose
+
+ target_pose = np.zeros((4, 4), dtype=np.float64)
+ se3_from_rpy(
+ pose[0] / 1000.0,
+ pose[1] / 1000.0,
+ pose[2] / 1000.0,
+ np.radians(pose[3]),
+ np.radians(pose[4]),
+ np.radians(pose[5]),
+ target_pose,
+ )
+
+ ik_solution = solve_ik(PAROL6_ROBOT.robot, target_pose, current_rad)
+ if not ik_solution.success:
+ detail = ik_solution.violations or ""
+ raise IKError(make_error(ErrorCode.IK_TARGET_UNREACHABLE, detail=detail))
- if self.command_step >= len(self.trajectory_steps):
- logger.log(TRACE, f"{type(self).__name__} finished.")
- self.is_finished = True
- self.stop_and_idle(state)
- return ExecutionStatus.completed("MOVEJOINT")
- else:
- self.set_move_position(state, self.trajectory_steps[self.command_step])
- self.command_step += 1
- return ExecutionStatus.executing("MOVEJOINT")
+ return np.asarray(ik_solution.q, dtype=np.float64)
diff --git a/parol6/commands/query_commands.py b/parol6/commands/query_commands.py
index 597f7cb..2d99b07 100644
--- a/parol6/commands/query_commands.py
+++ b/parol6/commands/query_commands.py
@@ -6,327 +6,371 @@
import numpy as np
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
from parol6 import config as cfg
-from parol6.commands.base import ExecutionStatus, QueryCommand
+from parol6.commands.base import QueryCommand
+from parol6.protocol.wire import (
+ ActivityCmd,
+ AnglesCmd,
+ AnglesResultStruct,
+ CmdType,
+ CurrentActionResultStruct,
+ EnablementResultStruct,
+ ErrorCmd,
+ ErrorResultStruct,
+ IOCmd,
+ IOResultStruct,
+ JointSpeedsCmd,
+ LoopStatsCmd,
+ LoopStatsResultStruct,
+ PingCmd,
+ PingResultStruct,
+ PoseCmd,
+ PoseResultStruct,
+ ProfileCmd,
+ ProfileResultStruct,
+ QueryType,
+ QueueCmd,
+ QueueResultStruct,
+ ReachableCmd,
+ IsSimulatorCmd,
+ IsSimulatorResultStruct,
+ SpeedsResultStruct,
+ TcpOffsetCmd,
+ TcpOffsetResultStruct,
+ StatusCmd,
+ StatusResultStruct,
+ TcpSpeedCmd,
+ TcpSpeedResultStruct,
+ ToolStatusCmd,
+ ToolResultStruct,
+ ToolStatusResultStruct,
+ ToolsCmd,
+ pack_response,
+)
from parol6.server.command_registry import register_command
-from parol6.server.state import get_fkine_flat_mm, get_fkine_matrix
+from parol6.server.state import get_fkine_flat_mm, get_fkine_se3
from parol6.server.status_cache import get_cache
-from parol6.server.transports import is_simulation_mode
from parol6.tools import list_tools
if TYPE_CHECKING:
from parol6.server.state import ControllerState
-@register_command("GET_POSE")
-class GetPoseCommand(QueryCommand):
+@register_command(CmdType.POSE)
+class PoseCommand(QueryCommand[PoseCmd]):
"""Get current robot pose matrix in specified frame (WRF or TRF)."""
- __slots__ = ("frame",)
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_POSE command and parse optional frame parameter."""
- if parts[0].upper() == "GET_POSE":
- # Parse optional frame parameter (default WRF for backward compatibility)
- if len(parts) > 1:
- self.frame = parts[1].upper()
- if self.frame not in ("WRF", "TRF"):
- return False, f"Invalid frame: {self.frame}. Must be WRF or TRF"
- else:
- self.frame = "WRF"
- return True, None
- return False, None
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return pose data with translation in mm."""
- if self.frame == "TRF":
- # Get current pose as 4x4 matrix (translation in meters)
- T = get_fkine_matrix(state)
-
- # Compute inverse to express world in tool frame: T^(-1) = [R^T | -R^T * t]
- T_inv = np.linalg.inv(T)
+ PARAMS_TYPE = PoseCmd
+ QUERY_TYPE = QueryType.POSE
+
+ __slots__ = ()
- # Convert translation to mm
+ def compute(self, state: "ControllerState") -> bytes:
+ frame = self.p.frame or "WRF"
+ if frame == "TRF":
+ T = get_fkine_se3(state)
+ T_inv = np.linalg.inv(T)
T_inv[0:3, 3] *= 1000.0
+ return pack_response(PoseResultStruct(pose=T_inv.reshape(-1).tolist()))
+ return pack_response(PoseResultStruct(pose=get_fkine_flat_mm(state).tolist()))
+
- # Flatten row-major (same format as WRF)
- pose_flat = T_inv.reshape(-1)
- else:
- # WRF: use existing implementation
- pose_flat = get_fkine_flat_mm(state)
+@register_command(CmdType.ANGLES)
+class AnglesCommand(QueryCommand[AnglesCmd]):
+ """Get current joint angles in degrees."""
- pose_str = ",".join(map(str, pose_flat))
- self.reply_ascii("POSE", pose_str)
+ PARAMS_TYPE = AnglesCmd
+ QUERY_TYPE = QueryType.ANGLES
- self.finish()
- return ExecutionStatus.completed("Pose sent")
+ __slots__ = ()
+ def compute(self, state: "ControllerState") -> bytes:
+ cfg.steps_to_rad(state.Position_in, self._q_rad_buf)
+ return pack_response(
+ AnglesResultStruct(angles=np.rad2deg(self._q_rad_buf).tolist())
+ )
-@register_command("GET_ANGLES")
-class GetAnglesCommand(QueryCommand):
- """Get current joint angles in degrees."""
- __slots__ = ()
+@register_command(CmdType.IO)
+class IOCommand(QueryCommand[IOCmd]):
+ """Get current I/O status."""
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_ANGLES command."""
- if parts[0].upper() == "GET_ANGLES":
- return True, None
- return False, None
+ PARAMS_TYPE = IOCmd
+ QUERY_TYPE = QueryType.IO
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return angle data."""
- angles_rad = PAROL6_ROBOT.ops.steps_to_rad(state.Position_in)
- angles_deg = np.rad2deg(angles_rad)
- angles_str = ",".join(map(str, angles_deg))
- self.reply_ascii("ANGLES", angles_str)
+ __slots__ = ()
- self.finish()
- return ExecutionStatus.completed("Angles sent")
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(IOResultStruct(io=state.InOut_in[:5].tolist()))
-@register_command("GET_IO")
-class GetIOCommand(QueryCommand):
- """Get current I/O status."""
+@register_command(CmdType.JOINT_SPEEDS)
+class JointSpeedsCommand(QueryCommand[JointSpeedsCmd]):
+ """Get current joint speeds."""
+
+ PARAMS_TYPE = JointSpeedsCmd
+ QUERY_TYPE = QueryType.SPEEDS
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_IO command."""
- if parts[0].upper() == "GET_IO":
- return True, None
- return False, None
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(SpeedsResultStruct(speeds=state.Speed_in.tolist()))
+
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return I/O data."""
- io_status_str = ",".join(map(str, state.InOut_in[:5]))
- self.reply_ascii("IO", io_status_str)
+@register_command(CmdType.STATUS)
+class StatusCommand(QueryCommand[StatusCmd]):
+ """Get aggregated robot status (pose, angles, I/O, tool_status) from cache."""
- self.finish()
- return ExecutionStatus.completed("I/O sent")
+ PARAMS_TYPE = StatusCmd
+ QUERY_TYPE = QueryType.STATUS
+ __slots__ = ()
-@register_command("GET_GRIPPER")
-class GetGripperCommand(QueryCommand):
- """Get current gripper status."""
+ def compute(self, state: "ControllerState") -> bytes:
+ cache = get_cache()
+ cache.update_from_state(state)
+ ts = cache.tool_status
+ return pack_response(
+ StatusResultStruct(
+ pose=cache.pose.tolist(),
+ angles=cache.angles_deg.tolist(),
+ speeds=cache.speeds_rad_s.tolist(),
+ io=cache.io.tolist(),
+ tool_status=[
+ ts.key,
+ ts.state,
+ ts.engaged,
+ ts.part_detected,
+ ts.fault_code,
+ list(ts.positions),
+ list(ts.channels),
+ ],
+ )
+ )
+
+
+@register_command(CmdType.LOOP_STATS)
+class LoopStatsCommand(QueryCommand[LoopStatsCmd]):
+ """Return control-loop metrics."""
+
+ PARAMS_TYPE = LoopStatsCmd
+ QUERY_TYPE = QueryType.LOOP_STATS
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_GRIPPER command."""
- if parts[0].upper() == "GET_GRIPPER":
- return True, None
- return False, None
+ def compute(self, state: "ControllerState") -> bytes:
+ target_hz = 1.0 / max(cfg.INTERVAL_S, 1e-9)
+ mean_hz = (1.0 / state.mean_period_s) if state.mean_period_s > 0.0 else 0.0
+ return pack_response(
+ LoopStatsResultStruct(
+ target_hz=target_hz,
+ loop_count=state.loop_count,
+ overrun_count=state.overrun_count,
+ mean_period_s=state.mean_period_s,
+ std_period_s=state.std_period_s,
+ min_period_s=state.min_period_s,
+ max_period_s=state.max_period_s,
+ p95_period_s=state.p95_period_s,
+ p99_period_s=state.p99_period_s,
+ mean_hz=mean_hz,
+ )
+ )
+
+
+@register_command(CmdType.PING)
+class PingCommand(QueryCommand[PingCmd]):
+ """Respond to ping requests."""
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return gripper data."""
- gripper_status_str = ",".join(map(str, state.Gripper_data_in))
- self.reply_ascii("GRIPPER", gripper_status_str)
+ PARAMS_TYPE = PingCmd
+ QUERY_TYPE = QueryType.PING
- self.finish()
- return ExecutionStatus.completed("Gripper sent")
+ __slots__ = ()
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(
+ PingResultStruct(hardware_connected=int(state.hardware_connected))
+ )
-@register_command("GET_SPEEDS")
-class GetSpeedsCommand(QueryCommand):
- """Get current joint speeds."""
- __slots__ = ()
+@register_command(CmdType.TOOLS)
+class ToolsCommand(QueryCommand[ToolsCmd]):
+ """Get current tool configuration and available tools."""
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_SPEEDS command."""
- if parts[0].upper() == "GET_SPEEDS":
- return True, None
- return False, None
+ PARAMS_TYPE = ToolsCmd
+ QUERY_TYPE = QueryType.TOOL
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return speed data."""
- speeds_str = ",".join(map(str, state.Speed_in))
- self.reply_ascii("SPEEDS", speeds_str)
+ __slots__ = ()
- self.finish()
- return ExecutionStatus.completed("Speeds sent")
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(
+ ToolResultStruct(tool=state.current_tool, available=list_tools())
+ )
-@register_command("GET_STATUS")
-class GetStatusCommand(QueryCommand):
- """Get aggregated robot status (pose, angles, I/O, gripper) from cache."""
+@register_command(CmdType.TOOL_STATUS)
+class ToolStatusCommand(QueryCommand[ToolStatusCmd]):
+ """Get current tool status (key + DOF positions)."""
- __slots__ = ()
+ PARAMS_TYPE = ToolStatusCmd
+ QUERY_TYPE = QueryType.TOOL_STATUS
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_STATUS command."""
- if parts[0].upper() == "GET_STATUS":
- return True, None
- return False, None
+ __slots__ = ()
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return cached aggregated status (ASCII)."""
- # Always refresh cache from current state before replying
+ def compute(self, state: "ControllerState") -> bytes:
cache = get_cache()
cache.update_from_state(state)
- payload = cache.to_ascii()
- self.reply_text(payload) # Already has full format
+ ts = cache.tool_status
+ return pack_response(
+ ToolStatusResultStruct(
+ tool_key=ts.key,
+ state=ts.state,
+ engaged=ts.engaged,
+ part_detected=ts.part_detected,
+ fault_code=ts.fault_code,
+ positions=list(ts.positions),
+ channels=list(ts.channels),
+ )
+ )
+
+
+@register_command(CmdType.ACTIVITY)
+class ActivityCommand(QueryCommand[ActivityCmd]):
+ """Get the current executing action/command and its state."""
- self.finish()
- return ExecutionStatus.completed("Status sent")
+ PARAMS_TYPE = ActivityCmd
+ QUERY_TYPE = QueryType.CURRENT_ACTION
+ __slots__ = ()
-@register_command("GET_GCODE_STATUS")
-class GetGcodeStatusCommand(QueryCommand):
- """Get GCODE interpreter status."""
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(
+ CurrentActionResultStruct(
+ current=state.action_current,
+ state=state.action_state.name,
+ next=state.action_next,
+ params=state.action_params,
+ )
+ )
- __slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_GCODE_STATUS command."""
- if parts[0].upper() == "GET_GCODE_STATUS":
- return True, None
- return False, None
+@register_command(CmdType.QUEUE)
+class QueueCommand(QueryCommand[QueueCmd]):
+ """Get the list of queued non-streamable commands."""
+
+ PARAMS_TYPE = QueueCmd
+ QUERY_TYPE = QueryType.QUEUE
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return GCODE status."""
- if self.gcode_interpreter:
- gcode_status = self.gcode_interpreter.get_status()
- else:
- gcode_status = {
- "is_running": False,
- "is_paused": False,
- "current_line": None,
- "total_lines": 0,
- "state": {},
- }
+ __slots__ = ()
- self.reply_json("GCODE_STATUS", gcode_status)
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(
+ QueueResultStruct(
+ queue=state.queue_nonstreamable,
+ executing_index=state.executing_command_index,
+ completed_index=state.completed_command_index,
+ last_checkpoint=state.last_checkpoint,
+ queued_duration=state.queued_duration,
+ )
+ )
- self.finish()
- return ExecutionStatus.completed("GCODE status sent")
+@register_command(CmdType.PROFILE)
+class ProfileCommand(QueryCommand[ProfileCmd]):
+ """Query the current motion profile."""
-@register_command("GET_LOOP_STATS")
-class GetLoopStatsCommand(QueryCommand):
- """Return control-loop metrics (no ACK dependency)."""
+ PARAMS_TYPE = ProfileCmd
+ QUERY_TYPE = QueryType.PROFILE
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- if parts[0].upper() == "GET_LOOP_STATS":
- return True, None
- return False, None
+ def compute(self, state: "ControllerState") -> bytes:
+ return pack_response(ProfileResultStruct(profile=state.motion_profile))
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- target_hz = 1.0 / max(cfg.INTERVAL_S, 1e-9)
- ema_hz = (1.0 / state.ema_period_s) if state.ema_period_s > 0.0 else 0.0
- payload = {
- "target_hz": float(target_hz),
- "loop_count": int(state.loop_count),
- "overrun_count": int(state.overrun_count),
- "last_period_s": float(state.last_period_s),
- "ema_period_s": float(state.ema_period_s),
- "ema_hz": float(ema_hz),
- }
- self.reply_json("LOOP_STATS", payload)
- self.finish()
- return ExecutionStatus.completed("Loop stats sent")
-
-
-@register_command("PING")
-class PingCommand(QueryCommand):
- """Respond to ping requests."""
+
+@register_command(CmdType.REACHABLE)
+class ReachableCommand(QueryCommand[ReachableCmd]):
+ """Get joint and Cartesian enablement flags."""
+
+ PARAMS_TYPE = ReachableCmd
+ QUERY_TYPE = QueryType.ENABLEMENT
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a PING command."""
- if parts[0].upper() == "PING":
- return True, None
- return False, None
+ def compute(self, state: "ControllerState") -> bytes:
+ cache = get_cache()
+ cache.update_from_state(state)
+ return pack_response(
+ EnablementResultStruct(
+ joint_en=cache.joint_en.tolist(),
+ cart_en_wrf=cache.cart_en_wrf.tolist(),
+ cart_en_trf=cache.cart_en_trf.tolist(),
+ )
+ )
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return PONG with serial connectivity bit (0 in simulator mode)."""
- # Check if we're in simulator mode
- sim = is_simulation_mode()
- # In simulator mode, report SERIAL=0 (not real hardware)
- # Otherwise, check if we've observed fresh serial frames recently
- if sim:
- serial_connected = 0
- else:
- serial_connected = 1 if get_cache().age_s() <= cfg.STATUS_STALE_S else 0
+@register_command(CmdType.ERROR)
+class ErrorCommand(QueryCommand[ErrorCmd]):
+ """Get the current error state."""
- self.reply_ascii("PONG", f"SERIAL={serial_connected}")
+ PARAMS_TYPE = ErrorCmd
+ QUERY_TYPE = QueryType.ERROR
- self.finish()
- return ExecutionStatus.completed("PONG")
+ __slots__ = ()
+ def compute(self, state: "ControllerState") -> bytes:
+ error = state.error
+ return pack_response(
+ ErrorResultStruct(
+ error=error.to_wire() if error is not None else None,
+ )
+ )
-@register_command("GET_TOOL")
-class GetToolCommand(QueryCommand):
- """Get current tool configuration and available tools."""
- __slots__ = ()
+@register_command(CmdType.TCP_SPEED)
+class TcpSpeedCommand(QueryCommand[TcpSpeedCmd]):
+ """Get current TCP linear speed in mm/s."""
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_TOOL command."""
- if parts[0].upper() == "GET_TOOL":
- return True, None
- return False, None
+ PARAMS_TYPE = TcpSpeedCmd
+ QUERY_TYPE = QueryType.TCP_SPEED
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return current tool info."""
+ __slots__ = ()
- payload = {"tool": state.current_tool, "available": list_tools()}
- self.reply_json("TOOL", payload)
+ def compute(self, state: "ControllerState") -> bytes:
+ cache = get_cache()
+ cache.update_from_state(state)
+ return pack_response(TcpSpeedResultStruct(speed=cache.tcp_speed))
- self.finish()
- return ExecutionStatus.completed("Tool info sent")
+@register_command(CmdType.IS_SIMULATOR)
+class IsSimulatorCommand(QueryCommand[IsSimulatorCmd]):
+ """Query current simulator mode state."""
-@register_command("GET_CURRENT_ACTION")
-class GetCurrentActionCommand(QueryCommand):
- """Get the current executing action/command and its state."""
+ PARAMS_TYPE = IsSimulatorCmd
+ QUERY_TYPE = QueryType.IS_SIMULATOR
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_CURRENT_ACTION command."""
- if parts[0].upper() == "GET_CURRENT_ACTION":
- return True, None
- return False, None
+ def compute(self, state: "ControllerState") -> bytes:
+ from parol6.server.transports.transport_factory import is_simulation_mode
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return current action info."""
- payload = {
- "current": state.action_current,
- "state": state.action_state,
- "next": state.action_next,
- }
- self.reply_json("ACTION", payload)
+ return pack_response(IsSimulatorResultStruct(active=is_simulation_mode()))
- self.finish()
- return ExecutionStatus.completed("Current action info sent")
+@register_command(CmdType.TCP_OFFSET)
+class TcpOffsetCommand(QueryCommand[TcpOffsetCmd]):
+ """Query current TCP offset in mm."""
-@register_command("GET_QUEUE")
-class GetQueueCommand(QueryCommand):
- """Get the list of queued non-streamable commands."""
+ PARAMS_TYPE = TcpOffsetCmd
+ QUERY_TYPE = QueryType.TCP_OFFSET
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a GET_QUEUE command."""
- if parts[0].upper() == "GET_QUEUE":
- return True, None
- return False, None
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute immediately and return queue info."""
- payload = {
- "non_streamable": state.queue_nonstreamable,
- "size": len(state.queue_nonstreamable),
- }
- self.reply_json("QUEUE", payload)
-
- self.finish()
- return ExecutionStatus.completed("Queue info sent")
+ def compute(self, state: "ControllerState") -> bytes:
+ offset = state.tcp_offset_m
+ return pack_response(
+ TcpOffsetResultStruct(
+ x=offset[0] * 1000,
+ y=offset[1] * 1000,
+ z=offset[2] * 1000,
+ )
+ )
diff --git a/parol6/commands/servo_commands.py b/parol6/commands/servo_commands.py
new file mode 100644
index 0000000..90ac4ff
--- /dev/null
+++ b/parol6/commands/servo_commands.py
@@ -0,0 +1,311 @@
+"""
+Servo Commands — streaming position targets (not queued).
+
+ServoJ: joint-space position target via StreamingExecutor
+ServoJPose: joint-space target from Cartesian pose (IK + StreamingExecutor)
+ServoL: Cartesian-space target via CartesianStreamingExecutor + IK
+"""
+
+import logging
+import math
+
+import numpy as np
+from numba import njit
+
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from parol6.config import (
+ INTERVAL_S,
+ LIMITS,
+ rad_to_steps,
+ steps_to_rad,
+)
+from parol6.protocol.wire import CmdType, ServoJCmd, ServoJPoseCmd, ServoLCmd
+from parol6.server.command_registry import register_command
+from parol6.server.state import ControllerState, get_fkine_se3
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+from parol6.utils.errors import IKError
+from parol6.utils.ik import RateLimitedWarning, solve_ik
+from pinokin import se3_from_rpy
+
+from .base import ExecutionStatusCode, MotionCommand
+
+logger = logging.getLogger(__name__)
+
+# Velocity ratio uses hardware limits (jog limits only apply to jog_j/jog_l)
+_JOINT_MAX_STEP_INV = 1.0 / (
+ np.array(LIMITS.joint.hard.velocity, dtype=np.float64) * INTERVAL_S
+)
+# Differential wrist coupling: motor 6 = J6*ratio[5] + J4*ratio[3] in step space
+
+_J4_STEP_FACTOR: float = (
+ float(PAROL6_ROBOT.joint.ratio[3]) / PAROL6_ROBOT.radian_per_step_constant
+)
+_J6_STEP_FACTOR: float = (
+ float(PAROL6_ROBOT.joint.ratio[5]) / PAROL6_ROBOT.radian_per_step_constant
+)
+_MOTOR6_MAX_STEP_INV: float = 1.0 / (
+ float(PAROL6_ROBOT._joint_max_speed_hw[5]) * INTERVAL_S
+)
+_ik_warn = RateLimitedWarning()
+
+
+@njit(cache=True)
+def _max_vel_ratio_jit(
+ target_q: np.ndarray,
+ current_q: np.ndarray,
+) -> float:
+ """Max per-tick velocity ratio across all joints. >1.0 means limit exceeded.
+
+ Accounts for the differential wrist coupling: motor6 drives both J6 and
+ compensates for J4 rotation. The effective motor 6 step velocity is
+ ``dJ6 * ratio[5] + dJ4 * ratio[3]`` (in step space), which must stay
+ within motor 6's hardware speed limit.
+ """
+ max_ratio = 0.0
+ n = target_q.shape[0]
+ for i in range(n):
+ r = abs(target_q[i] - current_q[i]) * _JOINT_MAX_STEP_INV[i]
+ if r > max_ratio:
+ max_ratio = r
+ # Differential wrist: motor 6 effective speed includes J4 coupling
+ if n >= 6:
+ dq4_steps = abs(target_q[3] - current_q[3]) * _J4_STEP_FACTOR
+ dq6_steps = abs(target_q[5] - current_q[5]) * _J6_STEP_FACTOR
+ motor6_ratio = (dq4_steps + dq6_steps) * _MOTOR6_MAX_STEP_INV
+ if motor6_ratio > max_ratio:
+ max_ratio = motor6_ratio
+ return max_ratio
+
+
+def _streaming_joint_step(
+ cmd: "ServoJCommand | ServoJPoseCommand", state: ControllerState
+) -> ExecutionStatusCode:
+ """Shared execute_step for ServoJ and ServoJPose commands."""
+ se = state.streaming_executor
+
+ if not cmd._initialized or not se.active:
+ steps_to_rad(state.Position_in, cmd._q_rad_buf)
+ se.sync_position(cmd._q_rad_buf)
+ se.set_limits(cmd.p.speed, cmd.p.accel)
+ cmd._initialized = True
+
+ se.set_position_target(cmd._target_rad)
+ pos_rad, _vel, finished = se.tick()
+
+ cmd._pos_rad_buf[:] = pos_rad
+ rad_to_steps(cmd._pos_rad_buf, cmd._steps_buf)
+ cmd.set_move_position(state, cmd._steps_buf)
+
+ if finished:
+ se.active = False
+ cmd.finish()
+ return ExecutionStatusCode.COMPLETED
+
+ return ExecutionStatusCode.EXECUTING
+
+
+@register_command(CmdType.SERVOJ)
+class ServoJCommand(MotionCommand[ServoJCmd]):
+ """Streaming joint position target.
+
+ Uses StreamingExecutor with set_position_target() for smooth Ruckig-
+ interpolated motion to the target joint angles.
+ """
+
+ PARAMS_TYPE = ServoJCmd
+ streamable = True
+
+ __slots__ = (
+ "_initialized",
+ "_target_rad",
+ "_pos_rad_buf",
+ )
+
+ def __init__(self, p: ServoJCmd):
+ super().__init__(p)
+ self._initialized = False
+ self._target_rad = [0.0] * 6
+ self._pos_rad_buf = np.zeros(6, dtype=np.float64)
+
+ def do_setup(self, state: ControllerState) -> None:
+ # Convert target from degrees to radians into pre-allocated list
+ for i in range(6):
+ self._target_rad[i] = math.radians(self.p.angles[i])
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ return _streaming_joint_step(self, state)
+
+
+@register_command(CmdType.SERVOJ_POSE)
+class ServoJPoseCommand(MotionCommand[ServoJPoseCmd]):
+ """Streaming joint position target via Cartesian pose.
+
+ Solves IK for the target pose, then uses StreamingExecutor like ServoJ.
+ """
+
+ PARAMS_TYPE = ServoJPoseCmd
+ streamable = True
+
+ __slots__ = (
+ "_initialized",
+ "_target_rad",
+ "_pos_rad_buf",
+ "_target_se3",
+ )
+
+ def __init__(self, p: ServoJPoseCmd):
+ super().__init__(p)
+ self._initialized = False
+ self._target_rad = [0.0] * 6
+ self._pos_rad_buf = np.zeros(6, dtype=np.float64)
+ self._target_se3 = np.zeros((4, 4), dtype=np.float64)
+
+ def do_setup(self, state: ControllerState) -> None:
+ pose = self.p.pose
+
+ # Build target SE3 from [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg]
+ se3_from_rpy(
+ pose[0] / 1000.0,
+ pose[1] / 1000.0,
+ pose[2] / 1000.0,
+ math.radians(pose[3]),
+ math.radians(pose[4]),
+ math.radians(pose[5]),
+ self._target_se3,
+ )
+
+ # Solve IK using current joint angles as seed
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ ik_result = solve_ik(PAROL6_ROBOT.robot, self._target_se3, self._q_rad_buf)
+ if not ik_result.success or ik_result.q is None:
+ raise IKError(
+ make_error(
+ ErrorCode.IK_TARGET_UNREACHABLE,
+ detail=f"SERVOJ_POSE: IK failed for pose {[round(v, 1) for v in pose]}",
+ )
+ )
+
+ for i in range(6):
+ self._target_rad[i] = float(ik_result.q[i])
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ return _streaming_joint_step(self, state)
+
+
+@register_command(CmdType.SERVOL)
+class ServoLCommand(MotionCommand[ServoLCmd]):
+ """Streaming Cartesian position target.
+
+ CSE drives the Cartesian path (with its own internal Ruckig for smooth
+ TCP motion). IK converts each smoothed pose to joint space. If any
+ joint's per-tick delta exceeds its hardware velocity limit, all deltas
+ are scaled proportionally.
+ """
+
+ PARAMS_TYPE = ServoLCmd
+ streamable = True
+
+ __slots__ = (
+ "_initialized",
+ "_ik_stopping",
+ "_target_se3",
+ "_pos_rad_buf",
+ "_q_commanded",
+ "_q_ik_seed",
+ "_dq_buf",
+ )
+
+ def __init__(self, p: ServoLCmd):
+ super().__init__(p)
+ self._initialized = False
+ self._ik_stopping = False
+ self._target_se3 = np.zeros((4, 4), dtype=np.float64)
+ self._pos_rad_buf = np.zeros(6, dtype=np.float64)
+ self._q_commanded = np.zeros(6, dtype=np.float64)
+ self._q_ik_seed = np.zeros(6, dtype=np.float64)
+ self._dq_buf = np.zeros(6, dtype=np.float64)
+
+ def do_setup(self, state: ControllerState) -> None:
+ pose = self.p.pose
+
+ # Build target SE3 from [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg]
+ se3_from_rpy(
+ pose[0] / 1000.0,
+ pose[1] / 1000.0,
+ pose[2] / 1000.0,
+ math.radians(pose[3]),
+ math.radians(pose[4]),
+ math.radians(pose[5]),
+ self._target_se3,
+ )
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ cse = state.cartesian_streaming_executor
+
+ if not self._initialized or not cse.active:
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ cse.sync_pose(get_fkine_se3(state))
+ cse.set_limits(self.p.speed, self.p.accel)
+ self._q_commanded[:] = self._q_rad_buf
+ self._q_ik_seed[:] = self._q_rad_buf
+ self._initialized = True
+
+ # CSE drives Cartesian path
+ cse.set_pose_target(self._target_se3)
+ smoothed_pose, vel, finished = cse.tick()
+
+ # Solve IK seeded from previous IK result (branch continuity)
+ ik_result = solve_ik(
+ PAROL6_ROBOT.robot,
+ smoothed_pose,
+ self._q_ik_seed,
+ )
+ if ik_result.success and ik_result.q is not None:
+ if self._ik_stopping:
+ logger.info("[SERVOL] IK recovered — resuming")
+ steps_to_rad(state.Position_in, self._q_rad_buf)
+ cse.sync_pose(get_fkine_se3(state))
+ self._q_commanded[:] = self._q_rad_buf
+ self._q_ik_seed[:] = self._q_rad_buf
+ self._ik_stopping = False
+ # Let next tick handle normal tracking
+ else:
+ self._q_ik_seed[:] = ik_result.q
+
+ # Compute per-joint delta from commanded position
+ dq = self._dq_buf
+ for i in range(6):
+ dq[i] = float(ik_result.q[i]) - self._q_commanded[i]
+
+ # Velocity ratio: worst-case joint vs its per-tick hard limit
+ ratio = _max_vel_ratio_jit(ik_result.q, self._q_commanded)
+
+ if ratio > 1.0:
+ for i in range(6):
+ self._q_commanded[i] += dq[i] / ratio
+ cse.set_limits(self.p.speed / ratio, self.p.accel)
+ else:
+ self._q_commanded[:] = ik_result.q
+ cse.set_limits(self.p.speed, self.p.accel)
+ else:
+ # IK failed — graceful deceleration
+ if not self._ik_stopping:
+ _ik_warn(
+ logger,
+ "[SERVOL] IK failed — decelerating: pos=%s",
+ smoothed_pose[:3, 3],
+ )
+ cse.stop()
+ self._ik_stopping = True
+
+ self._pos_rad_buf[:] = self._q_commanded
+ rad_to_steps(self._pos_rad_buf, self._steps_buf)
+ self.set_move_position(state, self._steps_buf)
+
+ if finished and not self._ik_stopping:
+ self.finish()
+ cse.active = False
+ return ExecutionStatusCode.COMPLETED
+
+ return ExecutionStatusCode.EXECUTING
diff --git a/parol6/commands/smooth_commands.py b/parol6/commands/smooth_commands.py
deleted file mode 100644
index 394a754..0000000
--- a/parol6/commands/smooth_commands.py
+++ /dev/null
@@ -1,2155 +0,0 @@
-"""
-Smooth Motion Commands
-Contains all smooth trajectory generation commands for advanced robot movements
-"""
-
-import json
-import logging
-from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any, Optional, cast
-
-import numpy as np
-from numpy.typing import NDArray
-from spatialmath import SE3
-
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-from parol6.commands.base import ExecutionStatus, ExecutionStatusCode, MotionCommand
-from parol6.commands.cartesian_commands import MovePoseCommand
-from parol6.config import ENTRY_MM_TOL_MM, INTERVAL_S, NEAR_MM_TOL_MM
-from parol6.protocol.wire import CommandCode
-from parol6.server.command_registry import register_command
-from parol6.server.state import get_fkine_se3
-from parol6.smooth_motion import (
- CircularMotion,
- HelixMotion,
- SplineMotion,
- WaypointTrajectoryPlanner,
-)
-from parol6.smooth_motion.advanced import AdvancedMotionBlender
-from parol6.utils.errors import IKError
-from parol6.utils.frames import (
- pose6_trf_to_wrf,
- transform_command_params_to_wrf,
- transform_start_pose_if_needed,
-)
-from parol6.utils.ik import solve_ik
-
-if TYPE_CHECKING:
- from parol6.server.state import ControllerState
-
-logger = logging.getLogger(__name__)
-
-
-class BaseSmoothMotionCommand(MotionCommand):
- """
- Base class for all smooth motion commands with proper error tracking.
- """
-
- __slots__ = (
- "description",
- "trajectory",
- "trajectory_command",
- "transition_command",
- "specified_start_pose",
- "transition_complete",
- "trajectory_prepared",
- "trajectory_generated",
- )
-
- def __init__(self, description="smooth motion"):
- super().__init__()
- self.description = description
- self.trajectory: np.ndarray | None = None
- self.trajectory_command: SmoothTrajectoryCommand | None = None
- self.transition_command: MovePoseCommand | None = None
- self.specified_start_pose: NDArray[np.floating] | None = None
- self.transition_complete = False
- self.trajectory_prepared = False
- self.trajectory_generated = False
-
- # Parsing utility methods
- @staticmethod
- def parse_start_pose(start_str: str) -> NDArray[np.floating] | None:
- """
- Parse start pose from string.
-
- Args:
- start_str: Either 'CURRENT', 'NONE', or comma-separated pose values
-
- Returns:
- None for CURRENT/NONE, or numpy array of floats for specified pose
- """
- if start_str.upper() in ("CURRENT", "NONE"):
- return None
- else:
- try:
- return np.asarray(
- list(map(float, start_str.split(","))), dtype=np.float64
- )
- except Exception:
- raise ValueError(f"Invalid start pose format: {start_str}")
-
- @staticmethod
- def parse_timing(
- timing_type: str, timing_value: float, path_length: float
- ) -> float:
- """
- Convert timing specification to duration.
-
- Args:
- timing_type: 'DURATION' or 'SPEED'
- timing_value: Duration in seconds or speed in mm/s
- path_length: Estimated path length in mm
-
- Returns:
- Duration in seconds
- """
- if timing_type.upper() == "DURATION":
- return timing_value
- elif timing_type.upper() == "SPEED":
- if timing_value <= 0:
- raise ValueError(f"Speed must be positive, got {timing_value}")
- return path_length / timing_value
- else:
- raise ValueError(f"Unknown timing type: {timing_type}")
-
- @staticmethod
- def calculate_path_length(command_type: str, params: dict) -> float:
- """
- Estimate trajectory path length for timing calculations.
-
- Args:
- command_type: Type of smooth motion command
- params: Command parameters
-
- Returns:
- Estimated path length in mm
- """
- if command_type == "SMOOTH_CIRCLE":
- # Full circle circumference
- return 2 * np.pi * params.get("radius", 100)
- elif command_type in ["SMOOTH_ARC_CENTER", "SMOOTH_ARC_PARAM"]:
- # Estimate arc length (approximate)
- radius = params.get("radius", 100)
- angle = params.get("arc_angle", 90) # degrees
- return radius * np.radians(angle)
- elif command_type == "SMOOTH_HELIX":
- # Helix path length
- radius = params.get("radius", 100)
- height = params.get("height", 100)
- turns = (
- height / params.get("pitch", 10) if params.get("pitch", 10) > 0 else 1
- )
- return np.sqrt((2 * np.pi * radius * turns) ** 2 + height**2)
- else:
- # Default estimate
- return 300 # mm
-
- @staticmethod
- def parse_trajectory_type(
- parts: list[str], index: int
- ) -> tuple[str, float | None, int]:
- """
- Parse trajectory type and optional jerk limit.
-
- Args:
- parts: Command parts
- index: Current index in parts
-
- Returns:
- Tuple of (trajectory_type, jerk_limit, next_index)
- """
- if index >= len(parts):
- return "cubic", None, index
-
- traj_type = parts[index].lower()
- index += 1
-
- if traj_type not in ["cubic", "quintic", "s_curve"]:
- # Not a valid trajectory type, use default
- return "cubic", None, index - 1
-
- # Check for jerk limit if s_curve
- jerk_limit = None
- if traj_type == "s_curve" and index < len(parts):
- try:
- jerk_limit = float(parts[index])
- index += 1
- except (ValueError, IndexError):
- jerk_limit = 1000 # Default jerk limit
-
- return traj_type, jerk_limit, index
-
- def create_transition_command(
- self, current_pose: np.ndarray, target_pose: NDArray[np.floating]
- ) -> Optional["MovePoseCommand"]:
- """Create a MovePose command for smooth transition to start position."""
- pos_error = np.linalg.norm(target_pose[:3] - current_pose[:3])
-
- if pos_error < NEAR_MM_TOL_MM: # proximity threshold
- self.log_info(" -> Already near start position (error: %.1fmm)", pos_error)
- return None
-
- self.log_info(
- " -> Creating smooth transition to start (%.1fmm away)", pos_error
- )
-
- # Calculate transition speed based on distance
- if pos_error < 10:
- transition_speed = 20.0 # mm/s for short distances
- elif pos_error < 30:
- transition_speed = 30.0 # mm/s for medium distances
- else:
- transition_speed = 40.0 # mm/s for long distances
-
- transition_duration = max(pos_error / transition_speed, 0.5) # Minimum 0.5s
-
- # MovePoseCommand expects a list, so convert array to list here
- transition_cmd: MovePoseCommand = MovePoseCommand(
- target_pose.tolist(), transition_duration
- )
-
- return transition_cmd
-
- def get_current_pose_from_position(self, position_in):
- """Convert current position to pose [x,y,z,rx,ry,rz]"""
- current_pose_se3 = get_fkine_se3()
-
- current_xyz = current_pose_se3.t * 1000 # Convert to mm
- current_rpy = current_pose_se3.rpy(unit="deg", order="xyz")
- return np.concatenate([current_xyz, current_rpy])
-
- def do_setup(self, state: "ControllerState") -> None:
- """Minimal preparation - just check if we need a transition."""
- self.log_debug(" -> Preparing %s...", self.description)
-
- # If there's a specified start pose, prepare transition
- if self.specified_start_pose is not None:
- actual_current_pose = self.get_current_pose_from_position(state.Position_in)
- self.transition_command = self.create_transition_command(
- actual_current_pose, self.specified_start_pose
- )
-
- if self.transition_command:
- self.transition_command.setup(state)
- if not self.transition_command.is_valid:
- self.log_error(" -> ERROR: Cannot reach specified start position")
- self.fail("Cannot reach specified start position")
- return
- else:
- self.transition_command = None
-
- # DON'T generate trajectory yet - wait until execution
- self.trajectory_generated = False
- self.trajectory_prepared = False
- self.log_debug(
- " -> %s preparation complete (trajectory will be generated at execution)",
- self.description,
- )
-
- def generate_main_trajectory(self, effective_start_pose):
- """Override this in subclasses to generate the specific motion trajectory."""
- raise NotImplementedError("Subclasses must implement generate_main_trajectory")
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute transition first if needed, then generate and execute trajectory."""
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid smooth command")
- )
-
- # Execute transition first if needed
- if self.transition_command and not self.transition_complete:
- status = self.transition_command.execute_step(state)
- if status.code == ExecutionStatusCode.COMPLETED:
- self.log_info(" -> Transition complete")
- self.transition_complete = True
- # Continue to main trajectory generation next tick
- return ExecutionStatus.executing("Transition completed")
- elif status.code == ExecutionStatusCode.FAILED:
- self.fail(
- getattr(
- self.transition_command, "error_message", "Transition failed"
- )
- )
- self.finish()
- self.stop_and_idle(state)
- return ExecutionStatus.failed("Transition failed")
- else:
- return ExecutionStatus.executing("Transitioning")
-
- # Generate trajectory on first execution step (not during preparation!)
- if not self.trajectory_generated:
- # Get ACTUAL current position NOW
- actual_current_pose = self.get_current_pose_from_position(state.Position_in)
- self.log_info(
- " -> Generating %s from ACTUAL position: %s",
- self.description,
- [round(p, 1) for p in actual_current_pose[:3]],
- )
-
- # Generate trajectory from where we ACTUALLY are
- self.trajectory = self.generate_main_trajectory(actual_current_pose)
- if self.trajectory is None:
- raise RuntimeError("Smooth trajectory generator returned None")
- self.trajectory_command = SmoothTrajectoryCommand(
- self.trajectory, self.description
- )
-
- # Quick validation of first point only
- current_q = np.asarray(
- PAROL6_ROBOT.ops.steps_to_rad(state.Position_in), dtype=float
- )
- first_pose = self.trajectory[0]
- target_se3 = SE3(
- first_pose[0] / 1000, first_pose[1] / 1000, first_pose[2] / 1000
- ) * SE3.RPY(
- [float(first_pose[3]), float(first_pose[4]), float(first_pose[5])],
- unit="deg",
- order="xyz",
- )
-
- ik_result = solve_ik(
- cast(Any, PAROL6_ROBOT.robot), target_se3, current_q, jogging=False
- )
-
- if not ik_result.success:
- self.log_error(" -> ERROR: Cannot reach first trajectory point")
- self.finish()
- self.fail("Cannot reach trajectory start")
- self.stop_and_idle(state)
- return ExecutionStatus.failed(
- "Cannot reach trajectory start",
- error=IKError("Cannot reach trajectory start"),
- )
-
- self.trajectory_generated = True
- self.trajectory_prepared = True
-
- # Verify first point is close to current
- distance = np.linalg.norm(
- first_pose[:3] - np.array(actual_current_pose[:3])
- )
- if distance > 5.0:
- self.log_warning(
- " -> WARNING: First trajectory point %.1fmm from current!",
- distance,
- )
-
- # Execute main trajectory
- if self.trajectory_command and self.trajectory_prepared:
- status = self.trajectory_command.execute_step(state)
-
- # Check for errors in trajectory execution
- if (
- hasattr(self.trajectory_command, "error_state")
- and self.trajectory_command.error_state
- ):
- self.fail(self.trajectory_command.error_message)
-
- if status.code == ExecutionStatusCode.COMPLETED:
- self.finish()
- return ExecutionStatus.completed(f"Smooth {self.description} complete")
- elif status.code == ExecutionStatusCode.FAILED:
- self.finish()
- return status
- else:
- return ExecutionStatus.executing(f"Smooth {self.description}")
-
- self.finish()
- return ExecutionStatus.completed(f"Smooth {self.description} complete")
-
- def _generate_radial_entry(
- self,
- start_pose: Sequence[float],
- center: Sequence[float],
- normal: Sequence[float],
- radius: float,
- duration: float,
- sample_rate: float = 100.0,
- ) -> np.ndarray:
- """
- Generate a simple radial entry trajectory from the current pose to the circle/helix perimeter.
- Produces a straight-line interpolation in Cartesian space.
- """
- start_pose_arr = np.array(start_pose, dtype=float)
- center_arr = np.array(center, dtype=float)
- normal_arr = np.array(normal, dtype=float)
- if np.linalg.norm(normal_arr) > 0:
- normal_arr = normal_arr / np.linalg.norm(normal_arr)
-
- start_pos = start_pose_arr[:3]
- to_start = start_pos - center_arr
- # Project onto plane
- to_plane = to_start - np.dot(to_start, normal_arr) * normal_arr
- dist = float(np.linalg.norm(to_plane))
-
- if dist < 1e-6:
- # Choose arbitrary direction perpendicular to normal
- axis = np.array([1.0, 0.0, 0.0])
- if abs(np.dot(axis, normal_arr)) > 0.9:
- axis = np.array([0.0, 1.0, 0.0])
- direction = np.cross(normal_arr, axis)
- direction = direction / np.linalg.norm(direction)
- else:
- direction = to_plane / dist
-
- target_pos = center_arr + direction * float(radius)
- # Keep orientation constant
- target_orient = start_pose_arr[3:]
-
- # Number of samples
- n = max(2, int(max(0.05, float(duration)) * float(sample_rate)))
- ts = np.linspace(0.0, 1.0, n)
- traj = []
- for s in ts:
- pos = (1.0 - s) * start_pos + s * target_pos
- traj.append(np.concatenate([pos, target_orient]))
- return np.array(traj)
-
-
-class SmoothTrajectoryCommand:
- """Command class for executing pre-generated smooth trajectories."""
-
- def __init__(self, trajectory, description="smooth motion"):
- self.trajectory = np.array(trajectory)
- self.description = description
- self.trajectory_index = 0
- self.is_valid = True
- self.is_finished = False
- self.first_step = True
- self.error_state = False
- self.error_message = ""
-
- logger.debug(f"Initializing smooth {description} with {len(trajectory)} points")
-
- def setup(self, state: "ControllerState") -> None:
- """Skip validation - trajectory is already generated from correct position"""
- self.is_valid = True
- return
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """Execute one step of the smooth trajectory"""
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid smooth trajectory")
- )
-
- if self.trajectory_index >= len(self.trajectory):
- logger.info(f"Smooth {self.description} finished.")
- self.is_finished = True
- state.Command_out = CommandCode.IDLE
- state.Speed_out.fill(0)
- return ExecutionStatus.completed(f"Smooth {self.description} complete")
-
- # Get target pose for this step
- target_pose = self.trajectory[self.trajectory_index]
-
- # Convert to SE3
- target_se3 = SE3(
- target_pose[0] / 1000, target_pose[1] / 1000, target_pose[2] / 1000
- ) * SE3.RPY(
- [float(target_pose[3]), float(target_pose[4]), float(target_pose[5])],
- unit="deg",
- order="xyz",
- )
-
- # Get current joint configuration
- current_q = np.asarray(
- PAROL6_ROBOT.ops.steps_to_rad(state.Position_in), dtype=float
- )
-
- # Solve IK
- ik_result = solve_ik(
- cast(Any, PAROL6_ROBOT.robot), target_se3, current_q, jogging=False
- )
-
- if not ik_result.success:
- logger.error(f" -> IK failed at trajectory point {self.trajectory_index}")
- self.is_finished = True
- self.error_state = True
- self.error_message = (
- f"IK failed at point {self.trajectory_index}/{len(self.trajectory)}"
- )
- state.Command_out = CommandCode.IDLE
- state.Speed_out.fill(0)
- return ExecutionStatus.failed(
- self.error_message, error=IKError(self.error_message)
- )
-
- # Convert to steps
- target_steps = PAROL6_ROBOT.ops.rad_to_steps(ik_result.q)
-
- # ADD VELOCITY LIMITING - This prevents violent movements
- if self.trajectory_index > 0:
- # Vectorized per-tick clamping with 1.2x safety margin
- target_steps = PAROL6_ROBOT.ops.clamp_steps_delta(
- state.Position_in,
- np.asarray(target_steps, dtype=np.int32),
- INTERVAL_S,
- 1.2,
- )
-
- # Send position command (inline to avoid instance-method binding)
- np.copyto(
- state.Position_out, np.asarray(target_steps, dtype=np.int32), casting="no"
- )
- state.Speed_out.fill(0)
- state.Command_out = CommandCode.MOVE
-
- # Advance to next point
- self.trajectory_index += 1
-
- return ExecutionStatus.executing(f"Smooth {self.description}")
-
-
-@register_command("SMOOTH_CIRCLE")
-class SmoothCircleCommand(BaseSmoothMotionCommand):
- """Execute smooth circular motion."""
-
- __slots__ = (
- "center",
- "radius",
- "plane",
- "duration",
- "clockwise",
- "frame",
- "trajectory_type",
- "jerk_limit",
- "center_mode",
- "entry_mode",
- "normal_vector",
- "current_position_in",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth circle")
- self.center: NDArray[np.floating] | None = None
- self.radius: float = 100.0
- self.plane: str = "XY"
- self.duration: float = 5.0
- self.clockwise: bool = False
- self.frame: str = "WRF"
- self.trajectory_type: str = "cubic"
- self.jerk_limit: float | None = None
- self.center_mode: str = "ABSOLUTE"
- self.entry_mode: str = "NONE"
- self.normal_vector: NDArray | None = None
- self.current_position_in: NDArray[np.int32] | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_CIRCLE command.
- Format: SMOOTH_CIRCLE|center_x,y,z|radius|plane|frame|start_pose|timing_type|timing_value|[trajectory_type]|[jerk_limit]|[clockwise]
- """
- if parts[0].upper() != "SMOOTH_CIRCLE":
- return False, None
-
- if len(parts) < 8:
- return False, "SMOOTH_CIRCLE requires at least 8 parameters"
-
- try:
- # Parse center
- center_list = list(map(float, parts[1].split(",")))
- if len(center_list) != 3:
- return False, "Center must have 3 coordinates"
- self.center = np.asarray(center_list, dtype=np.float64)
-
- # Parse radius
- self.radius = float(parts[2])
-
- # Parse plane
- self.plane = parts[3].upper()
- if self.plane not in ["XY", "XZ", "YZ"]:
- return False, f"Invalid plane: {self.plane}"
-
- # Parse frame
- self.frame = parts[4].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse start pose
- self.specified_start_pose = self.parse_start_pose(parts[5])
-
- # Parse timing
- timing_type = parts[6].upper()
- timing_value = float(parts[7])
- path_length = 2 * np.pi * self.radius
- self.duration = self.parse_timing(timing_type, timing_value, path_length)
-
- # Parse optional trajectory type and jerk limit
- idx = 8
- if idx < len(parts):
- self.trajectory_type, self.jerk_limit, idx = self.parse_trajectory_type(
- parts, idx
- )
-
- # Parse optional clockwise flag
- if idx < len(parts) and parts[idx].upper() in ["CW", "CLOCKWISE", "TRUE"]:
- self.clockwise = True
- idx += 1
-
- # Parse optional center mode
- if idx < len(parts):
- self.center_mode = parts[idx].upper()
- if self.center_mode not in ["ABSOLUTE", "TOOL", "RELATIVE"]:
- self.center_mode = "ABSOLUTE"
- idx += 1
-
- # Parse optional entry mode
- if idx < len(parts):
- self.entry_mode = parts[idx].upper()
- if self.entry_mode not in ["AUTO", "TANGENT", "DIRECT", "NONE"]:
- self.entry_mode = "NONE"
-
- # Initialize description
- self.description = (
- f"circle (r={self.radius}mm, {self.frame}, {self.trajectory_type})"
- )
-
- return True, None
-
- except (ValueError, IndexError) as e:
- return False, f"Invalid SMOOTH_CIRCLE parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform parameters if in TRF, then prepare normally."""
-
- # Store current position for potential use in generate_main_trajectory
- self.current_position_in = np.asarray(state.Position_in, dtype=np.int32)
-
- if self.frame == "TRF":
- # Transform parameters to WRF
- params = {"center": self.center, "plane": self.plane}
- transformed = transform_command_params_to_wrf(
- "SMOOTH_CIRCLE", params, "TRF", state.Position_in
- )
-
- # Update with transformed values
- self.center = transformed["center"]
- self.normal_vector = transformed.get("normal_vector")
-
- if self.center is not None:
- logger.info(
- f" -> TRF Circle: center {self.center[:3].tolist()} (WRF), normal {self.normal_vector}"
- )
-
- # Transform start_pose if specified - convert array to list for the API
- if self.specified_start_pose is not None:
- result = transform_start_pose_if_needed(
- self.specified_start_pose.tolist(), self.frame, state.Position_in
- )
- if result is not None:
- self.specified_start_pose = np.asarray(result, dtype=np.float64)
-
- # Now do normal preparation with transformed parameters
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate circle starting from the actual start position."""
- motion_gen = CircularMotion()
-
- # Use transformed normal for TRF, or standard for WRF
- if self.normal_vector is not None:
- # TRF - use the transformed normal vector
- normal = np.array(self.normal_vector)
- logger.info(f" Using transformed normal: {normal.round(3)}")
- else:
- # WRF - use standard plane definition
- plane_normals = {"XY": [0, 0, 1], "XZ": [0, 1, 0], "YZ": [1, 0, 0]}
- normal = np.array(plane_normals.get(self.plane, [0, 0, 1]))
- logger.info(f" Using WRF plane {self.plane} with normal: {normal}")
-
- logger.info(
- f" Generating circle from position: {[round(p, 1) for p in effective_start_pose[:3]]}"
- )
- if self.center is not None:
- logger.info(f" Circle center: {[round(c, 1) for c in self.center]}")
-
- # Handle center_mode
- if self.center is not None:
- actual_center = self.center.copy()
- else:
- actual_center = np.array([0.0, 0.0, 0.0])
- if self.center_mode == "TOOL":
- # Center at current tool position
- actual_center = np.array(effective_start_pose[:3])
- logger.info(
- f" Center mode: TOOL - centering at current position {actual_center}"
- )
- elif self.center_mode == "RELATIVE":
- # Center relative to current position
- center_np = (
- np.asarray(self.center, dtype=float)
- if self.center is not None
- else np.zeros(3)
- )
- actual_center = np.array(
- [effective_start_pose[i] + center_np[i] for i in range(3)]
- )
- logger.info(
- f" Center mode: RELATIVE - center offset from current position to {actual_center}"
- )
- else:
- # ABSOLUTE mode uses provided center as-is
- actual_center = np.array(actual_center)
-
- # Check if entry trajectory might be needed
- distance_to_center = np.linalg.norm(
- np.array(effective_start_pose[:3]) - np.array(actual_center)
- )
- distance_from_perimeter = float(abs(distance_to_center - self.radius))
-
- # Automatically generate entry trajectory if needed
- entry_trajectory = None
- if distance_from_perimeter > ENTRY_MM_TOL_MM: # perimeter tolerance
- effective_entry_mode = self.entry_mode
-
- # Auto-detect need for entry if not specified
- if (
- self.entry_mode == "NONE" and distance_from_perimeter > 5.0
- ): # Auto-enable for > 5mm
- logger.warning(
- f" Robot is {distance_from_perimeter:.1f}mm from circle perimeter - auto-enabling entry trajectory"
- )
- effective_entry_mode = "AUTO"
-
- if effective_entry_mode != "NONE":
- logger.info(
- f" Generating {effective_entry_mode} entry trajectory (distance: {distance_from_perimeter:.1f}mm)"
- )
-
- # Calculate entry duration based on distance (0.5s min, 2.0s max)
- entry_duration = float(
- min(2.0, max(0.5, float(distance_from_perimeter) / 50.0))
- )
-
- # Generate entry trajectory (radial approach)
- entry_trajectory = self._generate_radial_entry(
- effective_start_pose.tolist()
- if hasattr(effective_start_pose, "tolist")
- else list(effective_start_pose),
- actual_center.tolist()
- if hasattr(actual_center, "tolist")
- else list(actual_center),
- normal.tolist() if hasattr(normal, "tolist") else list(normal),
- float(self.radius),
- entry_duration,
- )
-
- if entry_trajectory is not None and len(entry_trajectory) > 0:
- logger.info(
- f" Entry trajectory generated: {len(entry_trajectory)} points over {entry_duration:.1f}s"
- )
-
- # Generate circle with specified trajectory profile
- trajectory = motion_gen.generate_circle_with_profile(
- center=actual_center, # Use adjusted center
- radius=self.radius,
- normal=normal, # This normal now correctly represents the plane
- start_point=effective_start_pose, # Pass full pose including orientation
- duration=self.duration,
- trajectory_type=self.trajectory_type,
- jerk_limit=self.jerk_limit,
- )
-
- if self.clockwise:
- trajectory = trajectory[::-1]
-
- # Update orientations to match start pose
- for i in range(len(trajectory)):
- trajectory[i][3:] = effective_start_pose[3:]
-
- # Concatenate entry trajectory if it exists
- if entry_trajectory is not None and len(entry_trajectory) > 0:
- full_trajectory = np.concatenate([entry_trajectory, trajectory])
- return full_trajectory
- else:
- return trajectory
-
-
-@register_command("SMOOTH_ARC_CENTER")
-class SmoothArcCenterCommand(BaseSmoothMotionCommand):
- """Execute smooth arc motion defined by center point."""
-
- __slots__ = (
- "end_pose",
- "center",
- "duration",
- "clockwise",
- "frame",
- "trajectory_type",
- "jerk_limit",
- "normal_vector",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth arc (center)")
- self.end_pose: list[float] | None = None
- self.center: list[float] | None = None
- self.duration: float = 5.0
- self.clockwise: bool = False
- self.frame: str = "WRF"
- self.trajectory_type: str = "cubic"
- self.jerk_limit: float | None = None
- self.normal_vector: NDArray | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_ARC_CENTER command.
- Format: SMOOTH_ARC_CENTER|end_x,y,z,rx,ry,rz|center_x,y,z|frame|start_pose|timing_type|timing_value|[trajectory_type]|[jerk_limit]|[clockwise]
- """
- if parts[0].upper() != "SMOOTH_ARC_CENTER":
- return False, None
-
- if len(parts) < 7:
- return False, "SMOOTH_ARC_CENTER requires at least 7 parameters"
-
- try:
- # Parse end pose
- self.end_pose = list(map(float, parts[1].split(",")))
- if len(self.end_pose) != 6:
- return False, "End pose must have 6 values (x,y,z,rx,ry,rz)"
-
- # Parse center
- self.center = list(map(float, parts[2].split(",")))
- if len(self.center) != 3:
- return False, "Center must have 3 coordinates"
-
- # Parse frame
- self.frame = parts[3].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse start pose
- self.specified_start_pose = self.parse_start_pose(parts[4])
-
- # Parse timing
- timing_type = parts[5].upper()
- timing_value = float(parts[6])
- # Estimate arc length
- path_length = 300 # Default estimate, could be improved
- self.duration = self.parse_timing(timing_type, timing_value, path_length)
-
- # Parse optional trajectory type and jerk limit
- idx = 7
- if idx < len(parts):
- self.trajectory_type, self.jerk_limit, idx = self.parse_trajectory_type(
- parts, idx
- )
-
- # Parse optional clockwise flag
- if idx < len(parts) and parts[idx].upper() in ["CW", "CLOCKWISE", "TRUE"]:
- self.clockwise = True
-
- # Initialize description
- self.description = (
- f"arc (center-based, {self.frame}, {self.trajectory_type})"
- )
-
- return True, None
-
- except (ValueError, IndexError) as e:
- return False, f"Invalid SMOOTH_ARC_CENTER parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform parameters if in TRF."""
-
- if self.frame == "TRF":
- params = {"end_pose": self.end_pose, "center": self.center}
- transformed = transform_command_params_to_wrf(
- "SMOOTH_ARC_CENTER", params, "TRF", state.Position_in
- )
- self.end_pose = transformed["end_pose"]
- self.center = transformed["center"]
- self.normal_vector = transformed.get("normal_vector")
-
- # Transform start_pose if specified
- _sp = transform_start_pose_if_needed(
- self.specified_start_pose.tolist()
- if self.specified_start_pose is not None
- else None,
- self.frame,
- state.Position_in,
- )
- self.specified_start_pose = (
- np.asarray(_sp, dtype=np.float64) if _sp is not None else None
- )
-
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate arc from actual start to end with direct velocity profile."""
- motion_gen = CircularMotion()
-
- # Ensure required parameters are present for typing
- assert self.end_pose is not None
- assert self.center is not None
-
- # Use new direct profile generation method
- trajectory = motion_gen.generate_arc_with_profile(
- effective_start_pose,
- self.end_pose,
- self.center,
- normal=self.normal_vector, # Use transformed normal if TRF
- clockwise=self.clockwise,
- duration=self.duration,
- trajectory_type=self.trajectory_type,
- jerk_limit=self.jerk_limit,
- )
-
- return trajectory
-
-
-@register_command("SMOOTH_ARC_PARAM")
-class SmoothArcParamCommand(BaseSmoothMotionCommand):
- """Execute smooth arc motion defined by radius and angle."""
-
- __slots__ = (
- "end_pose",
- "radius",
- "arc_angle",
- "duration",
- "clockwise",
- "frame",
- "trajectory_type",
- "jerk_limit",
- "normal_vector",
- "current_position_in",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth arc (param)")
- self.end_pose: list[float] | None = None
- self.radius: float = 100.0
- self.arc_angle: float = 90.0
- self.duration: float = 5.0
- self.clockwise: bool = False
- self.frame: str = "WRF"
- self.trajectory_type: str = "cubic"
- self.jerk_limit: float | None = None
- self.normal_vector: NDArray | None = None
- self.current_position_in: NDArray[np.int32] | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_ARC_PARAM command.
- Format: SMOOTH_ARC_PARAM|end_x,y,z,rx,ry,rz|radius|arc_angle|frame|start_pose|timing_type|timing_value|[trajectory_type]|[jerk_limit]|[clockwise]
- """
- if parts[0].upper() != "SMOOTH_ARC_PARAM":
- return False, None
-
- if len(parts) < 8:
- return False, "SMOOTH_ARC_PARAM requires at least 8 parameters"
-
- try:
- # Parse end pose
- self.end_pose = list(map(float, parts[1].split(",")))
- if len(self.end_pose) != 6:
- return False, "End pose must have 6 values (x,y,z,rx,ry,rz)"
-
- # Parse radius and arc angle
- self.radius = float(parts[2])
- self.arc_angle = float(parts[3])
-
- # Parse frame
- self.frame = parts[4].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse start pose
- self.specified_start_pose = self.parse_start_pose(parts[5])
-
- # Parse timing
- timing_type = parts[6].upper()
- timing_value = float(parts[7])
- path_length = self.radius * np.radians(self.arc_angle)
- self.duration = self.parse_timing(timing_type, timing_value, path_length)
-
- # Parse optional trajectory type and jerk limit
- idx = 8
- if idx < len(parts):
- self.trajectory_type, self.jerk_limit, idx = self.parse_trajectory_type(
- parts, idx
- )
-
- # Parse optional clockwise flag
- if idx < len(parts) and parts[idx].upper() in ["CW", "CLOCKWISE", "TRUE"]:
- self.clockwise = True
-
- # Initialize description
- self.description = f"parametric arc (r={self.radius}mm, θ={self.arc_angle}°, {self.frame}, {self.trajectory_type})"
-
- return True, None
-
- except (ValueError, IndexError) as e:
- return False, f"Invalid SMOOTH_ARC_PARAM parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform parameters if in TRF, then prepare normally."""
-
- self.current_position_in = state.Position_in
-
- if self.frame == "TRF":
- # Transform parameters to WRF
- params = {
- "end_pose": self.end_pose,
- "plane": "XY", # Default plane for parametric arc
- }
- transformed = transform_command_params_to_wrf(
- "SMOOTH_ARC_PARAM", params, "TRF", state.Position_in
- )
-
- # Update with transformed values
- self.end_pose = transformed["end_pose"]
- self.normal_vector = transformed.get("normal_vector")
-
- if self.end_pose is not None:
- logger.info(f" -> TRF Parametric Arc: end {self.end_pose[:3]} (WRF)")
- else:
- logger.info(
- " -> TRF Parametric Arc: end pose unavailable after transform"
- )
-
- # Transform start_pose if specified
- _sp = transform_start_pose_if_needed(
- self.specified_start_pose.tolist()
- if self.specified_start_pose is not None
- else None,
- self.frame,
- state.Position_in,
- )
- self.specified_start_pose = (
- np.asarray(_sp, dtype=np.float64) if _sp is not None else None
- )
-
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate arc based on radius and angle from actual start."""
- assert self.end_pose is not None
- # Get start and end positions
- start_xyz = np.array(effective_start_pose[:3])
- end_xyz = np.array(self.end_pose[:3])
-
- # If we have a transformed normal (TRF), use it to define the arc plane
- if self.normal_vector is not None:
- normal = np.array(self.normal_vector)
-
- # Find center of arc using radius and angle
- chord_vec = end_xyz - start_xyz
- chord_length = np.linalg.norm(chord_vec)
-
- if chord_length > 2 * self.radius:
- logger.warning(
- f" -> Warning: Points too far apart ({chord_length:.1f}mm) for radius {self.radius}mm"
- )
- self.radius = float(chord_length) / 2 + 1
-
- # Calculate center position using the normal vector
- chord_mid = (start_xyz + end_xyz) / 2
-
- # Height from chord midpoint to arc center
- if chord_length > 0:
- _hval = max(0.0, float(self.radius**2 - (chord_length / 2) ** 2))
- h = float(np.sqrt(_hval))
-
- # Find perpendicular in the plane defined by normal
- chord_dir = chord_vec / chord_length
- perp_in_plane = np.cross(normal, chord_dir)
- if np.linalg.norm(perp_in_plane) > 0.001:
- perp_in_plane = perp_in_plane / np.linalg.norm(perp_in_plane)
- else:
- # Chord is parallel to normal (shouldn't happen)
- perp_in_plane = np.array([1, 0, 0])
-
- # Arc center
- if self.clockwise:
- center_3d = chord_mid - h * perp_in_plane
- else:
- center_3d = chord_mid + h * perp_in_plane
- else:
- center_3d = start_xyz
-
- else:
- # WRF - use XY plane (standard behavior)
- normal = np.array([0, 0, 1])
-
- # Calculate arc center in XY plane
- start_xy = start_xyz[:2]
- end_xy = end_xyz[:2]
-
- # Midpoint
- mid = (start_xy + end_xy) / 2
-
- # Distance between points
- d = np.linalg.norm(end_xy - start_xy)
-
- if d > 2 * self.radius:
- logger.warning(
- f" -> Warning: Points too far apart ({d:.1f}mm) for radius {self.radius}mm"
- )
- self.radius = float(d) / 2 + 1
-
- # Height of arc center from midpoint
- _hval2 = max(0.0, float(self.radius**2 - (d / 2) ** 2))
- h = float(np.sqrt(_hval2))
-
- # Perpendicular direction
- if d > 0:
- perp = np.array([-(end_xy[1] - start_xy[1]), end_xy[0] - start_xy[0]])
- perp = perp / np.linalg.norm(perp)
- else:
- perp = np.array([1, 0])
-
- # Arc center (choose based on clockwise)
- if self.clockwise:
- center_2d = mid - h * perp
- else:
- center_2d = mid + h * perp
-
- # Use average Z for center
- center_3d = [center_2d[0], center_2d[1], (start_xyz[2] + end_xyz[2]) / 2]
-
- # Generate arc trajectory with direct velocity profile
- motion_gen = CircularMotion()
-
- # Use new direct profile generation method
- trajectory = motion_gen.generate_arc_with_profile(
- effective_start_pose,
- self.end_pose,
- center_3d,
- normal=normal if self.normal_vector is not None else None,
- clockwise=self.clockwise,
- duration=self.duration,
- trajectory_type=self.trajectory_type,
- jerk_limit=self.jerk_limit,
- )
-
- return trajectory
-
-
-@register_command("SMOOTH_HELIX")
-class SmoothHelixCommand(BaseSmoothMotionCommand):
- """Execute smooth helical motion."""
-
- __slots__ = (
- "center",
- "radius",
- "pitch",
- "height",
- "duration",
- "clockwise",
- "frame",
- "trajectory_type",
- "jerk_limit",
- "helix_axis",
- "up_vector",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth helix")
- self.center: list[float] | None = None
- self.radius: float = 100.0
- self.pitch: float = 10.0
- self.height: float = 100.0
- self.duration: float = 5.0
- self.clockwise: bool = False
- self.frame: str = "WRF"
- self.trajectory_type: str = "cubic"
- self.jerk_limit: float | None = None
- self.helix_axis: NDArray | None = None
- self.up_vector: NDArray | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_HELIX command.
- Format: SMOOTH_HELIX|center_x,y,z|radius|pitch|height|frame|start_pose|timing_type|timing_value|[trajectory_type]|[jerk_limit]|[clockwise]
- """
- if parts[0].upper() != "SMOOTH_HELIX":
- return False, None
-
- if len(parts) < 9:
- return False, "SMOOTH_HELIX requires at least 9 parameters"
-
- try:
- # Parse center
- self.center = list(map(float, parts[1].split(",")))
- if len(self.center) != 3:
- return False, "Center must have 3 coordinates"
-
- # Parse radius, pitch, height
- self.radius = float(parts[2])
- self.pitch = float(parts[3])
- self.height = float(parts[4])
-
- # Parse frame
- self.frame = parts[5].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse start pose
- self.specified_start_pose = self.parse_start_pose(parts[6])
-
- # Parse timing
- timing_type = parts[7].upper()
- timing_value = float(parts[8])
- turns = self.height / self.pitch if self.pitch > 0 else 1
- path_length = np.sqrt(
- (2 * np.pi * self.radius * turns) ** 2 + self.height**2
- )
- self.duration = self.parse_timing(timing_type, timing_value, path_length)
-
- # Parse optional trajectory type and jerk limit
- idx = 9
- if idx < len(parts):
- self.trajectory_type, self.jerk_limit, idx = self.parse_trajectory_type(
- parts, idx
- )
-
- # Parse optional clockwise flag
- if idx < len(parts) and parts[idx].upper() in ["CW", "CLOCKWISE", "TRUE"]:
- self.clockwise = True
-
- # Initialize description
- self.description = (
- f"helix (h={self.height}mm, {self.frame}, {self.trajectory_type})"
- )
-
- return True, None
-
- except (ValueError, IndexError) as e:
- return False, f"Invalid SMOOTH_HELIX parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform parameters if in TRF."""
-
- if self.frame == "TRF":
- params = {"center": self.center}
- transformed = transform_command_params_to_wrf(
- "SMOOTH_HELIX", params, "TRF", state.Position_in
- )
- self.center = transformed["center"]
- self.helix_axis = transformed.get("helix_axis", [0, 0, 1])
- self.up_vector = transformed.get("up_vector", [0, 1, 0])
-
- if self.specified_start_pose is not None:
- params = {"start_pose": self.specified_start_pose.tolist()}
- transformed = transform_command_params_to_wrf(
- "SMOOTH_HELIX", params, "TRF", state.Position_in
- )
- _sp = transformed.get("start_pose")
- self.specified_start_pose = (
- np.asarray(_sp, dtype=np.float64) if _sp is not None else None
- )
-
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate helix with entry trajectory if needed and proper trajectory profile."""
- helix_gen = HelixMotion()
-
- # Get helix axis (default Z for WRF, transformed for TRF)
- if self.helix_axis is not None:
- axis = self.helix_axis
- else:
- axis = np.array([0, 0, 1], dtype=float) # Default vertical
-
- # Calculate distance from start position to helix start point
- start_pos = np.array(effective_start_pose[:3])
- center_np = np.array(self.center)
-
- # Project start position onto the helix plane (perpendicular to axis)
- axis_np = np.array(axis)
- axis_np = axis_np / np.linalg.norm(axis_np)
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, axis_np) * axis_np
- dist_to_center = np.linalg.norm(to_start_plane)
-
- # Check if entry trajectory is needed
- entry_trajectory = None
- distance_from_perimeter = abs(dist_to_center - self.radius)
-
- if (
- distance_from_perimeter > self.radius * 0.05
- ): # More than 5% off the perimeter
- logger.info(
- f" Generating helix entry trajectory (distance from perimeter: {distance_from_perimeter:.1f}mm)"
- )
-
- # Calculate entry duration based on distance (0.5s min, 2.0s max)
- entry_duration = float(
- min(2.0, max(0.5, float(distance_from_perimeter) / 50.0))
- )
-
- # Generate entry trajectory to helix start position
- CircularMotion()
-
- # Calculate the target position on the helix perimeter
- if dist_to_center > 0.001:
- direction = to_start_plane / dist_to_center
- else:
- # If exactly at center, move to any point on perimeter
- u = (
- np.array([1, 0, 0])
- if abs(axis_np[0]) < 0.9
- else np.array([0, 1, 0])
- )
- u = u - np.dot(u, axis_np) * axis_np
- direction = u / np.linalg.norm(u)
-
- target_on_perimeter = center_np + direction * self.radius
- # For helix, we want to start at the correct Z level
- target_on_perimeter[2] = start_pos[2] # Keep same Z as start
-
- # Generate smooth approach trajectory
- entry_trajectory = self._generate_radial_entry(
- effective_start_pose.tolist()
- if hasattr(effective_start_pose, "tolist")
- else list(effective_start_pose),
- center_np.tolist(),
- axis_np.tolist(),
- float(self.radius),
- entry_duration,
- )
-
- if entry_trajectory is not None and len(entry_trajectory) > 0:
- logger.info(
- f" Helix entry trajectory generated: {len(entry_trajectory)} points over {entry_duration:.1f}s"
- )
-
- # Generate main helix trajectory
- trajectory = helix_gen.generate_helix_with_profile(
- center=center_np,
- radius=self.radius,
- pitch=self.pitch,
- height=self.height,
- axis=axis_np,
- duration=self.duration,
- trajectory_type=self.trajectory_type,
- jerk_limit=self.jerk_limit,
- start_point=effective_start_pose,
- clockwise=self.clockwise,
- )
-
- # Update orientations to match effective start
- for i in range(len(trajectory)):
- trajectory[i][3:] = effective_start_pose[3:]
-
- # Concatenate entry trajectory if it exists
- if entry_trajectory is not None and len(entry_trajectory) > 0:
- full_trajectory = np.concatenate([entry_trajectory, trajectory])
- return full_trajectory
- else:
- return np.array(trajectory)
-
-
-@register_command("SMOOTH_SPLINE")
-class SmoothSplineCommand(BaseSmoothMotionCommand):
- """Execute smooth spline motion through waypoints."""
-
- __slots__ = (
- "waypoints",
- "duration",
- "frame",
- "trajectory_type",
- "jerk_limit",
- "current_position_in",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth spline")
- self.waypoints: list[list[float]] | None = None
- self.duration: float = 5.0
- self.frame: str = "WRF"
- self.trajectory_type: str = "cubic"
- self.jerk_limit: float | None = None
- self.current_position_in: NDArray[np.int32] | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_SPLINE command.
- Format: SMOOTH_SPLINE|wp1_x,y,z,rx,ry,rz;wp2_x,y,z,rx,ry,rz;...|frame|start_pose|timing_type|timing_value|[trajectory_type]|[jerk_limit]
- """
- if parts[0].upper() != "SMOOTH_SPLINE":
- return False, None
-
- if len(parts) < 6:
- return False, "SMOOTH_SPLINE requires at least 6 parameters"
-
- # Support alternate wire format:
- # SMOOTH_SPLINE||||||[trajectory_type]|[jerk?]|
- if len(parts) >= 7 and parts[1].isdigit():
- try:
- num = int(parts[1])
- self.frame = parts[2].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
- self.specified_start_pose = self.parse_start_pose(parts[3])
- timing_type = parts[4].upper()
- timing_value = float(parts[5])
- idx = 6
- if idx < len(parts) and parts[idx].lower() in [
- "cubic",
- "quintic",
- "s_curve",
- ]:
- self.trajectory_type = parts[idx].lower()
- idx += 1
- if self.trajectory_type == "s_curve" and idx < len(parts):
- self.jerk_limit = float(parts[idx])
- idx += 1
- needed = num * 6
- if len(parts) - idx < needed:
- return False, "Insufficient waypoint values"
- vals = list(map(float, parts[idx : idx + needed]))
- self.waypoints = [vals[i : i + 6] for i in range(0, needed, 6)]
- # Estimate path length
- path_length = 0.0
- for i in range(1, len(self.waypoints)):
- path_length += float(
- np.linalg.norm(
- np.array(self.waypoints[i][:3])
- - np.array(self.waypoints[i - 1][:3])
- )
- )
- self.duration = self.parse_timing(
- timing_type, timing_value, path_length
- )
- self.description = f"spline ({len(self.waypoints or [])} points, {self.frame}, {self.trajectory_type})"
- return True, None
- except Exception as e:
- return False, f"Invalid SMOOTH_SPLINE parameters: {e}"
-
- try:
- # Parse waypoints (semicolon separated)
- waypoint_strs = parts[1].split(";")
- self.waypoints = []
- for wp_str in waypoint_strs:
- wp = list(map(float, wp_str.split(",")))
- if len(wp) != 6:
- return False, "Each waypoint must have 6 values (x,y,z,rx,ry,rz)"
- self.waypoints.append(wp)
-
- if len(self.waypoints) < 2:
- return False, "SMOOTH_SPLINE requires at least 2 waypoints"
-
- # Parse frame
- self.frame = parts[2].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse start pose
- self.specified_start_pose = self.parse_start_pose(parts[3])
-
- # Parse timing
- timing_type = parts[4].upper()
- timing_value = float(parts[5])
- # Estimate path length from waypoints
- path_length = 0.0
- for i in range(1, len(self.waypoints)):
- path_length += float(
- np.linalg.norm(
- np.array(self.waypoints[i][:3])
- - np.array(self.waypoints[i - 1][:3])
- )
- )
- self.duration = self.parse_timing(
- timing_type, timing_value, float(path_length)
- )
-
- # Parse optional trajectory type and jerk limit
- idx = 6
- if idx < len(parts):
- self.trajectory_type, self.jerk_limit, idx = self.parse_trajectory_type(
- parts, idx
- )
-
- # Initialize description
- self.description = f"spline ({len(self.waypoints or [])} points, {self.frame}, {self.trajectory_type})"
-
- return True, None
-
- except (ValueError, IndexError) as e:
- return False, f"Invalid SMOOTH_SPLINE parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform parameters if in TRF, then prepare normally."""
-
- self.current_position_in = state.Position_in
-
- if self.frame == "TRF":
- # Transform waypoints to WRF
- params = {"waypoints": self.waypoints}
- transformed = transform_command_params_to_wrf(
- "SMOOTH_SPLINE", params, "TRF", state.Position_in
- )
-
- # Update with transformed values
- self.waypoints = transformed["waypoints"]
-
- logger.info(
- f" -> TRF Spline: transformed {len(self.waypoints or [])} waypoints to WRF"
- )
-
- # Transform start_pose if specified
- _sp = transform_start_pose_if_needed(
- self.specified_start_pose.tolist()
- if self.specified_start_pose is not None
- else None,
- self.frame,
- state.Position_in,
- )
- self.specified_start_pose = (
- np.asarray(_sp, dtype=np.float64) if _sp is not None else None
- )
-
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate spline starting from actual position."""
- assert self.waypoints is not None
- wps = self.waypoints
- motion_gen = SplineMotion()
-
- # Always start from the effective start pose
- first_wp_error = np.linalg.norm(
- np.array(wps[0][:3]) - np.array(effective_start_pose[:3])
- )
-
- if first_wp_error > 5.0:
- # First waypoint is far, prepend the start position
- modified_waypoints = [effective_start_pose] + wps
- logger.info(
- f" Added start position as first waypoint (distance: {first_wp_error:.1f}mm)"
- )
- else:
- # Replace first waypoint with actual start to ensure continuity
- modified_waypoints = [effective_start_pose] + wps[1:]
- logger.info(" Replaced first waypoint with actual start position")
-
- timestamps = np.linspace(0, self.duration, len(modified_waypoints))
-
- # Generate the spline trajectory based on type
- if self.trajectory_type == "quintic":
- trajectory = motion_gen.generate_quintic_spline(
- modified_waypoints, timestamps=None
- )
- elif self.trajectory_type == "s_curve":
- trajectory = motion_gen.generate_scurve_spline(
- modified_waypoints,
- duration=self.duration,
- jerk_limit=self.jerk_limit if self.jerk_limit else 1000,
- )
- else: # cubic (default)
- trajectory = motion_gen.generate_cubic_spline(
- modified_waypoints, timestamps.tolist()
- )
-
- logger.debug(f" Generated spline with {len(trajectory)} points")
-
- return trajectory
-
-
-@register_command("SMOOTH_BLEND")
-class SmoothBlendCommand(BaseSmoothMotionCommand):
- """Execute smooth blended trajectory through multiple segments."""
-
- __slots__ = (
- "segment_definitions",
- "blend_time",
- "frame",
- "trajectory_type",
- "jerk_limit",
- "current_position_in",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth blend")
- self.segment_definitions: list[dict] | None = None
- self.blend_time: float = 0.5
- self.frame: str = "WRF"
- self.trajectory_type: str = "cubic"
- self.jerk_limit: float | None = None
- self.current_position_in: NDArray[np.int32] | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_BLEND command.
- Format: SMOOTH_BLEND|segments_json|blend_time|frame|start_pose|[trajectory_type]|[jerk_limit]
- """
- if parts[0].upper() != "SMOOTH_BLEND":
- return False, None
-
- if len(parts) < 5:
- return False, "SMOOTH_BLEND requires at least 5 parameters"
-
- # New wire format: SMOOTH_BLEND|num_segments|blend_time|frame|start_pose|timing|SEG1||SEG2||...
- if parts[1].isdigit():
- try:
- int(parts[1])
- self.blend_time = float(parts[2])
- self.frame = parts[3].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
- self.specified_start_pose = self.parse_start_pose(parts[4])
- # parts[5] timing token (DEFAULT/DURATION/SPEED) not strictly needed for segments
- # Reconstruct remainder and split by '||' to obtain segments
- remainder = "|".join(parts[6:])
- raw_segments = [s for s in remainder.split("||") if s.strip() != ""]
- seg_defs = []
- for seg_str in raw_segments:
- tokens = [t for t in seg_str.split("|") if t != ""]
- if not tokens:
- continue
- seg_type = tokens[0].upper()
- if seg_type == "LINE":
- if len(tokens) < 3:
- return False, "LINE segment requires end pose and duration"
- end = list(map(float, tokens[1].split(",")))
- duration = float(tokens[2])
- seg_defs.append(
- {"type": "LINE", "end": end, "duration": duration}
- )
- elif seg_type == "CIRCLE":
- if len(tokens) < 6:
- return (
- False,
- "CIRCLE segment requires center,radius,plane,duration,clockwise",
- )
- center = list(map(float, tokens[1].split(",")))
- radius = float(tokens[2])
- plane = tokens[3].upper()
- duration = float(tokens[4])
- clockwise = tokens[5] in (
- "1",
- "TRUE",
- "True",
- "true",
- "CW",
- "CLOCKWISE",
- )
- seg_defs.append(
- {
- "type": "CIRCLE",
- "center": center,
- "radius": radius,
- "plane": plane,
- "duration": duration,
- "clockwise": clockwise,
- }
- )
- elif seg_type == "ARC":
- if len(tokens) < 5:
- return (
- False,
- "ARC segment requires end,center,duration,clockwise",
- )
- end = list(map(float, tokens[1].split(",")))
- center = list(map(float, tokens[2].split(",")))
- duration = float(tokens[3])
- clockwise = tokens[4] in (
- "1",
- "TRUE",
- "True",
- "true",
- "CW",
- "CLOCKWISE",
- )
- seg_defs.append(
- {
- "type": "ARC",
- "end": end,
- "center": center,
- "duration": duration,
- "clockwise": clockwise,
- }
- )
- elif seg_type == "SPLINE":
- if len(tokens) < 4:
- return (
- False,
- "SPLINE segment requires count,waypoints,duration",
- )
- count = int(tokens[1])
- wp_tokens = tokens[2].split(";")
- if len(wp_tokens) != count:
- return False, "SPLINE waypoint count mismatch"
- waypoints = [
- list(map(float, wp.split(","))) for wp in wp_tokens
- ]
- duration = float(tokens[3])
- seg_defs.append(
- {
- "type": "SPLINE",
- "waypoints": waypoints,
- "duration": duration,
- }
- )
- else:
- return False, f"Invalid segment type: {seg_type}"
- self.segment_definitions = seg_defs
- self.description = f"blended ({len(self.segment_definitions or [])} segments, {self.frame}, {self.trajectory_type})"
- return True, None
- except Exception as e:
- return False, f"Invalid SMOOTH_BLEND parameters: {e}"
-
- try:
- # Parse segment definitions (JSON format)
- self.segment_definitions = json.loads(parts[1])
-
- # Validate segment definitions
- if not isinstance(self.segment_definitions, list):
- return False, "Segments must be a list"
-
- for seg in self.segment_definitions:
- if "type" not in seg:
- return False, "Each segment must have a 'type' field"
- if seg["type"] not in ["LINE", "ARC", "CIRCLE", "SPLINE"]:
- return False, f"Invalid segment type: {seg['type']}"
-
- # Parse blend time
- self.blend_time = float(parts[2])
-
- # Parse frame
- self.frame = parts[3].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse start pose
- self.specified_start_pose = self.parse_start_pose(parts[4])
-
- # Parse optional trajectory type and jerk limit
- idx = 5
- if idx < len(parts):
- self.trajectory_type, self.jerk_limit, idx = self.parse_trajectory_type(
- parts, idx
- )
-
- # Initialize description
- self.description = f"blended ({len(self.segment_definitions)} segments, {self.frame}, {self.trajectory_type})"
-
- return True, None
-
- except (ValueError, IndexError, json.JSONDecodeError) as e:
- return False, f"Invalid SMOOTH_BLEND parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform parameters if in TRF, then prepare normally."""
-
- self.current_position_in = state.Position_in
-
- if self.frame == "TRF":
- # Transform all segment definitions to WRF
- params = {"segments": self.segment_definitions}
- transformed = transform_command_params_to_wrf(
- "SMOOTH_BLEND", params, "TRF", state.Position_in
- )
-
- # Update with transformed values
- self.segment_definitions = transformed["segments"]
-
- logger.info(
- f" -> TRF Blend: transformed {len(self.segment_definitions or [])} segments to WRF"
- )
-
- # Transform start_pose if specified
- _sp = transform_start_pose_if_needed(
- self.specified_start_pose.tolist()
- if self.specified_start_pose is not None
- else None,
- self.frame,
- state.Position_in,
- )
- self.specified_start_pose = (
- np.asarray(_sp, dtype=np.float64) if _sp is not None else None
- )
-
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate blended trajectory starting from actual position."""
- assert self.segment_definitions is not None
- trajectories: list[np.ndarray] = []
- motion_gen_circle = CircularMotion()
- motion_gen_spline = SplineMotion()
-
- # Always start from effective start pose
- last_end_pose = effective_start_pose
-
- for i, seg_def in enumerate(self.segment_definitions):
- seg_type = seg_def["type"]
-
- # First segment always starts from effective_start_pose
- segment_start = effective_start_pose if i == 0 else last_end_pose
-
- if seg_type == "LINE":
- end = seg_def["end"]
- duration = seg_def["duration"]
-
- # Generate line segment from actual position
- num_points = int(duration * 100)
- timestamps = np.linspace(0, duration, num_points)
-
- points: list[list[float]] = []
- for t in timestamps:
- s = t / duration if duration > 0 else 1
- # Interpolate position
- pos = [segment_start[j] * (1 - s) + end[j] * s for j in range(3)]
- # Interpolate orientation
- orient = [
- segment_start[j + 3] * (1 - s) + end[j + 3] * s
- for j in range(3)
- ]
- points.append(pos + orient)
-
- traj_arr = np.array(points, dtype=float)
- trajectories.append(traj_arr)
- last_end_pose = end
-
- elif seg_type == "ARC":
- end = seg_def["end"]
- center = seg_def["center"]
- duration = seg_def["duration"]
- clockwise = seg_def.get("clockwise", False)
-
- # Check if we have a transformed normal (from TRF)
- normal = seg_def.get("normal_vector", None)
-
- traj_arr = np.array(
- motion_gen_circle.generate_arc_3d(
- segment_start,
- end,
- center,
- normal=normal, # Use transformed normal if available
- clockwise=clockwise,
- duration=duration,
- )
- )
- trajectories.append(traj_arr)
- last_end_pose = end
-
- elif seg_type == "CIRCLE":
- center = seg_def["center"]
- radius = seg_def["radius"]
- plane = seg_def.get("plane", "XY")
- duration = seg_def["duration"]
- clockwise = seg_def.get("clockwise", False)
-
- # Use transformed normal if available (from TRF)
- if "normal_vector" in seg_def:
- normal = seg_def["normal_vector"]
- else:
- plane_normals = {"XY": [0, 0, 1], "XZ": [0, 1, 0], "YZ": [1, 0, 0]}
- normal = plane_normals.get(plane, [0, 0, 1])
-
- traj_arr = np.array(
- motion_gen_circle.generate_circle_3d(
- center,
- radius,
- normal,
- start_point=segment_start[:3],
- duration=duration,
- )
- )
-
- if clockwise:
- traj_arr = traj_arr[::-1]
-
- # Update orientations
- for j in range(len(traj_arr)):
- traj_arr[j][3:] = segment_start[3:]
-
- trajectories.append(traj_arr)
- # Circle returns to start, so last pose is last point of trajectory
- last_end_pose = traj_arr[-1]
-
- elif seg_type == "SPLINE":
- waypoints = seg_def["waypoints"]
- duration = seg_def["duration"]
-
- # Check if first waypoint is close to segment start
- wp_error = np.linalg.norm(
- np.array(waypoints[0][:3]) - np.array(segment_start[:3])
- )
-
- if wp_error > 5.0:
- full_waypoints = [segment_start] + waypoints
- else:
- full_waypoints = [segment_start] + waypoints[1:]
-
- timestamps = np.linspace(0, duration, len(full_waypoints))
- traj_arr = np.array(
- motion_gen_spline.generate_cubic_spline(
- full_waypoints, timestamps.tolist()
- )
- )
- trajectories.append(traj_arr)
- last_end_pose = waypoints[-1]
-
- # Blend all trajectories with advanced blending
- if len(trajectories) > 1:
- # Select blend method based on trajectory type
- if self.trajectory_type == "quintic":
- blend_method = "quintic"
- elif self.trajectory_type == "s_curve":
- blend_method = "s_curve"
- elif self.trajectory_type == "cubic":
- blend_method = "cubic"
- else:
- blend_method = "smoothstep" # Legacy compatibility
-
- # Create advanced blender
- advanced_blender = AdvancedMotionBlender(sample_rate=100.0)
- blended = trajectories[0]
-
- # Use auto sizing if blend_time is small, otherwise use specified time
- if self.blend_time < 0.1:
- auto_size = True
- blend_samples = None
- else:
- auto_size = False
- blend_samples = int(self.blend_time * 100)
-
- for i in range(1, len(trajectories)):
- blended = advanced_blender.blend_trajectories(
- blended,
- trajectories[i],
- method=blend_method,
- blend_samples=blend_samples,
- auto_size=auto_size,
- )
-
- logger.info(
- f" Blended {len(trajectories)} segments into {len(blended)} points using {blend_method}"
- )
- return blended
- elif trajectories:
- return trajectories[0]
- else:
- raise ValueError("No trajectories generated in blend")
-
-
-@register_command("SMOOTH_WAYPOINTS")
-class SmoothWaypointsCommand(BaseSmoothMotionCommand):
- """Execute waypoint trajectory with corner cutting."""
-
- __slots__ = (
- "waypoints",
- "blend_radii",
- "blend_mode",
- "via_modes",
- "max_velocity",
- "max_acceleration",
- "trajectory_type",
- "frame",
- "duration",
- )
-
- def __init__(self) -> None:
- super().__init__(description="smooth waypoints")
- self.waypoints: list[list[float]] | None = None
- self.blend_radii: Any = "auto"
- self.blend_mode: str = "parabolic"
- self.via_modes: list[str] | None = None
- self.max_velocity: float = 100.0
- self.max_acceleration: float = 500.0
- self.trajectory_type: str = "quintic"
- self.frame: str = "WRF"
- self.duration: float | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SMOOTH_WAYPOINTS command.
- Format: SMOOTH_WAYPOINTS|wp1;wp2;...|blend_radii|blend_mode|via_modes|max_vel|max_accel|frame|[trajectory_type]|[duration]
- """
- if parts[0].upper() != "SMOOTH_WAYPOINTS":
- return False, None
-
- if len(parts) < 8:
- return False, "SMOOTH_WAYPOINTS requires at least 8 parameters"
-
- try:
- # Parse waypoints (semicolon separated)
- waypoint_strs = parts[1].split(";")
- self.waypoints = []
- for wp_str in waypoint_strs:
- wp = list(map(float, wp_str.split(",")))
- if len(wp) != 6:
- return False, "Each waypoint must have 6 values (x,y,z,rx,ry,rz)"
- self.waypoints.append(wp)
-
- if len(self.waypoints) < 2:
- return False, "SMOOTH_WAYPOINTS requires at least 2 waypoints"
-
- # Parse blend radii
- if parts[2].upper() == "AUTO":
- self.blend_radii = "auto"
- else:
- self.blend_radii = list(map(float, parts[2].split(",")))
- if len(self.blend_radii) != len(self.waypoints) - 2:
- return False, f"Blend radii count must be {len(self.waypoints) - 2}"
-
- # Parse blend mode
- self.blend_mode = parts[3].lower()
- if self.blend_mode not in ["parabolic", "circular", "none"]:
- return False, f"Invalid blend mode: {self.blend_mode}"
-
- # Parse via modes
- via_mode_strs = parts[4].split(",")
- self.via_modes = []
- for vm in via_mode_strs:
- vm = vm.lower()
- if vm not in ["via", "stop"]:
- return False, f"Invalid via mode: {vm}"
- self.via_modes.append(vm)
-
- if len(self.via_modes) != len(self.waypoints):
- return False, "Via modes count must match waypoint count"
-
- # Parse velocity and acceleration constraints
- self.max_velocity = float(parts[5])
- self.max_acceleration = float(parts[6])
-
- # Parse frame
- self.frame = parts[7].upper()
- if self.frame not in ["WRF", "TRF"]:
- return False, f"Invalid frame: {self.frame}"
-
- # Parse optional trajectory type
- idx = 8
- if idx < len(parts):
- self.trajectory_type = parts[idx].lower()
- if self.trajectory_type not in ["cubic", "quintic", "s_curve"]:
- self.trajectory_type = "quintic"
- idx += 1
-
- # Parse optional duration
- if idx < len(parts):
- self.duration = float(parts[idx])
-
- # Initialize description
- self.description = f"waypoints ({len(self.waypoints or [])} points, {self.frame}, {self.blend_mode})"
-
- return True, None
-
- except (ValueError, IndexError) as e:
- return False, f"Invalid SMOOTH_WAYPOINTS parameters: {e}"
-
- def do_setup(self, state: "ControllerState") -> None:
- """Transform waypoints if in TRF."""
- # Ensure waypoints present for typing
- if self.waypoints is None:
- self.fail("At least 2 waypoints required")
- return
-
- if self.frame == "TRF":
- # Transform all waypoints to WRF
- tool_pose = get_fkine_se3()
- transformed_waypoints = []
- for wp in self.waypoints:
- transformed_wp = pose6_trf_to_wrf(wp, tool_pose)
- transformed_waypoints.append(transformed_wp)
-
- self.waypoints = transformed_waypoints
- logger.info(
- f" -> TRF Waypoints: transformed {len(self.waypoints)} points to WRF"
- )
-
- # Basic validation
- if len(self.waypoints) < 2:
- self.fail("At least 2 waypoints required")
- return
-
- return super().do_setup(state)
-
- def generate_main_trajectory(self, effective_start_pose):
- """Generate waypoint trajectory with corner cutting."""
- assert self.waypoints is not None
- assert self.via_modes is not None
- wps = self.waypoints
- vms = self.via_modes
-
- # Ensure first waypoint matches effective start pose
- first_wp_error = np.linalg.norm(
- np.array(wps[0][:3]) - np.array(effective_start_pose[:3])
- )
-
- if first_wp_error > 10.0:
- # Prepend effective start pose as first waypoint
- full_waypoints = [effective_start_pose] + wps
- if self.blend_radii != "auto" and isinstance(self.blend_radii, list):
- # Prepend 0 blend radius for start
- full_blend_radii = [0] + self.blend_radii
- else:
- full_blend_radii = self.blend_radii
- full_via_modes = ["via"] + vms
- else:
- # Replace first waypoint with effective start pose
- full_waypoints = [effective_start_pose] + wps[1:]
- full_blend_radii = self.blend_radii
- full_via_modes = vms
-
- # Set up constraints
- constraints = {
- "max_velocity": self.max_velocity,
- "max_acceleration": self.max_acceleration,
- "max_jerk": 5000.0, # Default jerk limit
- }
-
- # Create planner
- planner = WaypointTrajectoryPlanner(
- full_waypoints, constraints=constraints, sample_rate=100.0
- )
-
- # Determine blend mode for planner
- if self.blend_mode == "none":
- planner_blend_mode = "none"
- elif self.blend_radii == "auto":
- planner_blend_mode = "auto"
- else:
- planner_blend_mode = "manual"
-
- # Generate trajectory with direct profile support
- if planner_blend_mode == "manual" and isinstance(full_blend_radii, list):
- opt_radii = [float(r) for r in full_blend_radii]
- else:
- opt_radii = None
- trajectory = planner.plan_trajectory(
- blend_mode=planner_blend_mode,
- blend_radii=opt_radii,
- via_modes=full_via_modes,
- trajectory_type=self.trajectory_type,
- jerk_limit=constraints["max_jerk"]
- if self.trajectory_type == "s_curve"
- else None,
- )
-
- # Apply duration scaling if specified
- if self.duration and self.duration > 0:
- current_duration = len(trajectory) / 100.0
- if current_duration > 0:
- scale_factor = self.duration / current_duration
- if scale_factor != 1.0:
- # Resample trajectory for desired duration
- new_length = int(self.duration * 100)
- old_indices = np.linspace(0, len(trajectory) - 1, new_length)
- resampled = []
- for idx in old_indices:
- if idx < len(trajectory) - 1:
- i = int(idx)
- alpha = idx - i
- pose = (
- trajectory[i] * (1 - alpha) + trajectory[i + 1] * alpha
- )
- else:
- pose = trajectory[-1]
- resampled.append(pose)
- trajectory = np.array(resampled)
-
- logger.info(f" Generated waypoint trajectory with {len(trajectory)} points")
- return trajectory
diff --git a/parol6/commands/system_commands.py b/parol6/commands/system_commands.py
index 0c1ac22..3e3c054 100644
--- a/parol6/commands/system_commands.py
+++ b/parol6/commands/system_commands.py
@@ -1,7 +1,7 @@
"""
System control commands that can execute regardless of controller enable state.
-These commands control the overall state of the robot controller (enable/disable, stop, etc.)
+These commands control the overall state of the robot controller (resume/halt, etc.)
and can execute even when the controller is disabled.
"""
@@ -11,10 +11,22 @@
import os
from typing import TYPE_CHECKING
-from parol6.commands.base import ExecutionStatus, SystemCommand, parse_bool, parse_int
+from parol6.commands.base import ExecutionStatusCode, SystemCommand
from parol6.config import save_com_port
+from parol6.protocol.wire import (
+ CmdType,
+ ConnectHardwareCmd,
+ HaltCmd,
+ ResumeCmd,
+ SelectProfileCmd,
+ SetTcpOffsetCmd,
+ SimulatorCmd,
+ WriteIOCmd,
+)
from parol6.protocol.wire import CommandCode
from parol6.server.command_registry import register_command
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
if TYPE_CHECKING:
from parol6.server.state import ControllerState
@@ -22,273 +34,163 @@
logger = logging.getLogger(__name__)
-@register_command("STOP")
-class StopCommand(SystemCommand):
- """Emergency stop command - immediately stops all motion."""
+@register_command(CmdType.RESUME)
+class ResumeCommand(SystemCommand[ResumeCmd]):
+ """Re-enable the robot controller, allowing motion commands."""
- __slots__ = ()
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a STOP command."""
- if parts[0].upper() == "STOP":
- if len(parts) != 1:
- return False, "STOP command takes no parameters"
- return True, None
- return False, None
-
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
- """Execute stop - set all speeds to zero and command to IDLE."""
- logger.info("STOP command executed")
- state.Speed_out.fill(0)
- state.Command_out = CommandCode.IDLE
-
- # Clear any active commands in the controller
- # This will be handled by the controller when it sees this command
-
- self.finish()
- return ExecutionStatus.completed("Robot stopped")
-
-
-@register_command("ENABLE")
-class EnableCommand(SystemCommand):
- """Enable the robot controller."""
+ PARAMS_TYPE = ResumeCmd
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is an ENABLE command."""
- if parts[0].upper() == "ENABLE":
- if len(parts) != 1:
- return False, "ENABLE command takes no parameters"
- return True, None
- return False, None
-
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
- """Execute enable - set controller to enabled state."""
- logger.info("ENABLE command executed")
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Execute resume - set controller to enabled state."""
+ logger.info("RESUME command executed")
state.enabled = True
state.disabled_reason = ""
state.Command_out = CommandCode.ENABLE
self.finish()
- return ExecutionStatus.completed("Controller enabled")
+ return ExecutionStatusCode.COMPLETED
-@register_command("DISABLE")
-class DisableCommand(SystemCommand):
- """Disable the robot controller."""
+@register_command(CmdType.HALT)
+class HaltCommand(SystemCommand[HaltCmd]):
+ """Halt the robot — stop all motion and disable."""
+
+ PARAMS_TYPE = HaltCmd
__slots__ = ()
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """Check if this is a DISABLE command."""
- if parts[0].upper() == "DISABLE":
- if len(parts) != 1:
- return False, "DISABLE command takes no parameters"
- return True, None
- return False, None
-
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
- """Execute disable - zero speeds and set controller to disabled state."""
- logger.info("DISABLE command executed")
- state.Speed_out.fill(0) # Zero all speeds first
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Execute halt - zero speeds and set controller to disabled state."""
+ logger.info("HALT command executed")
+ state.Speed_out.fill(0)
state.enabled = False
- state.disabled_reason = "User requested disable"
+ state.disabled_reason = "User requested halt"
state.Command_out = CommandCode.DISABLE
self.finish()
- return ExecutionStatus.completed("Controller disabled")
+ return ExecutionStatusCode.COMPLETED
-@register_command("SET_IO")
-class SetIOCommand(SystemCommand):
+@register_command(CmdType.WRITE_IO)
+class WriteIOCommand(SystemCommand[WriteIOCmd]):
"""Set a digital I/O port state."""
- __slots__ = ("port_index", "port_value")
+ PARAMS_TYPE = WriteIOCmd
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SET_IO command.
-
- Format: SET_IO|port_index|value
- Example: SET_IO|0|1
- """
- if parts[0].upper() != "SET_IO":
- return False, None
-
- if len(parts) != 3:
- return False, "SET_IO requires 2 parameters: port_index, value"
-
- self.port_index = parse_int(parts[1])
- self.port_value = parse_int(parts[2])
-
- if self.port_index is None or self.port_value is None:
- return False, "Port index and value must be integers"
-
- # Validate port index (0-7 for 8 I/O ports)
- if not 0 <= self.port_index <= 7:
- return False, f"Port index must be 0-7, got {self.port_index}"
-
- # Validate port value (0 or 1)
- if self.port_value not in (0, 1):
- return False, f"Port value must be 0 or 1, got {self.port_value}"
-
- logger.info(f"Parsed SET_IO: port {self.port_index} = {self.port_value}")
- return True, None
+ __slots__ = ()
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
"""Execute set port - update I/O port state."""
- if self.port_index is None or self.port_value is None:
- self.fail("Port index or value not set")
- return ExecutionStatus.failed("Port parameters not set")
-
- logger.info(f"SET_IO: Setting port {self.port_index} to {self.port_value}")
+ logger.info(f"WRITE_IO: Setting port {self.p.port_index} to {self.p.value}")
- # Update the output port state
- state.InOut_out[self.port_index] = self.port_value
+ state.InOut_out[self.p.port_index] = self.p.value
self.finish()
- return ExecutionStatus.completed(
- f"Port {self.port_index} set to {self.port_value}"
- )
+ return ExecutionStatusCode.COMPLETED
-@register_command("SET_PORT")
-class SetSerialPortCommand(SystemCommand):
+@register_command(CmdType.CONNECT_HARDWARE)
+class ConnectHardwareCommand(SystemCommand[ConnectHardwareCmd]):
"""Set the serial COM port used by the controller."""
- __slots__ = ("port_str",)
+ PARAMS_TYPE = ConnectHardwareCmd
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SET_PORT command.
+ __slots__ = ()
- Format: SET_PORT|serial_port
- Example: SET_PORT|/dev/ttyACM0
- """
- if parts[0].upper() != "SET_PORT":
- return False, None
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Persist the serial port selection and signal controller to reconnect."""
+ ok = save_com_port(self.p.port_str)
+ if not ok:
+ self.fail(make_error(ErrorCode.SYS_PORT_SAVE_FAILED))
+ return ExecutionStatusCode.FAILED
- if len(parts) != 2:
- return False, "SET_PORT requires 1 parameter: serial_port"
+ self._switch_port = self.p.port_str
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
- port = (parts[1] or "").strip()
- if not port:
- return False, "Serial port cannot be empty"
- self.port_str = port
- logger.info(f"Parsed SET_PORT: serial_port={self.port_str}")
- return True, None
+@register_command(CmdType.SIMULATOR)
+class SimulatorCommand(SystemCommand[SimulatorCmd]):
+ """Toggle simulator (fake serial) mode on/off."""
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
- """Persist the serial port selection; controller may reconnect based on this."""
- if not self.port_str:
- self.fail("No serial port provided")
- return ExecutionStatus.failed("No serial port provided")
+ PARAMS_TYPE = SimulatorCmd
- ok = save_com_port(self.port_str)
- if not ok:
- self.fail("Failed to save COM port")
- return ExecutionStatus.failed("Failed to save COM port")
+ __slots__ = ()
- self.finish()
- # Include details so the controller can reconnect immediately
- return ExecutionStatus.completed(
- "Serial port saved", details={"serial_port": self.port_str}
- )
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Execute simulator toggle by setting env var and signaling reconfiguration."""
+ os.environ["PAROL6_FAKE_SERIAL"] = "1" if self.p.on else "0"
+ logger.info(f"SIMULATOR command executed: {'ON' if self.p.on else 'OFF'}")
+ self._switch_simulator = self.p.on
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
-@register_command("STREAM")
-class StreamCommand(SystemCommand):
- """Toggle stream mode for real-time jogging."""
- __slots__ = ("stream_mode",)
+# Valid motion profile types
+VALID_PROFILES = frozenset(("TOPPRA", "RUCKIG", "QUINTIC", "TRAPEZOID", "LINEAR"))
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse STREAM command.
- Format: STREAM|on/off
- Example: STREAM|on
- """
- if parts[0].upper() != "STREAM":
- return False, None
+@register_command(CmdType.SELECT_PROFILE)
+class SelectProfileCommand(SystemCommand[SelectProfileCmd]):
+ """
+ Set the motion profile for all moves.
- if len(parts) != 2:
- return False, "STREAM requires 1 parameter: on/off"
+ Format: [CmdType.SELECT_PROFILE, profile_type]
- self.stream_mode = parse_bool(parts[1])
- if parts[1].lower() not in ("on", "off", "1", "0", "true", "false"):
- return False, f"STREAM mode must be 'on' or 'off', got '{parts[1]}'"
+ Profile Types:
+ TOPPRA - Time-optimal path parameterization (default)
+ RUCKIG - Time-optimal jerk-limited (point-to-point only, joint moves only)
+ QUINTIC - C² smooth polynomial trajectories
+ TRAPEZOID - Linear segments with parabolic blends
+ LINEAR - Direct interpolation (no smoothing)
- logger.info(f"Parsed STREAM: mode = {self.stream_mode}")
- return True, None
+ Note: RUCKIG is point-to-point and cannot follow Cartesian paths.
+ Cartesian moves will use TOPPRA when RUCKIG is set.
+ """
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
- """Execute stream mode toggle."""
- if self.stream_mode is None:
- self.fail("Stream mode not set")
- return ExecutionStatus.failed("Stream mode not set")
+ PARAMS_TYPE = SelectProfileCmd
- # The controller will handle the actual stream mode setting
- # This is just a placeholder that sets a flag
- logger.info(f"STREAM: Setting stream mode to {self.stream_mode}")
+ __slots__ = ()
- state.stream_mode = self.stream_mode
+ def do_setup(self, state: ControllerState) -> None:
+ """Validate profile name against VALID_PROFILES."""
+ profile = self.p.profile.upper()
+ if profile not in VALID_PROFILES:
+ err = ValueError(f"Invalid profile '{self.p.profile}'")
+ err.robot_error = make_error( # type: ignore[attr-defined, ty:unresolved-attribute]
+ ErrorCode.SYS_PROFILE_INVALID, detail=self.p.profile
+ )
+ raise err
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Execute profile change."""
+ profile = self.p.profile.upper()
+
+ old_profile = state.motion_profile
+ state.motion_profile = profile
+ logger.info(
+ f"SELECT_PROFILE: Changed motion profile from {old_profile} to {profile}"
+ )
self.finish()
- return ExecutionStatus.completed(
- f"Stream mode {'enabled' if self.stream_mode else 'disabled'}"
- )
+ return ExecutionStatusCode.COMPLETED
-@register_command("SIMULATOR")
-class SimulatorCommand(SystemCommand):
- """Toggle simulator (fake serial) mode on/off."""
+@register_command(CmdType.SET_TCP_OFFSET)
+class SetTcpOffsetCommand(SystemCommand[SetTcpOffsetCmd]):
+ """Set the TCP offset in the tool's local frame."""
- __slots__ = ("mode_on",)
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse SIMULATOR command.
-
- Format: SIMULATOR|on/off
- Example: SIMULATOR|on
- """
- if parts[0].upper() != "SIMULATOR":
- return False, None
-
- if len(parts) != 2:
- return False, "SIMULATOR requires 1 parameter: on/off"
-
- self.mode_on = parse_bool(parts[1])
- if parts[1].lower() not in (
- "on",
- "off",
- "1",
- "0",
- "true",
- "false",
- "yes",
- "no",
- ):
- return False, "SIMULATOR parameter must be 'on' or 'off'"
-
- logger.info(f"Parsed SIMULATOR: mode_on={self.mode_on}")
- return True, None
-
- def execute_step(self, state: ControllerState) -> ExecutionStatus:
- """Execute simulator toggle by setting env var and returning details to trigger reconfiguration."""
- if self.mode_on is None:
- self.fail("Simulator mode not set")
- return ExecutionStatus.failed("Simulator mode not set")
-
- # Set environment variable used by transport factory
- os.environ["PAROL6_FAKE_SERIAL"] = "1" if self.mode_on else "0"
- logger.info(f"SIMULATOR command executed: {'ON' if self.mode_on else 'OFF'}")
+ PARAMS_TYPE = SetTcpOffsetCmd
+
+ __slots__ = ()
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ """Convert mm to meters and apply TCP offset."""
+ offset_m = (self.p.x / 1000.0, self.p.y / 1000.0, self.p.z / 1000.0)
+ state.set_tcp_offset(offset_m)
self.finish()
- return ExecutionStatus.completed(
- f"Simulator {'ON' if self.mode_on else 'OFF'}",
- details={"simulator_mode": "on" if self.mode_on else "off"},
- )
+ return ExecutionStatusCode.COMPLETED
diff --git a/parol6/commands/tool_action_command.py b/parol6/commands/tool_action_command.py
new file mode 100644
index 0000000..723ff3e
--- /dev/null
+++ b/parol6/commands/tool_action_command.py
@@ -0,0 +1,55 @@
+"""
+Generic tool action command — dispatches to gripper commands by config type.
+"""
+
+import logging
+
+from parol6.commands.base import ExecutionStatusCode, MotionCommand
+from parol6.protocol.wire import CmdType, ToolActionCmd
+from parol6.server.command_registry import register_command
+from parol6.server.state import ControllerState
+from parol6.tools import get_registry
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+
+logger = logging.getLogger(__name__)
+
+
+@register_command(CmdType.TOOL_ACTION)
+class ToolActionCommand(MotionCommand[ToolActionCmd]):
+ """Dispatch tool actions to the appropriate 100 Hz command engine."""
+
+ PARAMS_TYPE = ToolActionCmd
+
+ __slots__ = ("_delegate",)
+
+ def __init__(self, p: ToolActionCmd) -> None:
+ super().__init__(p)
+ self._delegate: MotionCommand | None = None
+
+ def do_setup(self, state: ControllerState) -> None:
+ key = self.p.tool_key.strip().upper()
+ action = self.p.action.strip().lower()
+ params = self.p.params
+
+ cfg = get_registry().get(key)
+ if cfg is None:
+ raise ValueError(f"Unknown tool '{key}'")
+
+ delegate = cfg.create_command(action, params)
+ if delegate is None:
+ raise ValueError(f"Tool '{key}' does not support actions")
+
+ delegate.setup(state)
+ self._delegate = delegate
+
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ if self._delegate is None:
+ self.fail(make_error(ErrorCode.MOTN_GRIPPER_UNKNOWN))
+ return ExecutionStatusCode.FAILED
+ result = self._delegate.tick(state)
+ if self._delegate.is_finished:
+ self.is_finished = True
+ self.error_state = self._delegate.error_state
+ self.robot_error = self._delegate.robot_error
+ return result
diff --git a/parol6/commands/utility_commands.py b/parol6/commands/utility_commands.py
index 4c54b49..29914a6 100644
--- a/parol6/commands/utility_commands.py
+++ b/parol6/commands/utility_commands.py
@@ -1,11 +1,23 @@
"""
Utility Commands
-Contains utility commands like Delay
+Contains utility commands like Delay and Reset
"""
import logging
-from parol6.commands.base import CommandBase, ExecutionStatus, parse_float
+from parol6.commands.base import (
+ CommandBase,
+ ExecutionStatusCode,
+ MotionCommand,
+ SystemCommand,
+)
+from parol6.protocol.wire import (
+ CheckpointCmd,
+ CmdType,
+ DelayCmd,
+ ResetCmd,
+ ResetLoopStatsCmd,
+)
from parol6.protocol.wire import CommandCode
from parol6.server.command_registry import register_command
from parol6.server.state import ControllerState
@@ -13,66 +25,87 @@
logger = logging.getLogger(__name__)
-@register_command("DELAY")
-class DelayCommand(CommandBase):
+@register_command(CmdType.DELAY)
+class DelayCommand(CommandBase[DelayCmd]):
"""
A non-blocking command that pauses execution for a specified duration.
- During the delay, it ensures the robot remains idle by sending the
- appropriate commands.
"""
- __slots__ = ("duration",)
-
- def __init__(self):
- """
- Initializes the Delay command.
- Parameters are parsed in do_match() method.
- """
- super().__init__()
- self.duration: float | None = None
-
- def do_match(self, parts: list[str]) -> tuple[bool, str | None]:
- """
- Parse DELAY command parameters.
-
- Format: DELAY|duration
- Example: DELAY|2.5
- """
- if len(parts) != 2:
- return (False, "DELAY requires 1 parameter: duration")
-
- self.duration = parse_float(parts[1])
- if self.duration is None or self.duration <= 0:
- return (False, f"Delay duration must be positive, got {parts[1]}")
- logger.info(f"Parsed Delay command for {self.duration} seconds")
- self.is_valid = True
- return (True, None)
-
- def setup(self, state: "ControllerState") -> None:
+ PARAMS_TYPE = DelayCmd
+
+ __slots__ = ()
+
+ def do_setup(self, state: "ControllerState") -> None:
"""Start the delay timer."""
- if self.duration:
- self.start_timer(self.duration)
- logger.info(f" -> Delay starting for {self.duration} seconds...")
-
- def execute_step(self, state: "ControllerState") -> ExecutionStatus:
- """
- Keep the robot idle during the delay and report status via ExecutionStatus.
- """
- if self.is_finished or not self.is_valid:
- return (
- ExecutionStatus.completed("Already finished")
- if self.is_finished
- else ExecutionStatus.failed("Invalid command")
- )
-
- # Keep the robot idle during the delay
+ self.start_timer(self.p.seconds)
+ logger.info(f" -> Delay starting for {self.p.seconds} seconds...")
+
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Keep the robot idle during the delay."""
state.Command_out = CommandCode.IDLE
state.Speed_out.fill(0)
- # Check for completion
if self.timer_expired():
- logger.info(f"Delay finished after {self.duration} seconds.")
- self.is_finished = True
- return ExecutionStatus.completed("Delay complete")
+ logger.info(f"Delay finished after {self.p.seconds} seconds.")
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+
+ return ExecutionStatusCode.EXECUTING
+
+
+@register_command(CmdType.RESET)
+class ResetCommand(SystemCommand[ResetCmd]):
+ """
+ Instantly reset controller state to initial values.
+ """
+
+ PARAMS_TYPE = ResetCmd
+
+ __slots__ = ()
+
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Reset state immediately."""
+ state.reset()
+ self._sync_mock = True
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+
+
+@register_command(CmdType.RESET_LOOP_STATS)
+class ResetLoopStatsCommand(SystemCommand[ResetLoopStatsCmd]):
+ """
+ Reset control loop timing statistics without affecting controller state.
+
+ Resets: min/max period, overrun count, rolling statistics.
+ Preserves: loop_count (uptime), robot state, command queues.
+ """
+
+ PARAMS_TYPE = ResetLoopStatsCmd
+
+ __slots__ = ()
+
+ def execute_step(self, state: "ControllerState") -> ExecutionStatusCode:
+ """Signal controller to reset loop stats."""
+ state.loop_stats_reset_pending = True
+ logger.debug("RESET_LOOP_STATS command executed")
+ self.finish()
+ return ExecutionStatusCode.COMPLETED
+
+
+@register_command(CmdType.CHECKPOINT)
+class CheckpointCommand(MotionCommand[CheckpointCmd]):
+ """Queue marker that sets state.last_checkpoint on execution.
+
+ Completes immediately on first tick. Used for progress tracking
+ without affecting motion.
+ """
+
+ PARAMS_TYPE = CheckpointCmd
+
+ __slots__ = ()
- return ExecutionStatus.executing("Delaying")
+ def execute_step(self, state: ControllerState) -> ExecutionStatusCode:
+ state.last_checkpoint = self.p.label
+ self.finish()
+ self.log_info("Checkpoint reached: %s", self.p.label)
+ return ExecutionStatusCode.COMPLETED
diff --git a/parol6/config.py b/parol6/config.py
index f42db81..8c3ef16 100644
--- a/parol6/config.py
+++ b/parol6/config.py
@@ -2,12 +2,29 @@
Central configuration for PAROL6 tunables and shared constants.
"""
+from __future__ import annotations
+
import logging
import os
+import threading
+from dataclasses import dataclass
from pathlib import Path
+from typing import Union
+
+import numpy as np
+from numba import njit
+from numpy.typing import ArrayLike, NDArray
TRACE: int = 5
logging.addLevelName(TRACE, "TRACE")
+
+# Command queue limits
+MAX_COMMAND_QUEUE_SIZE: int = 100
+MAX_BLEND_LOOKAHEAD: int = int(os.getenv("PAROL6_MAX_BLEND_LOOKAHEAD", "100"))
+MAX_POLL_COUNT: int = 25 # Max UDP messages to read per control tick
+
+# Serial transport defaults
+SERIAL_RX_RING_DEFAULT: int = 262144
# Add Logger.trace if missing
if not hasattr(logging.Logger, "trace"):
@@ -15,25 +32,25 @@ def _trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
- logging.Logger.trace = _trace # type: ignore[attr-defined]
- logging.TRACE = TRACE # type: ignore[attr-defined]
+ logging.Logger.trace = _trace # type: ignore[attr-defined, ty:unresolved-attribute]
+ logging.TRACE = TRACE # type: ignore[attr-defined, ty:unresolved-attribute]
TRACE_ENABLED = str(os.getenv("PAROL_TRACE", "0")).lower() in ("1", "true", "yes", "on")
logger = logging.getLogger(__name__)
# Default control/sample rates (Hz)
-CONTROL_RATE_HZ: float = float(os.getenv("PAROL6_CONTROL_RATE_HZ", "250"))
-
-# Velocity/acceleration safety margins
-VELOCITY_SAFETY_SCALE: float = 1.2 # e.g., clamp at 1.2x of budget
+CONTROL_RATE_HZ: float = float(os.getenv("PAROL6_CONTROL_RATE_HZ", "100"))
-DEFAULT_ACCEL_PERCENT: float = 50.0
+DEFAULT_ACCEL_PERCENT: float = 100.0
# Motion thresholds (mm)
NEAR_MM_TOL_MM: float = 2.0 # Proximity threshold for considering positions "near" (mm)
ENTRY_MM_TOL_MM: float = 5.0 # Entry trajectory threshold for smooth motion (mm)
+# Trajectory path sampling (fixed samples for TOPP-RA input)
+PATH_SAMPLES: int = int(os.getenv("PAROL6_PATH_SAMPLES", "50"))
+
# Centralized loop interval (seconds).
INTERVAL_S: float = max(1e-6, 1.0 / max(CONTROL_RATE_HZ, 1.0))
@@ -66,26 +83,23 @@ def _trace(self, msg, *args, **kwargs):
# Status update/broadcast rates
STATUS_RATE_HZ: float = float(os.getenv("PAROL6_STATUS_RATE_HZ", "50"))
-STATUS_STALE_S: float = float(os.getenv("PAROL6_STATUS_STALE_S", "0.2"))
+STATUS_STALE_S: float = float(os.getenv("PAROL6_STATUS_STALE_S", "0.5"))
+# Validate STATUS_RATE_HZ divides evenly into CONTROL_RATE_HZ for polling
+if int(CONTROL_RATE_HZ) % int(STATUS_RATE_HZ) != 0:
+ raise ValueError(
+ f"STATUS_RATE_HZ ({STATUS_RATE_HZ}) must divide evenly into "
+ f"CONTROL_RATE_HZ ({CONTROL_RATE_HZ})"
+ )
+STATUS_BROADCAST_INTERVAL: int = int(CONTROL_RATE_HZ) // int(STATUS_RATE_HZ)
-# Homing posture (degrees) for simulation/tests; can be overridden via env "PAROL6_HOME_ANGLES_DEG" (CSV)
-def _parse_home_angles() -> list[float]:
- raw = os.getenv("PAROL6_HOME_ANGLES_DEG")
- if not raw:
- return [90.0, -90.0, 180.0, 0.0, 0.0, 180.0]
- try:
- parts = [p.strip() for p in raw.split(",")]
- vals = [float(p) for p in parts]
- # Ensure length 6
- if len(vals) != 6:
- return [90.0, -90.0, 180.0, 0.0, 0.0, 180.0]
- return vals
- except Exception:
- return [90.0, -90.0, 180.0, 0.0, 0.0, 180.0]
-
+# Max ticks to hold MOVE at trajectory endpoint waiting for Position_in to converge.
+# At 100Hz control rate, 20 ticks = 200ms. If the robot hasn't reached the target
+# by then, the segment completes anyway to avoid blocking the pipeline.
+SETTLE_MAX_TICKS: int = int(os.getenv("PAROL6_SETTLE_MAX_TICKS", "20"))
-HOME_ANGLES_DEG: list[float] = _parse_home_angles()
+# Loop timing tuning - busy threshold before deadline to switch from sleep to busy-wait
+BUSY_THRESHOLD_MS: float = float(os.getenv("PAROL6_BUSY_THRESHOLD_MS", "1.0"))
# Ack/Tracking policy toggles
@@ -168,3 +182,386 @@ def get_com_port_with_fallback() -> str:
return saved_port
return ""
+
+
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT # noqa: E402 - must be after steps_to_rad() definition due to circular import
+
+# Type alias for conversion function return types
+IndexArg = Union[int, NDArray[np.int_], None]
+
+# Import robot-specific constants
+_degree_per_step = PAROL6_ROBOT.degree_per_step_constant
+_radian_per_step = PAROL6_ROBOT.radian_per_step_constant
+_joint_ratio = PAROL6_ROBOT.joint.ratio
+
+# Standby/home position in degrees - pass-through from robot definition
+STANDBY_ANGLES_DEG: list[float] = list(PAROL6_ROBOT.joint.standby_deg)
+# Alias
+HOME_ANGLES_DEG: list[float] = STANDBY_ANGLES_DEG
+
+
+# JIT helper for rad_to_steps (needs wrapper because of thread-local scratch buffer)
+@njit(cache=True)
+def _rad_to_steps_jit(
+ rad: NDArray[np.float64],
+ out: NDArray[np.int32],
+ scratch: NDArray[np.float64],
+ radian_per_step_inv: float,
+ joint_ratio: NDArray[np.float64],
+) -> NDArray[np.int32]:
+ np.multiply(rad, radian_per_step_inv, scratch)
+ np.multiply(scratch, joint_ratio, scratch)
+ np.rint(scratch, scratch)
+ out[:] = scratch.astype(np.int32)
+ return out
+
+
+@njit(cache=True)
+def deg_to_steps(deg: NDArray[np.float64], out: NDArray[np.int32]) -> NDArray[np.int32]:
+ """Convert degrees to steps (gear ratio aware). Zero-allocation when out is provided."""
+ for i in range(6):
+ out[i] = np.int32(np.rint((deg[i] / _degree_per_step) * _joint_ratio[i]))
+ return out
+
+
+@njit(cache=True)
+def deg_to_steps_scalar(deg: float, idx: int) -> np.int32:
+ """Convert single degree value to steps."""
+ return np.int32(np.rint((deg / _degree_per_step) * _joint_ratio[idx]))
+
+
+@njit(cache=True)
+def steps_to_deg(
+ steps: NDArray[np.int32], out: NDArray[np.float64]
+) -> NDArray[np.float64]:
+ """Convert steps to degrees (gear ratio aware). Zero-allocation."""
+ np.multiply(steps, _degree_per_step, out)
+ np.divide(out, _joint_ratio, out)
+ return out
+
+
+@njit(cache=True)
+def steps_to_deg_scalar(steps: int, idx: int) -> np.float64:
+ """Convert single steps value to degrees."""
+ return np.float64((float(steps) * _degree_per_step) / _joint_ratio[idx])
+
+
+# Thread-local scratch buffer for rad_to_steps intermediate calculation
+_tls = threading.local()
+
+
+def _get_scratch_f64() -> NDArray[np.float64]:
+ """Get thread-local float64 scratch buffer (6 elements)."""
+ buf = getattr(_tls, "scratch_f64", None)
+ if buf is None:
+ buf = np.zeros(6, dtype=np.float64)
+ _tls.scratch_f64 = buf
+ return buf
+
+
+def rad_to_steps(
+ rad: ArrayLike, out: NDArray[np.int32], idx: IndexArg = None
+) -> NDArray[np.int32]:
+ """Convert radians to steps. Zero-allocation (uses thread-local scratch)."""
+ scratch = _get_scratch_f64()
+ return _rad_to_steps_jit(
+ np.asarray(rad, dtype=np.float64),
+ out,
+ scratch,
+ 1.0 / _radian_per_step,
+ _joint_ratio,
+ )
+
+
+@njit(cache=True)
+def rad_to_steps_scalar(rad: float, idx: int) -> np.int32:
+ """Convert single radian value to steps."""
+ return np.int32(np.rint((rad / _radian_per_step) * _joint_ratio[idx]))
+
+
+@njit(cache=True)
+def steps_to_rad(
+ steps: NDArray[np.int32], out: NDArray[np.float64]
+) -> NDArray[np.float64]:
+ """Convert steps to radians. Zero-allocation. JIT-compiled."""
+ np.multiply(steps, _radian_per_step, out)
+ np.divide(out, _joint_ratio, out)
+ return out
+
+
+@njit(cache=True)
+def steps_to_rad_scalar(steps: int, idx: int) -> np.float64:
+ """Convert single steps value to radians."""
+ return np.float64((float(steps) * _radian_per_step) / _joint_ratio[idx])
+
+
+@njit(cache=True)
+def speed_steps_to_deg(
+ sps: NDArray[np.int32], out: NDArray[np.float64]
+) -> NDArray[np.float64]:
+ """Convert speed: steps/s to deg/s. Zero-allocation."""
+ np.multiply(sps, _degree_per_step, out)
+ np.divide(out, _joint_ratio, out)
+ return out
+
+
+@njit(cache=True)
+def speed_steps_to_deg_scalar(sps: float, idx: int) -> np.float64:
+ """Convert single speed value: steps/s to deg/s."""
+ return np.float64((sps * _degree_per_step) / _joint_ratio[idx])
+
+
+@njit(cache=True)
+def speed_deg_to_steps(
+ dps: NDArray[np.float64], out: NDArray[np.int32]
+) -> NDArray[np.int32]:
+ """Convert speed: deg/s to steps/s. Zero-allocation."""
+ for i in range(6):
+ out[i] = np.int32((dps[i] / _degree_per_step) * _joint_ratio[i])
+ return out
+
+
+@njit(cache=True)
+def speed_deg_to_steps_scalar(dps: float, idx: int) -> np.int32:
+ """Convert single speed value: deg/s to steps/s."""
+ return np.int32((dps / _degree_per_step) * _joint_ratio[idx])
+
+
+@njit(cache=True)
+def speed_steps_to_rad(
+ sps: NDArray[np.int32], out: NDArray[np.float64]
+) -> NDArray[np.float64]:
+ """Convert speed: steps/s to rad/s. Zero-allocation."""
+ np.multiply(sps, _radian_per_step, out)
+ np.divide(out, _joint_ratio, out)
+ return out
+
+
+@njit(cache=True)
+def speed_steps_to_rad_scalar(sps: float, idx: int) -> np.float64:
+ """Convert single speed value: steps/s to rad/s."""
+ return np.float64((sps * _radian_per_step) / _joint_ratio[idx])
+
+
+@njit(cache=True)
+def speed_rad_to_steps(
+ rps: NDArray[np.float64], out: NDArray[np.int32]
+) -> NDArray[np.int32]:
+ """Convert speed: rad/s to steps/s. Zero-allocation."""
+ for i in range(6):
+ out[i] = np.int32((rps[i] / _radian_per_step) * _joint_ratio[i])
+ return out
+
+
+@njit(cache=True)
+def speed_rad_to_steps_scalar(rps: float, idx: int) -> np.int32:
+ """Convert single speed value: rad/s to steps/s."""
+ return np.int32((rps / _radian_per_step) * _joint_ratio[idx])
+
+
+# -----------------------------------------------------------------------------
+# Robot Limits - Unified SI-unit hierarchy
+# -----------------------------------------------------------------------------
+# All values in SI units: rad/s, rad/s², rad/s³ for joint; m/s, m/s², m/s³ for cart
+#
+# Usage:
+# LIMITS.joint.hard.velocity → [6] joint velocity limits (rad/s)
+# LIMITS.joint.jog.velocity → [6] jog velocity limits (rad/s)
+# LIMITS.joint.position.rad → [6,2] position limits [min,max] (rad)
+# LIMITS.joint.position.rad[:, 0] → [6] min position limits (rad)
+# LIMITS.cart.hard.velocity.linear → linear velocity limit (m/s)
+# LIMITS.cart.hard.velocity.angular → angular velocity limit (rad/s)
+# LIMITS.cart.jog.velocity.linear → jog linear velocity limit (m/s)
+
+
+@dataclass(frozen=True, slots=True)
+class Kinodynamic:
+ """Joint kinodynamic limits (velocity, acceleration, jerk)."""
+
+ # SI units for algorithms
+ velocity: NDArray[np.float64] # rad/s
+ acceleration: NDArray[np.float64] # rad/s²
+ jerk: NDArray[np.float64] # rad/s³
+ # Step units for hardware/simulation
+ velocity_steps: NDArray[np.int32] # steps/s
+ acceleration_steps: NDArray[np.int32] # steps/s²
+ jerk_steps: NDArray[np.int32] # steps/s³
+
+
+@dataclass(frozen=True, slots=True)
+class JointPosition:
+ """Joint position limits in various units."""
+
+ deg: NDArray[np.float64] # [6, 2] - [min, max] per joint
+ rad: NDArray[np.float64] # [6, 2]
+ steps: NDArray[np.int32] # [6, 2]
+
+
+@dataclass(frozen=True, slots=True)
+class JointLimits:
+ """All joint limits."""
+
+ hard: Kinodynamic # Hardware limits
+ jog: Kinodynamic # Jog limits (reduced for safety)
+ position: JointPosition
+
+
+@dataclass(frozen=True, slots=True)
+class LinearAngular:
+ """Cartesian linear/angular component pair (SI units)."""
+
+ linear: float # m/s, m/s², or m/s³
+ angular: float # rad/s, rad/s², or rad/s³
+
+
+@dataclass(frozen=True, slots=True)
+class CartKinodynamic:
+ """Cartesian kinodynamic limits with linear/angular components."""
+
+ velocity: LinearAngular
+ acceleration: LinearAngular
+ jerk: LinearAngular
+
+
+@dataclass(frozen=True, slots=True)
+class CartLimits:
+ """All Cartesian limits."""
+
+ hard: CartKinodynamic
+ jog: CartKinodynamic
+
+
+@dataclass(frozen=True, slots=True)
+class RobotLimits:
+ """Unified robot limits namespace."""
+
+ joint: JointLimits
+ cart: CartLimits
+
+
+def _build_kinodynamic(
+ v_steps: ArrayLike, a_steps: ArrayLike, j_steps: ArrayLike
+) -> Kinodynamic:
+ """Build Kinodynamic from step-based limits, with both SI and step units."""
+ v_steps_arr = np.asarray(v_steps, dtype=np.int32)
+ a_steps_arr = np.asarray(a_steps, dtype=np.int32)
+ j_steps_arr = np.asarray(j_steps, dtype=np.int32)
+ v_rad = np.array(
+ [float(speed_steps_to_rad_scalar(v_steps_arr[i], i)) for i in range(6)]
+ )
+ a_rad = np.array(
+ [float(speed_steps_to_rad_scalar(a_steps_arr[i], i)) for i in range(6)]
+ )
+ j_rad = np.array(
+ [float(speed_steps_to_rad_scalar(j_steps_arr[i], i)) for i in range(6)]
+ )
+ return Kinodynamic(
+ velocity=v_rad,
+ acceleration=a_rad,
+ jerk=j_rad,
+ velocity_steps=v_steps_arr,
+ acceleration_steps=a_steps_arr,
+ jerk_steps=j_steps_arr,
+ )
+
+
+def _build_joint_position(limits_deg: NDArray) -> JointPosition:
+ """Build JointPosition from degree limits."""
+ limits_rad = np.deg2rad(limits_deg)
+ # Allocate once for module init (not hot path)
+ tmp = np.zeros(6, dtype=np.int32)
+ limits_steps = np.column_stack(
+ [
+ deg_to_steps(limits_deg[:, 0], tmp).copy(),
+ deg_to_steps(limits_deg[:, 1], tmp).copy(),
+ ]
+ )
+ return JointPosition(deg=limits_deg.copy(), rad=limits_rad, steps=limits_steps)
+
+
+def _build_cart_kinodynamic(
+ vel_lin_mm: float,
+ vel_ang_deg: float,
+ acc_lin_mm: float,
+ acc_ang_deg: float,
+ jerk_lin_mm: float,
+ jerk_ang_deg: float,
+) -> CartKinodynamic:
+ """Build CartKinodynamic from mm/deg values, converting to SI."""
+ return CartKinodynamic(
+ velocity=LinearAngular(
+ linear=vel_lin_mm / 1000.0, angular=np.radians(vel_ang_deg)
+ ),
+ acceleration=LinearAngular(
+ linear=acc_lin_mm / 1000.0, angular=np.radians(acc_ang_deg)
+ ),
+ jerk=LinearAngular(
+ linear=jerk_lin_mm / 1000.0, angular=np.radians(jerk_ang_deg)
+ ),
+ )
+
+
+# Build the unified LIMITS structure
+LIMITS: RobotLimits = RobotLimits(
+ joint=JointLimits(
+ hard=_build_kinodynamic(
+ PAROL6_ROBOT.joint.speed_max,
+ PAROL6_ROBOT.joint.acc_max,
+ PAROL6_ROBOT.joint.jerk_max,
+ ),
+ jog=_build_kinodynamic(
+ PAROL6_ROBOT.joint.jog_speed_max,
+ PAROL6_ROBOT.joint.acc_max, # Same acc for jog
+ PAROL6_ROBOT.joint.jerk_max, # Same jerk for jog
+ ),
+ position=_build_joint_position(PAROL6_ROBOT.joint.limits_deg),
+ ),
+ cart=CartLimits(
+ hard=_build_cart_kinodynamic(
+ PAROL6_ROBOT.cart.vel.linear.max,
+ PAROL6_ROBOT.cart.vel.angular.max,
+ PAROL6_ROBOT.cart.acc.linear.max,
+ PAROL6_ROBOT.cart.acc.angular.max,
+ PAROL6_ROBOT.cart.jerk.linear.max,
+ PAROL6_ROBOT.cart.jerk.angular.max,
+ ),
+ jog=_build_cart_kinodynamic(
+ PAROL6_ROBOT.cart.vel.jog.max,
+ PAROL6_ROBOT.cart.vel.angular.max * 0.8,
+ PAROL6_ROBOT.cart.acc.linear.max,
+ PAROL6_ROBOT.cart.acc.angular.max,
+ PAROL6_ROBOT.cart.jerk.linear.max,
+ PAROL6_ROBOT.cart.jerk.angular.max,
+ ),
+ ),
+)
+
+# Validate limits at module load
+if np.any(LIMITS.joint.hard.velocity <= 0) or np.any(
+ LIMITS.joint.hard.acceleration <= 0
+):
+ raise ValueError("Joint limits must be positive. Check PAROL6_ROBOT config.")
+
+# Jog min speeds - derived from control rate (1 step per tick minimum)
+JOG_MIN_STEPS: int = int(CONTROL_RATE_HZ) # steps/s
+CART_LIN_JOG_MIN: float = CONTROL_RATE_HZ / 100 # mm/s (scales with control rate)
+CART_ANG_JOG_MIN: float = 1.0 # deg/s
+
+# Per-joint IK safety margins (radians) - [min_margin, max_margin] per joint
+# Direction-aware: J3 backwards bend (max) is a trap, but inward (min) is safe
+IK_SAFETY_MARGINS_RAD: NDArray[np.float64] = np.array(
+ [
+ [0.0, 0.0], # J1 - base rotation, symmetric
+        [0.00, 0.05],  # J2 - shoulder: min=0.00, max=0.05 (asymmetric — TODO confirm intent)
+ [0.03, 0.8], # J3 - elbow: min=inward (safe), max=backwards bend (TRAP)
+ [0.0, 0.0], # J4 - wrist, symmetric
+ [0.0, 0.0], # J5 - wrist, symmetric
+ [0.03, 0.03], # J6 - tool rotation, symmetric
+ ],
+ dtype=np.float64,
+)
+
+
+# -----------------------------------------------------------------------------
+# Utility Functions
+# -----------------------------------------------------------------------------
diff --git a/parol6/gcode/__init__.py b/parol6/gcode/__init__.py
deleted file mode 100644
index 60b1676..0000000
--- a/parol6/gcode/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-GCODE Implementation for PAROL6 Robot
-
-This module provides GCODE parsing and execution capabilities for the PAROL6 robot.
-It translates standard GCODE commands into robot motion commands.
-
-Main components:
-- parser.py: GCODE tokenization and parsing
-- state.py: Modal state tracking for GCODE execution
-- commands.py: Command mapping from GCODE to robot commands
-- coordinates.py: Work coordinate system management
-- interpreter.py: Main GCODE interpreter
-- utils.py: Utility functions for conversions and calculations
-"""
-
-from .coordinates import WorkCoordinateSystem
-from .interpreter import GcodeInterpreter
-from .parser import GcodeParser, GcodeToken
-from .state import GcodeState
-
-__version__ = "0.1.0"
-__all__ = [
- "GcodeParser",
- "GcodeToken",
- "GcodeState",
- "GcodeInterpreter",
- "WorkCoordinateSystem",
-]
diff --git a/parol6/gcode/commands.py b/parol6/gcode/commands.py
deleted file mode 100644
index 774accb..0000000
--- a/parol6/gcode/commands.py
+++ /dev/null
@@ -1,643 +0,0 @@
-"""
-GCODE Command Mappings for PAROL6 Robot
-
-Maps GCODE commands to robot motion commands.
-Implements command objects that interface with the existing robot API.
-"""
-
-import numpy as np
-
-from parol6.PAROL6_ROBOT import cart
-
-from .coordinates import WorkCoordinateSystem
-from .parser import GcodeToken
-from .state import GcodeState
-from .utils import ijk_to_center, radius_to_center, validate_arc
-
-
-class GcodeCommand:
- """Base class for GCODE commands"""
-
- def __init__(self):
- super().__init__()
-
- def to_robot_command(self) -> str:
- """
- Convert to robot API command string
-
- Returns:
- Command string for robot API
- """
- return ""
-
-
-class G0Command(GcodeCommand):
- """G0 - Rapid positioning command"""
-
- def __init__(
- self,
- target_position: dict[str, float],
- state: GcodeState,
- coord_system: WorkCoordinateSystem,
- ):
- """
- Initialize G0 rapid move command
-
- Args:
- target_position: Target position in work coordinates
- state: Current GCODE state
- coord_system: Work coordinate system
- """
- super().__init__()
- self.target_position = target_position
- self.state = state
- self.coord_system = coord_system
-
- # Convert to machine coordinates
- self.machine_position = coord_system.work_to_machine(target_position)
-
- # Convert to robot coordinates [X, Y, Z, RX, RY, RZ]
- self.robot_position = coord_system.gcode_to_robot_coords(self.machine_position)
-
- def to_robot_command(self) -> str:
- """
- Convert to MovePose command for robot API
-
- G0 uses rapid movement (100% speed)
- """
- # Format: MOVEPOSE|X|Y|Z|RX|RY|RZ|duration|speed
- # Where duration="None" for speed-based, speed="None" for duration-based
- x, y, z = self.robot_position[0:3]
- rx, ry, rz = (
- self.robot_position[3:6] if len(self.robot_position) >= 6 else [0, 0, 0]
- )
-
- # G0 uses rapid speed (100%)
- speed_percentage = 100
- duration = "None" # Speed-based movement
-
- command = f"MOVEPOSE|{x:.3f}|{y:.3f}|{z:.3f}|{rx:.3f}|{ry:.3f}|{rz:.3f}|{duration}|{speed_percentage}"
- return command
-
-
-class G1Command(GcodeCommand):
- """G1 - Linear interpolation command"""
-
- def __init__(
- self,
- target_position: dict[str, float],
- state: GcodeState,
- coord_system: WorkCoordinateSystem,
- ):
- """
- Initialize G1 linear move command
-
- Args:
- target_position: Target position in work coordinates
- state: Current GCODE state
- coord_system: Work coordinate system
- """
- super().__init__()
- self.target_position = target_position
- self.state = state
- self.coord_system = coord_system
-
- # Convert to machine coordinates
- self.machine_position = coord_system.work_to_machine(target_position)
-
- # Convert to robot coordinates
- self.robot_position = coord_system.gcode_to_robot_coords(self.machine_position)
-
- # Get feed rate from state (mm/min)
- self.feed_rate = state.feed_rate
-
- def to_robot_command(self) -> str:
- """
- Convert to MoveCart command for robot API
-
- G1 uses linear interpolation with specified feed rate
- """
- # Format: MOVECART|X|Y|Z|RX|RY|RZ|duration|speed
- x, y, z = self.robot_position[0:3]
- rx, ry, rz = (
- self.robot_position[3:6] if len(self.robot_position) >= 6 else [0, 0, 0]
- )
-
- # Convert feed rate (mm/min) to speed percentage
- # Import robot speed limits from configuration
- # Values are in m/s, convert to mm/min
- max_speed_mm_min = cart.vel.linear.max * 1000 * 60 # m/s to mm/min
- min_speed_mm_min = cart.vel.linear.min * 1000 * 60 # m/s to mm/min
-
- # Map feed rate to percentage (0-100)
- speed_percentage = np.interp(
- self.feed_rate, [min_speed_mm_min, max_speed_mm_min], [0, 100]
- )
- speed_percentage = np.clip(speed_percentage, 0, 100)
-
- duration = "None" # Speed-based movement
-
- command = f"MOVECART|{x:.3f}|{y:.3f}|{z:.3f}|{rx:.3f}|{ry:.3f}|{rz:.3f}|{duration}|{speed_percentage:.1f}"
- return command
-
-
-class G2Command(GcodeCommand):
- """G2 - Clockwise circular interpolation command"""
-
- def __init__(
- self,
- target_position: dict[str, float],
- arc_params: dict[str, float],
- state: GcodeState,
- coord_system: WorkCoordinateSystem,
- ):
- """
- Initialize G2 clockwise arc command
-
- Args:
- target_position: Target (end) position in work coordinates
- arc_params: Arc parameters (I, J, K offsets or R radius)
- state: Current GCODE state
- coord_system: Work coordinate system
- """
- super().__init__()
- self.target_position = target_position
- self.arc_params = arc_params
- self.state = state
- self.coord_system = coord_system
-
- # Get current position
- self.start_position = state.current_position.copy()
-
- # Determine arc center based on parameters
- if "R" in arc_params:
- # Radius format
- self.center = radius_to_center(
- self.start_position,
- target_position,
- arc_params["R"],
- clockwise=True,
- plane=state.plane,
- )
- else:
- # IJK offset format
- ijk = {}
- for key in ["I", "J", "K"]:
- if key in arc_params:
- ijk[key] = arc_params[key]
- self.center = ijk_to_center(self.start_position, ijk, plane=state.plane)
-
- # Validate arc
- if not validate_arc(
- self.start_position, target_position, self.center, state.plane
- ):
- self.is_valid = False
- self.error_message = "Invalid arc: start and end radii don't match"
-
- # Convert positions to machine coordinates
- self.machine_start = coord_system.work_to_machine(self.start_position)
- self.machine_end = coord_system.work_to_machine(target_position)
- self.machine_center = coord_system.work_to_machine(self.center)
-
- # Convert to robot coordinates
- self.robot_start = coord_system.gcode_to_robot_coords(self.machine_start)
- self.robot_end = coord_system.gcode_to_robot_coords(self.machine_end)
- self.robot_center = coord_system.gcode_to_robot_coords(self.machine_center)
-
- # Get feed rate from state
- self.feed_rate = state.feed_rate
-
- def to_robot_command(self) -> str:
- """
- Convert to SMOOTH_ARC_CENTER command for robot API
-
- G2 uses clockwise arc interpolation with specified feed rate
- """
- # Format: SMOOTH_ARC_CENTER|end_x|end_y|end_z|end_rx|end_ry|end_rz|center_x|center_y|center_z|frame|start_x|start_y|start_z|start_rx|start_ry|start_rz|duration|speed|clockwise
-
- # Extract positions
- end_x, end_y, end_z = self.robot_end[0:3]
- end_rx, end_ry, end_rz = (
- self.robot_end[3:6] if len(self.robot_end) >= 6 else [0, 0, 0]
- )
-
- center_x, center_y, center_z = self.robot_center[0:3]
-
- start_x, start_y, start_z = self.robot_start[0:3]
- start_rx, start_ry, start_rz = (
- self.robot_start[3:6] if len(self.robot_start) >= 6 else [0, 0, 0]
- )
-
- # Convert feed rate to speed percentage
- max_speed_mm_min = cart.vel.linear.max * 1000 * 60
- min_speed_mm_min = cart.vel.linear.min * 1000 * 60
-
- speed_percentage = np.interp(
- self.feed_rate, [min_speed_mm_min, max_speed_mm_min], [0, 100]
- )
- speed_percentage = np.clip(speed_percentage, 0, 100)
-
- # Build command string
- end_str = f"{end_x:.3f}|{end_y:.3f}|{end_z:.3f}|{end_rx:.3f}|{end_ry:.3f}|{end_rz:.3f}"
- center_str = f"{center_x:.3f}|{center_y:.3f}|{center_z:.3f}"
- start_str = f"{start_x:.3f}|{start_y:.3f}|{start_z:.3f}|{start_rx:.3f}|{start_ry:.3f}|{start_rz:.3f}"
-
- # Use speed-based movement
- duration = "None"
- frame = "0" # World frame
- clockwise = "True" # G2 is clockwise
-
- command = f"SMOOTH_ARC_CENTER|{end_str}|{center_str}|{frame}|{start_str}|{duration}|{speed_percentage:.1f}|{clockwise}"
- return command
-
-
-class G3Command(GcodeCommand):
- """G3 - Counter-clockwise circular interpolation command"""
-
- def __init__(
- self,
- target_position: dict[str, float],
- arc_params: dict[str, float],
- state: GcodeState,
- coord_system: WorkCoordinateSystem,
- ):
- """
- Initialize G3 counter-clockwise arc command
-
- Args:
- target_position: Target (end) position in work coordinates
- arc_params: Arc parameters (I, J, K offsets or R radius)
- state: Current GCODE state
- coord_system: Work coordinate system
- """
- super().__init__()
- self.target_position = target_position
- self.arc_params = arc_params
- self.state = state
- self.coord_system = coord_system
-
- # Get current position
- self.start_position = state.current_position.copy()
-
- # Determine arc center based on parameters
- if "R" in arc_params:
- # Radius format
- self.center = radius_to_center(
- self.start_position,
- target_position,
- arc_params["R"],
- clockwise=False, # G3 is counter-clockwise
- plane=state.plane,
- )
- else:
- # IJK offset format
- ijk = {}
- for key in ["I", "J", "K"]:
- if key in arc_params:
- ijk[key] = arc_params[key]
- self.center = ijk_to_center(self.start_position, ijk, plane=state.plane)
-
- # Validate arc
- if not validate_arc(
- self.start_position, target_position, self.center, state.plane
- ):
- self.is_valid = False
- self.error_message = "Invalid arc: start and end radii don't match"
-
- # Convert positions to machine coordinates
- self.machine_start = coord_system.work_to_machine(self.start_position)
- self.machine_end = coord_system.work_to_machine(target_position)
- self.machine_center = coord_system.work_to_machine(self.center)
-
- # Convert to robot coordinates
- self.robot_start = coord_system.gcode_to_robot_coords(self.machine_start)
- self.robot_end = coord_system.gcode_to_robot_coords(self.machine_end)
- self.robot_center = coord_system.gcode_to_robot_coords(self.machine_center)
-
- # Get feed rate from state
- self.feed_rate = state.feed_rate
-
- def to_robot_command(self) -> str:
- """
- Convert to SMOOTH_ARC_CENTER command for robot API
-
- G3 uses counter-clockwise arc interpolation with specified feed rate
- """
- # Format: SMOOTH_ARC_CENTER|end_x|end_y|end_z|end_rx|end_ry|end_rz|center_x|center_y|center_z|frame|start_x|start_y|start_z|start_rx|start_ry|start_rz|duration|speed|clockwise
-
- # Extract positions
- end_x, end_y, end_z = self.robot_end[0:3]
- end_rx, end_ry, end_rz = (
- self.robot_end[3:6] if len(self.robot_end) >= 6 else [0, 0, 0]
- )
-
- center_x, center_y, center_z = self.robot_center[0:3]
-
- start_x, start_y, start_z = self.robot_start[0:3]
- start_rx, start_ry, start_rz = (
- self.robot_start[3:6] if len(self.robot_start) >= 6 else [0, 0, 0]
- )
-
- # Convert feed rate to speed percentage
- max_speed_mm_min = cart.vel.linear.max * 1000 * 60
- min_speed_mm_min = cart.vel.linear.min * 1000 * 60
-
- speed_percentage = np.interp(
- self.feed_rate, [min_speed_mm_min, max_speed_mm_min], [0, 100]
- )
- speed_percentage = np.clip(speed_percentage, 0, 100)
-
- # Build command string
- end_str = f"{end_x:.3f}|{end_y:.3f}|{end_z:.3f}|{end_rx:.3f}|{end_ry:.3f}|{end_rz:.3f}"
- center_str = f"{center_x:.3f}|{center_y:.3f}|{center_z:.3f}"
- start_str = f"{start_x:.3f}|{start_y:.3f}|{start_z:.3f}|{start_rx:.3f}|{start_ry:.3f}|{start_rz:.3f}"
-
- # Use speed-based movement
- duration = "None"
- frame = "0" # World frame
- clockwise = "False" # G3 is counter-clockwise
-
- command = f"SMOOTH_ARC_CENTER|{end_str}|{center_str}|{frame}|{start_str}|{duration}|{speed_percentage:.1f}|{clockwise}"
- return command
-
-
-class G4Command(GcodeCommand):
- """G4 - Dwell command"""
-
- def __init__(self, dwell_time: float):
- """
- Initialize G4 dwell command
-
- Args:
- dwell_time: Dwell time in seconds
- """
- super().__init__()
- # Validate and clamp dwell time
- if dwell_time < 0.0:
- self.dwell_time = 0.0
- elif dwell_time > 300.0: # Max 5 minutes
- self.dwell_time = 300.0
- else:
- self.dwell_time = dwell_time
-
- def to_robot_command(self) -> str:
- """
- Convert to Delay command for robot API
- """
- # Format: DELAY|seconds
- command = f"DELAY|{self.dwell_time:.3f}"
- return command
-
-
-class G28Command(GcodeCommand):
- """G28 - Return to home command"""
-
- def __init__(self):
- """Initialize G28 home command"""
- super().__init__()
-
- def to_robot_command(self) -> str:
- """
- Convert to Home command for robot API
- """
- # Format: HOME
- command = "HOME"
- return command
-
-
-class M3Command(GcodeCommand):
- """M3 - Spindle/Gripper on CW (close gripper)"""
-
- def __init__(self, gripper_port: int = 1):
- """Initialize M3 gripper close command"""
- super().__init__()
- # Validate gripper port
- if gripper_port not in [1, 2]:
- self.is_valid = False
- self.error_message = f"Invalid gripper port {gripper_port}. Must be 1 or 2"
- self.gripper_port = 1 # Default to port 1
- else:
- self.gripper_port = gripper_port
-
- def to_robot_command(self) -> str:
- """
- Convert to Gripper command for robot API
- """
- # Format: PNEUMATICGRIPPER|action|port
- # M3 maps to gripper close
- command = f"PNEUMATICGRIPPER|close|{self.gripper_port}"
- return command
-
-
-class M5Command(GcodeCommand):
- """M5 - Spindle/Gripper off (open gripper)"""
-
- def __init__(self, gripper_port: int = 1):
- """Initialize M5 gripper open command"""
- super().__init__()
- # Validate gripper port
- if gripper_port not in [1, 2]:
- self.is_valid = False
- self.error_message = f"Invalid gripper port {gripper_port}. Must be 1 or 2"
- self.gripper_port = 1 # Default to port 1
- else:
- self.gripper_port = gripper_port
-
- def to_robot_command(self) -> str:
- """
- Convert to Gripper command for robot API
- """
- # Format: PNEUMATICGRIPPER|action|port
- # M5 maps to gripper open
- command = f"PNEUMATICGRIPPER|open|{self.gripper_port}"
- return command
-
-
-class M0Command(GcodeCommand):
- """M0 - Program stop"""
-
- def __init__(self):
- """Initialize M0 stop command"""
- super().__init__()
- # This command will need special handling in the interpreter
- self.requires_resume = True
-
- def to_robot_command(self) -> str:
- """
- M0 doesn't directly map to a robot command
- It's handled by the interpreter
- """
- return ""
-
-
-class M1Command(GcodeCommand):
- """M1 - Optional program stop"""
-
- def __init__(self):
- """Initialize M1 optional stop command"""
- super().__init__()
- # This command will need special handling in the interpreter
- # It only stops if optional_stop is enabled
- self.requires_resume = True
- self.is_optional = True
-
- def to_robot_command(self) -> str:
- """
- M1 doesn't directly map to a robot command
- It's handled by the interpreter based on optional_stop setting
- """
- return ""
-
-
-class M4Command(GcodeCommand):
- """M4 - Spindle/Gripper on CCW (alternative gripper action)"""
-
- def __init__(self, gripper_port: int = 1):
- """Initialize M4 gripper CCW command"""
- super().__init__()
- # Validate gripper port
- if gripper_port not in [1, 2]:
- self.is_valid = False
- self.error_message = f"Invalid gripper port {gripper_port}. Must be 1 or 2"
- self.gripper_port = 1 # Default to port 1
- else:
- self.gripper_port = gripper_port
-
- def to_robot_command(self) -> str:
- """
- Convert to Gripper command for robot API
-
- Note: M4 typically means counter-clockwise spindle rotation.
- For a gripper, this could map to a different action or be unsupported.
- Currently mapping to gripper toggle or alternative action.
- """
- # For PAROL6, M4 might not have a direct gripper equivalent
- # Could be used for electric gripper with different mode
- # For now, we'll treat it similar to M3 but document the difference
- command = f"PNEUMATICGRIPPER|close|{self.gripper_port}"
- return command
-
-
-class M30Command(GcodeCommand):
- """M30 - Program end"""
-
- def __init__(self):
- """Initialize M30 end command"""
- super().__init__()
- self.is_program_end = True
-
- def to_robot_command(self) -> str:
- """
- M30 doesn't directly map to a robot command
- It signals the end of the program
- """
- return ""
-
-
-def create_command_from_token(
- token: GcodeToken, state: GcodeState, coord_system: WorkCoordinateSystem
-) -> GcodeCommand | None:
- """
- Create a command object from a GCODE token
-
- Args:
- token: GcodeToken object
- state: Current GCODE state
- coord_system: Work coordinate system
-
- Returns:
- GcodeCommand object or None
- """
- if token.code_type == "G":
- code = int(token.code_number)
-
- if code == 0: # Rapid positioning
- # Calculate target position
- target = state.calculate_target_position(token.parameters)
- return G0Command(target, state, coord_system)
-
- elif code == 1: # Linear interpolation
- # Calculate target position
- target = state.calculate_target_position(token.parameters)
- return G1Command(target, state, coord_system)
-
- elif code == 2: # Clockwise circular interpolation
- # Calculate target position
- target = state.calculate_target_position(token.parameters)
- # Extract arc parameters (I, J, K or R)
- arc_params = {}
- for key in ["I", "J", "K", "R"]:
- if key in token.parameters:
- arc_params[key] = token.parameters[key]
-
- if not arc_params:
- # No arc parameters provided, treat as linear move
- return G1Command(target, state, coord_system)
-
- return G2Command(target, arc_params, state, coord_system)
-
- elif code == 3: # Counter-clockwise circular interpolation
- # Calculate target position
- target = state.calculate_target_position(token.parameters)
- # Extract arc parameters (I, J, K or R)
- arc_params = {}
- for key in ["I", "J", "K", "R"]:
- if key in token.parameters:
- arc_params[key] = token.parameters[key]
-
- if not arc_params:
- # No arc parameters provided, treat as linear move
- return G1Command(target, state, coord_system)
-
- return G3Command(target, arc_params, state, coord_system)
-
- elif code == 4: # Dwell
- # Get dwell time from P (milliseconds) or S (seconds)
- if "P" in token.parameters:
- dwell_time = token.parameters["P"] / 1000.0 # Convert ms to seconds
- elif "S" in token.parameters:
- dwell_time = token.parameters["S"]
- else:
- dwell_time = 0
- return G4Command(dwell_time)
-
- elif code == 28: # Home
- return G28Command()
-
- # Modal commands that change state but don't generate movement
- elif code in [17, 18, 19, 20, 21, 54, 55, 56, 57, 58, 59, 90, 91]:
- # These are handled by state updates
- return None
-
- elif token.code_type == "M":
- code = int(token.code_number)
-
- if code == 0: # Program stop
- return M0Command()
-
- elif code == 1: # Optional program stop
- return M1Command()
-
- elif code == 3: # Gripper close
- # Check for P parameter for port number (default 1)
- port = int(token.parameters.get("P", 1))
- return M3Command(gripper_port=port)
-
- elif code == 4: # Gripper CCW / alternative action
- # Check for P parameter for port number (default 1)
- port = int(token.parameters.get("P", 1))
- return M4Command(gripper_port=port)
-
- elif code == 5: # Gripper open
- # Check for P parameter for port number (default 1)
- port = int(token.parameters.get("P", 1))
- return M5Command(gripper_port=port)
-
- elif code == 30: # Program end
- return M30Command()
-
- elif token.code_type in ["F", "S", "T", "COMMENT"]:
- # These don't generate commands, just update state
- return None
-
- return None
diff --git a/parol6/gcode/coordinates.py b/parol6/gcode/coordinates.py
deleted file mode 100644
index 2be00ad..0000000
--- a/parol6/gcode/coordinates.py
+++ /dev/null
@@ -1,336 +0,0 @@
-"""
-Work Coordinate System Implementation for GCODE
-
-Manages G54-G59 work coordinate systems and transformations between
-work coordinates, machine coordinates, and robot coordinates.
-"""
-
-import json
-import os
-
-
-class WorkCoordinateSystem:
- """Manages work coordinate systems and transformations"""
-
- def __init__(self, config_file: str | None = None):
- """
- Initialize work coordinate system
-
- Args:
- config_file: Path to JSON file for persistent storage
- """
- self.config_file = config_file or os.path.join(
- os.path.dirname(__file__), "work_coordinates.json"
- )
-
- # Initialize default work coordinate offsets (in mm)
- self.offsets = {
- "G54": {"X": 0.0, "Y": 0.0, "Z": 0.0, "A": 0.0, "B": 0.0, "C": 0.0},
- "G55": {"X": 0.0, "Y": 0.0, "Z": 0.0, "A": 0.0, "B": 0.0, "C": 0.0},
- "G56": {"X": 0.0, "Y": 0.0, "Z": 0.0, "A": 0.0, "B": 0.0, "C": 0.0},
- "G57": {"X": 0.0, "Y": 0.0, "Z": 0.0, "A": 0.0, "B": 0.0, "C": 0.0},
- "G58": {"X": 0.0, "Y": 0.0, "Z": 0.0, "A": 0.0, "B": 0.0, "C": 0.0},
- "G59": {"X": 0.0, "Y": 0.0, "Z": 0.0, "A": 0.0, "B": 0.0, "C": 0.0},
- }
-
- # Tool offsets
- self.tool_offsets = {
- 0: {"Z": 0.0}, # No tool
- 1: {"Z": 0.0}, # Tool 1
- 2: {"Z": 0.0}, # Tool 2
- # Add more tools as needed
- }
-
- # Current active coordinate system
- self.active_system = "G54"
-
- # Current tool number
- self.active_tool = 0
-
- # Load saved offsets if they exist
- self.load_offsets()
-
- def set_offset(self, coord_system: str, axis: str, value: float) -> bool:
- """
- Set work coordinate offset for a specific axis
-
- Args:
- coord_system: Work coordinate system (G54-G59)
- axis: Axis name (X, Y, Z, A, B, C)
- value: Offset value in mm
-
- Returns:
- True if successful, False otherwise
- """
- if coord_system not in self.offsets:
- return False
-
- if axis not in self.offsets[coord_system]:
- return False
-
- self.offsets[coord_system][axis] = value
- self.save_offsets()
- return True
-
- def get_offset(self, coord_system: str | None = None) -> dict[str, float]:
- """
- Get work coordinate offset
-
- Args:
- coord_system: Work coordinate system (G54-G59) or None for active
-
- Returns:
- Dictionary of axis offsets
- """
- system = coord_system or self.active_system
- return self.offsets.get(system, {}).copy()
-
- def set_active_system(self, coord_system: str) -> bool:
- """
- Set the active work coordinate system
-
- Args:
- coord_system: Work coordinate system (G54-G59)
-
- Returns:
- True if successful, False otherwise
- """
- if coord_system in self.offsets:
- self.active_system = coord_system
- return True
- return False
-
- def set_tool_offset(self, tool_number: int, z_offset: float) -> None:
- """
- Set tool length offset
-
- Args:
- tool_number: Tool number
- z_offset: Z-axis offset in mm
- """
- if tool_number not in self.tool_offsets:
- self.tool_offsets[tool_number] = {}
- self.tool_offsets[tool_number]["Z"] = z_offset
- self.save_offsets()
-
- def get_tool_offset(self, tool_number: int | None = None) -> float:
- """
- Get tool length offset
-
- Args:
- tool_number: Tool number or None for active tool
-
- Returns:
- Z-axis offset in mm
- """
- tool = tool_number if tool_number is not None else self.active_tool
- return self.tool_offsets.get(tool, {}).get("Z", 0.0)
-
- def work_to_machine(
- self,
- work_pos: dict[str, float],
- coord_system: str | None = None,
- apply_tool_offset: bool = True,
- ) -> dict[str, float]:
- """
- Convert work coordinates to machine coordinates
-
- Args:
- work_pos: Position in work coordinates
- coord_system: Work coordinate system or None for active
- apply_tool_offset: Whether to apply tool offset
-
- Returns:
- Position in machine coordinates
- """
- system = coord_system or self.active_system
- offset = self.get_offset(system)
- machine_pos = {}
-
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in work_pos:
- machine_pos[axis] = work_pos[axis] + offset.get(axis, 0.0)
-
- # Apply tool offset to Z axis
- if axis == "Z" and apply_tool_offset:
- machine_pos[axis] += self.get_tool_offset()
-
- return machine_pos
-
- def machine_to_work(
- self,
- machine_pos: dict[str, float],
- coord_system: str | None = None,
- apply_tool_offset: bool = True,
- ) -> dict[str, float]:
- """
- Convert machine coordinates to work coordinates
-
- Args:
- machine_pos: Position in machine coordinates
- coord_system: Work coordinate system or None for active
- apply_tool_offset: Whether to apply tool offset
-
- Returns:
- Position in work coordinates
- """
- system = coord_system or self.active_system
- offset = self.get_offset(system)
- work_pos = {}
-
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in machine_pos:
- work_pos[axis] = machine_pos[axis] - offset.get(axis, 0.0)
-
- # Remove tool offset from Z axis
- if axis == "Z" and apply_tool_offset:
- work_pos[axis] -= self.get_tool_offset()
-
- return work_pos
-
- def apply_incremental(
- self, current_pos: dict[str, float], incremental: dict[str, float]
- ) -> dict[str, float]:
- """
- Apply incremental movement to current position
-
- Args:
- current_pos: Current position
- incremental: Incremental movement values
-
- Returns:
- New position after incremental movement
- """
- new_pos = current_pos.copy()
-
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in incremental:
- new_pos[axis] = current_pos.get(axis, 0.0) + incremental[axis]
-
- return new_pos
-
- def robot_to_gcode_coords(self, robot_pos: list[float]) -> dict[str, float]:
- """
- Convert robot Cartesian position to GCODE coordinates
-
- Args:
- robot_pos: Robot position [X, Y, Z, RX, RY, RZ] in mm and degrees
-
- Returns:
- GCODE coordinates {X, Y, Z, A, B, C}
- """
- # For PAROL6, the mapping is straightforward
- # X, Y, Z are Cartesian positions
- # A, B, C are rotational axes (RX, RY, RZ)
-
- gcode_coords = {}
-
- if len(robot_pos) >= 3:
- gcode_coords["X"] = robot_pos[0]
- gcode_coords["Y"] = robot_pos[1]
- gcode_coords["Z"] = robot_pos[2]
-
- if len(robot_pos) >= 6:
- gcode_coords["A"] = robot_pos[3] # RX
- gcode_coords["B"] = robot_pos[4] # RY
- gcode_coords["C"] = robot_pos[5] # RZ
-
- return gcode_coords
-
- def gcode_to_robot_coords(self, gcode_pos: dict[str, float]) -> list[float]:
- """
- Convert GCODE coordinates to robot Cartesian position
-
- Args:
- gcode_pos: GCODE coordinates {X, Y, Z, A, B, C}
-
- Returns:
- Robot position [X, Y, Z, RX, RY, RZ] in mm and degrees
- """
- robot_pos = [
- gcode_pos.get("X", 0.0),
- gcode_pos.get("Y", 0.0),
- gcode_pos.get("Z", 0.0),
- gcode_pos.get("A", 0.0), # RX
- gcode_pos.get("B", 0.0), # RY
- gcode_pos.get("C", 0.0), # RZ
- ]
-
- return robot_pos
-
- def save_offsets(self) -> None:
- """Save offsets to JSON file"""
- data = {
- "work_offsets": self.offsets,
- "tool_offsets": self.tool_offsets,
- "active_system": self.active_system,
- "active_tool": self.active_tool,
- }
-
- os.makedirs(os.path.dirname(self.config_file), exist_ok=True)
- with open(self.config_file, "w") as f:
- json.dump(data, f, indent=2)
-
- def load_offsets(self) -> None:
- """Load offsets from JSON file"""
- if os.path.exists(self.config_file):
- try:
- with open(self.config_file) as f:
- data = json.load(f)
-
- self.offsets = data.get("work_offsets", self.offsets)
- self.tool_offsets = data.get("tool_offsets", self.tool_offsets)
- self.active_system = data.get("active_system", "G54")
- self.active_tool = data.get("active_tool", 0)
- except Exception as e:
- print(f"Error loading work coordinate offsets: {e}")
-
- def reset_offset(self, coord_system: str | None = None) -> None:
- """
- Reset work coordinate offset to zero
-
- Args:
- coord_system: System to reset, or None to reset all
- """
- if coord_system:
- if coord_system in self.offsets:
- self.offsets[coord_system] = {
- "X": 0.0,
- "Y": 0.0,
- "Z": 0.0,
- "A": 0.0,
- "B": 0.0,
- "C": 0.0,
- }
- else:
- # Reset all systems
- for system in self.offsets:
- self.offsets[system] = {
- "X": 0.0,
- "Y": 0.0,
- "Z": 0.0,
- "A": 0.0,
- "B": 0.0,
- "C": 0.0,
- }
-
- self.save_offsets()
-
- def set_current_as_zero(
- self, machine_pos: dict[str, float], coord_system: str | None = None
- ) -> None:
- """
- Set current machine position as zero in work coordinates
-
- Args:
- machine_pos: Current machine position
- coord_system: Work coordinate system or None for active
- """
- system = coord_system or self.active_system
-
- # Set offsets such that current machine position becomes 0,0,0
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in machine_pos:
- self.offsets[system][axis] = machine_pos[axis]
-
- self.save_offsets()
diff --git a/parol6/gcode/interpreter.py b/parol6/gcode/interpreter.py
deleted file mode 100644
index befb078..0000000
--- a/parol6/gcode/interpreter.py
+++ /dev/null
@@ -1,369 +0,0 @@
-"""
-Main GCODE Interpreter for PAROL6 Robot
-
-Processes GCODE programs and converts them to robot commands.
-Manages state, coordinates, and command execution.
-"""
-
-import os
-
-import numpy as np
-
-from .commands import create_command_from_token
-from .coordinates import WorkCoordinateSystem
-from .parser import GcodeParser
-from .state import GcodeState
-
-
-class GcodeInterpreter:
- """Main GCODE interpreter that processes GCODE into robot commands"""
-
- def __init__(self, state_file: str | None = None, coord_file: str | None = None):
- """
- Initialize GCODE interpreter
-
- Args:
- state_file: Path to state persistence file
- coord_file: Path to coordinate system persistence file
- """
- # Initialize components
- self.parser = GcodeParser()
-
- # Use default paths if not provided
- if state_file is None:
- state_file = os.path.join(os.path.dirname(__file__), "gcode_state.json")
- if coord_file is None:
- coord_file = os.path.join(
- os.path.dirname(__file__), "work_coordinates.json"
- )
-
- self.state = GcodeState(state_file)
- self.coord_system = WorkCoordinateSystem(coord_file)
-
- # Program execution state
- self.program_lines: list[str] = []
- self.current_line: int = 0
- self.is_running: bool = False
- self.is_paused: bool = False
- self.single_block: bool = False
-
- # Command queue
- self.command_queue: list[str] = []
-
- # Error tracking
- self.errors: list[str] = []
-
- def parse_line(self, gcode_line: str) -> list[str]:
- """
- Parse a single GCODE line and return robot commands
-
- Args:
- gcode_line: Single line of GCODE
-
- Returns:
- List of robot command strings
- """
- robot_commands = []
-
- # Parse the line into tokens
- tokens = self.parser.parse_line(gcode_line)
-
- for token in tokens:
- # Validate token
- is_valid, error_msg = self.parser.validate_token(token)
- if not is_valid:
- self.errors.append(error_msg or "Invalid token")
- if self.state.program_running:
- # Stop on error during program execution
- self.stop_program()
- continue
-
- # Update state with modal commands
- self.state.update_from_token(token)
-
- # Handle work coordinate changes
- if token.code_type == "G" and int(token.code_number) in [
- 54,
- 55,
- 56,
- 57,
- 58,
- 59,
- ]:
- self.coord_system.set_active_system(f"G{int(token.code_number)}")
-
- # Create command object
- command = create_command_from_token(token, self.state, self.coord_system)
-
- if command:
- # Get robot command string
- robot_cmd = command.to_robot_command()
- if robot_cmd:
- robot_commands.append(robot_cmd)
-
- # Update position after movement commands
- if hasattr(command, "target_position"):
- self.state.update_position(command.target_position)
-
- # Handle special commands
- if hasattr(command, "is_program_end") and command.is_program_end:
- self.stop_program()
- elif hasattr(command, "requires_resume") and command.requires_resume:
- # Check if this is an optional stop (M1)
- if hasattr(command, "is_optional") and command.is_optional:
- # Only pause if optional_stop is enabled
- if self.state.optional_stop:
- self.pause_program()
- else:
- # M0 always pauses
- self.pause_program()
-
- return robot_commands
-
- def parse_program(self, program: str | list[str]) -> list[str]:
- """
- Parse a complete GCODE program
-
- Args:
- program: GCODE program as string or list of lines
-
- Returns:
- List of all robot commands
- """
- if isinstance(program, str):
- lines = program.split("\n")
- else:
- lines = program
-
- all_commands = []
- self.errors = []
-
- for line in lines:
- commands = self.parse_line(line)
- all_commands.extend(commands)
-
- # Stop if errors encountered
- if self.errors and not self.state.program_running:
- break
-
- return all_commands
-
- def load_program(self, program: str | list[str]) -> bool:
- """
- Load a GCODE program for execution
-
- Args:
- program: GCODE program as string or list of lines
-
- Returns:
- True if program loaded successfully
- """
- if isinstance(program, str):
- self.program_lines = program.split("\n")
- else:
- self.program_lines = program
-
- self.current_line = 0
- self.errors = []
- self.command_queue = []
-
- # Validate program with proper line number tracking
- for line_num, line in enumerate(self.program_lines, 1):
- tokens = self.parser.parse_line(line)
- for token in tokens:
- is_valid, error_msg = self.parser.validate_token(token)
- if not is_valid:
- self.errors.append(
- f"Line {line_num}: {error_msg or 'Invalid token'}"
- )
-
- return len(self.errors) == 0
-
- def load_file(self, filepath: str) -> bool:
- """
- Load GCODE program from file
-
- Args:
- filepath: Path to GCODE file
-
- Returns:
- True if file loaded successfully
- """
- try:
- with open(filepath) as f:
- program = f.read()
- return self.load_program(program)
- except Exception as e:
- self.errors.append(f"Error loading file: {e}")
- return False
-
- def start_program(self) -> bool:
- """
- Start or resume program execution
-
- Returns:
- True if program started successfully
- """
- if not self.program_lines:
- self.errors.append("No program loaded")
- return False
-
- self.is_running = True
- self.is_paused = False
- self.state.program_running = True
- return True
-
- def pause_program(self) -> None:
- """Pause program execution"""
- self.is_paused = True
- self.state.program_running = False
- # Note: Command queue is not cleared so position in program is maintained
- # Commands already in queue can be optionally processed or discarded by the caller
-
- def stop_program(self) -> None:
- """Stop program execution and reset"""
- self.is_running = False
- self.is_paused = False
- self.current_line = 0
- self.state.program_running = False
- self.command_queue = []
-
- def set_optional_stop(self, enabled: bool) -> None:
- """
- Enable or disable optional stop (M1)
-
- Args:
- enabled: True to enable optional stop, False to disable
- """
- self.state.optional_stop = enabled
-
- def get_next_command(self) -> str | None:
- """
- Get next robot command from the program
-
- Returns:
- Robot command string or None if no more commands
- """
- # Return queued commands first
- if self.command_queue:
- return self.command_queue.pop(0)
-
- # Check if program is running
- if not self.is_running or self.is_paused:
- return None
-
- # Process lines until we get a command or reach end
- while self.current_line < len(self.program_lines):
- line = self.program_lines[self.current_line]
- self.current_line += 1
-
- # Parse line and get commands
- commands = self.parse_line(line)
-
- if commands:
- # Add to queue
- self.command_queue.extend(commands)
-
- # Return first command
- if self.command_queue:
- command = self.command_queue.pop(0)
-
- # Check for single block mode
- if self.single_block:
- self.pause_program()
-
- return command
-
- # Check for errors
- if self.errors:
- self.stop_program()
- return None
-
- # End of program
- self.stop_program()
- return None
-
- def set_work_offset(self, coord_system: str, axis: str, value: float) -> bool:
- """
- Set work coordinate offset
-
- Args:
- coord_system: Work coordinate system (G54-G59)
- axis: Axis (X, Y, Z, A, B, C)
- value: Offset value in mm
-
- Returns:
- True if successful
- """
- # Validate coordinate system
- if coord_system not in ["G54", "G55", "G56", "G57", "G58", "G59"]:
- self.errors.append(f"Invalid coordinate system: {coord_system}")
- return False
-
- # Validate axis
- if axis not in ["X", "Y", "Z", "A", "B", "C"]:
- self.errors.append(f"Invalid axis: {axis}")
- return False
-
- return self.coord_system.set_offset(coord_system, axis, value)
-
- def set_current_as_zero(self, machine_position: list[float]) -> None:
- """
- Set current position as zero in active work coordinate system
-
- Args:
- machine_position: Current machine position [X, Y, Z, RX, RY, RZ]
- """
- # Convert to dictionary
- pos_dict = {
- "X": machine_position[0],
- "Y": machine_position[1],
- "Z": machine_position[2],
- "A": machine_position[3] if len(machine_position) > 3 else 0,
- "B": machine_position[4] if len(machine_position) > 4 else 0,
- "C": machine_position[5] if len(machine_position) > 5 else 0,
- }
-
- self.coord_system.set_current_as_zero(pos_dict)
-
- def get_status(self) -> dict:
- """
- Get interpreter status
-
- Returns:
- Dictionary with status information
- """
- return {
- "state": self.state.get_status(),
- "coord_system": self.coord_system.active_system,
- "program_running": self.is_running,
- "program_paused": self.is_paused,
- "current_line": self.current_line,
- "total_lines": len(self.program_lines),
- "errors": self.errors[-5:] if self.errors else [], # Last 5 errors
- }
-
- def reset(self) -> None:
- """Reset interpreter to initial state"""
- self.state.reset()
- self.stop_program()
- self.errors = []
-
- def set_feed_override(self, percentage: float) -> None:
- """
- Set feed rate override percentage
-
- Args:
- percentage: Override percentage (0-200)
- """
- self.state.feed_override = np.clip(percentage, 0, 200)
-
- def set_single_block(self, enabled: bool) -> None:
- """
- Enable/disable single block mode
-
- Args:
- enabled: True to enable single block mode
- """
- self.single_block = enabled
- self.state.single_block = enabled
diff --git a/parol6/gcode/parser.py b/parol6/gcode/parser.py
deleted file mode 100644
index b2316d1..0000000
--- a/parol6/gcode/parser.py
+++ /dev/null
@@ -1,307 +0,0 @@
-"""
-GCODE Parser for PAROL6 Robot
-
-Tokenizes and parses GCODE lines into structured data.
-Supports standard GCODE syntax including G-codes, M-codes, and parameters.
-"""
-
-import re
-from dataclasses import dataclass
-
-
-@dataclass
-class GcodeToken:
- """Represents a parsed GCODE token"""
-
- code_type: str # 'G', 'M', 'T', 'N', etc.
- code_number: float # The numeric value
- parameters: dict[str, float] # Associated parameters (X, Y, Z, F, etc.)
- comment: str | None = None
- line_number: int | None = None
- raw_line: str = ""
-
- def __str__(self):
- result = f"{self.code_type}{self.code_number:.10g}".rstrip("0").rstrip(".")
- for key, val in self.parameters.items():
- result += f" {key}{val:.10g}".rstrip("0").rstrip(".")
- if self.comment:
- result += f" ; {self.comment}"
- return result
-
-
-class GcodeParser:
- """GCODE parser that tokenizes and validates GCODE lines"""
-
- # Regex patterns for parsing
- COMMENT_PATTERN = re.compile(r"\((.*?)\)|;(.*)$")
- LINE_NUMBER_PATTERN = re.compile(r"^N(\d+)", re.IGNORECASE)
- CODE_PATTERN = re.compile(r"([GMT])(\d+(?:\.\d+)?)", re.IGNORECASE)
- PARAM_PATTERN = re.compile(r"([XYZABCIJKRFSPQD])([+-]?\d*\.?\d+)", re.IGNORECASE)
-
- # Valid GCODE commands we support
- SUPPORTED_G_CODES = {
- 0: "Rapid positioning",
- 1: "Linear interpolation",
- 2: "Clockwise arc",
- 3: "Counter-clockwise arc",
- 4: "Dwell",
- 17: "XY plane selection",
- 18: "XZ plane selection",
- 19: "YZ plane selection",
- 20: "Inch units",
- 21: "Millimeter units",
- 28: "Return to home",
- 54: "Work coordinate 1",
- 55: "Work coordinate 2",
- 56: "Work coordinate 3",
- 57: "Work coordinate 4",
- 58: "Work coordinate 5",
- 59: "Work coordinate 6",
- 90: "Absolute positioning",
- 91: "Incremental positioning",
- }
-
- SUPPORTED_M_CODES = {
- 0: "Program stop",
- 1: "Optional stop",
- 3: "Spindle/Gripper on CW",
- 4: "Spindle/Gripper on CCW",
- 5: "Spindle/Gripper off",
- 30: "Program end",
- }
-
- def __init__(self):
- self.line_count = 0
- self.errors = []
-
- def parse_line(self, line: str) -> list[GcodeToken]:
- """
- Parse a single line of GCODE into tokens
-
- Args:
- line: Raw GCODE line
-
- Returns:
- List of GcodeToken objects parsed from the line
- """
- self.line_count += 1
- tokens: list[GcodeToken] = []
-
- # Store original line
- original_line = line.strip()
- if not original_line:
- return tokens
-
- # Extract and remove comments
- comment = None
- comment_match = self.COMMENT_PATTERN.search(line)
- if comment_match:
- comment = comment_match.group(1) or comment_match.group(2)
- line = self.COMMENT_PATTERN.sub("", line)
-
- # Extract line number if present
- line_number = None
- line_num_match = self.LINE_NUMBER_PATTERN.match(line)
- if line_num_match:
- line_number = int(line_num_match.group(1))
- line = line[line_num_match.end() :]
-
- # Convert to uppercase for parsing
- line = line.upper().strip()
-
- # Parse all parameters first
- parameters: dict[str, float] = {}
- for match in self.PARAM_PATTERN.finditer(line):
- param_letter = match.group(1)
- try:
- param_value = float(match.group(2))
- # Validate feed rate
- if param_letter == "F" and param_value <= 0:
- self.errors.append(
- f"Line {self.line_count}: Invalid feed rate: {param_value}"
- )
- continue
- # Validate spindle speed
- if param_letter == "S" and param_value < 0:
- self.errors.append(
- f"Line {self.line_count}: Invalid spindle speed: {param_value}"
- )
- continue
- parameters[param_letter] = param_value
- except ValueError:
- self.errors.append(
- f"Line {self.line_count}: Invalid numeric value for {param_letter}: {match.group(2)}"
- )
-
- # Parse G and M codes
- codes_found: list[tuple[str, float]] = []
- for match in self.CODE_PATTERN.finditer(line):
- code_type = match.group(1)
- code_number = float(match.group(2))
- codes_found.append((code_type, code_number))
-
- # Create tokens for each code found
- if codes_found:
- for code_type, code_number in codes_found:
- # For motion commands, include coordinate parameters
- if code_type == "G" and code_number in [0, 1, 2, 3]:
- motion_params = {
- k: v
- for k, v in parameters.items()
- if k in ["X", "Y", "Z", "A", "B", "C", "I", "J", "K", "R", "F"]
- }
- token = GcodeToken(
- code_type=code_type,
- code_number=code_number,
- parameters=motion_params,
- comment=comment,
- line_number=line_number,
- raw_line=original_line,
- )
- else:
- # Other codes get remaining parameters
- token = GcodeToken(
- code_type=code_type,
- code_number=code_number,
- parameters={
- k: v
- for k, v in parameters.items()
- if k
- not in [
- "X",
- "Y",
- "Z",
- "A",
- "B",
- "C",
- "I",
- "J",
- "K",
- "R",
- "F",
- ]
- },
- comment=comment,
- line_number=line_number,
- raw_line=original_line,
- )
- tokens.append(token)
-
- # Handle standalone feed rate
- elif "F" in parameters and not codes_found:
- token = GcodeToken(
- code_type="F",
- code_number=parameters["F"],
- parameters={},
- comment=comment,
- line_number=line_number,
- raw_line=original_line,
- )
- tokens.append(token)
-
- # Handle comment-only lines
- elif comment:
- token = GcodeToken(
- code_type="COMMENT",
- code_number=0,
- parameters={},
- comment=comment,
- line_number=line_number,
- raw_line=original_line,
- )
- tokens.append(token)
-
- return tokens
-
- def validate_token(self, token: GcodeToken) -> tuple[bool, str | None]:
- """
- Validate a GCODE token
-
- Args:
- token: GcodeToken to validate
-
- Returns:
- Tuple of (is_valid, error_message)
- """
- if token.code_type == "G":
- if token.code_number not in self.SUPPORTED_G_CODES:
- return False, f"Unsupported G-code: G{token.code_number}"
-
- # Validate required parameters for motion commands
- if token.code_number in [0, 1]: # Linear motion
- if not any(
- k in token.parameters for k in ["X", "Y", "Z", "A", "B", "C"]
- ):
- return (
- False,
- f"G{token.code_number} requires at least one coordinate",
- )
-
- elif token.code_number in [2, 3]: # Arc motion
- if not any(k in token.parameters for k in ["X", "Y", "Z"]):
- return False, f"G{token.code_number} requires endpoint coordinates"
- if not (
- ("I" in token.parameters or "J" in token.parameters)
- or "R" in token.parameters
- ):
- return (
- False,
- f"G{token.code_number} requires either IJK offsets or R radius",
- )
-
- elif token.code_number == 4: # Dwell
- if "P" not in token.parameters and "S" not in token.parameters:
- return (
- False,
- "G4 dwell requires P (milliseconds) or S (seconds) parameter",
- )
-
- elif token.code_type == "M":
- if token.code_number not in self.SUPPORTED_M_CODES:
- return False, f"Unsupported M-code: M{token.code_number}"
-
- elif token.code_type in ["F", "T", "S", "COMMENT"]:
- # These are always valid if parsed
- pass
-
- else:
- return False, f"Unknown code type: {token.code_type}"
-
- return True, None
-
- def parse_program(self, program: str | list[str]) -> list[GcodeToken]:
- """
- Parse a complete GCODE program
-
- Args:
- program: Either a string with newlines or a list of lines
-
- Returns:
- List of all tokens in the program
- """
- if isinstance(program, str):
- lines = program.split("\n")
- else:
- lines = program
-
- all_tokens = []
- self.errors = []
- self.line_count = 0
-
- for line in lines:
- try:
- tokens = self.parse_line(line)
- for token in tokens:
- is_valid, error_msg = self.validate_token(token)
- if not is_valid:
- self.errors.append(f"Line {self.line_count}: {error_msg}")
- else:
- all_tokens.append(token)
- except Exception as e:
- self.errors.append(f"Line {self.line_count}: Parse error - {e!s}")
-
- return all_tokens
-
- def get_errors(self) -> list[str]:
- """Get list of parsing errors"""
- return self.errors
diff --git a/parol6/gcode/state.py b/parol6/gcode/state.py
deleted file mode 100644
index a31be41..0000000
--- a/parol6/gcode/state.py
+++ /dev/null
@@ -1,346 +0,0 @@
-"""
-GCODE State Management for PAROL6 Robot
-
-Tracks modal states during GCODE execution including:
-- Coordinate systems (G54-G59)
-- Positioning modes (G90/G91)
-- Units (G20/G21)
-- Feed rates and spindle speeds
-- Active plane (G17/G18/G19)
-"""
-
-import json
-import os
-from dataclasses import dataclass, field
-
-from parol6.gcode.parser import GcodeToken
-
-
-@dataclass
-class GcodeState:
- """Tracks modal GCODE state during execution"""
-
- # Motion modes
- motion_mode: str = "G0" # G0, G1, G2, G3
- positioning_mode: str = "G90" # G90 (absolute) or G91 (incremental)
-
- # Coordinate system
- work_coordinate: str = "G54" # G54-G59
- work_offsets: dict[str, dict[str, float]] = field(
- default_factory=lambda: {
- "G54": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G55": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G56": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G57": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G58": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G59": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- }
- )
-
- # Current position (in work coordinates)
- current_position: dict[str, float] = field(
- default_factory=lambda: {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0}
- )
-
- # Machine position (absolute, no offsets)
- machine_position: dict[str, float] = field(
- default_factory=lambda: {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0}
- )
-
- # Units and scaling
- units: str = "G21" # G20 (inches) or G21 (mm)
- units_scale: float = 1.0 # Multiplier to convert to mm
-
- # Feed and speed
- feed_rate: float = 100.0 # mm/min
- spindle_speed: float = 0.0 # RPM
-
- # Plane selection for arcs
- plane: str = "G17" # G17 (XY), G18 (XZ), G19 (YZ)
-
- # Tool
- tool_number: int = 0
- tool_length_offset: float = 0.0
-
- # Program control
- program_running: bool = False
- single_block: bool = False
- optional_stop: bool = False # M1 optional stop enable
- feed_override: float = 100.0 # Percentage
-
- def __init__(self, state_file: str | None = None):
- """
- Initialize GCODE state, optionally loading from file
-
- Args:
- state_file: Path to JSON file for persistent state
- """
- # Initialize all dataclass fields with their defaults first
- self.motion_mode = "G0"
- self.positioning_mode = "G90"
- self.work_coordinate = "G54"
- self.work_offsets = {
- "G54": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G55": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G56": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G57": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G58": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- "G59": {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0},
- }
- self.current_position = {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0}
- self.machine_position = {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0}
- self.units = "G21"
- self.units_scale = 1.0
- self.feed_rate = 100.0
- self.spindle_speed = 0.0
- self.plane = "G17"
- self.tool_number = 0
- self.tool_length_offset = 0.0
- self.program_running = False
- self.single_block = False
- self.optional_stop = False
- self.feed_override = 100.0
-
- # Now handle the state file
- self.state_file = state_file
- if state_file and os.path.exists(state_file):
- self.load_state()
-
- def update_from_token(self, token: GcodeToken) -> None:
- """
- Update state based on a GCODE token
-
- Args:
- token: GcodeToken object
- """
- if token.code_type == "G":
- code = int(token.code_number)
-
- # Motion modes
- if code in [0, 1, 2, 3]:
- self.motion_mode = f"G{code}"
-
- # Plane selection
- elif code in [17, 18, 19]:
- self.plane = f"G{code}"
-
- # Units
- elif code == 20: # Inches
- self.units = "G20"
- self.units_scale = 25.4 # Convert inches to mm
- elif code == 21: # Millimeters
- self.units = "G21"
- self.units_scale = 1.0
-
- # Work coordinates
- elif code in [54, 55, 56, 57, 58, 59]:
- self.work_coordinate = f"G{code}"
-
- # Positioning mode
- elif code == 90:
- self.positioning_mode = "G90"
- elif code == 91:
- self.positioning_mode = "G91"
-
- elif token.code_type == "F":
- # Feed rate
- self.feed_rate = token.code_number * self.units_scale
-
- elif token.code_type == "S":
- # Spindle speed
- self.spindle_speed = token.code_number
-
- elif token.code_type == "T":
- # Tool number
- self.tool_number = int(token.code_number)
-
- def get_work_offset(self) -> dict[str, float]:
- """Get current work coordinate offset"""
- return self.work_offsets[self.work_coordinate]
-
- def set_work_offset(self, axis: str, value: float) -> None:
- """
- Set work coordinate offset for an axis
-
- Args:
- axis: Axis name (X, Y, Z, A, B, C)
- value: Offset value in mm
- """
- if axis in self.work_offsets[self.work_coordinate]:
- self.work_offsets[self.work_coordinate][axis] = value
- if self.state_file:
- self.save_state()
-
- def work_to_machine(self, work_coords: dict[str, float]) -> dict[str, float]:
- """
- Convert work coordinates to machine coordinates
-
- Args:
- work_coords: Dictionary of work coordinates
-
- Returns:
- Dictionary of machine coordinates
- """
- machine_coords = {}
- offset = self.get_work_offset()
-
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in work_coords:
- # Apply work offset and tool offset (for Z)
- machine_coords[axis] = work_coords[axis] + offset.get(axis, 0)
- if axis == "Z":
- machine_coords[axis] += self.tool_length_offset
-
- return machine_coords
-
- def machine_to_work(self, machine_coords: dict[str, float]) -> dict[str, float]:
- """
- Convert machine coordinates to work coordinates
-
- Args:
- machine_coords: Dictionary of machine coordinates
-
- Returns:
- Dictionary of work coordinates
- """
- work_coords = {}
- offset = self.get_work_offset()
-
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in machine_coords:
- # Remove work offset and tool offset (for Z)
- work_coords[axis] = machine_coords[axis] - offset.get(axis, 0)
- if axis == "Z":
- work_coords[axis] -= self.tool_length_offset
-
- return work_coords
-
- def calculate_target_position(
- self, parameters: dict[str, float]
- ) -> dict[str, float]:
- """
- Calculate target position based on positioning mode and parameters
-
- Args:
- parameters: Dictionary of axis values from GCODE
-
- Returns:
- Dictionary of target positions in work coordinates
- """
- target = self.current_position.copy()
-
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in parameters:
- value = parameters[axis] * self.units_scale
-
- if self.positioning_mode == "G90": # Absolute
- target[axis] = value
- else: # G91 - Incremental
- target[axis] = self.current_position[axis] + value
-
- return target
-
- def update_position(self, new_position: dict[str, float]) -> None:
- """
- Update current position
-
- Args:
- new_position: New position in work coordinates
- """
- self.current_position.update(new_position)
- # Update machine position
- machine_pos = self.work_to_machine(new_position)
- self.machine_position.update(machine_pos)
-
- def reset(self) -> None:
- """Reset state to defaults"""
- self.motion_mode = "G0"
- self.positioning_mode = "G90"
- self.work_coordinate = "G54"
- self.units = "G21"
- self.units_scale = 1.0
- self.feed_rate = 100.0
- self.spindle_speed = 0.0
- self.plane = "G17"
- self.tool_number = 0
- self.tool_length_offset = 0.0
- self.program_running = False
- self.single_block = False
- self.feed_override = 100.0
-
- # Keep work offsets but reset positions
- self.current_position = {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0}
- self.machine_position = {"X": 0, "Y": 0, "Z": 0, "A": 0, "B": 0, "C": 0}
-
- def save_state(self) -> None:
- """Save state to JSON file"""
- if self.state_file:
- # Save complete modal state
- state_dict = {
- "work_offsets": self.work_offsets,
- "units": self.units,
- "work_coordinate": self.work_coordinate,
- "tool_length_offset": self.tool_length_offset,
- "motion_mode": self.motion_mode,
- "positioning_mode": self.positioning_mode,
- "plane": self.plane,
- "feed_rate": self.feed_rate,
- "spindle_speed": self.spindle_speed,
- "current_position": self.current_position,
- "machine_position": self.machine_position,
- }
-
- # Create directory if needed (only if path has a directory component)
- dir_name = os.path.dirname(self.state_file)
- if dir_name:
- os.makedirs(dir_name, exist_ok=True)
- with open(self.state_file, "w") as f:
- json.dump(state_dict, f, indent=2)
-
- def load_state(self) -> None:
- """Load state from JSON file"""
- if self.state_file and os.path.exists(self.state_file):
- try:
- with open(self.state_file) as f:
- state_dict = json.load(f)
-
- # Load complete modal state
- self.work_offsets = state_dict.get("work_offsets", self.work_offsets)
- self.units = state_dict.get("units", self.units)
- self.work_coordinate = state_dict.get(
- "work_coordinate", self.work_coordinate
- )
- self.tool_length_offset = state_dict.get("tool_length_offset", 0.0)
- self.motion_mode = state_dict.get("motion_mode", self.motion_mode)
- self.positioning_mode = state_dict.get(
- "positioning_mode", self.positioning_mode
- )
- self.plane = state_dict.get("plane", self.plane)
- self.feed_rate = state_dict.get("feed_rate", self.feed_rate)
- self.spindle_speed = state_dict.get("spindle_speed", self.spindle_speed)
- if "current_position" in state_dict:
- self.current_position = state_dict["current_position"]
- if "machine_position" in state_dict:
- self.machine_position = state_dict["machine_position"]
-
- # Update units scale
- self.units_scale = 25.4 if self.units == "G20" else 1.0
- except Exception as e:
- print(f"Error loading state file: {e}")
-
- def get_status(self) -> dict:
- """Get current state as dictionary for status reporting"""
- return {
- "motion_mode": self.motion_mode,
- "positioning_mode": self.positioning_mode,
- "work_coordinate": self.work_coordinate,
- "units": self.units,
- "feed_rate": self.feed_rate,
- "spindle_speed": self.spindle_speed,
- "plane": self.plane,
- "tool_number": self.tool_number,
- "current_position": self.current_position.copy(),
- "machine_position": self.machine_position.copy(),
- "program_running": self.program_running,
- "optional_stop": self.optional_stop,
- }
diff --git a/parol6/gcode/utils.py b/parol6/gcode/utils.py
deleted file mode 100644
index a79cfd9..0000000
--- a/parol6/gcode/utils.py
+++ /dev/null
@@ -1,360 +0,0 @@
-"""
-Utility functions for GCODE processing
-
-Provides conversion functions, calculations, and helpers for GCODE implementation.
-"""
-
-import math
-
-import numpy as np
-
-
-def feed_rate_to_duration(distance: float, feed_rate: float) -> float:
- """
- Convert feed rate to duration for a given distance
-
- Args:
- distance: Distance to travel in mm
- feed_rate: Feed rate in mm/min
-
- Returns:
- Duration in seconds
- """
- if feed_rate <= 0:
- return 0
-
- # Convert mm/min to mm/s
- feed_rate_mm_s = feed_rate / 60.0
-
- # Calculate duration
- duration = distance / feed_rate_mm_s
-
- return duration
-
-
-def feed_rate_to_speed_percentage(
- feed_rate: float, min_speed: float = 120.0, max_speed: float = 3600.0
-) -> float:
- """
- Convert feed rate to speed percentage
-
- Args:
- feed_rate: Feed rate in mm/min
- min_speed: Minimum speed in mm/min (default 120 = 2 mm/s)
- max_speed: Maximum speed in mm/min (default 3600 = 60 mm/s)
-
- Returns:
- Speed percentage (0-100)
- """
- # Clamp feed rate to valid range
- feed_rate = float(np.clip(feed_rate, min_speed, max_speed))
-
- # Map to percentage
- speed_percentage = float(np.interp(feed_rate, [min_speed, max_speed], [0.0, 100.0]))
-
- return speed_percentage
-
-
-def speed_percentage_to_feed_rate(
- speed_percentage: float, min_speed: float = 120.0, max_speed: float = 3600.0
-) -> float:
- """
- Convert speed percentage to feed rate
-
- Args:
- speed_percentage: Speed percentage (0-100)
- min_speed: Minimum speed in mm/min
- max_speed: Maximum speed in mm/min
-
- Returns:
- Feed rate in mm/min
- """
- # Clamp percentage
- speed_percentage = float(np.clip(speed_percentage, 0.0, 100.0))
-
- # Map to feed rate
- feed_rate = float(np.interp(speed_percentage, [0.0, 100.0], [min_speed, max_speed]))
-
- return feed_rate
-
-
-def calculate_distance(start: dict[str, float], end: dict[str, float]) -> float:
- """
- Calculate Euclidean distance between two points
-
- Args:
- start: Starting position {X, Y, Z, ...}
- end: Ending position {X, Y, Z, ...}
-
- Returns:
- Distance in mm
- """
- distance: float = 0.0
- for axis in ["X", "Y", "Z"]:
- if axis in start and axis in end:
- distance += (end[axis] - start[axis]) ** 2
-
- return float(math.sqrt(distance))
-
-
-def ijk_to_center(
- start: dict[str, float], ijk: dict[str, float], plane: str = "G17"
-) -> dict[str, float]:
- """
- Convert IJK offsets to arc center point
-
- Args:
- start: Starting position
- ijk: IJK offset values (relative to start)
- plane: Active plane (G17=XY, G18=XZ, G19=YZ)
-
- Returns:
- Center point coordinates
- """
- center = start.copy()
-
- if plane == "G17": # XY plane
- if "I" in ijk:
- center["X"] = start.get("X", 0) + ijk["I"]
- if "J" in ijk:
- center["Y"] = start.get("Y", 0) + ijk["J"]
- elif plane == "G18": # XZ plane
- if "I" in ijk:
- center["X"] = start.get("X", 0) + ijk["I"]
- if "K" in ijk:
- center["Z"] = start.get("Z", 0) + ijk["K"]
- elif plane == "G19": # YZ plane
- if "J" in ijk:
- center["Y"] = start.get("Y", 0) + ijk["J"]
- if "K" in ijk:
- center["Z"] = start.get("Z", 0) + ijk["K"]
-
- return center
-
-
-def radius_to_center(
- start: dict[str, float],
- end: dict[str, float],
- radius: float,
- clockwise: bool = True,
- plane: str = "G17",
-) -> dict[str, float]:
- """
- Calculate arc center from radius
-
- Args:
- start: Starting position
- end: Ending position
- radius: Arc radius (positive for <180°, negative for >180°)
- clockwise: True for G2, False for G3
- plane: Active plane
-
- Returns:
- Center point coordinates
- """
- # Get the two axes involved in the arc based on plane
- if plane == "G17": # XY plane
- axis1, axis2 = "X", "Y"
- elif plane == "G18": # XZ plane
- axis1, axis2 = "X", "Z"
- else: # G19 - YZ plane
- axis1, axis2 = "Y", "Z"
-
- # Get start and end coordinates
- x1 = start.get(axis1, 0)
- y1 = start.get(axis2, 0)
- x2 = end.get(axis1, 0)
- y2 = end.get(axis2, 0)
-
- # Calculate midpoint
- mx = (x1 + x2) / 2
- my = (y1 + y2) / 2
-
- # Calculate distance between points
- dx = x2 - x1
- dy = y2 - y1
- d = math.sqrt(dx**2 + dy**2)
-
- # Check if arc is possible
- if d > 2 * abs(radius):
- raise ValueError(f"Arc radius {radius} too small for distance {d}")
-
- # Calculate distance from midpoint to center
- if abs(d) < 1e-10: # Points are the same
- return start.copy()
-
- h = math.sqrt(radius**2 - (d / 2) ** 2)
-
- # Calculate perpendicular direction
- px = -dy / d
- py = dx / d
-
- # Determine direction based on radius sign and clockwise flag
- if radius < 0:
- h = -h
- if not clockwise:
- h = -h
-
- # Calculate center
- center = start.copy()
- center[axis1] = mx + h * px
- center[axis2] = my + h * py
-
- return center
-
-
-def validate_arc(
- start: dict[str, float],
- end: dict[str, float],
- center: dict[str, float],
- plane: str = "G17",
-) -> bool:
- """
- Validate arc parameters
-
- Args:
- start: Starting position
- end: Ending position
- center: Arc center
- plane: Active plane
-
- Returns:
- True if arc is valid
- """
- # Get the two axes involved
- if plane == "G17":
- axis1, axis2 = "X", "Y"
- elif plane == "G18":
- axis1, axis2 = "X", "Z"
- else:
- axis1, axis2 = "Y", "Z"
-
- # Calculate radii from center to start and end
- r_start = math.sqrt(
- (start.get(axis1, 0) - center.get(axis1, 0)) ** 2
- + (start.get(axis2, 0) - center.get(axis2, 0)) ** 2
- )
-
- r_end = math.sqrt(
- (end.get(axis1, 0) - center.get(axis1, 0)) ** 2
- + (end.get(axis2, 0) - center.get(axis2, 0)) ** 2
- )
-
- # Check if radii are approximately equal (within 0.01mm)
- return abs(r_start - r_end) < 0.01
-
-
-def estimate_motion_time(
- start: dict[str, float],
- end: dict[str, float],
- feed_rate: float,
- is_rapid: bool = False,
-) -> float:
- """
- Estimate time for a motion command
-
- Args:
- start: Starting position
- end: Ending position
- feed_rate: Feed rate in mm/min
- is_rapid: True for G0 rapid moves
-
- Returns:
- Estimated time in seconds
- """
- if is_rapid:
- # Rapid moves use maximum speed
- # Use robot's actual max linear velocity (60 mm/s)
- rapid_speed = 60.0 # mm/s (from PAROL6_ROBOT.Cartesian_linear_velocity_max)
- distance = calculate_distance(start, end)
- return distance / rapid_speed
- else:
- # Regular moves use feed rate
- distance = calculate_distance(start, end)
- return feed_rate_to_duration(distance, feed_rate)
-
-
-def parse_gcode_file(filepath: str) -> list[str]:
- """
- Parse GCODE file and return list of lines
-
- Args:
- filepath: Path to GCODE file
-
- Returns:
- List of GCODE lines
- """
- lines = []
- try:
- with open(filepath) as f:
- for line in f:
- # Remove whitespace and convert to uppercase
- line = line.strip()
- if line and not line.startswith("%"): # Skip empty lines and % markers
- lines.append(line)
- except Exception as e:
- print(f"Error reading GCODE file: {e}")
-
- return lines
-
-
-def format_gcode_number(value: float, decimals: int = 3) -> str:
- """
- Format number for GCODE output
-
- Args:
- value: Numeric value
- decimals: Number of decimal places
-
- Returns:
- Formatted string without trailing zeros
- """
- formatted = f"{value:.{decimals}f}"
- # Remove trailing zeros and decimal point if not needed
- formatted = formatted.rstrip("0").rstrip(".")
- return formatted
-
-
-def split_into_segments(
- start: dict[str, float], end: dict[str, float], max_segment_length: float = 10.0
-) -> list[dict[str, float]]:
- """
- Split long moves into smaller segments
-
- Args:
- start: Starting position
- end: Ending position
- max_segment_length: Maximum segment length in mm
-
- Returns:
- List of intermediate positions
- """
- distance = calculate_distance(start, end)
-
- if distance <= max_segment_length:
- return [end]
-
- # Calculate number of segments
- num_segments = int(math.ceil(distance / max_segment_length))
-
- # Generate intermediate points
- points = []
- for i in range(1, num_segments + 1):
- t = i / num_segments
- point = {}
- for axis in ["X", "Y", "Z", "A", "B", "C"]:
- if axis in start and axis in end:
- point[axis] = start[axis] + t * (end[axis] - start[axis])
- points.append(point)
-
- return points
-
-
-def inches_to_mm(value: float) -> float:
- """Convert inches to millimeters"""
- return value * 25.4
-
-
-def mm_to_inches(value: float) -> float:
- """Convert millimeters to inches"""
- return value / 25.4
diff --git a/parol6/motion/__init__.py b/parol6/motion/__init__.py
new file mode 100644
index 0000000..db520f5
--- /dev/null
+++ b/parol6/motion/__init__.py
@@ -0,0 +1,57 @@
+"""
+Motion pipeline for trajectory generation and execution.
+
+This module provides a unified pipeline for all motion commands:
+- Cartesian commands (MoveCart, MoveCartRelTrf)
+- Joint commands (MovePose, MoveJoint)
+- Smooth shapes (Circle, Arc, Helix, Spline)
+
+All commands produce a JointPath that gets converted to a Trajectory
+via time-optimal path parameterization (TOPP-RA).
+
+Streaming executors provide real-time jerk-limited motion for jogging.
+
+Geometry generators provide path geometry for visualization and preview.
+"""
+
+from parol6.motion.geometry import (
+ CircularMotion,
+ SplineMotion,
+ blend_path_into,
+ build_composite_cartesian_path,
+ build_composite_joint_path,
+ compute_circle_from_3_points,
+ joint_path_to_tcp_poses,
+)
+from parol6.motion.streaming_executors import (
+ CartesianStreamingExecutor,
+ RuckigExecutorBase,
+ StreamingExecutor,
+)
+from parol6.motion.trajectory import (
+ JointPath,
+ ProfileType,
+ Trajectory,
+ TrajectoryBuilder,
+)
+
+__all__ = [
+ # Trajectory pipeline
+ "JointPath",
+ "Trajectory",
+ "TrajectoryBuilder",
+ "ProfileType",
+ # Streaming executors
+ "StreamingExecutor",
+ "CartesianStreamingExecutor",
+ "RuckigExecutorBase",
+ # Geometry generators
+ "CircularMotion",
+ "SplineMotion",
+ "joint_path_to_tcp_poses",
+ # Blend infrastructure
+ "blend_path_into",
+ "build_composite_cartesian_path",
+ "build_composite_joint_path",
+ "compute_circle_from_3_points",
+]
diff --git a/parol6/motion/geometry.py b/parol6/motion/geometry.py
new file mode 100644
index 0000000..be971fe
--- /dev/null
+++ b/parol6/motion/geometry.py
@@ -0,0 +1,742 @@
+"""
+Geometry generation for smooth motion paths.
+
+This module provides pure geometry generators for arcs and splines,
+plus blend zone computation for fly-by motion between consecutive segments.
+
+All generators are stateless - they produce Cartesian path geometry without
+depending on controller state or executing any motion.
+"""
+
+import logging
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from numpy.typing import NDArray
+from pinokin import batch_se3_interp, se3_from_rpy, se3_interp, so3_rpy
+from scipy.interpolate import CubicSpline
+from scipy.spatial.transform import Rotation, Slerp
+
+if TYPE_CHECKING:
+ from pinokin import Robot
+
+from parol6.config import CONTROL_RATE_HZ, PATH_SAMPLES
+
+logger = logging.getLogger(__name__)
+
+# Default control rate for geometry sampling
+DEFAULT_CONTROL_RATE = CONTROL_RATE_HZ
+
+
+class _ShapeGenerator:
+    """Base class for geometry generators; stores the sampling rate (Hz)."""
+
+    def __init__(self, control_rate: float | None = None):
+        self.control_rate = (  # Hz; used to convert durations into sample counts
+            control_rate if control_rate is not None else DEFAULT_CONTROL_RATE
+        )
+
+
+class CircularMotion(_ShapeGenerator):
+    """Generate arc trajectories in 3D space.
+
+    Inputs are 6D poses [x, y, z, rx, ry, rz] in mm and degrees;
+    generate_arc returns (N, 4, 4) homogeneous SE3 poses with
+    translation in meters and rotation as a 3x3 matrix.
+    """
+
+    def generate_arc(
+        self,
+        start_pose: NDArray,
+        end_pose: NDArray,
+        center: NDArray,
+        normal: NDArray | None = None,
+        clockwise: bool = False,
+        n_samples: int = PATH_SAMPLES,
+    ) -> np.ndarray:
+        """Generate a 3D circular arc trajectory (uniformly sampled geometry).
+
+        Args:
+            start_pose: Start pose [x, y, z, rx, ry, rz] (mm, degrees)
+            end_pose: End pose [x, y, z, rx, ry, rz] (mm, degrees)
+            center: Arc center point [x, y, z] (mm)
+            normal: Normal vector defining arc plane (auto-computed if None)
+            clockwise: If True, arc goes clockwise when viewed from normal
+            n_samples: Number of sample points along the arc
+
+        Returns:
+            (N, 4, 4) array of SE3 poses along the arc (meters, radians).
+        """
+        start_pos = start_pose[:3]
+        end_pos = end_pose[:3]
+
+        r1 = start_pos - center  # radius vector to arc start
+        r2 = end_pos - center  # radius vector to arc end
+
+        if normal is None:
+            normal = np.cross(r1, r2)
+            if np.linalg.norm(normal) < 1e-6:
+                normal = np.array([0, 0, 1])  # r1 ~parallel to r2: plane undefined, default to XY plane
+        normal_unit = normal / np.linalg.norm(normal)
+
+        r1_norm = r1 / np.linalg.norm(r1)
+        r2_norm = r2 / np.linalg.norm(r2)
+        cos_angle = np.clip(np.dot(r1_norm, r2_norm), -1, 1)
+        arc_angle = np.arccos(cos_angle)  # in [0, pi]
+
+        # Full circle: start ≈ end → 2π arc, not zero
+        if arc_angle < 1e-6 and float(np.linalg.norm(r1 - r2)) < 1.0:
+            arc_angle = 2 * np.pi
+
+        cross = np.cross(r1_norm, r2_norm)
+        if np.dot(cross, normal_unit) < 0:
+            arc_angle = 2 * np.pi - arc_angle  # reflex arc: sweep the long way around
+        if clockwise:
+            arc_angle = -arc_angle
+
+        num_points = max(2, n_samples)
+
+        # Vectorized arc position generation using scipy Rotation
+        t_values = np.linspace(0, 1, num_points) if num_points > 1 else np.array([1.0])  # num_points >= 2, so else branch is defensive only
+        angles = t_values * arc_angle
+
+        rotvecs = np.outer(angles, normal_unit)  # (num_points, 3)
+        rotations = Rotation.from_rotvec(rotvecs)
+        positions = center + rotations.apply(r1)  # (num_points, 3) in mm
+
+        # Build SE3 start/end from 6D poses, then interpolate orientation in
+        # SE3 space (Lie-algebra geodesic) to avoid gimbal-lock Euler issues.
+        start_se3 = np.empty((4, 4), dtype=np.float64)
+        end_se3 = np.empty((4, 4), dtype=np.float64)
+        se3_from_rpy(
+            start_pose[0] / 1000.0,
+            start_pose[1] / 1000.0,
+            start_pose[2] / 1000.0,
+            np.radians(start_pose[3]),
+            np.radians(start_pose[4]),
+            np.radians(start_pose[5]),
+            start_se3,
+        )
+        se3_from_rpy(
+            end_pose[0] / 1000.0,
+            end_pose[1] / 1000.0,
+            end_pose[2] / 1000.0,
+            np.radians(end_pose[3]),
+            np.radians(end_pose[4]),
+            np.radians(end_pose[5]),
+            end_se3,
+        )
+
+        trajectory = np.empty((num_points, 4, 4), dtype=np.float64)
+        batch_se3_interp(start_se3, end_se3, t_values, trajectory)
+
+        # Override translations with the arc-geometry positions (mm → meters)
+        trajectory[:, :3, 3] = positions / 1000.0
+
+        return trajectory
+
+
+class SplineMotion(_ShapeGenerator):
+    """Generate smooth spline trajectories through waypoints.
+
+    Uses cubic spline interpolation for position and SLERP for orientation.
+    """
+
+    def generate_spline(
+        self,
+        waypoints: NDArray,
+        timestamps: NDArray | None = None,
+        duration: float | None = None,
+        velocity_start: NDArray | None = None,
+        velocity_end: NDArray | None = None,
+    ) -> np.ndarray:
+        """Generate spline trajectory (uniformly sampled geometry).
+
+        Args:
+            waypoints: (N, 6) array of [x, y, z, rx, ry, rz] waypoints
+            timestamps: Optional timestamps for each waypoint
+            duration: Total duration (overrides timestamps scaling)
+            velocity_start: Start velocity for position [vx, vy, vz]
+            velocity_end: End velocity; applied only when both are given
+
+        Returns:
+            (M, 6) array of poses; M = max(2, total_time * control_rate)
+        """
+        waypoints_arr = np.asarray(waypoints, dtype=float)
+        num_waypoints = len(waypoints_arr)
+
+        if num_waypoints < 2:
+            return waypoints_arr  # degenerate input: nothing to interpolate
+
+        if timestamps is None:
+            total_dist = 0.0
+            for i in range(1, num_waypoints):
+                dist = np.linalg.norm(waypoints_arr[i, :3] - waypoints_arr[i - 1, :3])
+                total_dist += float(dist)
+
+            if duration is not None:
+                total_time = duration
+            else:
+                total_time = max(0.1, total_dist / 50.0)  # heuristic: ~50 units/s nominal speed, min 0.1 s
+
+            timestamps_arr = np.linspace(0, total_time, num_waypoints)
+        else:
+            timestamps_arr = np.asarray(timestamps, dtype=float)
+            if duration is not None:
+                scale = duration / timestamps_arr[-1] if timestamps_arr[-1] > 0 else 1.0
+                timestamps_arr = timestamps_arr * scale
+
+        if len(timestamps_arr) != len(waypoints_arr):
+            raise ValueError(
+                f"Timestamps length ({len(timestamps_arr)}) must match "
+                f"waypoints length ({len(waypoints_arr)})"
+            )
+
+        pos_splines = []
+        for i in range(3):
+            bc: Any
+            if velocity_start is not None and velocity_end is not None:
+                bc = ((1, float(velocity_start[i])), (1, float(velocity_end[i])))  # clamp first derivative at both ends
+            else:
+                bc = "not-a-knot"  # SciPy's default boundary condition
+            spline = CubicSpline(timestamps_arr, waypoints_arr[:, i], bc_type=bc)
+            pos_splines.append(spline)
+
+        # Batch convert euler angles to rotations (vectorized)
+        euler_angles = waypoints_arr[:, 3:]
+        key_rots = Rotation.from_euler("xyz", euler_angles, degrees=True)
+        slerp = Slerp(timestamps_arr, key_rots)
+
+        total_time = float(timestamps_arr[-1])
+        num_points = max(2, int(total_time * self.control_rate))
+        t_eval = np.linspace(0, total_time, num_points)
+
+        trajectory = np.empty((num_points, 6), dtype=np.float64)
+        for i, spline in enumerate(pos_splines):
+            trajectory[:, i] = spline(t_eval)
+        trajectory[:, 3:] = slerp(t_eval).as_euler("xyz", degrees=True)
+
+        return trajectory
+
+
+def joint_path_to_tcp_poses(
+    joint_positions: NDArray[np.float64],
+    robot: "Robot | None" = None,
+) -> NDArray[np.float64]:
+    """Convert joint-space path to TCP poses using forward kinematics.
+
+    This is useful for visualizing the actual TCP trajectory that results
+    from joint-space interpolation (which traces an arc, not a straight line).
+
+    Args:
+        joint_positions: (N, 6) array of joint angles in radians
+        robot: pinokin Robot model (uses PAROL6_ROBOT.robot if None)
+
+    Returns:
+        (N, 6) array of [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg] poses
+    """
+    if robot is None:
+        import parol6.PAROL6_ROBOT as PAROL6_ROBOT  # NOTE(review): local import — presumably avoids an import cycle; confirm
+
+        robot = PAROL6_ROBOT.robot
+
+    # Batch FK in C++ (single call, no Python loop overhead)
+    transforms = robot.batch_fk(joint_positions)
+
+    n_points = len(joint_positions)
+    tcp_poses = np.empty((n_points, 6), dtype=np.float64)
+    rpy_buf = np.empty(3, dtype=np.float64)  # reused each iteration to avoid reallocation
+
+    for i, T in enumerate(transforms):
+        tcp_poses[i, :3] = T[:3, 3] * 1000.0  # m -> mm
+        so3_rpy(T[:3, :3], rpy_buf)  # rotation matrix -> roll/pitch/yaw (radians)
+        np.degrees(rpy_buf, out=tcp_poses[i, 3:])
+
+    return tcp_poses
+
+
+def compute_circle_from_3_points(
+    p1: NDArray[np.float64],
+    p2: NDArray[np.float64],
+    p3: NDArray[np.float64],
+) -> tuple[NDArray[np.float64], float, NDArray[np.float64]]:
+    """Compute the circumscribed circle through 3 non-collinear 3D points.
+
+    Args:
+        p1, p2, p3: 3D points (shape (3,))
+
+    Returns:
+        (center, radius, normal):
+            center: Circle center point (3,)
+            radius: Circle radius
+            normal: Unit normal of the plane containing the circle (3,)
+
+    Raises:
+        ValueError: If the 3 points are collinear (no unique circle).
+    """
+    p1 = np.asarray(p1, dtype=np.float64)
+    p2 = np.asarray(p2, dtype=np.float64)
+    p3 = np.asarray(p3, dtype=np.float64)
+
+    # Vectors from p1 to p2 and p3
+    a = p2 - p1
+    b = p3 - p1
+
+    # Full circle: start ≈ end (p1 ≈ p3), via is diametrically opposite.
+    # Threshold accounts for FK/IK precision (~0.1 mm).
+    if float(np.linalg.norm(b)) < 1.0:
+        a_len = float(np.linalg.norm(a))
+        if a_len < 1e-12:
+            raise ValueError("All three points are coincident.")
+        center = (p1 + p2) / 2.0
+        radius = a_len / 2.0
+        d = a / a_len  # unit diameter direction
+        ref = (
+            np.array([0.0, 0.0, 1.0]) if abs(d[2]) < 0.9 else np.array([1.0, 0.0, 0.0])  # reference axis not parallel to d
+        )
+        normal = np.cross(d, ref)  # any vector perpendicular to the diameter
+        normal /= np.linalg.norm(normal)
+        return center, radius, normal
+
+    # Normal to the plane
+    normal = np.asarray(np.cross(a, b), dtype=np.float64)
+    normal_len = float(np.linalg.norm(normal))
+    if normal_len < 1e-12:
+        raise ValueError("Points are collinear; no unique circle exists.")
+    np.divide(normal, normal_len, out=normal)  # normalize in place
+
+    # Circumcenter via perpendicular bisector intersection in the plane.
+    # C = p1 + s*a + t*b, where s and t satisfy:
+    #   (C - p1)·a = |a|²/2 and (C - p1)·b = |b|²/2
+    # Expanding: s*(a·a) + t*(b·a) = (a·a)/2
+    #            s*(a·b) + t*(b·b) = (b·b)/2
+    aa = float(np.dot(a, a))
+    bb = float(np.dot(b, b))
+    ab = float(np.dot(a, b))
+
+    det = aa * bb - ab * ab
+    if abs(det) < 1e-20:
+        raise ValueError("Degenerate configuration; cannot compute circle center.")
+
+    s = (bb * aa - ab * bb) / (2.0 * det)  # Cramer's rule; = bb*(aa - ab)/(2*det)
+    t = (aa * bb - ab * aa) / (2.0 * det)  # Cramer's rule; = aa*(bb - ab)/(2*det)
+    center = p1 + s * a + t * b
+    radius = float(np.linalg.norm(center - p1))
+
+    return center, radius, normal
+
+
+def blend_path_into(
+    entry_pose: NDArray[np.float64],
+    waypoint_pose: NDArray[np.float64],
+    exit_pose: NDArray[np.float64],
+    out: NDArray[np.float64],
+    skip: int = 0,
+) -> None:
+    """Write quadratic Bezier blend zone into pre-allocated buffer.
+
+    The blend zone smoothly rounds a corner between two linear Cartesian
+    segments. It is tangent to the incoming segment at t=0 and to the outgoing
+    segment at t=1.
+
+    Position follows a quadratic Bezier curve::
+
+        P(t) = (1-t)^2*E + 2t(1-t)*W + t^2*X
+
+    Orientation is geodesic (SLERP) from entry to exit.
+
+    Args:
+        entry_pose: SE3 pose at blend zone entry (4x4)
+        waypoint_pose: SE3 pose at the corner being rounded (4x4)
+        exit_pose: SE3 pose at blend zone exit (4x4)
+        out: Output array, shape (n_samples, 4, 4). Written in-place.
+        skip: Number of initial samples to skip (for junction dedup).
+    """
+    E = entry_pose[:3, 3]
+    W = waypoint_pose[:3, 3]
+    X = exit_pose[:3, 3]
+
+    n_total = out.shape[0] + skip
+    t = np.linspace(0.0, 1.0, n_total)[skip:]
+
+    # Batch SLERP for orientation (entry -> exit)
+    batch_se3_interp(entry_pose, exit_pose, t, out)  # fills full SE3; translations overwritten below
+
+    # Override translation with quadratic Bezier position
+    omt = 1.0 - t  # (1 - t) Bezier basis factor
+    out[:, :3, 3] = (
+        np.outer(omt * omt, E) + np.outer(2.0 * omt * t, W) + np.outer(t * t, X)
+    )
+
+
+def build_composite_cartesian_path(
+    waypoints: list[NDArray[np.float64]],
+    blend_radii: list[float],
+    samples_per_segment: int = PATH_SAMPLES,
+) -> NDArray[np.float64]:
+    """Build a composite Cartesian path with blend zones at intermediate waypoints.
+
+    Concatenates linear Cartesian segments connected by quadratic Bezier blend
+    zones. Implements ABB-style zone overlap clamping: if two adjacent blend
+    zones would overlap, both radii are proportionally reduced so they don't
+    exceed half the segment length.
+
+    Args:
+        waypoints: List of SE3 poses (4x4) defining the path corners.
+            Must have at least 2 waypoints.
+        blend_radii: Blend radius (mm) for each intermediate waypoint.
+            Length must equal ``len(waypoints) - 2`` (no blend at start/end).
+            ``r=0`` means stop at the waypoint (no blending).
+        samples_per_segment: Number of linear interpolation samples per segment
+
+    Returns:
+        (M, 4, 4) ndarray of SE3 poses forming the complete path.
+
+    Raises:
+        ValueError: If inputs are inconsistent.
+    """
+    n = len(waypoints)
+    if n < 2:
+        raise ValueError("Need at least 2 waypoints")
+    if len(blend_radii) != max(0, n - 2):
+        raise ValueError(
+            f"Expected {max(0, n - 2)} blend radii, got {len(blend_radii)}"
+        )
+
+    # No blending for 2-waypoint path
+    if n == 2:
+        out = np.empty((samples_per_segment, 4, 4), dtype=np.float64)
+        _linear_se3_segment_into(waypoints[0], waypoints[1], out)
+        return out
+
+    # Compute segment lengths (mm)
+    seg_lengths: list[float] = [0.0] * (n - 1)
+    for i in range(n - 1):
+        seg_lengths[i] = (
+            float(np.linalg.norm(waypoints[i + 1][:3, 3] - waypoints[i][:3, 3]))
+            * 1000.0  # pose translations are meters; blend radii are mm
+        )
+
+    # Clamp blend radii (zone overlap prevention)
+    clamped = list(blend_radii)
+    for i in range(len(clamped)):
+        half_before = seg_lengths[i] / 2.0
+        half_after = seg_lengths[i + 1] / 2.0
+        clamped[i] = min(clamped[i], half_before, half_after)
+
+    # Adjacent blends: if clamped[i] + clamped[i+1] > seg_lengths[i+1], scale both
+    for i in range(len(clamped) - 1):
+        seg_len = seg_lengths[i + 1]
+        total = clamped[i] + clamped[i + 1]
+        if total > seg_len and total > 0:
+            scale = seg_len / total
+            clamped[i] *= scale
+            clamped[i + 1] *= scale
+
+    # Pre-compute per-segment trim fractions
+    seg_exit_frac = [0.0] * (n - 1)
+    seg_entry_frac = [0.0] * (n - 1)
+    for i in range(len(clamped)):
+        if clamped[i] > 0:
+            if seg_lengths[i] > 0:
+                seg_exit_frac[i] = clamped[i] / seg_lengths[i]
+            if seg_lengths[i + 1] > 0:
+                seg_entry_frac[i + 1] = clamped[i] / seg_lengths[i + 1]
+
+    # Interleaved precompute: count linear segments and blend zones in order
+    total_rows = 0
+    for seg_idx in range(n - 1):
+        s_start = seg_entry_frac[seg_idx]
+        s_end = 1.0 - seg_exit_frac[seg_idx]
+        if s_end > s_start + 1e-9:
+            rows = samples_per_segment
+            if total_rows > 0 and seg_idx > 0:
+                rows -= 1  # first sample duplicates the previous junction
+            total_rows += rows
+        if seg_idx < len(clamped) and clamped[seg_idx] > 0:
+            avg_seg_len = (seg_lengths[seg_idx] + seg_lengths[seg_idx + 1]) / 2.0
+            frac = clamped[seg_idx] / avg_seg_len if avg_seg_len > 1e-6 else 0.0
+            bs = _blend_sample_count(frac, samples_per_segment)
+            rows = bs
+            if total_rows > 0:
+                rows -= 1  # first sample duplicates the previous junction
+            total_rows += rows
+
+    out = np.empty((total_rows, 4, 4), dtype=np.float64)
+    row = 0
+
+    # Workspace buffers for blend zone endpoints (hoisted out of loop)
+    entry_buf = np.zeros((4, 4), dtype=np.float64)
+    exit_buf = np.zeros((4, 4), dtype=np.float64)
+
+    for seg_idx in range(n - 1):
+        start = waypoints[seg_idx]
+        end = waypoints[seg_idx + 1]
+
+        s_start = seg_entry_frac[seg_idx]
+        s_end = 1.0 - seg_exit_frac[seg_idx]
+
+        # Linear segment
+        if s_end > s_start + 1e-9:
+            skip = 1 if (row > 0 and seg_idx > 0) else 0  # dedup shared junction sample
+            n_write = samples_per_segment - skip
+            _linear_se3_segment_into(
+                start,
+                end,
+                out[row : row + n_write],
+                s_start,
+                s_end,
+                skip=skip,
+            )
+            row += n_write
+
+        # Blend zone at end of this segment
+        if seg_idx < len(clamped) and clamped[seg_idx] > 0:
+            se3_interp(start, end, 1.0 - seg_exit_frac[seg_idx], entry_buf)
+            corner = end
+            next_end = waypoints[seg_idx + 2]
+            se3_interp(end, next_end, seg_entry_frac[seg_idx + 1], exit_buf)
+
+            avg_seg_len = (seg_lengths[seg_idx] + seg_lengths[seg_idx + 1]) / 2.0
+            frac = clamped[seg_idx] / avg_seg_len if avg_seg_len > 1e-6 else 0.0
+            bs = _blend_sample_count(frac, samples_per_segment)
+            skip = 1 if row > 0 else 0  # dedup shared junction sample
+            n_write = bs - skip
+            blend_path_into(
+                entry_buf,
+                corner,
+                exit_buf,
+                out[row : row + n_write],
+                skip=skip,
+            )
+            row += n_write
+
+    return out[:row]
+
+
+def _linear_se3_segment_into(
+    start: NDArray[np.float64],
+    end: NDArray[np.float64],
+    out: NDArray[np.float64],
+    s_start: float = 0.0,
+    s_end: float = 1.0,
+    skip: int = 0,
+) -> None:
+    """Write linearly interpolated SE3 poses into pre-allocated buffer.
+
+    Args:
+        start: Start SE3 pose (4x4)
+        end: End SE3 pose (4x4)
+        out: Output array, shape (n_samples, 4, 4). Written in-place.
+        s_start: Start interpolation fraction (0-1)
+        s_end: End interpolation fraction (0-1)
+        skip: Number of initial samples to skip (for junction dedup).
+    """
+    n_total = out.shape[0] + skip
+    s_values = np.linspace(s_start, s_end, n_total)[skip:]
+    batch_se3_interp(start, end, s_values, out)  # geodesic SE3 interpolation at each fraction
+
+
+def _blend_sample_count(frac: float, samples_per_segment: int) -> int:
+    """Compute adaptive blend zone sample count from blend fraction.
+
+    The blend zone replaces *frac* of each of two adjacent segments,
+    so its effective arc length is ~2*frac segments. The 2x multiplier
+    keeps the sample density roughly uniform with the linear segments.
+    """
+    return max(5, int(2.0 * frac * samples_per_segment + 0.5))  # round-half-up, floor of 5 samples
+
+
+# ---------------------------------------------------------------------------
+# Joint-space composite path with blend zones
+# ---------------------------------------------------------------------------
+
+
+def build_composite_joint_path(
+    waypoints: list[NDArray[np.float64]],
+    blend_fracs: list[tuple[float, float]],
+    samples_per_segment: int = 50,
+) -> NDArray[np.float64]:
+    """Build a composite joint-space path with Bezier blend zones.
+
+    Mirrors :func:`build_composite_cartesian_path` but operates entirely in
+    joint space. Blend zone sizes are expressed as fractions of each adjacent
+    segment (pre-computed by the caller from FK-based mm->fraction conversion).
+
+    Args:
+        waypoints: Joint-angle arrays (radians), length N >= 2.
+        blend_fracs: For each of the N-2 intermediate waypoints, a
+            ``(frac_before, frac_after)`` tuple giving the fraction of the
+            incoming / outgoing segment consumed by the blend zone.
+            Values are clamped internally to prevent overlap.
+        samples_per_segment: Linear interpolation samples per segment.
+
+    Returns:
+        (M, ndof) ndarray of joint positions along the composite path.
+
+    Raises:
+        ValueError: If inputs are inconsistent.
+    """
+    n = len(waypoints)
+    ndof = len(waypoints[0])
+    if n < 2:
+        raise ValueError("Need at least 2 waypoints")
+    if len(blend_fracs) != max(0, n - 2):
+        raise ValueError(
+            f"Expected {max(0, n - 2)} blend_fracs, got {len(blend_fracs)}"
+        )
+
+    # Trivial 2-waypoint path — no blending needed
+    if n == 2:
+        out = np.empty((samples_per_segment, ndof), dtype=np.float64)
+        _linear_joint_segment_into(
+            waypoints[0],
+            waypoints[1],
+            out,
+            0.0,
+            1.0,
+        )
+        return out
+
+    # Clamp fractions to [0, 0.5]
+    exit_frac = [min(max(f[0], 0.0), 0.5) for f in blend_fracs]
+    entry_frac = [min(max(f[1], 0.0), 0.5) for f in blend_fracs]
+
+    # Build per-segment trim arrays
+    seg_start_trim = [0.0] * (n - 1)
+    seg_end_trim = [0.0] * (n - 1)
+    for i in range(len(blend_fracs)):
+        wp_idx = i + 1  # blend i belongs to intermediate waypoint i+1
+        seg_end_trim[wp_idx - 1] = exit_frac[i]
+        seg_start_trim[wp_idx] = entry_frac[i]
+
+    # Clamp overlapping trims on any segment
+    for s in range(n - 1):
+        total = seg_start_trim[s] + seg_end_trim[s]
+        if total > 1.0:
+            scale = 1.0 / total
+            seg_start_trim[s] *= scale
+            seg_end_trim[s] *= scale
+
+    # Interleaved precompute: count linear segments and blend zones in order
+    total_rows = 0
+    for seg_idx in range(n - 1):
+        s_start = seg_start_trim[seg_idx]
+        s_end = 1.0 - seg_end_trim[seg_idx]
+        if s_end > s_start + 1e-9:
+            rows = samples_per_segment
+            if total_rows > 0 and seg_idx > 0:
+                rows -= 1  # first sample duplicates the previous junction
+            total_rows += rows
+        blend_idx = seg_idx
+        if blend_idx < len(blend_fracs) and (
+            exit_frac[blend_idx] > 0 or entry_frac[blend_idx] > 0
+        ):
+            avg_frac = (exit_frac[blend_idx] + entry_frac[blend_idx]) / 2.0
+            bs = _blend_sample_count(avg_frac, samples_per_segment)
+            rows = bs
+            if total_rows > 0:
+                rows -= 1  # first sample duplicates the previous junction
+            total_rows += rows
+
+    out = np.empty((total_rows, ndof), dtype=np.float64)
+    row = 0
+
+    for seg_idx in range(n - 1):
+        start = waypoints[seg_idx]
+        end = waypoints[seg_idx + 1]
+        s_start = seg_start_trim[seg_idx]
+        s_end = 1.0 - seg_end_trim[seg_idx]
+
+        if s_end > s_start + 1e-9:
+            skip = 1 if (row > 0 and seg_idx > 0) else 0  # dedup shared junction sample
+            n_write = samples_per_segment - skip
+            _linear_joint_segment_into(
+                start,
+                end,
+                out[row : row + n_write],
+                s_start,
+                s_end,
+                skip=skip,
+            )
+            row += n_write
+
+        blend_idx = seg_idx
+        if blend_idx < len(blend_fracs) and (
+            exit_frac[blend_idx] > 0 or entry_frac[blend_idx] > 0
+        ):
+            entry_q = start + (1.0 - seg_end_trim[seg_idx]) * (end - start)  # blend entry on incoming segment
+            corner_q = end
+            next_end = waypoints[seg_idx + 2]
+            exit_q = end + seg_start_trim[seg_idx + 1] * (next_end - end)  # blend exit on outgoing segment
+
+            avg_frac = (exit_frac[blend_idx] + entry_frac[blend_idx]) / 2.0
+            bs = _blend_sample_count(avg_frac, samples_per_segment)
+            skip = 1 if row > 0 else 0  # dedup shared junction sample
+            n_write = bs - skip
+            _blend_joint_path_into(
+                entry_q,
+                corner_q,
+                exit_q,
+                out[row : row + n_write],
+                skip=skip,
+            )
+            row += n_write
+
+    return out[:row]
+
+
+def _linear_joint_segment_into(
+    start: NDArray[np.float64],
+    end: NDArray[np.float64],
+    out: NDArray[np.float64],
+    s_start: float = 0.0,
+    s_end: float = 1.0,
+    skip: int = 0,
+) -> None:
+    """Write linearly interpolated joint positions into pre-allocated buffer.
+
+    Args:
+        start: Start joint configuration.
+        end: End joint configuration.
+        out: Output array, shape (n_samples, ndof). Written in-place.
+        s_start: Start interpolation fraction (0-1).
+        s_end: End interpolation fraction (0-1).
+        skip: Number of initial samples to skip (for junction dedup).
+    """
+    n_total = out.shape[0] + skip
+    delta = end - start
+    t = np.linspace(s_start, s_end, n_total)[skip:]
+    np.outer(t, delta, out)  # out[i] = t[i] * delta (third positional arg is the out= buffer)
+    out += start  # shift rows from deltas to absolute positions
+
+
+def _blend_joint_path_into(
+    entry_q: NDArray[np.float64],
+    waypoint_q: NDArray[np.float64],
+    exit_q: NDArray[np.float64],
+    out: NDArray[np.float64],
+    skip: int = 0,
+) -> None:
+    """Write quadratic Bezier blend zone into pre-allocated buffer.
+
+    Per-joint: ``q(t) = (1-t)^2 E + 2t(1-t) W + t^2 X``
+
+    Tangent at t=0 matches incoming segment, tangent at t=1 matches outgoing
+    segment, giving C1 (velocity) continuity at blend boundaries.
+
+    Args:
+        entry_q: Joint angles at blend entry.
+        waypoint_q: Joint angles at the corner being rounded.
+        exit_q: Joint angles at blend exit.
+        out: Output array, shape (n_samples, ndof). Written in-place.
+        skip: Number of initial samples to skip.
+    """
+    n_total = out.shape[0] + skip
+    t = np.linspace(0.0, 1.0, n_total)[skip:]
+    omt = 1.0 - t  # (1 - t) Bezier basis factor
+    out[:] = (
+        np.outer(omt * omt, entry_q)
+        + np.outer(2.0 * omt * t, waypoint_q)
+        + np.outer(t * t, exit_q)
+    )
diff --git a/parol6/motion/streaming_executors.py b/parol6/motion/streaming_executors.py
new file mode 100644
index 0000000..ceffda9
--- /dev/null
+++ b/parol6/motion/streaming_executors.py
@@ -0,0 +1,747 @@
+"""
+Streaming executors for online robot motion using Ruckig.
+
+Provides jerk-limited motion execution for real-time control:
+- StreamingExecutor: Joint-space jogging and streaming
+- CartesianStreamingExecutor: Cartesian-space jogging and streaming
+
+Precomputed trajectories bypass these executors and go directly to the controller,
+since they're already time-optimal (TOPPRA/RUCKIG) or validated (QUINTIC/TRAPEZOID).
+"""
+
+import logging
+from abc import ABC, abstractmethod
+
+import numpy as np
+from numba import njit
+from numpy.typing import NDArray
+from ruckig import ControlInterface, InputParameter, OutputParameter, Result, Ruckig # type: ignore[unresolved-import, ty:unresolved-import]
+
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from parol6.config import INTERVAL_S, LIMITS
+from pinokin import se3_exp_ws, se3_inverse, se3_log_ws, se3_mul
+
+logger = logging.getLogger(__name__)
+
+_IDENTITY_SE3: np.ndarray = np.eye(4, dtype=np.float64)
+_IDENTITY_SE3.flags.writeable = False
+
+
+@njit(cache=True)
+def _pose_to_tangent_jit(
+    ref_pose: np.ndarray,
+    pose: np.ndarray,
+    ref_inv: np.ndarray,
+    delta: np.ndarray,
+    out: np.ndarray,
+    omega_ws: np.ndarray,
+    R_ws: np.ndarray,
+    V_inv_ws: np.ndarray,
+) -> None:
+    """Convert SE3 pose to 6D tangent vector relative to reference.
+
+    Uses workspace variants for zero internal allocation.
+
+    Args:
+        ref_pose: Reference pose (4x4 SE3)
+        pose: Pose to convert (4x4 SE3)
+        ref_inv: Workspace buffer for reference inverse (4x4)
+        delta: Workspace buffer for delta transform (4x4)
+        out: Output tangent vector (6,) [vx, vy, vz, wx, wy, wz]
+        omega_ws: Workspace buffer for axis-angle (3,)
+        R_ws: Workspace buffer for rotation matrix (3,3)
+        V_inv_ws: Workspace buffer for V inverse matrix (3,3)
+    """
+    se3_inverse(ref_pose, ref_inv)  # ref_inv = ref_pose^-1
+    se3_mul(ref_inv, pose, delta)  # delta = ref_pose^-1 @ pose
+    se3_log_ws(delta, out, omega_ws, R_ws, V_inv_ws)  # log map: SE3 delta -> 6D twist
+
+
+@njit(cache=True)
+def _tangent_to_pose_jit(
+    ref_pose: np.ndarray,
+    tangent: np.ndarray,
+    delta: np.ndarray,
+    out: np.ndarray,
+    omega_ws: np.ndarray,
+    R_ws: np.ndarray,
+    V_ws: np.ndarray,
+) -> None:
+    """Convert 6D tangent vector back to SE3 pose.
+
+    Uses workspace variants for zero internal allocation.
+
+    Args:
+        ref_pose: Reference pose (4x4 SE3)
+        tangent: Tangent vector (6,) [vx, vy, vz, wx, wy, wz]
+        delta: Workspace buffer for delta transform (4x4)
+        out: Output pose (4x4 SE3)
+        omega_ws: Workspace buffer for axis-angle (3,)
+        R_ws: Workspace buffer for rotation matrix (3,3)
+        V_ws: Workspace buffer for V matrix (3,3)
+    """
+    se3_exp_ws(tangent, delta, omega_ws, R_ws, V_ws)  # exp map: 6D twist -> SE3 delta
+    se3_mul(ref_pose, delta, out)  # out = ref_pose @ delta
+
+
+# Module-level constant for error checking (avoids tuple creation per check)
+_RUCKIG_ERRORS = (Result.Error, Result.ErrorInvalidInput)
+
+
+# =============================================================================
+# Base Class
+# =============================================================================
+
+
+class RuckigExecutorBase(ABC):
+    """
+    Abstract base class for Ruckig-based streaming executors.
+
+    Provides common infrastructure for jerk-limited motion execution:
+    - Velocity/acceleration limit scaling
+    - Common tick() error handling
+    - Graceful stop implementation
+
+    Subclasses implement space-specific logic (joint vs Cartesian).
+
+    Note: Position/velocity buffers returned by tick() are reused across calls
+    to minimize allocations in the 250Hz control loop. Callers must copy data
+    if they need to retain values across ticks.
+    """
+
+    def __init__(self, num_dofs: int, dt: float = INTERVAL_S):
+        self.num_dofs = num_dofs
+        self.dt = dt
+        self.ruckig = Ruckig(num_dofs, dt)
+        self.inp = InputParameter(num_dofs)
+        self.out = OutputParameter(num_dofs)
+        self.active = False
+        self._vel_scale: float = 1.0
+        self._acc_scale: float = 1.0
+
+        # Pre-allocated output buffers (reused every tick to avoid allocations)
+        self._pos_out = np.zeros(num_dofs)
+        self._vel_out = np.zeros(num_dofs)
+        self._zeros_np = np.zeros(num_dofs)
+        self._zeros: list[float] = [0.0] * num_dofs
+
+        self._init_limits()
+        self._init_state()
+
+    @abstractmethod
+    def _init_limits(self) -> None:
+        """Initialize hardware limits from config. Called by __init__."""
+        ...
+
+    @abstractmethod
+    def _init_state(self) -> None:
+        """Initialize Ruckig input parameters. Called by __init__."""
+        ...
+
+    @abstractmethod
+    def _apply_limits(self) -> None:
+        """Apply current limits (with scaling) to Ruckig parameters."""
+        ...
+
+    def set_limits(self, velocity_frac: float = 1.0, accel_frac: float = 1.0) -> None:
+        """Set velocity/acceleration as fraction of limits (0.0-1.0)."""
+        self._vel_scale = max(0.01, min(1.0, velocity_frac))  # clamp to [0.01, 1.0]
+        self._acc_scale = max(0.01, min(1.0, accel_frac))  # clamp to [0.01, 1.0]
+        self._apply_limits()
+
+    def _tick_ruckig(self) -> tuple[Result, np.ndarray, np.ndarray]:
+        """
+        Common Ruckig update logic.
+
+        Returns:
+            (result, new_position, new_velocity) - uses pre-allocated buffers
+        """
+        result = self.ruckig.update(self.inp, self.out)
+        if result in _RUCKIG_ERRORS:
+            logger.error(f"Ruckig error: {result}")
+            self.active = False
+        else:
+            self.out.pass_to_input(self.inp)  # carry output state forward as next cycle's input
+            # Copy into pre-allocated buffers (avoids list() allocation per tick)
+            self._pos_out[:] = self.out.new_position
+            self._vel_out[:] = self.out.new_velocity
+        return result, self._pos_out, self._vel_out
+
+    def stop(self) -> None:
+        """Request graceful stop: switch to velocity control, decelerate to zero."""
+        self.inp.control_interface = ControlInterface.Velocity
+        for i in range(self.num_dofs):
+            self.inp.target_velocity[i] = 0.0
+            self.inp.target_acceleration[i] = 0.0
+
+
+# =============================================================================
+# Joint-Space Executor
+# =============================================================================
+
+
+class StreamingExecutor(RuckigExecutorBase):
+ """
+ Streaming execution layer for online robot motion in joint space.
+
+ Used only for jogging and streaming commands. Precomputed trajectories
+ bypass this executor entirely.
+
+ Key features:
+ - Jerk-limited smoothing via Ruckig
+ - Automatic Cartesian velocity limiting when enabled
+ - Smooth motion to position targets
+ - Online modification: changing targets mid-motion produces smooth blending
+ - Preserves velocity/acceleration state across ticks
+ """
+
+    def __init__(self, num_dofs: int = 6, dt: float = INTERVAL_S):
+        """
+        Initialize streaming executor.
+
+        Args:
+            num_dofs: Number of degrees of freedom (joints)
+            dt: Control cycle time in seconds
+        """
+        # Cartesian velocity limit (mm/s), None = no cart limiting
+        # Must be set before super().__init__ calls _init_limits/_init_state
+        self._cart_vel_limit: float | None = None
+
+        # Pre-allocated buffers for cart velocity limit calculations (avoids per-call allocations)
+        self._q_current_buf = np.zeros(num_dofs, dtype=np.float64)
+        self._q_target_buf = np.zeros(num_dofs, dtype=np.float64)
+        self._dq_buf = np.zeros(num_dofs, dtype=np.float64)
+        self._jacob0_buf = np.zeros((6, num_dofs), dtype=np.float64, order="F")  # NOTE(review): Fortran order — presumably required by the Jacobian backend; confirm
+
+        # Pre-allocated buffers for Ruckig parameters - each has ONE semantic purpose
+        # Position sync (current/target position share same values)
+        self._sync_pos_buf: list[float] = [0.0] * num_dofs
+        # Limit parameters (max_velocity, max_acceleration, max_jerk)
+        self._max_vel_buf: list[float] = [0.0] * num_dofs
+        self._max_acc_buf: list[float] = [0.0] * num_dofs
+        self._max_jerk_buf: list[float] = [0.0] * num_dofs
+        # Target velocity for jogging
+        self._target_vel_buf: list[float] = [0.0] * num_dofs
+
+        super().__init__(num_dofs, dt)
+
+    def _init_limits(self) -> None:
+        """Initialize hardware limits from centralized config (LIMITS.joint)."""
+        self._hardware_v_max = LIMITS.joint.hard.velocity
+        self._hardware_a_max = LIMITS.joint.hard.acceleration
+        self._hardware_j_max = LIMITS.joint.hard.jerk
+        self._jog_v_max = LIMITS.joint.jog.velocity
+
+ def _init_state(self) -> None:
+ """Initialize Ruckig input parameters."""
+ n = self.num_dofs
+ self.inp.current_position = [0.0] * n
+ self.inp.current_velocity = [0.0] * n
+ self.inp.current_acceleration = [0.0] * n
+ self.inp.target_position = [0.0] * n
+ self.inp.target_velocity = [0.0] * n
+ self.inp.target_acceleration = [0.0] * n
+ self._apply_limits()
+
+ def _apply_limits(self) -> None:
+ """Apply current limits (with scaling) to Ruckig parameters."""
+ for i in range(self.num_dofs):
+ self._max_vel_buf[i] = self._hardware_v_max[i] * self._vel_scale
+ self._max_acc_buf[i] = self._hardware_a_max[i] * self._acc_scale
+ self._max_jerk_buf[i] = self._hardware_j_max[i]
+ self.inp.max_velocity = self._max_vel_buf
+ self.inp.max_acceleration = self._max_acc_buf
+ self.inp.max_jerk = self._max_jerk_buf
+
+ def set_cart_velocity_limit(self, limit_mm_s: float | None) -> None:
+ """
+ Set Cartesian velocity limit for subsequent position targets.
+
+ When set, joint velocity limits are dynamically adjusted based on
+ the direction to target to ensure TCP velocity stays within limit.
+
+ Args:
+ limit_mm_s: Cartesian velocity limit in mm/s, or None to disable
+ """
+ self._cart_vel_limit = limit_mm_s
+
+ def sync_position(self, pos: list[float] | np.ndarray) -> None:
+ """
+ Sync current position from robot feedback.
+
+ Call this when idle to ensure executor starts from the actual robot position.
+ Should not be called while active (mid-motion).
+
+ Args:
+ pos: Current joint positions in radians
+ """
+ if not self.active:
+ self._sync_pos_buf[:] = pos
+ self.inp.current_position = self._sync_pos_buf
+ self.inp.current_velocity = self._zeros
+ self.inp.current_acceleration = self._zeros
+ self.inp.target_position = self._sync_pos_buf
+
+ def set_position_target(self, q_target: list[float]) -> None:
+ """
+ Set position target with automatic Cartesian velocity limiting.
+
+ If cart_vel_limit is set, joint velocity limits are dynamically
+ adjusted based on the direction to target using the local tangent
+ method to ensure TCP velocity stays within the Cartesian limit.
+
+ Args:
+ q_target: Target joint positions in radians
+ """
+ # Apply Cartesian velocity limiting if enabled
+ if self._cart_vel_limit is not None and self._cart_vel_limit > 0:
+ self._apply_cart_velocity_limit(q_target)
+ else:
+ # Reset to hardware limits (reuse buffer)
+ self._max_vel_buf[:] = self._hardware_v_max
+ self.inp.max_velocity = self._max_vel_buf
+
+ self.inp.control_interface = ControlInterface.Position
+ self._sync_pos_buf[:] = q_target
+ self.inp.target_position = self._sync_pos_buf
+ self.inp.target_velocity = self._zeros # Stop at target
+ self.active = True
+
+ def set_jog_velocity(self, joint_velocities: NDArray[np.float64]) -> None:
+ """
+ Set target velocity for jogging using Ruckig velocity control.
+
+ Ruckig will smoothly accelerate/decelerate to reach target velocity.
+ Call with [0,0,0,0,0,0] to smoothly stop.
+
+ Args:
+ joint_velocities: Desired velocity for each joint in rad/s (signed)
+ """
+ # Use jog-specific velocity limits (~80% of hardware limits) - reuse buffers
+ for i in range(self.num_dofs):
+ self._max_vel_buf[i] = self._jog_v_max[i] * self._vel_scale
+ self._max_acc_buf[i] = self._hardware_a_max[i] * self._acc_scale
+ self.inp.max_velocity = self._max_vel_buf
+ self.inp.max_acceleration = self._max_acc_buf
+
+ self.inp.control_interface = ControlInterface.Velocity
+ self._target_vel_buf[:] = joint_velocities
+ self.inp.target_velocity = self._target_vel_buf
+ self.inp.target_acceleration = self._zeros
+ self.active = True
+
+ def _apply_cart_velocity_limit(self, q_target: list[float]) -> None:
+ """
+ Compute and apply Cartesian-aware joint velocity limits.
+
+ Uses the local tangent method: computes joint velocity limits that
+ ensure TCP velocity along the direction to target stays within the
+ Cartesian velocity limit.
+ """
+ # Use pre-allocated buffers to avoid allocations
+ self._q_current_buf[:] = self.inp.current_position
+ self._q_target_buf[:] = q_target
+ np.subtract(self._q_target_buf, self._q_current_buf, out=self._dq_buf)
+
+ # Get the linear part of the Jacobian (first 3 rows)
+ PAROL6_ROBOT.robot.jacob0_into(self._q_current_buf, self._jacob0_buf)
+ J_lin = self._jacob0_buf[:3, :]
+
+ # Compute Cartesian velocity per unit "scale" along dq direction
+ cart_vel_per_scale = np.linalg.norm(J_lin @ self._dq_buf)
+
+ if cart_vel_per_scale > 1e-6:
+ v_max_m_s = (
+ self._cart_vel_limit / 1000.0 if self._cart_vel_limit else 0.0
+ ) # mm/s to m/s
+ max_scale = v_max_m_s / cart_vel_per_scale
+
+ # Reuse pre-allocated buffer for velocity limits
+ for j in range(self.num_dofs):
+ # Joint velocity = dq[j] * scale, so max joint vel = |dq[j]| * max_scale
+ q_dot_max = min(
+ abs(self._dq_buf[j]) * max_scale, self._hardware_v_max[j]
+ )
+ # Ensure non-zero minimum to avoid Ruckig issues
+ self._max_vel_buf[j] = max(q_dot_max, 1e-6)
+
+ self.inp.max_velocity = self._max_vel_buf
+ else:
+ # Near-zero motion, use hardware limits (reuse buffer)
+ self._max_vel_buf[:] = self._hardware_v_max
+ self.inp.max_velocity = self._max_vel_buf
+
+ def tick(self) -> tuple[np.ndarray, np.ndarray, bool]:
+ """
+ Execute one control cycle.
+
+ Warning: Returned arrays are reused across calls. Copy if needed across ticks.
+
+ Returns:
+ Tuple of (position, velocity, finished):
+ - position: Current commanded position in radians
+ - velocity: Current commanded velocity in rad/s
+ - finished: True if target reached (position mode) or velocity reached (velocity mode)
+ """
+ if not self.active:
+ # Sync _pos_out with current position for inactive state
+ self._pos_out[:] = self.inp.current_position
+ return self._pos_out, self._zeros_np, True
+
+ result, pos, vel = self._tick_ruckig()
+
+ if result in _RUCKIG_ERRORS:
+ return self._pos_out, self._zeros_np, True
+
+ # pos/vel are pre-allocated buffers from _tick_ruckig
+ return pos, vel, result == Result.Finished
+
+ def reset_limits(self) -> None:
+ """Reset velocity, acceleration, and jerk limits to hardware defaults."""
+ self._vel_scale = 1.0
+ self._acc_scale = 1.0
+ self._apply_limits()
+
+ def reset(self) -> None:
+ """Reset executor state."""
+ self._vel_scale = 1.0
+ self._acc_scale = 1.0
+ self.active = False
+ self._cart_vel_limit = None
+ self._init_state()
+
+ @property
+ def cart_vel_limit(self) -> float | None:
+ """Get current Cartesian velocity limit in mm/s, or None if disabled."""
+ return self._cart_vel_limit
+
+
+# =============================================================================
+# Cartesian-Space Executor
+# =============================================================================
+
+
+class CartesianStreamingExecutor(RuckigExecutorBase):
+    """
+    Cartesian-space Ruckig executor for smooth TCP motion.
+
+    Uses SE3 Lie algebra representation internally:
+    - Position: [x, y, z] in meters
+    - Orientation: [wx, wy, wz] as axis-angle vector (radians)
+
+    Ruckig operates on this 6D tangent space representation, ensuring
+    smooth interpolation without gimbal lock issues.
+
+    Key features:
+    - Jerk-limited smoothing via Ruckig in Cartesian space
+    - Position mode for MOVECART (straight-line TCP motion)
+    - Velocity mode for CARTJOG (1-DOF jogging)
+    - WRF/TRF frame support for jogging
+    """
+
+    def __init__(self, dt: float = INTERVAL_S):
+        """
+        Initialize Cartesian streaming executor.
+
+        Args:
+            dt: Control cycle time in seconds
+        """
+        # Reference pose for tangent space computations
+        # Must be set before super().__init__ calls _init_limits/_init_state
+        self.reference_pose: np.ndarray | None = None
+
+        # Pre-allocated arrays for Ruckig parameters (reused to avoid per-tick allocations)
+        # Ruckig copies values on assignment, so we update in-place then assign same array
+        # Must be created before super().__init__() since _apply_limits() is called during init
+        self._max_velocity_arr = np.zeros(6, dtype=np.float64)
+        self._max_acceleration_arr = np.zeros(6, dtype=np.float64)
+        self._max_jerk_arr = np.zeros(6, dtype=np.float64)
+        self._target_velocity_arr = np.zeros(6, dtype=np.float64)
+        self._target_acceleration_arr = np.zeros(6, dtype=np.float64)
+
+        super().__init__(num_dofs=6, dt=dt)  # 6-DOF: [x, y, z, wx, wy, wz]
+
+        # Pre-allocated numpy arrays for hot path (avoids allocations per tick)
+        self._tangent_buf = np.zeros(6, dtype=np.float64)
+        self._vel_np_buf = np.zeros(6, dtype=np.float64)
+        self._world_vel_buf = np.zeros(6, dtype=np.float64)
+
+        # SE3 workspace buffers for JIT functions (avoids allocations in pose conversions)
+        self._ref_inv_buf = np.zeros((4, 4), dtype=np.float64)
+        self._delta_buf = np.zeros((4, 4), dtype=np.float64)
+        self._result_pose_buf = np.zeros((4, 4), dtype=np.float64)
+        # Additional workspace for se3_log_ws/se3_exp_ws (zero internal allocation)
+        self._omega_ws = np.zeros(3, dtype=np.float64)
+        self._R_ws = np.zeros((3, 3), dtype=np.float64)
+        self._V_ws = np.zeros((3, 3), dtype=np.float64)  # Reused for V and V_inv
+
+    def _init_limits(self) -> None:
+        """Initialize Cartesian velocity/acceleration/jerk limits from centralized config."""
+        # Use jog limits for streaming (servo/jog share the same executor)
+        # Linear limits (SI: m/s, m/s², m/s³)
+        self._v_lin_max = LIMITS.cart.jog.velocity.linear
+        self._a_lin_max = LIMITS.cart.jog.acceleration.linear
+        self._j_lin_max = LIMITS.cart.jog.jerk.linear
+        # Angular limits (SI: rad/s, rad/s², rad/s³)
+        self._v_ang_max = LIMITS.cart.jog.velocity.angular
+        self._a_ang_max = LIMITS.cart.jog.acceleration.angular
+        self._j_ang_max = LIMITS.cart.jog.jerk.angular
+
+    def _init_state(self) -> None:
+        """Initialize Ruckig input parameters."""
+        self.inp.current_position = self._zeros
+        self.inp.current_velocity = self._zeros
+        self.inp.current_acceleration = self._zeros
+        self.inp.target_position = self._zeros
+        self.inp.target_velocity = self._zeros
+        self.inp.target_acceleration = self._zeros
+        self._apply_limits()
+
+    def _apply_limits(self) -> None:
+        """Apply current limits (with scaling) to Ruckig parameters.
+
+        Uses pre-allocated numpy arrays to avoid per-tick allocations.
+        """
+        # Update velocity limits in-place
+        self._max_velocity_arr[:3] = self._v_lin_max * self._vel_scale
+        self._max_velocity_arr[3:] = self._v_ang_max * self._vel_scale
+        self.inp.max_velocity = self._max_velocity_arr
+
+        # Update acceleration limits in-place
+        self._max_acceleration_arr[:3] = self._a_lin_max * self._acc_scale
+        self._max_acceleration_arr[3:] = self._a_ang_max * self._acc_scale
+        self.inp.max_acceleration = self._max_acceleration_arr
+
+        # Update jerk limits in-place
+        self._max_jerk_arr[:3] = self._j_lin_max
+        self._max_jerk_arr[3:] = self._j_ang_max
+        self.inp.max_jerk = self._max_jerk_arr
+
+    def sync_pose(self, current_pose: np.ndarray) -> None:
+        """
+        Sync current pose from robot feedback.
+
+        Call this when idle to ensure executor starts from actual robot pose.
+        Sets the reference pose for tangent space computations.
+
+        Args:
+            current_pose: Current TCP pose as 4x4 SE3 matrix
+        """
+        self.reference_pose = current_pose.copy()  # avoid aliasing with cached FK
+        # Reset Ruckig state to origin (relative to reference)
+        self.inp.current_position = self._zeros
+        self.inp.current_velocity = self._zeros
+        self.inp.current_acceleration = self._zeros
+        self.inp.target_position = self._zeros
+        self.active = False
+
+    def _pose_to_tangent(self, pose: np.ndarray) -> np.ndarray:
+        """
+        Convert SE3 pose to 6D tangent vector relative to reference.
+
+        The tangent vector is the Lie algebra representation (twist):
+        [vx, vy, vz, wx, wy, wz] where v is linear and w is angular.
+
+        Args:
+            pose: 4x4 SE3 matrix to convert
+
+        Returns:
+            Pre-allocated 6D buffer (reused across calls; Ruckig copies on assignment)
+        """
+        if self.reference_pose is None:
+            self._tangent_buf.fill(0.0)
+            return self._tangent_buf
+        # Use JIT function with pre-allocated workspace buffers (zero allocation)
+        _pose_to_tangent_jit(
+            self.reference_pose,
+            pose,
+            self._ref_inv_buf,
+            self._delta_buf,
+            self._tangent_buf,
+            self._omega_ws,
+            self._R_ws,
+            self._V_ws,
+        )
+        return self._tangent_buf
+
+    def _tangent_to_pose(self, tangent: np.ndarray) -> np.ndarray:
+        """
+        Convert 6D tangent vector back to SE3 pose.
+
+        Args:
+            tangent: 6D tangent vector [x, y, z, wx, wy, wz]
+
+        Returns:
+            4x4 SE3 matrix (pre-allocated buffer, reused across calls)
+        """
+        if self.reference_pose is None:
+            return np.eye(4, dtype=np.float64)
+        # Copy tangent to buffer and use JIT function with pre-allocated workspace
+        self._tangent_buf[:] = tangent
+        _tangent_to_pose_jit(
+            self.reference_pose,
+            self._tangent_buf,
+            self._delta_buf,
+            self._result_pose_buf,
+            self._omega_ws,
+            self._R_ws,
+            self._V_ws,
+        )
+        return self._result_pose_buf
+
+    def set_pose_target(self, target_pose: np.ndarray) -> None:
+        """
+        Set target pose for position mode (MOVECART).
+
+        Ruckig will smoothly interpolate from current pose to target
+        along a straight line in Cartesian space.
+
+        Args:
+            target_pose: Target TCP pose as SE3
+        """
+        target_tangent = self._pose_to_tangent(target_pose)
+
+        self.inp.control_interface = ControlInterface.Position
+        self.inp.target_position = target_tangent
+        self.inp.target_velocity = self._zeros  # Stop at target
+
+        self._apply_limits()
+        self.active = True
+
+    def set_jog_velocity_1dof(
+        self, axis: int, velocity: float, is_rotation: bool
+    ) -> None:
+        """
+        Set 1-DOF jog velocity in body frame (TRF - Tool Reference Frame).
+
+        The tangent space is relative to reference_pose, so velocities are
+        naturally in body/tool frame. Use this for TRF jogging.
+
+        Uses velocity mode - Ruckig smoothly accelerates/decelerates
+        to reach target velocity. Call with velocity=0 to stop.
+
+        Args:
+            axis: Axis index (0=X, 1=Y, 2=Z)
+            velocity: Target velocity (m/s for linear, rad/s for rotation)
+            is_rotation: True for rotation axes (RX, RY, RZ)
+        """
+        # Update target velocity in-place (zero allocation)
+        self._target_velocity_arr.fill(0.0)
+        if is_rotation:
+            self._target_velocity_arr[3 + axis] = velocity
+        else:
+            self._target_velocity_arr[axis] = velocity
+
+        self.inp.control_interface = ControlInterface.Velocity
+        self.inp.target_velocity = self._target_velocity_arr
+        self._target_acceleration_arr.fill(0.0)
+        self.inp.target_acceleration = self._target_acceleration_arr
+
+        self._apply_limits()
+        self.active = True
+
+    def set_jog_velocity_1dof_wrf(
+        self,
+        axis: int,
+        velocity: float,
+        is_rotation: bool,
+    ) -> None:
+        """
+        Set 1-DOF jog velocity in world reference frame (WRF).
+
+        Transforms the velocity from world frame to body frame (tangent space)
+        before applying to Ruckig. Requires reference_pose to be set.
+
+        Args:
+            axis: Axis index (0=X, 1=Y, 2=Z)
+            velocity: Target velocity (m/s for linear, rad/s for rotation)
+            is_rotation: True for rotation axes (RX, RY, RZ)
+        """
+        if self.reference_pose is None:
+            logger.warning("set_jog_velocity_1dof_wrf called without reference_pose")
+            return
+
+        # Reuse pre-allocated buffer for world velocity
+        self._world_vel_buf.fill(0.0)
+        if is_rotation:
+            self._world_vel_buf[3 + axis] = velocity
+        else:
+            self._world_vel_buf[axis] = velocity
+
+        # Transform from world frame to body frame (tangent space)
+        # Body velocity = R^T @ world velocity
+        R = self.reference_pose[:3, :3]
+
+        # Rotate linear and angular parts into the target buffer using
+        # np.dot's `out` argument (no intermediate allocation)
+        np.dot(R.T, self._world_vel_buf[:3], self._target_velocity_arr[:3])
+        np.dot(R.T, self._world_vel_buf[3:], self._target_velocity_arr[3:])
+
+        self.inp.control_interface = ControlInterface.Velocity
+        self.inp.target_velocity = self._target_velocity_arr
+        self._target_acceleration_arr.fill(0.0)
+        self.inp.target_acceleration = self._target_acceleration_arr
+
+        self._apply_limits()
+        self.active = True
+
+    def tick(self) -> tuple[np.ndarray, NDArray[np.float64], bool]:
+        """
+        Execute one control cycle.
+
+        Warning: Returned pose and velocity arrays are reused across calls.
+        Copy if needed across ticks.
+
+        Returns:
+            Tuple of (smoothed_pose, velocity, finished):
+            - smoothed_pose: The smoothed Cartesian pose for this tick (buffer, reused)
+            - velocity: Current 6D velocity [vx, vy, vz, wx, wy, wz] (buffer, reused)
+            - finished: True if target reached (position mode) or
+              target velocity reached (velocity mode)
+        """
+        if not self.active or self.reference_pose is None:
+            self._vel_np_buf.fill(0.0)
+            return (
+                self.reference_pose
+                if self.reference_pose is not None
+                else _IDENTITY_SE3,
+                self._vel_np_buf,
+                True,
+            )
+
+        result, pos, vel = self._tick_ruckig()
+
+        if result in _RUCKIG_ERRORS:
+            self._vel_np_buf.fill(0.0)
+            return (
+                self.reference_pose
+                if self.reference_pose is not None
+                else _IDENTITY_SE3,
+                self._vel_np_buf,
+                True,
+            )
+
+        # Convert tangent back to pose
+        smoothed_pose = self._tangent_to_pose(pos)
+        # Copy velocity into pre-allocated buffer
+        self._vel_np_buf[:] = vel
+
+        # Don't auto-deactivate in velocity mode - caller controls via set_jog_velocity(0)
+        return smoothed_pose, self._vel_np_buf, result == Result.Finished
+
+    def correct_position(self, actual_pose: np.ndarray) -> None:
+        """Correct CSE position to match actual TCP after joint-space clamping.
+
+        Only updates position — velocity and acceleration are left as-is
+        since the robot is still moving. Ruckig handles the slight
+        state inconsistency and re-plans smoothly.
+        """
+        # _pose_to_tangent writes its result into self._tangent_buf
+        self._pose_to_tangent(actual_pose)
+        self.inp.current_position = self._tangent_buf
+
+    def reset(self) -> None:
+        """Reset executor state."""
+        self._vel_scale = 1.0
+        self._acc_scale = 1.0
+        self.reference_pose = None
+        self.active = False
+        self._init_state()
diff --git a/parol6/motion/trajectory.py b/parol6/motion/trajectory.py
new file mode 100644
index 0000000..95c0a20
--- /dev/null
+++ b/parol6/motion/trajectory.py
@@ -0,0 +1,1227 @@
+"""
+Unified trajectory generation pipeline using TOPP-RA for time-optimal path parameterization.
+
+This module provides the shared trajectory infrastructure for all motion commands.
+Path generation (geometry) stays in command files; this handles time parameterization.
+
+Pipeline:
+ 1. Command generates Cartesian poses (for cart commands) or joint targets
+ 2. JointPath holds uniformly-sampled joint positions
+ 3. TrajectoryBuilder applies TOPP-RA + motion profile to produce Trajectory
+ 4. Trajectory contains motor steps ready for tick-by-tick execution
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from enum import Enum
+
+import numpy as np
+from numpy.typing import NDArray
+from ruckig import InputParameter, OutputParameter, Result, Ruckig # type: ignore[unresolved-import, ty:unresolved-import]
+
+from scipy.interpolate import PPoly
+
+import toppra as ta
+import toppra.algorithm as algo
+import toppra.constraint as constraint
+from toppra.interpolator import SplineInterpolator
+
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from parol6.config import INTERVAL_S, LIMITS, rad_to_steps
+
+
+from pinokin import Damping, IKSolver, se3_from_rpy
+
+
+logger = logging.getLogger(__name__)
+
+
+def _rad_to_steps_alloc(rad: NDArray) -> NDArray[np.int32]:
+    """Convert radians to steps, allocating output. For planning phase only.
+
+    Args:
+        rad: 1D (6,) or 2D (N, 6) array of joint angles in radians.
+
+    Returns:
+        int32 array of the same shape with converted motor steps.
+    """
+    out = np.zeros(rad.shape, dtype=np.int32)
+    if rad.ndim == 1:
+        # rad_to_steps writes its result into the provided output array
+        rad_to_steps(rad, out)
+    else:
+        # 2D array (trajectory): convert row by row
+        for i in range(rad.shape[0]):
+            rad_to_steps(rad[i], out[i])
+    return out
+
+
+class _LinearPath:
+    """Piecewise linear path wrapper for TOPPRA compatibility.
+
+    Wraps a scipy PPoly (degree-1) to satisfy the toppra path interface:
+    __call__(s, order), .dof, .path_interval. Linear segments prevent
+    overshoot between waypoints — critical near wrist singularities where
+    cubic spline bulge amplifies orientation error.
+    """
+
+    __slots__ = ("_pp", "dof", "path_interval")
+
+    def __init__(self, ppoly: PPoly, dof: int) -> None:
+        # dof and path_interval are the attributes toppra reads directly
+        self._pp = ppoly
+        self.dof = dof
+        self.path_interval = [float(ppoly.x[0]), float(ppoly.x[-1])]
+
+    def __call__(self, s_in: float | NDArray, order: int = 0) -> NDArray[np.float64]:
+        """Evaluate the path (order=0) or its derivative (order>=1) at s_in.
+
+        Scalar input returns a (dof,) vector; array input returns (len(s), dof).
+        """
+        scalar = np.isscalar(s_in)
+        s = np.atleast_1d(np.asarray(s_in, dtype=float))
+        if order <= 1:
+            result = self._pp(s, order)
+        else:
+            # Second+ derivative of piecewise linear is zero
+            result = np.zeros((len(s), self.dof))
+        return result[0] if scalar and result.ndim > 1 else result
+
+
+class ProfileType(Enum):
+    """Available trajectory profile types for motion planning."""
+
+    TOPPRA = "toppra"  # Time-optimal path following (default)
+    RUCKIG = "ruckig"  # Point-to-point jerk-limited (can't follow Cartesian paths)
+    QUINTIC = "quintic"  # Quintic polynomial (C² smooth, predictable shape)
+    TRAPEZOID = "trapezoid"  # Trapezoidal velocity profile
+    LINEAR = "linear"  # Direct linear interpolation (no smoothing)
+
+    @classmethod
+    def from_string(cls, name: str) -> ProfileType:
+        """Convert string to ProfileType, case-insensitive.
+
+        Unknown names fall back to TOPPRA with a warning; "NONE" is an
+        accepted alias for LINEAR (no smoothing).
+        """
+        name_upper = name.upper()
+        if name_upper == "NONE":
+            return cls.LINEAR
+        try:
+            return cls[name_upper]
+        except KeyError:
+            logger.warning("Unknown profile type '%s', using TOPPRA", name)
+            return cls.TOPPRA
+
+
+@dataclass
+class JointPath:
+    """
+    Joint-space path uniformly sampled in path space.
+
+    This is the common abstraction for all motion commands. Cartesian commands
+    solve IK to produce this; joint commands interpolate directly.
+
+    Attributes:
+        positions: (N, 6) array of joint angles in radians
+        valid: Per-row IK validity. None means all rows are valid.
+    """
+
+    positions: NDArray[np.float64]  # (N, 6) joint angles in radians
+    valid: NDArray[np.bool_] | None = None  # (N,) per-row validity, None = all valid
+
+    @property
+    def is_partial(self) -> bool:
+        """True if some IK solutions failed."""
+        return self.valid is not None
+
+    def __len__(self) -> int:
+        return len(self.positions)
+
+    def __getitem__(self, idx: int) -> NDArray[np.float64]:
+        return self.positions[idx]
+
+    @classmethod
+    def from_poses(
+        cls,
+        poses: NDArray[np.float64] | list[np.ndarray],
+        seed_q: NDArray[np.float64],
+        stop_on_failure: bool = True,
+    ) -> JointPath:
+        """
+        Solve IK for poses with seeded chain.
+
+        Each IK solve uses the previous solution as seed, maintaining continuity.
+
+        Args:
+            poses: Either (N, 6) array of [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg]
+                or list of SE3 poses
+            seed_q: Initial joint angles for IK seeding (radians)
+            stop_on_failure: If True, stop solving after first IK failure
+                (real controller). If False, solve all poses (diagnostic).
+
+        Returns:
+            JointPath with solved joint positions. If some poses failed,
+            ``valid`` is set to a per-row bool array (``is_partial`` is True).
+
+        Raises:
+            IKError: If fewer than 2 consecutive valid poses from the start.
+        """
+        # Local imports avoid a module-level import cycle with the utils package
+        from parol6.utils.error_catalog import make_error
+        from parol6.utils.error_codes import ErrorCode
+        from parol6.utils.errors import IKError
+
+        # Convert to list of SE3 (4x4) matrices for batch_ik
+        if isinstance(poses, np.ndarray) and poses.ndim == 3:
+            se3_poses = [poses[i] for i in range(len(poses))]
+        elif isinstance(poses, np.ndarray):
+            # (N, 6) rows of [x_mm, y_mm, z_mm, rx_deg, ry_deg, rz_deg]:
+            # convert mm -> m and deg -> rad while building SE3 matrices
+            n = len(poses)
+            se3_poses = [np.empty((4, 4), dtype=np.float64) for _ in range(n)]
+            for i, p in enumerate(poses):
+                se3_from_rpy(
+                    p[0] / 1000.0,
+                    p[1] / 1000.0,
+                    p[2] / 1000.0,
+                    np.radians(p[3]),
+                    np.radians(p[4]),
+                    np.radians(p[5]),
+                    se3_poses[i],
+                )
+        else:
+            se3_poses = poses
+
+        solver = IKSolver(
+            PAROL6_ROBOT.robot,
+            damping=Damping.Sugihara,
+            tol=1e-12,
+            lm_lambda=0.0,
+            max_iter=10,
+            max_restarts=10,
+        )
+        result = solver.batch_ik(
+            se3_poses,
+            np.asarray(seed_q, dtype=np.float64),
+            stop_on_failure=stop_on_failure,
+        )
+
+        if result.all_valid:
+            return cls(positions=result.joint_positions)
+
+        valid = np.array(result.valid, dtype=np.bool_)
+        # Count consecutive valid from start. argmin on a bool array returns
+        # the first False index; the all-True case was already returned above.
+        first_fail = int(np.argmin(valid))  # first False index
+        if first_fail < 2:
+            if stop_on_failure:
+                raise IKError(
+                    make_error(
+                        ErrorCode.IK_PARTIAL_PATH,
+                        valid=str(first_fail),
+                        total=str(len(se3_poses)),
+                    )
+                )
+            # Diagnostic mode: return partial data for visualization
+            return cls(positions=result.joint_positions, valid=valid)
+
+        return cls(positions=result.joint_positions, valid=valid)
+
+    @classmethod
+    def interpolate(
+        cls,
+        start_rad: NDArray[np.float64],
+        end_rad: NDArray[np.float64],
+        n_samples: int,
+    ) -> JointPath:
+        """
+        Direct joint-space linear interpolation (for MovePose/MoveJoint).
+
+        Args:
+            start_rad: Starting joint angles in radians
+            end_rad: Ending joint angles in radians
+            n_samples: Number of samples (minimum 2)
+
+        Returns:
+            JointPath with interpolated positions
+        """
+        n_samples = max(2, n_samples)
+        start = np.asarray(start_rad, dtype=np.float64)
+        end = np.asarray(end_rad, dtype=np.float64)
+
+        # Vectorized interpolation using broadcasting
+        t = np.linspace(0, 1, n_samples).reshape(-1, 1)
+        positions = start + t * (end - start)
+
+        return cls(positions=positions)
+
+    def append(self, other: JointPath) -> JointPath:
+        """
+        Concatenate paths (for path blending).
+
+        Args:
+            other: Path to append
+
+        Returns:
+            New JointPath with concatenated positions
+        """
+        # Note: validity flags are not carried over; the result is all-valid
+        combined = np.concatenate([self.positions, other.positions], axis=0)
+        return JointPath(positions=combined)
+
+    def sample(self, s: float) -> NDArray[np.float64]:
+        """
+        Sample path at normalized position s in [0, 1].
+
+        Uses linear interpolation between path points.
+
+        Args:
+            s: Path position from 0 (start) to 1 (end)
+
+        Returns:
+            Interpolated joint position
+        """
+        s = np.clip(s, 0.0, 1.0)
+        n = len(self.positions)
+        if n < 2:
+            return self.positions[0].copy()
+
+        idx_float = s * (n - 1)
+        idx_lo = int(idx_float)
+        idx_hi = min(idx_lo + 1, n - 1)
+        frac = idx_float - idx_lo
+
+        return self.positions[idx_lo] * (1 - frac) + self.positions[idx_hi] * frac
+
+    def sample_many(self, s_values: NDArray[np.float64]) -> NDArray[np.float64]:
+        """
+        Vectorized sampling at multiple path positions.
+
+        Args:
+            s_values: Array of path positions from 0 (start) to 1 (end)
+
+        Returns:
+            (N, 6) array of interpolated joint positions
+        """
+        s_values = np.clip(s_values, 0.0, 1.0)
+        n = len(self.positions)
+        if n < 2:
+            # Degenerate single-point path: repeat the point for every sample
+            return np.tile(self.positions[0], (len(s_values), 1))
+
+        idx_float = s_values * (n - 1)
+        idx_lo = idx_float.astype(np.intp)
+        idx_hi = np.minimum(idx_lo + 1, n - 1)
+        frac = (idx_float - idx_lo).reshape(-1, 1)
+
+        return self.positions[idx_lo] * (1 - frac) + self.positions[idx_hi] * frac
+
+
+@dataclass
+class Trajectory:
+    """
+    Ready-to-execute trajectory with motor steps at control rate.
+
+    Precomputed trajectories are sent directly to the controller without smoothing.
+    StreamingExecutor is only used for online targets (jogging/streaming).
+
+    Attributes:
+        steps: (M, 6) motor steps at each control tick
+        duration: Actual duration in seconds
+    """
+
+    steps: NDArray[np.int32]  # (M, 6) motor steps, one row per control tick
+    duration: float  # seconds
+
+    def __len__(self) -> int:
+        """Number of control ticks in the trajectory."""
+        return len(self.steps)
+
+    def __getitem__(self, idx: int) -> NDArray[np.int32]:
+        """Motor steps for tick ``idx`` (view into the steps array)."""
+        return self.steps[idx]
+
+
+class TrajectoryBuilder:
+ """
+ Converts JointPath to executable Trajectory.
+
+ Uses TOPP-RA to compute maximum allowable path speed, then applies
+ the selected motion profile (clamped to TOPP-RA limits).
+
+ All limits come from PAROL6_ROBOT config - no hardcoded fallbacks.
+ """
+
+ def __init__(
+ self,
+ joint_path: JointPath,
+ profile: ProfileType | str,
+ velocity_frac: float = 1.0,
+ accel_frac: float = 1.0,
+ jerk_frac: float = 1.0,
+ duration: float | None = None,
+ dt: float = INTERVAL_S,
+ cart_vel_limit: float | None = None,
+ cart_acc_limit: float | None = None,
+ ):
+ """
+ Initialize trajectory builder.
+
+ Args:
+ joint_path: Path in joint space
+ profile: Motion profile to apply
+ velocity_frac: Scale joint velocity limits (0.0-1.0), default 1.0
+ accel_frac: Scale joint acceleration limits (0.0-1.0), default 1.0
+ jerk_frac: Scale jerk limits (0.0-1.0), default 1.0
+ duration: Override duration (stretches profile if longer than TOPP-RA min)
+ dt: Control loop time step
+ cart_vel_limit: Cartesian linear velocity limit in m/s (for Cartesian commands)
+ cart_acc_limit: Cartesian linear acceleration limit in m/s² (for Cartesian commands)
+ """
+ self.joint_path = joint_path
+ self.profile = (
+ ProfileType.from_string(profile) if isinstance(profile, str) else profile
+ )
+
+ # RUCKIG is point-to-point only - if Cartesian limits are set, we need path following
+ if self.profile == ProfileType.RUCKIG and (
+ cart_vel_limit is not None or cart_acc_limit is not None
+ ):
+ logger.warning("RUCKIG cannot follow Cartesian paths, using TOPPRA")
+ self.profile = ProfileType.TOPPRA
+
+ self.velocity_frac = velocity_frac
+ self.accel_frac = accel_frac
+ self.jerk_frac = jerk_frac
+ self.duration = duration
+ self.dt = dt
+ self.cart_vel_limit = cart_vel_limit
+ self.cart_acc_limit = cart_acc_limit
+
+ # Joint limits scaled by user fractions.
+ # Apply 1% safety margin to account for floating-point precision in
+ # trajectory libraries and integer rounding in rad→steps conversion.
+ limit_margin = 0.99
+ self.v_max = LIMITS.joint.hard.velocity * self.velocity_frac * limit_margin
+ self.a_max = LIMITS.joint.hard.acceleration * self.accel_frac * limit_margin
+ self.j_max = LIMITS.joint.hard.jerk * self.jerk_frac * limit_margin
+
+ # Pre-compute limit arrays for TOPP-RA (avoids allocation per build() call)
+ self._vlim = np.column_stack([-self.v_max, self.v_max])
+ self._alim = np.column_stack([-self.a_max, self.a_max])
+
+ def build(self) -> Trajectory:
+ """
+ Generate time-parameterized trajectory.
+
+ Uses TOPP-RA to compute time-optimal trajectory, then samples it directly
+ at the control rate. No interpolation of the original joint path is needed
+ since TOPP-RA's trajectory already provides smooth, continuous positions.
+
+ For RUCKIG profile: Uses Ruckig for point-to-point motion (ignores path waypoints)
+ For other profiles: Uses TOPP-RA trajectory directly
+
+ Returns:
+ Trajectory ready for execution
+ """
+ if len(self.joint_path) < 2:
+ # Trivial path - single point
+ steps = _rad_to_steps_alloc(
+ self.joint_path.positions[0:1] # Keep 2D shape (1, 6)
+ )
+ return Trajectory(steps=steps, duration=0.0)
+
+ # Route to appropriate trajectory builder based on profile
+ if self.profile == ProfileType.RUCKIG:
+ # Point-to-point jerk-limited motion (doesn't follow path waypoints)
+ return self._build_ruckig_trajectory()
+ elif self.profile == ProfileType.LINEAR:
+ # Simple linear interpolation - no velocity smoothing
+ return self._build_simple_trajectory()
+ elif self.profile == ProfileType.QUINTIC:
+ # Quintic polynomial timing
+ return self._build_quintic_trajectory()
+ elif self.profile == ProfileType.TRAPEZOID:
+ # Trapezoidal velocity profile along path
+ return self._build_trapezoid_trajectory()
+ else:
+ # TOPPRA is default - time-optimal path following
+ return self._build_toppra_trajectory()
+
+ def _build_toppra_trajectory(self) -> Trajectory:
+ """
+ Build trajectory using TOPP-RA's time-optimal path parameterization.
+
+ Uses piecewise linear interpolation through waypoints (no overshoot)
+ and computes time-optimal velocity profile respecting joint limits
+ and optional Cartesian velocity limits.
+ """
+ positions = self.joint_path.positions
+ n_points = len(positions)
+
+ # Uniform parameterization for path knots
+ ss_waypoints = np.linspace(0.0, 1.0, n_points)
+
+ # Piecewise linear PPoly — prevents cubic spline overshoot that
+ # amplifies orientation error near wrist singularities
+ n_seg = n_points - 1
+ dof = positions.shape[1]
+ c = np.zeros((2, n_seg, dof))
+ for i in range(n_seg):
+ dx = ss_waypoints[i + 1] - ss_waypoints[i]
+ c[0, i, :] = (positions[i + 1] - positions[i]) / dx
+ c[1, i, :] = positions[i]
+ path = _LinearPath(PPoly(c, ss_waypoints), dof)
+
+ # Use pre-computed limit arrays for constraints
+ joint_vel_constraint = constraint.JointVelocityConstraint(self._vlim)
+ joint_acc_constraint = constraint.JointAccelerationConstraint(self._alim)
+ constraints: list[constraint.Constraint] = [
+ joint_vel_constraint,
+ joint_acc_constraint,
+ ]
+
+ # Add Cartesian velocity constraint if specified
+ if self.cart_vel_limit is not None and self.cart_vel_limit > 0:
+ cart_constraint = self._build_cart_vel_constraint(path, ss_waypoints)
+ if cart_constraint is not None:
+ constraints.append(cart_constraint)
+
+ try:
+ # Use evenly-spaced gridpoints - TOPPRA docs recommend "at least a few times
+ # the number of waypoints". Auto-selection can cluster points near
+ # discontinuities, causing TOPPRAsd to produce incorrect durations.
+ n_gridpoints = n_points * 3
+ gridpoints = np.linspace(0.0, 1.0, n_gridpoints)
+
+ # Use TOPPRAsd if duration is specified, otherwise time-optimal TOPPRA
+ if self.duration is not None and self.duration > 0:
+ instance = algo.TOPPRAsd(constraints, path, gridpoints=gridpoints)
+ instance.set_desired_duration(self.duration)
+ jnt_traj = instance.compute_trajectory()
+ if jnt_traj is not None:
+ duration = self.duration
+ logger.debug(
+ "TrajectoryBuilder: TOPPRAsd target_duration=%.3f, path_len=%d",
+ duration,
+ n_points,
+ )
+ else:
+ # Fall back to time-optimal if TOPPRAsd fails
+ logger.warning("TOPPRAsd failed, trying time-optimal TOPPRA")
+ instance = algo.TOPPRA(constraints, path, gridpoints=gridpoints)
+ jnt_traj = instance.compute_trajectory()
+ else:
+ instance = algo.TOPPRA(constraints, path, gridpoints=gridpoints)
+ jnt_traj = instance.compute_trajectory()
+
+ if not isinstance(jnt_traj, SplineInterpolator):
+ raise RuntimeError("TOPP-RA failed to compute trajectory")
+
+ duration = float(jnt_traj.duration)
+
+ logger.debug(
+ "TrajectoryBuilder: TOPP-RA duration=%.3f, path_len=%d",
+ duration,
+ n_points,
+ )
+
+ # Sample trajectory at control rate, including endpoint (vectorized)
+ n_output = max(2, int(np.floor(duration / self.dt)) + 1)
+ times = np.arange(n_output - 1) * self.dt
+ trajectory_rad = np.empty((n_output, 6), dtype=np.float64)
+ trajectory_rad[:-1] = jnt_traj(times)
+ trajectory_rad[-1] = jnt_traj(duration)
+
+ logger.debug(
+ "TrajectoryBuilder: output_samples=%d, duration=%.3f",
+ len(trajectory_rad),
+ duration,
+ )
+
+ # Convert to motor steps (vectorized)
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=duration)
+
+ except Exception as e:
+ logger.warning("TOPPRA failed: %s. Falling back to LINEAR profile.", e)
+ return self._build_simple_trajectory()
+
+ def _build_simple_trajectory(self) -> Trajectory:
+ """
+ Build trajectory with simple linear interpolation.
+
+ Uses uniform s-spacing with local slowdown where velocity limits
+ would be exceeded. This handles singularities and wrist flips by
+ stretching only the affected segments.
+ """
+ duration = (
+ self.duration
+ if self.duration and self.duration > 0
+ else self._compute_joint_duration_linear()
+ )
+
+ # Sample path uniformly
+ n_output = max(2, int(np.ceil(duration / self.dt)))
+ s_values = np.linspace(0.0, 1.0, n_output)
+ trajectory_rad = self.joint_path.sample_many(s_values)
+
+ # Enforce velocity limits by stretching segments where needed
+ trajectory_rad, duration = self._enforce_segment_limits(
+ trajectory_rad, duration
+ )
+
+ # Convert to motor steps
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=duration)
+
+ def _is_cartesian_path(self) -> bool:
+ """Check if this is a Cartesian path (has Cartesian velocity limits set)."""
+ return self.cart_vel_limit is not None and self.cart_vel_limit > 0
+
+ def _compute_s_profile_limits(self) -> tuple[float, float, float]:
+ """
+ Compute path parameter (s) limits derived from joint limits.
+
+ For a linear path in joint space:
+ joint_velocity = joint_delta * (ds/dt)
+ joint_acceleration = joint_delta * (d²s/dt²)
+ joint_jerk = joint_delta * (d³s/dt³)
+
+ So the s-profile limits are:
+ vmax_s = min(v_max[j] / |delta[j]|) for all joints
+ amax_s = min(a_max[j] / |delta[j]|) for all joints
+ jmax_s = min(j_max[j] / |delta[j]|) for all joints
+
+ Returns:
+ (vmax_s, amax_s, jmax_s): Limits for the path parameter profile
+ """
+ positions = self.joint_path.positions
+ if len(positions) < 2:
+ return (1.0, 1.0, 1.0)
+
+ total_delta = np.abs(positions[-1] - positions[0])
+
+ # Avoid division by zero for joints that don't move
+ with np.errstate(divide="ignore", invalid="ignore"):
+ vmax_s_per_joint = np.where(
+ total_delta > 1e-9, self.v_max / total_delta, np.inf
+ )
+ amax_s_per_joint = np.where(
+ total_delta > 1e-9, self.a_max / total_delta, np.inf
+ )
+ jmax_s_per_joint = np.where(
+ total_delta > 1e-9, self.j_max / total_delta, np.inf
+ )
+
+ # The limiting joint determines the s-profile limits
+ vmax_s = float(np.min(vmax_s_per_joint))
+ amax_s = float(np.min(amax_s_per_joint))
+ jmax_s = float(np.min(jmax_s_per_joint))
+
+ return (vmax_s, amax_s, jmax_s)
+
+ def _enforce_segment_limits(
+ self,
+ trajectory_rad: NDArray[np.float64],
+ duration: float,
+ ) -> tuple[NDArray[np.float64], float]:
+ """
+ Enforce velocity limits by locally stretching segments that exceed limits.
+
+ Walks through each segment and checks if joint velocities exceed limits.
+ Where they do, stretches that segment's time. This handles singularities
+ and wrist flips by slowing only where necessary, not globally.
+
+ Args:
+ trajectory_rad: Joint positions in radians, shape (N, 6)
+ duration: Initial trajectory duration
+
+ Returns:
+ (adjusted_trajectory, adjusted_duration): Resampled trajectory with
+ locally stretched segments and new total duration
+ """
+ n_points = len(trajectory_rad)
+ if n_points < 2:
+ return trajectory_rad, duration
+
+ # Initial uniform time per segment
+ initial_dt = duration / (n_points - 1)
+
+ # Compute per-segment joint deltas
+ deltas = np.diff(trajectory_rad, axis=0) # (N-1, 6)
+
+ # For each segment, compute minimum time needed to respect velocity limits
+ # time_needed = max(|delta[j]| / v_max[j]) for all joints
+ min_segment_times = np.max(np.abs(deltas) / self.v_max, axis=1) # (N-1,)
+
+ # Also check acceleration limits between segments
+ # This is approximate - we check if velocity change between segments is feasible
+ if n_points > 2:
+ velocities = deltas / initial_dt # Approximate velocities per segment
+ accel = np.diff(velocities, axis=0) / initial_dt # (N-2, 6)
+ accel_times = np.zeros(n_points - 1)
+ # Segments that cause high acceleration need more time
+ for i in range(len(accel)):
+ max_accel_ratio = np.max(np.abs(accel[i]) / self.a_max)
+ if max_accel_ratio > 1.0:
+ # Spread the extra time across adjacent segments
+ stretch = np.sqrt(max_accel_ratio)
+ accel_times[i] = max(accel_times[i], min_segment_times[i] * stretch)
+ accel_times[i + 1] = max(
+ accel_times[i + 1], min_segment_times[i + 1] * stretch
+ )
+ min_segment_times = np.maximum(min_segment_times, accel_times)
+
+ # Ensure minimum dt per segment
+ min_segment_times = np.maximum(min_segment_times, self.dt)
+
+ # Compute actual segment times: max of initial_dt and min_segment_times
+ segment_times = np.maximum(min_segment_times, initial_dt)
+
+ # Check if any stretching was needed
+ new_duration = float(np.sum(segment_times))
+ if new_duration <= duration * 1.001: # No significant change
+ return trajectory_rad, duration
+
+ logger.warning(
+ "Extending duration from %.3fs to %.3fs (%.1f%% increase) to respect velocity/acceleration limits",
+ duration,
+ new_duration,
+ (new_duration / duration - 1) * 100,
+ )
+
+ # Resample trajectory at control rate with new timing
+ cumulative_times = np.zeros(n_points)
+ cumulative_times[1:] = np.cumsum(segment_times)
+
+ n_output = max(2, int(np.ceil(new_duration / self.dt)))
+ output_times = np.linspace(0.0, new_duration, n_output)
+
+ # Interpolate each joint
+ new_trajectory = np.empty((n_output, 6), dtype=np.float64)
+ for j in range(6):
+ new_trajectory[:, j] = np.interp(
+ output_times, cumulative_times, trajectory_rad[:, j]
+ )
+
+ return new_trajectory, new_duration
+
+ def _compute_joint_duration_trapezoid(self) -> float:
+ """
+ Compute duration for joint paths using trapezoidal profile.
+
+ For each joint, uses InterpolatePy to compute the minimum duration
+ for its displacement given its velocity/acceleration limits.
+ Returns the maximum (slowest joint determines overall duration).
+ """
+ from interpolatepy.trapezoidal import (
+ TrajectoryParams as TrapParams,
+ TrapezoidalTrajectory,
+ )
+
+ positions = self.joint_path.positions
+ if len(positions) < 2:
+ return self.dt * 2
+
+ total_delta = positions[-1] - positions[0]
+ max_duration = 0.0
+
+ for j in range(6):
+ delta = abs(total_delta[j])
+ if delta < 1e-6:
+ continue
+
+ params = TrapParams(
+ q0=0.0,
+ q1=delta,
+ v0=0.0,
+ v1=0.0,
+ vmax=self.v_max[j],
+ amax=self.a_max[j],
+ )
+ _, duration = TrapezoidalTrajectory.generate_trajectory(params)
+ max_duration = max(max_duration, duration)
+
+ return max(max_duration, self.dt * 2)
+
+ def _compute_joint_duration_quintic(self) -> float:
+ """
+ Compute duration for joint paths using quintic polynomial profile.
+
+ For quintic polynomials with zero-velocity endpoints:
+ - Peak velocity at t=T/2: v_peak = 1.875 * delta / T
+ - Peak acceleration at t=T*(3-sqrt(3))/6: a_peak = 5.77 * delta / T²
+
+ For velocity limit: T = 1.875 * delta / v_max
+ For acceleration limit: T = sqrt(5.77 * delta / a_max)
+
+ Returns the maximum duration across all joints.
+ """
+ positions = self.joint_path.positions
+ if len(positions) < 2:
+ return self.dt * 2
+
+ total_delta = np.abs(positions[-1] - positions[0])
+
+ # Velocity-limited duration: T = 1.875 * delta / v_max
+ time_vel = 1.875 * total_delta / self.v_max
+
+ # Acceleration-limited duration: T = sqrt(5.77 * delta / a_max)
+ with np.errstate(divide="ignore", invalid="ignore"):
+ time_acc = np.where(
+ self.a_max > 0,
+ np.sqrt(5.77 * total_delta / self.a_max),
+ 0.0,
+ )
+
+ # Take the maximum per joint, then across all joints
+ time_per_joint = np.maximum(time_vel, time_acc)
+ return max(float(np.max(time_per_joint)), self.dt * 2)
+
+ def _compute_joint_duration_linear(self) -> float:
+ """
+ Compute duration for joint paths using linear interpolation.
+
+ Accounts for both velocity and acceleration limits:
+ - For velocity limit: T_vel = delta / v_max
+ - For acceleration limit (triangular profile): T_acc = 2 * sqrt(2 * delta / a_max)
+
+ Returns the maximum duration across all joints.
+ """
+ positions = self.joint_path.positions
+ if len(positions) < 2:
+ return self.dt * 2
+
+ total_delta = np.abs(positions[-1] - positions[0])
+
+ # Velocity-limited duration
+ time_vel = total_delta / self.v_max
+
+ # Acceleration-limited duration (for triangular velocity profile)
+ # Time to reach target with constant accel then decel: T = 2 * sqrt(2 * d / a)
+ with np.errstate(divide="ignore", invalid="ignore"):
+ time_acc = np.where(
+ self.a_max > 0,
+ 2.0 * np.sqrt(2.0 * total_delta / self.a_max),
+ 0.0,
+ )
+
+ # Take the maximum per joint, then across all joints
+ time_per_joint = np.maximum(time_vel, time_acc)
+ return max(float(np.max(time_per_joint)), self.dt * 2)
+
+ def _compute_cartesian_duration_from_path(self) -> float:
+ """
+ Compute duration for Cartesian paths based on per-segment joint requirements.
+
+ This properly handles singularities and wrist flips by analyzing
+ the maximum joint movement required in each path segment, not just
+ the total start-to-end displacement.
+
+ Returns the sum of minimum segment times, ensuring the path can be
+ traversed without violating joint velocity limits at any point.
+ """
+ positions = self.joint_path.positions
+ if len(positions) < 2:
+ return self.dt * 2
+
+ # Compute per-segment time based on max joint movement in each segment
+ deltas = np.diff(positions, axis=0) # (N-1, 6)
+ segment_times = np.max(np.abs(deltas) / self.v_max, axis=1) # (N-1,)
+
+ # Ensure minimum time per segment
+ segment_times = np.maximum(segment_times, self.dt)
+
+ return max(float(np.sum(segment_times)), self.dt * 2)
+
+ def _build_quintic_trajectory(self) -> Trajectory:
+ """
+ Build trajectory with quintic polynomial velocity profile.
+
+ For joint moves: each joint follows its own quintic profile.
+ For Cartesian moves: TCP follows quintic profile along path.
+ """
+ if self._is_cartesian_path():
+ return self._build_quintic_trajectory_cartesian()
+ else:
+ return self._build_quintic_trajectory_joint()
+
+ def _build_quintic_trajectory_joint(self) -> Trajectory:
+ """
+ Build per-joint quintic trajectory.
+
+ Each joint independently follows a quintic polynomial profile,
+ synchronized to finish at the same time.
+ """
+ from interpolatepy import BoundaryCondition, PolynomialTrajectory, TimeInterval
+
+ start_pos = self.joint_path.positions[0]
+ end_pos = self.joint_path.positions[-1]
+
+ if self.duration:
+ duration = self.duration
+ else:
+ duration = self._compute_joint_duration_quintic()
+
+ n_output = max(2, int(np.ceil(duration / self.dt)))
+ times = np.linspace(0.0, duration, n_output)
+ trajectory_rad = np.empty((n_output, 6), dtype=np.float64)
+
+ # Generate quintic profile for each joint
+ for j in range(6):
+ delta = end_pos[j] - start_pos[j]
+ if abs(delta) < 1e-9:
+ # Joint doesn't move
+ trajectory_rad[:, j] = start_pos[j]
+ continue
+
+ # Create quintic trajectory for this joint
+ bc_start = BoundaryCondition(
+ position=start_pos[j], velocity=0.0, acceleration=0.0
+ )
+ bc_end = BoundaryCondition(
+ position=end_pos[j], velocity=0.0, acceleration=0.0
+ )
+ interval = TimeInterval(start=0.0, end=duration)
+ traj = PolynomialTrajectory.order_5_trajectory(bc_start, bc_end, interval)
+
+ # Sample the trajectory
+ for i, t in enumerate(times):
+ trajectory_rad[i, j] = traj(t)[0]
+
+ # Enforce velocity limits by stretching segments where needed
+ trajectory_rad, duration = self._enforce_segment_limits(
+ trajectory_rad, duration
+ )
+
+ # Convert to motor steps
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=duration)
+
+ def _build_quintic_trajectory_cartesian(self) -> Trajectory:
+ """
+ Build Cartesian quintic trajectory.
+
+ TCP follows quintic polynomial profile along the path, with local
+ slowdown where velocity limits would be exceeded.
+ """
+ from interpolatepy import BoundaryCondition, PolynomialTrajectory, TimeInterval
+
+ if self.duration:
+ duration = self.duration
+ else:
+ # Use per-segment analysis to handle singularities and wrist flips
+ duration = self._compute_cartesian_duration_from_path()
+
+ # Create quintic trajectory from s=0 to s=1 over duration
+ bc_start = BoundaryCondition(position=0.0, velocity=0.0, acceleration=0.0)
+ bc_end = BoundaryCondition(position=1.0, velocity=0.0, acceleration=0.0)
+ interval = TimeInterval(start=0.0, end=duration)
+ traj = PolynomialTrajectory.order_5_trajectory(bc_start, bc_end, interval)
+
+ n_output = max(2, int(np.ceil(duration / self.dt)))
+ times = np.linspace(0.0, duration, n_output)
+
+ # Evaluate quintic trajectory to get profile-shaped s values
+ profile_s = np.empty(n_output, dtype=np.float64)
+ for i in range(n_output):
+ profile_s[i] = traj(float(times[i]))[0]
+
+ # Sample path at quintic-shaped s values
+ trajectory_rad = self.joint_path.sample_many(profile_s)
+
+ # Enforce velocity limits by stretching segments where needed
+ trajectory_rad, duration = self._enforce_segment_limits(
+ trajectory_rad, duration
+ )
+
+ # Convert to motor steps
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=duration)
+
+ def _build_trapezoid_trajectory(self) -> Trajectory:
+ """
+ Build trajectory with trapezoidal velocity profile.
+
+ For joint moves: each joint follows its own trapezoidal profile.
+ For Cartesian moves: TCP follows trapezoidal profile along path.
+ """
+ if self._is_cartesian_path():
+ return self._build_trapezoid_trajectory_cartesian()
+ else:
+ return self._build_trapezoid_trajectory_joint()
+
+ def _build_trapezoid_trajectory_joint(self) -> Trajectory:
+ """
+ Build per-joint trapezoidal trajectory.
+
+ Each joint independently follows a trapezoidal velocity profile,
+ synchronized to finish at the same time.
+ """
+ from interpolatepy.trapezoidal import (
+ TrajectoryParams as TrapParams,
+ TrapezoidalTrajectory,
+ )
+
+ start_pos = self.joint_path.positions[0]
+ end_pos = self.joint_path.positions[-1]
+
+ if self.duration:
+ duration = self.duration
+ else:
+ duration = self._compute_joint_duration_trapezoid()
+
+ n_output = max(2, int(np.ceil(duration / self.dt)))
+ times = np.linspace(0.0, duration, n_output)
+ trajectory_rad = np.empty((n_output, 6), dtype=np.float64)
+
+ # Generate trapezoidal profile for each joint
+ for j in range(6):
+ delta = end_pos[j] - start_pos[j]
+ if abs(delta) < 1e-9:
+ # Joint doesn't move
+ trajectory_rad[:, j] = start_pos[j]
+ continue
+
+ # Create trapezoidal profile for this joint
+ params = TrapParams(
+ q0=start_pos[j],
+ q1=end_pos[j],
+ v0=0.0,
+ v1=0.0,
+ vmax=self.v_max[j],
+ amax=self.a_max[j],
+ )
+ traj_fn, profile_duration = TrapezoidalTrajectory.generate_trajectory(
+ params
+ )
+
+ # Scale times to match synchronized duration
+ time_scale = profile_duration / duration if duration > 0 else 1.0
+
+ # Sample the trajectory
+ for i, t in enumerate(times):
+ trajectory_rad[i, j] = traj_fn(t * time_scale)[0]
+
+ # Enforce velocity limits by stretching segments where needed
+ trajectory_rad, duration = self._enforce_segment_limits(
+ trajectory_rad, duration
+ )
+
+ # Convert to motor steps
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=duration)
+
+ def _build_trapezoid_trajectory_cartesian(self) -> Trajectory:
+ """
+ Build Cartesian trapezoidal trajectory.
+
+ TCP follows trapezoidal velocity profile along the path, with local
+ slowdown where velocity limits would be exceeded.
+ """
+ from interpolatepy.trapezoidal import (
+ TrajectoryParams as TrapParams,
+ TrapezoidalTrajectory,
+ )
+
+ if self.duration:
+ duration = self.duration
+ else:
+ # Use per-segment analysis to handle singularities and wrist flips
+ duration = self._compute_cartesian_duration_from_path()
+
+ # Compute s-profile limits from joint limits
+ vmax_s, amax_s, _ = self._compute_s_profile_limits()
+
+ # Create trapezoidal profile for path parameter s (0 to 1)
+ params = TrapParams(
+ q0=0.0,
+ q1=1.0,
+ v0=0.0,
+ v1=0.0,
+ vmax=vmax_s,
+ amax=amax_s,
+ )
+ traj_fn, profile_duration = TrapezoidalTrajectory.generate_trajectory(params)
+
+ # If user specified longer duration, scale to match
+ if self.duration and self.duration > profile_duration:
+ time_scale = profile_duration / self.duration
+ duration = self.duration
+ else:
+ time_scale = 1.0
+ duration = profile_duration
+
+ n_output = max(2, int(np.ceil(duration / self.dt)))
+ times = np.linspace(0.0, duration, n_output)
+
+ # Evaluate profile at scaled times to get profile-shaped s values
+ profile_s = np.array(
+ [traj_fn(t * time_scale)[0] for t in times], dtype=np.float64
+ )
+
+ # Sample path at trapezoid-shaped s values
+ trajectory_rad = self.joint_path.sample_many(profile_s)
+
+ # Enforce velocity limits by stretching segments where needed
+ trajectory_rad, duration = self._enforce_segment_limits(
+ trajectory_rad, duration
+ )
+
+ # Convert to motor steps
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=duration)
+
+ def _build_cart_vel_constraint(
+ self, path: ta.SplineInterpolator | _LinearPath, ss_waypoints: NDArray
+ ) -> constraint.JointVelocityConstraintVarying | None:
+ """
+ Build Cartesian velocity constraint for TOPP-RA using path-tangent method.
+
+ Uses the path tangent (dq/ds) to compute accurate Cartesian velocity limits.
+ At each path point s:
+ - cart_vel = J_lin @ q_dot = J_lin @ (dq/ds * s_dot)
+ - ||cart_vel|| = ||J_lin @ dq/ds|| * |s_dot|
+ - For ||cart_vel|| <= v_max: |s_dot| <= v_max / ||J_lin @ dq/ds||
+
+ This is more accurate than the column-norm method as it considers the
+ actual direction of motion along the path.
+
+ Args:
+ path: The spline path through joint space
+ ss_waypoints: Path parameter values at each waypoint
+
+ Returns:
+ JointVelocityConstraintVarying with path-dependent limits, or None if error
+ """
+ if self.cart_vel_limit is None or self.cart_vel_limit <= 0:
+ return None
+
+ try:
+ robot = PAROL6_ROBOT.robot
+
+ # cart_vel_limit is already in m/s (SI units)
+ v_max_m_s = self.cart_vel_limit
+ # Use scaled joint limits (respects user's velocity_frac)
+ v_max_joint = self.v_max
+
+ # Pre-allocate buffers for velocity limits (avoids per-call allocation)
+ vlim_buffer = np.empty((6, 2), dtype=np.float64)
+ _jac_buf = np.zeros((6, 6), dtype=np.float64, order="F")
+
+ def vlim_func(s: float) -> NDArray:
+ """Compute velocity limits at path position s using path tangent."""
+ q = path(s)
+ dq_ds = path(s, 1) # Path tangent (first derivative)
+
+ # Get the linear part of the Jacobian (first 3 rows)
+ robot.jacob0_into(q, _jac_buf)
+ J_lin = _jac_buf[:3, :]
+
+ # Cartesian velocity per unit s_dot along path tangent
+ cart_vel_per_sdot = np.linalg.norm(J_lin @ dq_ds)
+
+ if cart_vel_per_sdot < 1e-6:
+ # Near-zero path tangent (at waypoint or singular), use joint limits
+ vlim_buffer[:, 0] = -v_max_joint
+ vlim_buffer[:, 1] = v_max_joint
+ return vlim_buffer.copy()
+
+ # Maximum s_dot to satisfy Cartesian velocity constraint
+ max_sdot = v_max_m_s / cart_vel_per_sdot
+
+ # The Cartesian constraint limits s_dot, not individual joint velocities.
+ # We scale ALL joint velocity limits uniformly by the ratio of
+ # (Cartesian-limited s_dot) / (fastest achievable s_dot from joint limits).
+ #
+ # This ensures the path velocity respects the Cartesian limit while
+ # keeping joints at their relative proportions.
+ abs_dq_ds = np.abs(dq_ds)
+
+ # Compute s_dot limit from each joint's velocity limit
+ with np.errstate(divide="ignore", invalid="ignore"):
+ s_dot_per_joint = np.where(
+ abs_dq_ds > 1e-9,
+ v_max_joint / abs_dq_ds,
+ np.inf,
+ )
+
+ # The binding joint limit determines max achievable s_dot
+ s_dot_from_joints = float(np.min(s_dot_per_joint))
+
+ # If Cartesian constraint is more restrictive, scale down all limits
+ if max_sdot < s_dot_from_joints and s_dot_from_joints > 0:
+ scale = max_sdot / s_dot_from_joints
+ q_dot_max = v_max_joint * scale
+ else:
+ # Cartesian constraint is not binding, use joint limits
+ q_dot_max = v_max_joint
+
+ vlim_buffer[:, 0] = -q_dot_max
+ vlim_buffer[:, 1] = q_dot_max
+
+ return vlim_buffer.copy()
+
+ return constraint.JointVelocityConstraintVarying(vlim_func)
+
+ except Exception as e:
+ logger.warning("Failed to build Cartesian velocity constraint: %s", e)
+ return None
+
+ def _build_ruckig_trajectory(self) -> Trajectory:
+ """
+ Build trajectory using Ruckig for jerk-limited point-to-point motion.
+
+ Note: This does NOT follow the path waypoints - it goes directly from
+ start to end. Use TOPP-RA profiles for path-following motion.
+ """
+ n_dofs = 6
+ gen = Ruckig(n_dofs, self.dt)
+ inp = InputParameter(n_dofs)
+ out = OutputParameter(n_dofs)
+
+ start_pos = self.joint_path.positions[0]
+ end_pos = self.joint_path.positions[-1]
+
+ # Ruckig requires Python lists for input parameters
+ inp.current_position = start_pos.tolist()
+ inp.current_velocity = [0.0] * n_dofs
+ inp.current_acceleration = [0.0] * n_dofs
+ inp.target_position = end_pos.tolist()
+ inp.target_velocity = [0.0] * n_dofs
+ inp.target_acceleration = [0.0] * n_dofs
+ inp.max_velocity = self.v_max.tolist()
+ inp.max_acceleration = self.a_max.tolist()
+ inp.max_jerk = self.j_max.tolist()
+
+ # Pre-allocate buffer (estimate max iterations from simple duration + margin)
+ est_duration = self._estimate_simple_duration()
+ max_iters = int(est_duration / self.dt) + 500 # generous margin
+ trajectory_rad = np.empty((max_iters, n_dofs), dtype=np.float64)
+
+ count = 0
+ result = Result.Working
+
+ while result == Result.Working:
+ result = gen.update(inp, out)
+ if count >= len(trajectory_rad):
+ new_buf = np.empty((len(trajectory_rad) * 2, n_dofs), dtype=np.float64)
+ new_buf[:count] = trajectory_rad[:count]
+ trajectory_rad = new_buf
+ trajectory_rad[count] = out.new_position
+ count += 1
+ out.pass_to_input(inp)
+
+ if result == Result.Error:
+ raise RuntimeError("Ruckig failed to compute trajectory")
+
+ actual_duration = out.trajectory.duration
+
+ # Trim to actual size
+ trajectory_rad = trajectory_rad[:count]
+
+ # Convert to motor steps (vectorized)
+ steps = _rad_to_steps_alloc(trajectory_rad)
+
+ return Trajectory(steps=steps, duration=actual_duration)
+
+ def _estimate_simple_duration(self) -> float:
+ """Estimate minimum duration based on joint velocity limits.
+
+ With adaptive time distribution, each segment gets time proportional
+ to its joint movement, so total duration is sum of per-segment times.
+ """
+ positions = self.joint_path.positions
+ if len(positions) < 2:
+ return self.dt * 2
+
+ # Compute per-segment time based on max joint movement
+ deltas = np.diff(positions, axis=0) # (N-1, 6)
+ segment_times = np.max(np.abs(deltas) / self.v_max, axis=1) # (N-1,)
+
+ return max(float(np.sum(segment_times)), self.dt * 2)
diff --git a/parol6/protocol/types.py b/parol6/protocol/types.py
deleted file mode 100644
index 2f098f7..0000000
--- a/parol6/protocol/types.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Type definitions for PAROL6 protocol.
-
-Defines enums, TypedDicts, and dataclasses used across the public API.
-"""
-
-from datetime import datetime
-from enum import Enum
-from typing import Literal, TypedDict
-
-
-# Stream mode state enum
-class StreamModeState(Enum):
- """Stream mode state for jog commands."""
-
- OFF = 0 # Stream mode disabled (default FIFO queueing)
- ON = 1 # Stream mode enabled (latest-wins for jog commands)
-
-
-# Frame literals
-Frame = Literal["WRF", "TRF"]
-
-# Axis literals
-Axis = Literal[
- "X+", "X-", "Y+", "Y-", "Z+", "Z-", "RX+", "RX-", "RY+", "RY-", "RZ+", "RZ-"
-]
-
-# Acknowledgment status literals
-AckStatus = Literal[
- "SENT",
- "QUEUED",
- "EXECUTING",
- "COMPLETED",
- "FAILED",
- "INVALID",
- "CANCELLED",
- "TIMEOUT",
- "NO_TRACKING",
-]
-
-
-class IOStatus(TypedDict):
- """Digital I/O status."""
-
- in1: int
- in2: int
- out1: int
- out2: int
- estop: int
-
-
-class GripperStatus(TypedDict):
- """Electric gripper status."""
-
- id: int
- position: int
- speed: int
- current: int
- status_byte: int
- object_detect: int
-
-
-class StatusAggregate(TypedDict, total=False):
- """Aggregate robot status."""
-
- pose: list[float] # 4x4 transformation matrix flattened (len=16)
- angles: list[float] # 6 joint angles in degrees
- io: IOStatus | list[int] # Back-compat with existing server format
- gripper: GripperStatus | list[int]
- action_current: str # Current executing action/command name
- action_state: str # Current action state (IDLE, EXECUTING, etc)
- action_next: str # Next non-streamable action in queue
-
-
-class TrackingStatus(TypedDict):
- """Command tracking status."""
-
- command_id: str | None
- status: AckStatus
- details: str
- completed: bool
- ack_time: datetime | None
-
-
-class SendResult(TypedDict):
- """Standardized result for command-sending APIs."""
-
- command_id: str | None
- status: AckStatus
- details: str
- completed: bool
- ack_time: datetime | None
-
-
-class WireResponse(TypedDict):
- """Typed wrapper for parsed wire responses."""
-
- type: Literal[
- "PONG",
- "POSE",
- "ANGLES",
- "IO",
- "GRIPPER",
- "SPEEDS",
- "STATUS",
- "GCODE_STATUS",
- "SERVER_STATE",
- ]
- payload: dict | list | str
diff --git a/parol6/protocol/wire.py b/parol6/protocol/wire.py
index b95dbd0..fba49c8 100644
--- a/parol6/protocol/wire.py
+++ b/parol6/protocol/wire.py
@@ -1,50 +1,1479 @@
"""
-Wire protocol helpers for UDP encoding/decoding.
-
-This module centralizes encoding of command strings and decoding of common
-response payloads used by the headless controller.
+Wire protocol for PAROL6 robot communication.
+
+This module contains all protocol definitions:
+- Binary serial frame packing/unpacking (firmware communication)
+- Msgpack message types and structs (UDP communication)
+- Command/response encoding and decoding
+
+Wire format uses msgpack arrays with integer type codes:
+- OK: MsgType.OK (just the integer)
+- ERROR: [MsgType.ERROR, message]
+- STATUS: [MsgType.STATUS, pose, angles, speeds, io, action_current, action_state, joint_en, cart_en_wrf, cart_en_trf, executing_index, completed_index, last_checkpoint, error, queued_segments, queued_duration, action_params, tool_status, tcp_speed, simulator_active]
+- RESPONSE: [MsgType.RESPONSE, query_type, value]
+- COMMAND: [CmdType.XXX, ...params]
"""
import logging
-from collections.abc import Sequence
-
-# Centralized binary wire protocol helpers (pack/unpack + codes)
-from enum import IntEnum
-from typing import Literal, cast
+from dataclasses import dataclass, field
+from enum import IntEnum, auto
+from typing import Annotated, TypeAlias, Union, cast
+import msgspec
import numpy as np
+import ormsgpack
+from numba import njit
+
+from parol6.config import LIMITS
+from waldoctl import ActionState, ToolStatus
+from waldoctl.tools import ToolState
-from .types import Axis, Frame, StatusAggregate
+from parol6.tools import get_registry, list_tools
+from parol6.utils.error_catalog import RobotError, make_error
+from parol6.utils.error_codes import ErrorCode
logger = logging.getLogger(__name__)
-# Precomputed bit-unpack lookup table for 0..255 (MSB..LSB)
-# Using NumPy ensures fast vectorized selection without per-call allocations.
-_BIT_UNPACK = np.unpackbits(
- np.arange(256, dtype=np.uint8)[:, None], axis=1, bitorder="big"
+
+# =============================================================================
+# Numpy msgpack encoding hooks
+# =============================================================================
+
+
+def _enc_hook(obj: object) -> object:
+ """Custom encoder hook for numpy types."""
+ if isinstance(obj, np.ndarray):
+ return obj.tolist() # type: ignore[no-matching-overload, ty:no-matching-overload]
+ if isinstance(obj, (np.integer, np.floating)):
+ return obj.item() # Convert numpy scalar to Python native type
+ raise NotImplementedError(f"Cannot encode {type(obj)}")
+
+
+# Module-level encoder with numpy support (thread-safe, reusable)
+_encoder = msgspec.msgpack.Encoder(enc_hook=_enc_hook)
+
+# Module-level decoder for generic msgpack
+_decoder = msgspec.msgpack.Decoder()
+
+
+# =============================================================================
+# Message Types
+# =============================================================================
+
+
+class MsgType(IntEnum):
+ """Message type codes for responses."""
+
+ OK = auto()
+ ERROR = auto()
+ STATUS = auto()
+ RESPONSE = auto()
+
+
+class QueryType(IntEnum):
+ """Query type codes for responses."""
+
+ PING = auto()
+ STATUS = auto()
+ ANGLES = auto()
+ POSE = auto()
+ IO = auto()
+ SPEEDS = auto()
+ TOOL = auto()
+ QUEUE = auto()
+ CURRENT_ACTION = auto()
+ LOOP_STATS = auto()
+ PROFILE = auto()
+ TOOL_STATUS = auto()
+ ENABLEMENT = auto()
+ ERROR = auto()
+ TCP_SPEED = auto()
+ IS_SIMULATOR = auto()
+ TCP_OFFSET = auto()
+
+
+class CmdType(IntEnum):
+ """Command type codes for incoming commands.
+
+ Wire format: [CmdType.XXX, ...params]
+ """
+
+ # Query commands (immediate, read-only)
+ PING = auto()
+ STATUS = auto()
+ ANGLES = auto()
+ POSE = auto()
+ IO = auto()
+ JOINT_SPEEDS = auto()
+ TOOLS = auto()
+ QUEUE = auto()
+ ACTIVITY = auto()
+ LOOP_STATS = auto()
+ PROFILE = auto()
+ REACHABLE = auto()
+ ERROR = auto()
+ TCP_SPEED = auto()
+ TCP_OFFSET = auto()
+
+ # System commands (execute regardless of enable state)
+ RESUME = auto()
+ HALT = auto()
+ WRITE_IO = auto()
+ CONNECT_HARDWARE = auto()
+ SIMULATOR = auto()
+ SELECT_PROFILE = auto()
+ RESET = auto()
+ RESET_LOOP_STATS = auto()
+
+ # Motion commands — queued, pre-computed trajectory
+ HOME = auto()
+ MOVEJ = auto()
+ MOVEJ_POSE = auto()
+ MOVEL = auto()
+ MOVEC = auto()
+ MOVES = auto()
+ MOVEP = auto()
+ SELECT_TOOL = auto()
+ SET_TCP_OFFSET = auto()
+ DELAY = auto()
+ CHECKPOINT = auto()
+
+ # Streaming commands — position (servo) and velocity (jog)
+ TELEPORT = auto()
+ SERVOJ = auto()
+ SERVOJ_POSE = auto()
+ SERVOL = auto()
+ JOGJ = auto()
+ JOGL = auto()
+
+ # Tool commands
+ TOOL_ACTION = auto()
+ TOOL_STATUS = auto()
+
+ # Simulator state query
+ IS_SIMULATOR = auto()
+
+
+# =============================================================================
+# Command Structs - Tagged Union for single-pass decode
+# Wire format: [CmdType.XXX, ...fields]
+# =============================================================================
+
+
+def _check_speed_accel(speed: float, accel: float, *, signed: bool = False) -> None:
+ """Validate speed/accel are in the expected fractional range."""
+ lo = -1.0 if signed else 0.0
+ if not (lo <= speed <= 1.0):
+ raise ValueError(
+ f"speed={speed} is out of range [{lo}, 1.0]. "
+ "Speed is a fraction of max velocity, not a percentage."
+ )
+ if not (0.0 <= accel <= 1.0):
+ raise ValueError(
+ f"accel={accel} is out of range [0.0, 1.0]. "
+ "Accel is a fraction of max acceleration, not a percentage."
+ )
+
+
+class MotionParamsMixin:
+ """Mixin providing resolved motion parameters for wire structs.
+
+ Handles both sentinel patterns:
+ - Move commands use 0.0 as "not specified"
+ - Curved/spline commands use None as "not specified"
+
+ Field declarations live on the concrete Struct subclasses, not here,
+ to avoid Pylance invariance errors on override.
+ """
+
+ __slots__ = ()
+
+ accel: float
+
+ @property
+ def resolved_duration(self) -> float | None:
+ """Duration in seconds, or None for velocity-based timing."""
+ d = cast("float | None", getattr(self, "duration"))
+ return d if d is not None and d > 0.0 else None
+
+ @property
+ def resolved_speed(self) -> float:
+ """Velocity fraction 0-1, defaults to 1.0 (full speed)."""
+ s = cast("float | None", getattr(self, "speed"))
+ return s if s is not None and s > 0.0 else 1.0
+
+
+# -- Queued move commands (pre-computed trajectory) --
+
+
+class MoveJCmd(
+ MotionParamsMixin,
+ msgspec.Struct,
+ tag=int(CmdType.MOVEJ),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """MOVEJ: joint-space move to target angles (degrees)."""
+
+ angles: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ duration: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+ speed: Annotated[float, msgspec.Meta(ge=0.0, le=1.0)] = 0.0
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ r: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+ rel: bool = False
+
+ def __post_init__(self) -> None:
+ has_duration = self.duration > 0.0
+ has_speed = self.speed > 0.0
+ if not has_duration and not has_speed:
+ raise ValueError("MOVEJ requires either duration > 0 or speed > 0")
+ if has_duration and has_speed:
+ raise ValueError("MOVEJ requires only one of duration or speed")
+ _check_speed_accel(self.speed, self.accel)
+ if not self.rel:
+ for i in range(6):
+ if not (
+ LIMITS.joint.position.deg[i, 0]
+ <= self.angles[i]
+ <= LIMITS.joint.position.deg[i, 1]
+ ):
+ raise ValueError(
+ f"Joint {i + 1} target ({self.angles[i]:.1f} deg) is out of range"
+ )
+
+
+class MoveJPoseCmd(
+ MotionParamsMixin,
+ msgspec.Struct,
+ tag=int(CmdType.MOVEJ_POSE),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """MOVEJ_POSE: joint-space move to a Cartesian pose (IK at target)."""
+
+ pose: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ duration: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+ speed: Annotated[float, msgspec.Meta(ge=0.0, le=1.0)] = 0.0
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ r: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+
+ def __post_init__(self) -> None:
+ has_duration = self.duration > 0.0
+ has_speed = self.speed > 0.0
+ if not has_duration and not has_speed:
+ raise ValueError("MOVEJ_POSE requires either duration > 0 or speed > 0")
+ if has_duration and has_speed:
+ raise ValueError("MOVEJ_POSE requires only one of duration or speed")
+ _check_speed_accel(self.speed, self.accel)
+
+
+class MoveLCmd(
+ MotionParamsMixin,
+ msgspec.Struct,
+ tag=int(CmdType.MOVEL),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """MOVEL: linear Cartesian move to target pose."""
+
+ pose: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ frame: Annotated[str, msgspec.Meta(pattern=r"^(WRF|TRF)$")] = "WRF"
+ duration: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+ speed: Annotated[float, msgspec.Meta(ge=0.0, le=1.0)] = 0.0
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ r: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+ rel: bool = False
+
+ def __post_init__(self) -> None:
+ has_duration = self.duration > 0.0
+ has_speed = self.speed > 0.0
+ if not has_duration and not has_speed:
+ raise ValueError("MOVEL requires either duration > 0 or speed > 0")
+ if has_duration and has_speed:
+ raise ValueError("MOVEL requires only one of duration or speed")
+ _check_speed_accel(self.speed, self.accel)
+
+
+class MoveCCmd(
+ MotionParamsMixin,
+ msgspec.Struct,
+ tag=int(CmdType.MOVEC),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """MOVEC: circular arc through current → via → end."""
+
+ via: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ end: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ frame: Annotated[str, msgspec.Meta(pattern=r"^(WRF|TRF)$")] = "WRF"
+ duration: Annotated[float, msgspec.Meta(ge=0.0)] | None = None
+ speed: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] | None = None
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ r: Annotated[float, msgspec.Meta(ge=0.0)] = 0.0
+
+ def __post_init__(self) -> None:
+ has_duration = self.duration is not None and self.duration > 0.0
+ has_speed = self.speed is not None and self.speed > 0.0
+ if not has_duration and not has_speed:
+ raise ValueError("MOVEC requires either duration > 0 or speed > 0")
+ if has_duration and has_speed:
+ raise ValueError("MOVEC requires only one of duration or speed")
+ if self.speed is not None:
+ _check_speed_accel(self.speed, self.accel)
+
+
+class MoveSCmd(
+ MotionParamsMixin,
+ msgspec.Struct,
+ tag=int(CmdType.MOVES),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """MOVES: cubic spline through waypoints."""
+
+ waypoints: Annotated[list[list[float]], msgspec.Meta(min_length=2)]
+ frame: Annotated[str, msgspec.Meta(pattern=r"^(WRF|TRF)$")] = "WRF"
+ duration: Annotated[float, msgspec.Meta(ge=0.0)] | None = None
+ speed: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] | None = None
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+ def __post_init__(self) -> None:
+ has_duration = self.duration is not None and self.duration > 0.0
+ has_speed = self.speed is not None and self.speed > 0.0
+ if not has_duration and not has_speed:
+ raise ValueError("MOVES requires either duration > 0 or speed > 0")
+ if has_duration and has_speed:
+ raise ValueError("MOVES requires only one of duration or speed")
+ if self.speed is not None:
+ _check_speed_accel(self.speed, self.accel)
+ waypoints = self.waypoints
+ for i in range(len(waypoints)):
+ if len(waypoints[i]) != 6:
+ raise ValueError(f"Waypoint {i} must have 6 values (x,y,z,rx,ry,rz)")
+
+
+class MovePCmd(
+ MotionParamsMixin,
+ msgspec.Struct,
+ tag=int(CmdType.MOVEP),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """MOVEP: process move — constant TCP speed with auto-blending at corners."""
+
+ waypoints: Annotated[list[list[float]], msgspec.Meta(min_length=2)]
+ frame: Annotated[str, msgspec.Meta(pattern=r"^(WRF|TRF)$")] = "WRF"
+ duration: Annotated[float, msgspec.Meta(ge=0.0)] | None = None
+ speed: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] | None = None
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+ def __post_init__(self) -> None:
+ has_duration = self.duration is not None and self.duration > 0.0
+ has_speed = self.speed is not None and self.speed > 0.0
+ if not has_duration and not has_speed:
+ raise ValueError("MOVEP requires either duration > 0 or speed > 0")
+ if has_duration and has_speed:
+ raise ValueError("MOVEP requires only one of duration or speed")
+ if self.speed is not None:
+ _check_speed_accel(self.speed, self.accel)
+ waypoints = self.waypoints
+ for i in range(len(waypoints)):
+ if len(waypoints[i]) != 6:
+ raise ValueError(f"Waypoint {i} must have 6 values (x,y,z,rx,ry,rz)")
+
+
+class CheckpointCmd(
+ msgspec.Struct,
+ tag=int(CmdType.CHECKPOINT),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """CHECKPOINT: queue marker for progress tracking."""
+
+ label: Annotated[str, msgspec.Meta(min_length=1, max_length=128)]
+
+
+# -- Streaming commands: servo (position) --
+
+
+class ServoJCmd(
+ msgspec.Struct,
+ tag=int(CmdType.SERVOJ),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """SERVOJ: streaming joint position target (degrees)."""
+
+ angles: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ speed: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+
+class ServoJPoseCmd(
+ msgspec.Struct,
+ tag=int(CmdType.SERVOJ_POSE),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """SERVOJ_POSE: streaming joint position target via Cartesian pose (IK)."""
+
+ pose: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ speed: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+
+class ServoLCmd(
+ msgspec.Struct,
+ tag=int(CmdType.SERVOL),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """SERVOL: streaming linear Cartesian position target."""
+
+ pose: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ speed: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+
+# -- Streaming commands: jog (velocity) --
+
+
+class JogJCmd(
+ msgspec.Struct,
+ tag=int(CmdType.JOGJ),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """JOGJ: streaming joint velocity. Static 6-element signed speed fractions."""
+
+ speeds: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ duration: Annotated[float, msgspec.Meta(gt=0.0)]
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+ def __post_init__(self) -> None:
+ for i in range(6):
+ if not (-1.0 <= self.speeds[i] <= 1.0):
+ raise ValueError(
+ f"Speed[{i}]={self.speeds[i]} out of range [-1.0, 1.0]"
+ )
+
+
+class JogLCmd(
+ msgspec.Struct,
+ tag=int(CmdType.JOGL),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """JOGL: streaming Cartesian velocity. Static 6-element [vx,vy,vz,wx,wy,wz]."""
+
+ velocities: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ duration: Annotated[float, msgspec.Meta(gt=0.0)]
+ frame: Annotated[str, msgspec.Meta(pattern=r"^(WRF|TRF)$")] = "WRF"
+ accel: Annotated[float, msgspec.Meta(gt=0.0, le=1.0)] = 1.0
+
+ def __post_init__(self) -> None:
+ for i in range(6):
+ if not (-1.0 <= self.velocities[i] <= 1.0):
+ raise ValueError(
+ f"Velocity[{i}]={self.velocities[i]} out of range [-1.0, 1.0]"
+ )
+
+
+class HomeCmd(
+ msgspec.Struct, tag=int(CmdType.HOME), array_like=True, frozen=True, gc=False
+):
+ """HOME: [CmdType.HOME]"""
+
+ pass
+
+
+class ResumeCmd(
+ msgspec.Struct, tag=int(CmdType.RESUME), array_like=True, frozen=True, gc=False
+):
+ """RESUME: [CmdType.RESUME] — re-enable the controller."""
+
+ pass
+
+
+class HaltCmd(
+ msgspec.Struct, tag=int(CmdType.HALT), array_like=True, frozen=True, gc=False
+):
+ """HALT: [CmdType.HALT] — stop all motion and disable."""
+
+ pass
+
+
+class ResetCmd(
+ msgspec.Struct, tag=int(CmdType.RESET), array_like=True, frozen=True, gc=False
+):
+ """RESET: [CmdType.RESET]"""
+
+ pass
+
+
+class ResetLoopStatsCmd(
+ msgspec.Struct,
+ tag=int(CmdType.RESET_LOOP_STATS),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """RESET_LOOP_STATS: [CmdType.RESET_LOOP_STATS]
+
+ Reset timing statistics (min/max/overrun counts) without affecting controller state.
+ """
+
+ pass
+
+
+class TeleportCmd(
+ msgspec.Struct, tag=int(CmdType.TELEPORT), array_like=True, frozen=True, gc=False
+):
+ """TELEPORT: instantly set joint angles in degrees (simulator only)."""
+
+ angles: Annotated[list[float], msgspec.Meta(min_length=6, max_length=6)]
+ tool_positions: list[float] | None = None
+
+
+class WriteIOCmd(
+ msgspec.Struct, tag=int(CmdType.WRITE_IO), array_like=True, frozen=True, gc=False
+):
+ """WRITE_IO: [CmdType.WRITE_IO, port_index, value]
+
+ port_index: 0-7 (8-bit I/O port)
+ value: 0 or 1
+ """
+
+ port_index: Annotated[int, msgspec.Meta(ge=0, le=7)]
+ value: Annotated[int, msgspec.Meta(ge=0, le=1)]
+
+
+class ConnectHardwareCmd(
+ msgspec.Struct,
+ tag=int(CmdType.CONNECT_HARDWARE),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """CONNECT_HARDWARE: [CmdType.CONNECT_HARDWARE, port_str]"""
+
+ port_str: Annotated[str, msgspec.Meta(min_length=1, max_length=256)]
+
+
+class SimulatorCmd(
+ msgspec.Struct, tag=int(CmdType.SIMULATOR), array_like=True, frozen=True, gc=False
+):
+ """SIMULATOR: [CmdType.SIMULATOR, on]"""
+
+ on: bool
+
+
+class DelayCmd(
+ msgspec.Struct, tag=int(CmdType.DELAY), array_like=True, frozen=True, gc=False
+):
+ """DELAY: [CmdType.DELAY, seconds]"""
+
+ seconds: Annotated[float, msgspec.Meta(gt=0.0)]
+
+
+class SelectToolCmd(
+ msgspec.Struct, tag=int(CmdType.SELECT_TOOL), array_like=True, frozen=True, gc=False
+):
+ """SELECT_TOOL: [CmdType.SELECT_TOOL, tool_name, variant_key]"""
+
+ tool_name: Annotated[str, msgspec.Meta(min_length=1, max_length=64)]
+ variant_key: str = ""
+
+ def __post_init__(self) -> None:
+ name = self.tool_name.strip().upper()
+ if name not in get_registry():
+ raise ValueError(f"Unknown tool '{name}'. Available: {list_tools()}")
+
+
+class SetTcpOffsetCmd(
+ msgspec.Struct,
+ tag=int(CmdType.SET_TCP_OFFSET),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """SET_TCP_OFFSET: offset the effective TCP in the tool's local frame (mm)."""
+
+ x: float = 0.0
+ y: float = 0.0
+ z: float = 0.0
+
+
+class SelectProfileCmd(
+ msgspec.Struct,
+ tag=int(CmdType.SELECT_PROFILE),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """SELECT_PROFILE: [CmdType.SELECT_PROFILE, profile]"""
+
+ profile: Annotated[str, msgspec.Meta(min_length=1, max_length=32)]
+
+
+class ToolActionCmd(
+ msgspec.Struct,
+ tag=int(CmdType.TOOL_ACTION),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """TOOL_ACTION: [CmdType.TOOL_ACTION, tool_key, action, params]
+
+ Generic tool action command. The controller validates *tool_key*
+ against the registry and delegates to the appropriate 100 Hz command.
+ """
+
+ tool_key: Annotated[str, msgspec.Meta(min_length=1, max_length=64)]
+ action: Annotated[str, msgspec.Meta(min_length=1, max_length=64)]
+ params: list = []
+
+ def __post_init__(self) -> None:
+ key = self.tool_key.strip().upper()
+ registry = get_registry()
+ if key not in registry:
+ raise ValueError(f"Unknown tool '{key}'. Available: {list(registry)}")
+
+
+class ToolStatusCmd(
+ msgspec.Struct,
+ tag=int(CmdType.TOOL_STATUS),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """TOOL_STATUS: [CmdType.TOOL_STATUS]"""
+
+ pass
+
+
+class IsSimulatorCmd(
+ msgspec.Struct,
+ tag=int(CmdType.IS_SIMULATOR),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """IS_SIMULATOR: query whether the controller is in simulator mode."""
+
+ pass
+
+
+class ReachableCmd(
+ msgspec.Struct,
+ tag=int(CmdType.REACHABLE),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """REACHABLE: [CmdType.REACHABLE]"""
+
+ pass
+
+
+class ErrorCmd(
+ msgspec.Struct,
+ tag=int(CmdType.ERROR),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """ERROR: [CmdType.ERROR]"""
+
+ pass
+
+
+class TcpSpeedCmd(
+ msgspec.Struct,
+ tag=int(CmdType.TCP_SPEED),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """TCP_SPEED: [CmdType.TCP_SPEED]"""
+
+ pass
+
+
+class TcpOffsetCmd(
+ msgspec.Struct,
+ tag=int(CmdType.TCP_OFFSET),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """TCP_OFFSET: query current TCP offset."""
+
+ pass
+
+
+# Query commands (no params, just the tag)
+class PingCmd(
+ msgspec.Struct, tag=int(CmdType.PING), array_like=True, frozen=True, gc=False
+):
+ """PING: [CmdType.PING]"""
+
+ pass
+
+
+class StatusCmd(
+ msgspec.Struct, tag=int(CmdType.STATUS), array_like=True, frozen=True, gc=False
+):
+ """STATUS: [CmdType.STATUS]"""
+
+ pass
+
+
+class AnglesCmd(
+ msgspec.Struct, tag=int(CmdType.ANGLES), array_like=True, frozen=True, gc=False
+):
+ """ANGLES: [CmdType.ANGLES]"""
+
+ pass
+
+
+class PoseCmd(
+ msgspec.Struct, tag=int(CmdType.POSE), array_like=True, frozen=True, gc=False
+):
+ """POSE: [CmdType.POSE, frame]"""
+
+ frame: Annotated[str, msgspec.Meta(pattern=r"^(WRF|TRF)$")] | None = None
+
+
+class IOCmd(
+ msgspec.Struct, tag=int(CmdType.IO), array_like=True, frozen=True, gc=False
+):
+ """IO: [CmdType.IO]"""
+
+ pass
+
+
+class JointSpeedsCmd(
+ msgspec.Struct,
+ tag=int(CmdType.JOINT_SPEEDS),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """JOINT_SPEEDS: [CmdType.JOINT_SPEEDS]"""
+
+ pass
+
+
+class ToolsCmd(
+ msgspec.Struct, tag=int(CmdType.TOOLS), array_like=True, frozen=True, gc=False
+):
+ """TOOLS: [CmdType.TOOLS]"""
+
+ pass
+
+
+class QueueCmd(
+ msgspec.Struct, tag=int(CmdType.QUEUE), array_like=True, frozen=True, gc=False
+):
+ """QUEUE: [CmdType.QUEUE]"""
+
+ pass
+
+
+class ActivityCmd(
+ msgspec.Struct,
+ tag=int(CmdType.ACTIVITY),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """ACTIVITY: [CmdType.ACTIVITY]"""
+
+ pass
+
+
+class LoopStatsCmd(
+ msgspec.Struct,
+ tag=int(CmdType.LOOP_STATS),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """LOOP_STATS: [CmdType.LOOP_STATS]"""
+
+ pass
+
+
+class ProfileCmd(
+ msgspec.Struct, tag=int(CmdType.PROFILE), array_like=True, frozen=True, gc=False
+):
+ """PROFILE: [CmdType.PROFILE]"""
+
+ pass
+
+
+# =============================================================================
+# Auto-generated Command union and STRUCT_TO_CMDTYPE
+# =============================================================================
+
+
+def _collect_command_structs() -> list[type]:
+ """Collect all command struct classes from this module."""
+ import sys
+
+ module = sys.modules[__name__]
+ structs = []
+ for name, cls in vars(module).items():
+ if not name.endswith("Cmd"):
+ continue
+ if not isinstance(cls, type):
+ continue
+ if not issubclass(cls, msgspec.Struct):
+ continue
+ # Check for tag in struct config
+ config = getattr(cls, "__struct_config__", None)
+ if config is not None and config.tag is not None:
+ structs.append(cls)
+ return structs
+
+
+def _build_struct_to_cmdtype(structs: list[type]) -> dict[type, CmdType]:
+ """Auto-generate struct -> CmdType mapping from tagged structs."""
+ mapping: dict[type, CmdType] = {}
+ for struct_cls in structs:
+ config = getattr(struct_cls, "__struct_config__", None)
+ if config is None:
+ continue
+ tag = getattr(config, "tag", None)
+ if tag is None:
+ continue
+ try:
+ cmd_type = CmdType(tag)
+ mapping[struct_cls] = cmd_type
+ except ValueError:
+ pass # Not a valid CmdType tag
+ return mapping
+
+
+# Build at import time
+_COMMAND_STRUCTS = _collect_command_structs()
+STRUCT_TO_CMDTYPE: dict[type, CmdType] = _build_struct_to_cmdtype(_COMMAND_STRUCTS)
+
+# Build Command union dynamically from collected structs
+Command: TypeAlias = Union[tuple(_COMMAND_STRUCTS)]
+
+# Module-level decoder for single-pass command decode
+_command_decoder = msgspec.msgpack.Decoder(Command)
+
+
+def decode_command(data: bytes) -> Command:
+ """Decode raw bytes to typed command struct.
+
+ Args:
+ data: Raw msgpack-encoded command bytes
+
+ Returns:
+ Typed command struct
+
+ Raises:
+ msgspec.ValidationError: If data is invalid or doesn't match any command type
+ """
+ return _command_decoder.decode(data)
+
+
+def encode_command(cmd: Command) -> bytes:
+ """Encode a typed command struct to bytes.
+
+ Args:
+ cmd: Typed command struct
+
+ Returns:
+ Raw msgpack-encoded bytes
+ """
+ return _encoder.encode(cmd)
+
+
+def encode_command_into(cmd: Command, buf: bytearray) -> bytearray:
+ """Encode a typed command struct into a pre-allocated bytearray.
+
+ The buffer is resized to exactly fit the encoded output.
+ Reuses the same bytearray object across calls to avoid per-send
+ ``bytes`` allocations on fire-and-forget paths.
+
+ Args:
+ cmd: Typed command struct
+ buf: Pre-allocated bytearray (will be resized in-place)
+
+ Returns:
+ The same *buf* object, now containing the encoded bytes.
+ """
+ _encoder.encode_into(cmd, buf)
+ return buf
+
+
+# =============================================================================
+# Response Structs - Tagged Union for single-pass decode
+# Wire format: [MsgType.RESPONSE, QueryType.XXX, ...fields]
+# =============================================================================
+
+
+class StatusResultStruct(
+ msgspec.Struct, tag=int(QueryType.STATUS), array_like=True, frozen=True, gc=False
+):
+ """Aggregate robot status."""
+
+ pose: list[float]
+ angles: list[float]
+ speeds: list[float]
+ io: list[int]
+ tool_status: list
+
+
+class LoopStatsResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.LOOP_STATS),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Control loop runtime metrics."""
+
+ target_hz: float
+ loop_count: int
+ overrun_count: int
+ mean_period_s: float
+ std_period_s: float
+ min_period_s: float
+ max_period_s: float
+ p95_period_s: float
+ p99_period_s: float
+ mean_hz: float
+
+
+class ToolResultStruct(
+ msgspec.Struct, tag=int(QueryType.TOOL), array_like=True, frozen=True, gc=False
+):
+ """Tool configuration."""
+
+ tool: str
+ available: list[str]
+
+
+class CurrentActionResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.CURRENT_ACTION),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Current executing action."""
+
+ current: str
+ state: str
+ next: str
+ params: str = ""
+
+
+class PingResultStruct(
+ msgspec.Struct, tag=int(QueryType.PING), array_like=True, frozen=True, gc=False
+):
+ """Ping response with hardware connectivity status."""
+
+ hardware_connected: int # 0 or 1
+
+
+class AnglesResultStruct(
+ msgspec.Struct, tag=int(QueryType.ANGLES), array_like=True, frozen=True, gc=False
+):
+ """Joint angles response."""
+
+ angles: list[float]
+
+
+class PoseResultStruct(
+ msgspec.Struct, tag=int(QueryType.POSE), array_like=True, frozen=True, gc=False
+):
+ """Pose response."""
+
+ pose: list[float]
+
+
+class IOResultStruct(
+ msgspec.Struct, tag=int(QueryType.IO), array_like=True, frozen=True, gc=False
+):
+ """I/O status response."""
+
+ io: list[int]
+
+
+class SpeedsResultStruct(
+ msgspec.Struct, tag=int(QueryType.SPEEDS), array_like=True, frozen=True, gc=False
+):
+ """Speeds response."""
+
+ speeds: list[float]
+
+
+class ProfileResultStruct(
+ msgspec.Struct, tag=int(QueryType.PROFILE), array_like=True, frozen=True, gc=False
+):
+ """Motion profile response."""
+
+ profile: str
+
+
+class QueueResultStruct(
+ msgspec.Struct, tag=int(QueryType.QUEUE), array_like=True, frozen=True, gc=False
+):
+ """Queue status response with progress tracking."""
+
+ queue: list
+ executing_index: int = -1
+ completed_index: int = -1
+ last_checkpoint: str = ""
+ queued_duration: float = 0.0
+
+
+class ToolStatusResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.TOOL_STATUS),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Tool status response — full 7-field ToolStatus."""
+
+ tool_key: str
+ state: ToolState
+ engaged: bool
+ part_detected: bool
+ fault_code: int
+ positions: list[float]
+ channels: list[float]
+
+
+class EnablementResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.ENABLEMENT),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Joint and Cartesian enablement flags."""
+
+ joint_en: list[int]
+ cart_en_wrf: list[int]
+ cart_en_trf: list[int]
+
+
+class ErrorResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.ERROR),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Current error state (None-safe wire representation)."""
+
+ error: list | None
+
+
+class TcpSpeedResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.TCP_SPEED),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """TCP linear speed in mm/s."""
+
+ speed: float
+
+
+class IsSimulatorResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.IS_SIMULATOR),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Simulator mode state."""
+
+ active: bool
+
+
+class TcpOffsetResultStruct(
+ msgspec.Struct,
+ tag=int(QueryType.TCP_OFFSET),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Current TCP offset in mm (tool-local frame)."""
+
+ x: float
+ y: float
+ z: float
+
+
+# Tagged Union for responses
+Response = (
+ StatusResultStruct
+ | LoopStatsResultStruct
+ | ToolResultStruct
+ | CurrentActionResultStruct
+ | PingResultStruct
+ | AnglesResultStruct
+ | PoseResultStruct
+ | IOResultStruct
+ | SpeedsResultStruct
+ | ProfileResultStruct
+ | QueueResultStruct
+ | ToolStatusResultStruct
+ | EnablementResultStruct
+ | ErrorResultStruct
+ | TcpSpeedResultStruct
+ | IsSimulatorResultStruct
+ | TcpOffsetResultStruct
)
-START = b"\xff\xff\xff"
-END = b"\x01\x02"
-PAYLOAD_LEN = 52 # matches existing firmware expectation
-__all__ = [
- "CommandCode",
- "pack_tx_frame_into",
- "unpack_rx_frame_into",
- "encode_move_joint",
- "encode_move_pose",
- "encode_move_cartesian",
- "encode_move_cartesian_rel_trf",
- "encode_jog_joint",
- "encode_cart_jog",
- "encode_gcode",
- "encode_gcode_program_inline",
- "decode_simple",
- "decode_status",
- "split_to_3_bytes",
- "fuse_3_bytes",
- "fuse_2_bytes",
-]
+
+# Typed message classes for parsed responses
+
+
+class OkMsg(
+ msgspec.Struct,
+ tag=int(MsgType.OK),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """OK response, optionally carrying a command index for queued commands."""
+
+ index: int | None = None
+
+
+class ErrorMsg(
+ msgspec.Struct,
+ tag=int(MsgType.ERROR),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Error response carrying a RobotError wire representation."""
+
+ message: list
+
+
+class ResponseMsg(
+ msgspec.Struct,
+ tag=int(MsgType.RESPONSE),
+ array_like=True,
+ frozen=True,
+ gc=False,
+):
+ """Query response carrying a typed result struct."""
+
+ result: Response
+
+
+# Tagged union for single-pass decode of server replies
+Message: TypeAlias = Union[OkMsg, ErrorMsg, ResponseMsg]
+_message_decoder = msgspec.msgpack.Decoder(Message)
+
+
+def decode_message(data: bytes) -> Message:
+ """Decode raw msgpack bytes into a typed Message.
+
+ Raises:
+ msgspec.ValidationError: If data doesn't match any message type.
+ """
+ return _message_decoder.decode(data)
+
+
+# =============================================================================
+# Generic msgpack encode/decode functions
+# =============================================================================
+
+
+def encode(obj: object) -> bytes:
+ """Encode any msgspec struct or Python object to bytes with numpy support."""
+ return _encoder.encode(obj)
+
+
+def decode(data: bytes) -> object:
+ """Decode msgpack bytes to a Python object."""
+ return _decoder.decode(data)
+
+
+# Pre-packed common responses (avoid repeated packing)
+OK_PACKED = _encoder.encode(OkMsg())
+
+# Cache for common error responses (3x faster for repeated errors)
+_UNKNOWN_CMD_ERROR = make_error(ErrorCode.COMM_UNKNOWN_COMMAND)
+_QUEUE_FULL_ERROR = make_error(ErrorCode.COMM_QUEUE_FULL)
+_ERROR_CACHE: dict[int, bytes] = {
+ ErrorCode.COMM_UNKNOWN_COMMAND: _encoder.encode(
+ ErrorMsg(_UNKNOWN_CMD_ERROR.to_wire())
+ ),
+ ErrorCode.COMM_QUEUE_FULL: _encoder.encode(ErrorMsg(_QUEUE_FULL_ERROR.to_wire())),
+}
+
+
+def pack_ok() -> bytes:
+ """Pack an OK response (no command index)."""
+ return OK_PACKED
+
+
+def pack_ok_index(index: int) -> bytes:
+ """Pack an OK response with a command index for queued commands."""
+ return _encoder.encode(OkMsg(index=index))
+
+
+def pack_error(error: RobotError) -> bytes:
+ """Pack an error response: [ERROR, [command_index, code, title, cause, effect, remedy]].
+
+ Common errors are cached by ErrorCode for performance.
+ """
+ cached = _ERROR_CACHE.get(error.code)
+ if cached is not None:
+ return cached
+ return _encoder.encode(ErrorMsg(error.to_wire()))
+
+
+def pack_response(result: Response) -> bytes:
+ """Pack a query response: [RESPONSE, [query_type_tag, ...fields]]."""
+ return _encoder.encode(ResponseMsg(result))
+
+
+def pack_status(
+    pose: np.ndarray,
+    angles: np.ndarray,
+    speeds: np.ndarray,
+    io: np.ndarray,
+    action_current: str,
+    action_state: ActionState,
+    joint_en: np.ndarray,
+    cart_en_wrf: np.ndarray,
+    cart_en_trf: np.ndarray,
+    executing_index: int = -1,
+    completed_index: int = -1,
+    last_checkpoint: str = "",
+    error: RobotError | None = None,
+    queued_segments: int = 0,
+    queued_duration: float = 0.0,
+    action_params: str = "",
+    tool_status: ToolStatus | None = None,
+    tcp_speed: float = 0.0,
+    simulator_active: bool = False,
+) -> bytes:
+    """Pack a status broadcast message.
+
+    Wire layout (tuple indices, mirrored by ``decode_status_bin_into``):
+        [0] MsgType.STATUS, [1] pose, [2] angles, [3] speeds, [4] io,
+        [5] action_current, [6] action_state, [7] joint_en,
+        [8] cart_en_wrf, [9] cart_en_trf, [10] executing_index,
+        [11] completed_index, [12] last_checkpoint,
+        [13] error wire-tuple or None, [14] queued_segments,
+        [15] queued_duration, [16] action_params,
+        [17] 7-element tool-status tuple or None,
+        [18] tcp_speed, [19] simulator_active.
+
+    Uses ormsgpack with OPT_SERIALIZE_NUMPY for ~80x fewer allocations
+    compared to msgspec with enc_hook (reads numpy buffers directly via C API).
+    """
+    ts = tool_status
+    return ormsgpack.packb(
+        (
+            MsgType.STATUS,
+            pose,
+            angles,
+            speeds,
+            io,
+            action_current,
+            action_state,
+            joint_en,
+            cart_en_wrf,
+            cart_en_trf,
+            executing_index,
+            completed_index,
+            last_checkpoint,
+            error.to_wire() if error is not None else None,
+            queued_segments,
+            queued_duration,
+            action_params,
+            # Tool status flattened to a fixed 7-tuple (see index [17] above).
+            (
+                ts.key,
+                ts.state,
+                ts.engaged,
+                ts.part_detected,
+                ts.fault_code,
+                ts.positions,
+                ts.channels,
+            )
+            if ts is not None
+            else None,
+            tcp_speed,
+            simulator_active,
+        ),
+        option=ormsgpack.OPT_SERIALIZE_NUMPY,
+    )
+
+
+# =============================================================================
+# Status Buffer (for zero-allocation status parsing)
+# =============================================================================
+
+
+@dataclass
+class StatusBuffer:
+    """Preallocated buffer for zero-allocation status parsing.
+
+    All numeric arrays are numpy for cache-friendly access and potential numba use.
+    Use decode_status_bin_into() to fill this buffer without allocating new objects.
+    """
+
+    # (16,) float64 pose — presumably a flattened 4x4 transform; confirm with sender.
+    pose: np.ndarray = field(default_factory=lambda: np.zeros(16, dtype=np.float64))
+    # (6,) float64 joint angles.
+    angles: np.ndarray = field(default_factory=lambda: np.zeros(6, dtype=np.float64))
+    # (6,) float64 joint speeds.
+    speeds: np.ndarray = field(default_factory=lambda: np.zeros(6, dtype=np.int32.__class__ == type and np.float64 or np.float64))
+    # (5,) int32 IO flags.
+    io: np.ndarray = field(default_factory=lambda: np.zeros(5, dtype=np.int32))
+    tool_status: ToolStatus = field(default_factory=ToolStatus)
+    # (12,) int32 enable envelopes; default all-enabled (ones).
+    joint_en: np.ndarray = field(default_factory=lambda: np.ones(12, dtype=np.int32))
+    cart_en_wrf: np.ndarray = field(default_factory=lambda: np.ones(12, dtype=np.int32))
+    cart_en_trf: np.ndarray = field(default_factory=lambda: np.ones(12, dtype=np.int32))
+    action_current: str = ""
+    action_params: str = ""
+    action_state: ActionState = ActionState.IDLE
+    executing_index: int = -1
+    completed_index: int = -1
+    last_checkpoint: str = ""
+    error: RobotError | None = None
+    queued_segments: int = 0
+    queued_duration: float = 0.0
+    tcp_speed: float = 0.0
+    simulator_active: bool = False
+
+    def __post_init__(self) -> None:
+        # Views (not copies) of the enable arrays keyed by frame name, so
+        # in-place writes to cart_en_wrf/cart_en_trf are visible via cart_en.
+        self._cart_en_dict: dict[str, np.ndarray] = {
+            "WRF": self.cart_en_wrf,
+            "TRF": self.cart_en_trf,
+        }
+
+    @property
+    def cart_en(self) -> dict[str, np.ndarray]:
+        """Frame name → (12,) int32 Cartesian enable envelope."""
+        return self._cart_en_dict
+
+    def copy(self) -> "StatusBuffer":
+        """Return a deep copy with all arrays copied.
+
+        Note: ``error`` is carried over by reference and the tool-status
+        ``positions``/``channels`` tuples are shared — presumably immutable;
+        TODO confirm RobotError is never mutated in place.
+        """
+        ts = self.tool_status
+        return StatusBuffer(
+            pose=self.pose.copy(),
+            angles=self.angles.copy(),
+            speeds=self.speeds.copy(),
+            io=self.io.copy(),
+            tool_status=ToolStatus(
+                key=ts.key,
+                state=ts.state,
+                engaged=ts.engaged,
+                part_detected=ts.part_detected,
+                fault_code=ts.fault_code,
+                positions=ts.positions,
+                channels=ts.channels,
+            ),
+            joint_en=self.joint_en.copy(),
+            cart_en_wrf=self.cart_en_wrf.copy(),
+            cart_en_trf=self.cart_en_trf.copy(),
+            action_current=self.action_current,
+            action_params=self.action_params,
+            action_state=self.action_state,
+            executing_index=self.executing_index,
+            completed_index=self.completed_index,
+            last_checkpoint=self.last_checkpoint,
+            error=self.error,
+            queued_segments=self.queued_segments,
+            queued_duration=self.queued_duration,
+            tcp_speed=self.tcp_speed,
+            simulator_active=self.simulator_active,
+        )
+
+
+def decode_status_bin_into(data: bytes, buf: StatusBuffer) -> bool:
+    """Zero-allocation decode of STATUS message into preallocated buffer.
+
+    Message format: [MsgType.STATUS, pose, angles, speeds, io,
+    action_current, action_state, joint_en, cart_en_wrf, cart_en_trf,
+    executing_index, completed_index, last_checkpoint,
+    error, queued_segments, queued_duration, action_params,
+    tool_status_tuple, tcp_speed, simulator_active]
+
+    Args:
+        data: Raw msgpack bytes
+        buf: Preallocated StatusBuffer to fill
+
+    Returns:
+        True if valid STATUS message, False otherwise.
+    """
+    try:
+        msg = _decoder.decode(data)
+        # Require the 17 mandatory fields (indices 0..16); fields 17..19
+        # (tool status, tcp_speed, simulator_active) are optional so older
+        # senders remain decodable.
+        if (
+            not isinstance(msg, (list, tuple))
+            or len(msg) < 17
+            or msg[0] != MsgType.STATUS
+        ):
+            return False
+
+        # Slice-assign into the preallocated arrays — raises (caught below)
+        # if the payload lengths do not match the buffer shapes.
+        buf.pose[:] = msg[1]
+        buf.angles[:] = msg[2]
+        buf.speeds[:] = msg[3]
+        buf.io[:] = msg[4]
+        buf.action_current = msg[5]
+        buf.action_state = ActionState(msg[6])
+        buf.joint_en[:] = msg[7]
+        buf.cart_en_wrf[:] = msg[8]
+        buf.cart_en_trf[:] = msg[9]
+        buf.executing_index = msg[10]
+        buf.completed_index = msg[11]
+        buf.last_checkpoint = msg[12]
+        raw_error = msg[13]
+        buf.error = RobotError.from_wire(raw_error) if raw_error is not None else None
+        buf.queued_segments = msg[14]
+        buf.queued_duration = msg[15]
+        buf.action_params = msg[16]
+
+        # Optional tool status: 7-tuple [key, state, engaged, part_detected,
+        # fault_code, positions, channels]; reuses buf.tool_status in place.
+        raw_ts = msg[17] if len(msg) > 17 else None
+        ts = buf.tool_status
+        if (
+            raw_ts is not None
+            and isinstance(raw_ts, (list, tuple))
+            and len(raw_ts) >= 7
+        ):
+            ts.key = raw_ts[0]
+            ts.state = ToolState(raw_ts[1])
+            ts.engaged = raw_ts[2]
+            ts.part_detected = raw_ts[3]
+            ts.fault_code = raw_ts[4]
+            ts.positions = tuple(raw_ts[5]) if raw_ts[5] else ()
+            ts.channels = tuple(raw_ts[6]) if raw_ts[6] else ()
+
+        # Trailing optional scalars; left at previous values when absent.
+        if len(msg) > 18:
+            buf.tcp_speed = float(msg[18])
+
+        if len(msg) > 19:
+            buf.simulator_active = bool(msg[19])
+
+        return True
+    except Exception as e:
+        # Malformed payload — report failure instead of propagating.
+        logger.debug("decode_status_bin_into: %s", e)
+        return False
+
+
+# =============================================================================
+# Binary serial frame packing/unpacking (firmware communication)
+# =============================================================================
class CommandCode(IntEnum):
@@ -55,25 +1484,16 @@ class CommandCode(IntEnum):
DISABLE = 102
JOG = 123
MOVE = 156
+ TELEPORT = 200
IDLE = 255
-def split_bitfield(byte_val: int) -> list[int]:
- """Split an 8-bit integer into a big-endian list of bits (MSB..LSB)."""
- return [(byte_val >> i) & 1 for i in range(7, -1, -1)]
-
-
-def fuse_bitfield_2_bytearray(bits: list[int] | Sequence[int]) -> bytes:
- """
- Fuse a big-endian list of 8 bits (MSB..LSB) into a single byte.
- Any truthy value is treated as 1.
- """
- number = 0
- for b in bits[:8]:
- number = (number << 1) | (1 if b else 0)
- return bytes([number])
+START = b"\xff\xff\xff"
+END = b"\x01\x02"
+PAYLOAD_LEN = 52 # matches existing firmware expectation
+@njit(cache=True)
def split_to_3_bytes(n: int) -> tuple[int, int, int]:
"""
Convert int to signed 24-bit big-endian (two's complement) encoded bytes (b0,b1,b2).
@@ -82,6 +1502,7 @@ def split_to_3_bytes(n: int) -> tuple[int, int, int]:
return ((n24 >> 16) & 0xFF, (n24 >> 8) & 0xFF, n24 & 0xFF)
+@njit(cache=True)
def fuse_3_bytes(b0: int, b1: int, b2: int) -> int:
"""
Convert 3 bytes (big-endian) into a signed 24-bit integer.
@@ -90,6 +1511,7 @@ def fuse_3_bytes(b0: int, b1: int, b2: int) -> int:
return val - 0x1000000 if (val & 0x800000) else val
+@njit(cache=True)
def fuse_2_bytes(b0: int, b1: int) -> int:
"""
Convert 2 bytes (big-endian) into a signed 16-bit integer.
@@ -98,24 +1520,62 @@ def fuse_2_bytes(b0: int, b1: int) -> int:
return val - 0x10000 if (val & 0x8000) else val
-def _get_array_value(arr: np.ndarray | memoryview, index: int, default: int = 0) -> int:
- """
- Safely get value from array-like object with bounds checking.
- Optimized for zero-copy access when possible.
- """
- try:
- if index < len(arr):
- return int(arr[index])
- return default
- except (IndexError, TypeError):
- return default
+@njit(cache=True)
+def _pack_positions(
+ out: np.ndarray | memoryview, values: np.ndarray, offset: int
+) -> None:
+ for i in range(6):
+ v = int(values[i]) & 0xFFFFFF
+ j = offset + i * 3
+ out[j] = (v >> 16) & 0xFF
+ out[j + 1] = (v >> 8) & 0xFF
+ out[j + 2] = v & 0xFF
+
+
+@njit(cache=True)
+def _unpack_positions(data: np.ndarray | memoryview, out: np.ndarray) -> None:
+ for i in range(6):
+ j = i * 3
+ val = (int(data[j]) << 16) | (int(data[j + 1]) << 8) | int(data[j + 2])
+ if val >= 0x800000:
+ val -= 0x1000000
+ out[i] = val
+
+
+@njit(cache=True)
+def _pack_bitfield(arr: np.ndarray) -> int:
+ """Pack 8-element array into a single byte (MSB first)."""
+ return (
+ (int(arr[0] != 0) << 7)
+ | (int(arr[1] != 0) << 6)
+ | (int(arr[2] != 0) << 5)
+ | (int(arr[3] != 0) << 4)
+ | (int(arr[4] != 0) << 3)
+ | (int(arr[5] != 0) << 2)
+ | (int(arr[6] != 0) << 1)
+ | int(arr[7] != 0)
+ )
+
+@njit(cache=True)
+def _unpack_bitfield(byte_val: int, out: np.ndarray) -> None:
+ """Unpack a byte into 8 bits (MSB first) into output array."""
+ out[0] = (byte_val >> 7) & 1
+ out[1] = (byte_val >> 6) & 1
+ out[2] = (byte_val >> 5) & 1
+ out[3] = (byte_val >> 4) & 1
+ out[4] = (byte_val >> 3) & 1
+ out[5] = (byte_val >> 2) & 1
+ out[6] = (byte_val >> 1) & 1
+ out[7] = byte_val & 1
+
+@njit(cache=True)
def pack_tx_frame_into(
out: memoryview,
position_out: np.ndarray,
speed_out: np.ndarray,
- command_code: int | CommandCode,
+ command_code: int,
affected_joint_out: np.ndarray,
inout_out: np.ndarray,
timeout_out: int,
@@ -124,98 +1584,55 @@ def pack_tx_frame_into(
"""
Pack a full TX frame into the provided memoryview without allocations.
- Expects 'out' to be a writable buffer of length >= 56 bytes:
- - 3 start bytes + 1 length byte + 52-byte payload
-
- Layout of the 52-byte payload:
- - 6x position (3 bytes each) = 18
- - 6x speed (3 bytes each) = 18
- - 1 byte command
- - 1 byte affected joint bitfield
- - 1 byte in/out bitfield
- - 1 byte timeout
- - 2 bytes reserved (legacy)
- - 2 bytes gripper position
- - 2 bytes gripper speed
- - 2 bytes gripper current
- - 1 byte gripper command
- - 1 byte gripper mode
- - 1 byte gripper id
- - 1 byte CRC (placeholder 228)
- - 2 bytes end markers (0x01, 0x02)
+ Expects 'out' to be a writable buffer of length >= 56 bytes.
"""
- # Header
- out[0:3] = START
- out[3] = PAYLOAD_LEN
- offset = 4
+ # Header: 0xFF 0xFF 0xFF + payload length
+ out[0] = 0xFF
+ out[1] = 0xFF
+ out[2] = 0xFF
+ out[3] = 52
- # Positions: 6 * 3 bytes
- for i in range(6):
- val = _get_array_value(position_out, i, 0)
- b0, b1, b2 = split_to_3_bytes(val)
- out[offset] = b0
- out[offset + 1] = b1
- out[offset + 2] = b2
- offset += 3
-
- # Speeds: 6 * 3 bytes
- for i in range(6):
- val = _get_array_value(speed_out, i, 0)
- b0, b1, b2 = split_to_3_bytes(val)
- out[offset] = b0
- out[offset + 1] = b1
- out[offset + 2] = b2
- offset += 3
+ # Positions and speeds: JIT-compiled packing
+ _pack_positions(out, position_out, 4)
+ _pack_positions(out, speed_out, 22)
# Command
- out[offset] = int(command_code)
- offset += 1
-
- # Affected joints as bitfield byte
- bitfield_val = 0
- for i in range(8):
- if _get_array_value(affected_joint_out, i, 0):
- bitfield_val |= 1 << (7 - i)
- out[offset] = bitfield_val
- offset += 1
-
- # In/Out as bitfield byte
- bitfield_val = 0
- for i in range(8):
- if _get_array_value(inout_out, i, 0):
- bitfield_val |= 1 << (7 - i)
- out[offset] = bitfield_val
- offset += 1
+ out[40] = command_code
+
+ # Bitfields
+ out[41] = _pack_bitfield(affected_joint_out)
+ out[42] = _pack_bitfield(inout_out)
# Timeout
- out[offset] = int(timeout_out) & 0xFF
- offset += 1
+ out[43] = int(timeout_out) & 0xFF
# Gripper: position, speed, current as 2 bytes each (big-endian)
- for idx in range(3):
- v = _get_array_value(gripper_data_out, idx, 0) & 0xFFFF
- out[offset] = (v >> 8) & 0xFF
- out[offset + 1] = v & 0xFF
- offset += 2
+ g0 = int(gripper_data_out[0]) & 0xFFFF
+ g1 = int(gripper_data_out[1]) & 0xFFFF
+ g2 = int(gripper_data_out[2]) & 0xFFFF
+ out[44] = (g0 >> 8) & 0xFF
+ out[45] = g0 & 0xFF
+ out[46] = (g1 >> 8) & 0xFF
+ out[47] = g1 & 0xFF
+ out[48] = (g2 >> 8) & 0xFF
+ out[49] = g2 & 0xFF
# Gripper command, mode, id
- out[offset] = _get_array_value(gripper_data_out, 3, 0) & 0xFF
- out[offset + 1] = _get_array_value(gripper_data_out, 4, 0) & 0xFF
- out[offset + 2] = _get_array_value(gripper_data_out, 5, 0) & 0xFF
- offset += 3
+ out[50] = int(gripper_data_out[3]) & 0xFF
+ out[51] = int(gripper_data_out[4]) & 0xFF
+ out[52] = int(gripper_data_out[5]) & 0xFF
- # CRC (placeholder - unchanged from legacy)
- out[offset] = 228
- offset += 1
+ # CRC placeholder byte (0xE4) — fixed value, not computed
+ out[53] = 228
# End bytes
- out[offset] = 0x01
- out[offset + 1] = 0x02
+ out[54] = 0x01
+ out[55] = 0x02
+@njit(cache=True)
def unpack_rx_frame_into(
data: memoryview,
- *,
pos_out: np.ndarray,
spd_out: np.ndarray,
homed_out: np.ndarray,
@@ -233,308 +1650,144 @@ def unpack_rx_frame_into(
- timing_out: shape (1,), dtype=int32
- grip_out: shape (6,), dtype=int32 [device_id, pos, spd, cur, status, obj]
"""
- try:
- if len(data) < 52:
- logger.warning(
- f"unpack_rx_frame_into: payload too short ({len(data)} bytes)"
- )
- return False
-
- mv = memoryview(data)
-
- # Positions (0..17) and speeds (18..35), 3 bytes per value, big-endian signed 24-bit
- b = np.frombuffer(mv[:36], dtype=np.uint8).reshape(12, 3)
- pos3 = b[:6]
- spd3 = b[6:]
-
- pos = (
- (pos3[:, 0].astype(np.int32) << 16)
- | (pos3[:, 1].astype(np.int32) << 8)
- | pos3[:, 2].astype(np.int32)
- )
- spd = (
- (spd3[:, 0].astype(np.int32) << 16)
- | (spd3[:, 1].astype(np.int32) << 8)
- | spd3[:, 2].astype(np.int32)
- )
-
- # Sign-correct 24-bit to int32
- pos[pos >= (1 << 23)] -= 1 << 24
- spd[spd >= (1 << 23)] -= 1 << 24
-
- np.copyto(pos_out, pos, casting="no")
- np.copyto(spd_out, spd, casting="no")
-
- homed_byte = mv[36]
- io_byte = mv[37]
- temp_err_byte = mv[38]
- pos_err_byte = mv[39]
- timing_b0 = mv[40]
- timing_b1 = mv[41]
- # indices 42..43 exist in some variants (timeout/xtr), legacy code ignores
-
- device_id = mv[44]
- grip_pos_b0, grip_pos_b1 = mv[45], mv[46]
- grip_spd_b0, grip_spd_b1 = mv[47], mv[48]
- grip_cur_b0, grip_cur_b1 = mv[49], mv[50]
- status_byte = mv[51]
-
- # Bitfields (MSB..LSB) via LUT (no per-call Python loops)
- homed_out[:] = _BIT_UNPACK[int(homed_byte)]
- io_out[:] = _BIT_UNPACK[int(io_byte)]
- temp_out[:] = _BIT_UNPACK[int(temp_err_byte)]
- poserr_out[:] = _BIT_UNPACK[int(pos_err_byte)]
-
- # Timing (legacy semantics: fuse_3_bytes(0, b0, b1))
- timing_val = fuse_3_bytes(0, int(timing_b0), int(timing_b1))
- timing_out[0] = int(timing_val)
-
- # Gripper values
- grip_pos = fuse_2_bytes(int(grip_pos_b0), int(grip_pos_b1))
- grip_spd = fuse_2_bytes(int(grip_spd_b0), int(grip_spd_b1))
- grip_cur = fuse_2_bytes(int(grip_cur_b0), int(grip_cur_b1))
-
- sbits = _BIT_UNPACK[int(status_byte)]
- obj_detection = (int(sbits[4]) << 1) | int(sbits[5])
-
- grip_out[0] = int(device_id)
- grip_out[1] = int(grip_pos)
- grip_out[2] = int(grip_spd)
- grip_out[3] = int(grip_cur)
- grip_out[4] = int(status_byte)
- grip_out[5] = int(obj_detection)
-
- return True
- except Exception as e:
- logger.error(f"unpack_rx_frame_into: exception {e}")
+ if len(data) < 52:
return False
+ _unpack_positions(data, pos_out)
+ _unpack_positions(data[18:], spd_out)
-# =========================
-# Encoding helpers
-# =========================
+ _unpack_bitfield(int(data[36]), homed_out)
+ _unpack_bitfield(int(data[37]), io_out)
+ _unpack_bitfield(int(data[38]), temp_out)
+ _unpack_bitfield(int(data[39]), poserr_out)
+ timing_out[0] = fuse_3_bytes(0, int(data[40]), int(data[41]))
-def _opt(value: object | None, none_token: str = "NONE") -> str:
- """Format an optional value as a string, using none_token for None."""
- return none_token if value is None else str(value)
+ device_id = int(data[44])
+ grip_pos = fuse_2_bytes(int(data[45]), int(data[46]))
+ grip_spd = fuse_2_bytes(int(data[47]), int(data[48]))
+ grip_cur = fuse_2_bytes(int(data[49]), int(data[50]))
+ status_byte = int(data[51])
+ obj_detection = ((status_byte >> 3) & 1) << 1 | ((status_byte >> 2) & 1)
-def encode_move_joint(
- angles: Sequence[float],
- duration: float | None,
- speed: float | None,
-) -> str:
- """
- MOVEJOINT|j1|j2|j3|j4|j5|j6|DUR|SPD
- Use "NONE" for omitted duration/speed.
- Note: Validation (requiring one of duration/speed) is left to caller.
- """
- angles_str = "|".join(str(a) for a in angles)
- return f"MOVEJOINT|{angles_str}|{_opt(duration)}|{_opt(speed)}"
+ grip_out[0] = device_id
+ grip_out[1] = grip_pos
+ grip_out[2] = grip_spd
+ grip_out[3] = grip_cur
+ grip_out[4] = status_byte
+ grip_out[5] = obj_detection
+ return True
-def encode_move_pose(
- pose: Sequence[float],
- duration: float | None,
- speed: float | None,
-) -> str:
- """
- MOVEPOSE|x|y|z|rx|ry|rz|DUR|SPD
- Use "NONE" for omitted duration/speed.
- """
- pose_str = "|".join(str(v) for v in pose)
- return f"MOVEPOSE|{pose_str}|{_opt(duration)}|{_opt(speed)}"
+# =============================================================================
+# Module exports
+# =============================================================================
-def encode_move_cartesian(
- pose: Sequence[float],
- duration: float | None,
- speed: float | None,
-) -> str:
- """
- MOVECART|x|y|z|rx|ry|rz|DUR|SPD
- Use "NONE" for omitted duration/speed.
- """
- pose_str = "|".join(str(v) for v in pose)
- return f"MOVECART|{pose_str}|{_opt(duration)}|{_opt(speed)}"
-
-
-def encode_move_cartesian_rel_trf(
- deltas: Sequence[float], # [dx, dy, dz, rx, ry, rz] in mm/deg relative to TRF
- duration: float | None,
- speed: float | None,
- accel: int | None,
- profile: str | None,
- tracking: str | None,
-) -> str:
- """
- MOVECARTRELTRF|dx|dy|dz|rx|ry|rz|DUR|SPD|ACC|PROFILE|TRACKING
- Non-required fields should use "NONE".
- """
- delta_str = "|".join(str(v) for v in deltas)
- prof_str = (profile or "NONE").upper()
- track_str = (tracking or "NONE").upper()
- return (
- f"MOVECARTRELTRF|{delta_str}|{_opt(duration)}|{_opt(speed)}|"
- f"{_opt(accel)}|{prof_str}|{track_str}"
- )
-
-
-def encode_jog_joint(
- joint_index: int,
- speed_percentage: int,
- duration: float | None,
- distance_deg: float | None,
-) -> str:
- """
- JOG|joint_index|speed_pct|DUR|DIST
- duration and distance_deg are optional; use "NONE" if omitted.
- """
- return f"JOG|{joint_index}|{speed_percentage}|{_opt(duration)}|{_opt(distance_deg)}"
-
-
-def encode_cart_jog(
- frame: Frame,
- axis: Axis,
- speed_percentage: int,
- duration: float,
-) -> str:
- """
- CARTJOG|FRAME|AXIS|speed_pct|duration
- """
- return f"CARTJOG|{frame}|{axis}|{speed_percentage}|{duration}"
-
-
-def encode_gcode(line: str) -> str:
- """
- GCODE|
- The caller should ensure that '|' is not present in the line.
- """
- return f"GCODE|{line}"
-
-
-def encode_gcode_program_inline(lines: Sequence[str]) -> str:
- """
- GCODE_PROGRAM|INLINE|line1;line2;...
- The caller should ensure that '|' is not present inside any line.
- """
- program_str = ";".join(lines)
- return f"GCODE_PROGRAM|INLINE|{program_str}"
-
-
-# =========================
-# Decoding helpers
-# =========================
-def decode_simple(
- resp: str, expected_prefix: Literal["ANGLES", "IO", "GRIPPER", "SPEEDS", "POSE"]
-) -> list[float] | list[int] | None:
- """
- Decode simple prefixed payloads like:
- ANGLES|a0,a1,a2,a3,a4,a5
- IO|in1,in2,out1,out2,estop
- GRIPPER|id,pos,spd,cur,status,obj
- SPEEDS|s0,s1,s2,s3,s4,s5
- POSE|p0,p1,...,p15
-
- Returns list[float] or list[int] depending on the expected_prefix.
- """
- if not resp:
- logger.debug(
- f"decode_simple: Empty response for expected prefix '{expected_prefix}'"
- )
- return None
- parts = resp.strip().split("|", 1)
- if len(parts) != 2 or parts[0] != expected_prefix:
- logger.warning(
- f"decode_simple: Invalid response format. Expected '{expected_prefix}|...' but got '{resp}'"
- )
- return None
- payload = parts[1]
- tokens = [t for t in payload.split(",") if t != ""]
-
- # IO and GRIPPER are integer-based; others default to float
- if expected_prefix in ("IO", "GRIPPER"):
- try:
- return [int(t) for t in tokens]
- except ValueError as e:
- logger.error(
- f"decode_simple: Failed to parse integers for {expected_prefix}. Payload: '{payload}', Error: {e}"
- )
- return None
- else:
- try:
- return [float(t) for t in tokens]
- except ValueError as e:
- logger.error(
- f"decode_simple: Failed to parse floats for {expected_prefix}. Payload: '{payload}', Error: {e}"
- )
- return None
-
-
-def decode_status(resp: str) -> StatusAggregate | None:
- """
- Decode aggregate status:
- STATUS|POSE=p0,p1,...,p15|ANGLES=a0,...,a5|SPEEDS=s0,...,s5|IO=in1,in2,out1,out2,estop|GRIPPER=id,pos,spd,cur,status,obj|
- ACTION_CURRENT=...|ACTION_STATE=...
-
- Returns a dict matching StatusAggregate or None on parse failure.
- """
- if not resp or not resp.startswith("STATUS|"):
- return None
-
- # Split top-level sections after "STATUS|"
- sections = resp.split("|")[1:]
- result: dict[str, object] = {
- "pose": None,
- "angles": None,
- "speeds": None,
- "io": None,
- "gripper": None,
- "action_current": None,
- "action_state": None,
- "joint_en": None,
- "cart_en_wrf": None,
- "cart_en_trf": None,
- }
- for sec in sections:
- if sec.startswith("POSE="):
- vals = [float(x) for x in sec[len("POSE=") :].split(",") if x]
- result["pose"] = vals
- elif sec.startswith("ANGLES="):
- vals = [float(x) for x in sec[len("ANGLES=") :].split(",") if x]
- result["angles"] = vals
- elif sec.startswith("SPEEDS="):
- vals = [float(x) for x in sec[len("SPEEDS=") :].split(",") if x]
- result["speeds"] = vals
- elif sec.startswith("IO="):
- vals = [int(x) for x in sec[len("IO=") :].split(",") if x]
- result["io"] = vals
- elif sec.startswith("GRIPPER="):
- vals = [int(x) for x in sec[len("GRIPPER=") :].split(",") if x]
- result["gripper"] = vals
- elif sec.startswith("ACTION_CURRENT="):
- result["action_current"] = sec[len("ACTION_CURRENT=") :]
- elif sec.startswith("ACTION_STATE="):
- result["action_state"] = sec[len("ACTION_STATE=") :]
- elif sec.startswith("JOINT_EN="):
- vals = [int(x) for x in sec[len("JOINT_EN=") :].split(",") if x]
- result["joint_en"] = vals
- elif sec.startswith("CART_EN_WRF="):
- vals = [int(x) for x in sec[len("CART_EN_WRF=") :].split(",") if x]
- result["cart_en_wrf"] = vals
- elif sec.startswith("CART_EN_TRF="):
- vals = [int(x) for x in sec[len("CART_EN_TRF=") :].split(",") if x]
- result["cart_en_trf"] = vals
-
- # Basic validation: accept if at least one of the core groups is present
- if (
- result["pose"] is None
- and result["angles"] is None
- and result["io"] is None
- and result["gripper"] is None
- and result["action_current"] is None
- ):
- return None
-
- return cast(StatusAggregate, result)
+__all__ = [
+ # Enums
+ "MsgType",
+ "QueryType",
+ "CmdType",
+ "CommandCode",
+ # Command structs — motion (queued)
+ "MoveJCmd",
+ "MoveJPoseCmd",
+ "MoveLCmd",
+ "MoveCCmd",
+ "MoveSCmd",
+ "MovePCmd",
+ "HomeCmd",
+ "CheckpointCmd",
+ # Command structs — streaming (servo/jog)
+ "ServoJCmd",
+ "ServoJPoseCmd",
+ "ServoLCmd",
+ "JogJCmd",
+ "JogLCmd",
+ # Command structs — system/control
+ "ResumeCmd",
+ "HaltCmd",
+ "ResetCmd",
+ "ResetLoopStatsCmd",
+ "WriteIOCmd",
+ "ConnectHardwareCmd",
+ "SimulatorCmd",
+ "DelayCmd",
+ "TeleportCmd",
+ "SelectToolCmd",
+ "SetTcpOffsetCmd",
+ "SelectProfileCmd",
+ "ToolActionCmd",
+ # Command structs — query
+ "ToolStatusCmd",
+ "IsSimulatorCmd",
+ "ReachableCmd",
+ "ErrorCmd",
+ "TcpSpeedCmd",
+ "TcpOffsetCmd",
+ "PingCmd",
+ "StatusCmd",
+ "AnglesCmd",
+ "PoseCmd",
+ "IOCmd",
+ "JointSpeedsCmd",
+ "ToolsCmd",
+ "QueueCmd",
+ "ActivityCmd",
+ "LoopStatsCmd",
+ "ProfileCmd",
+ "Command",
+ # Mixin
+ "MotionParamsMixin",
+ # Response structs
+ "StatusResultStruct",
+ "LoopStatsResultStruct",
+ "ToolResultStruct",
+ "CurrentActionResultStruct",
+ "PingResultStruct",
+ "AnglesResultStruct",
+ "PoseResultStruct",
+ "IOResultStruct",
+ "SpeedsResultStruct",
+ "ProfileResultStruct",
+ "QueueResultStruct",
+ "ToolStatusResultStruct",
+ "EnablementResultStruct",
+ "ErrorResultStruct",
+ "TcpSpeedResultStruct",
+ "IsSimulatorResultStruct",
+ "TcpOffsetResultStruct",
+ "Response",
+ # Message types
+ "OkMsg",
+ "ErrorMsg",
+ "ResponseMsg",
+ "Message",
+ # Encode/decode
+ "decode_command",
+ "encode_command",
+ "encode_command_into",
+ "STRUCT_TO_CMDTYPE",
+ "decode_message",
+ "encode",
+ "decode",
+ "pack_ok",
+ "pack_ok_index",
+ "pack_error",
+ "pack_response",
+ "pack_status",
+ # Status buffer
+ "StatusBuffer",
+ "decode_status_bin_into",
+ # Binary frame protocol
+ "START",
+ "END",
+ "PAYLOAD_LEN",
+ "split_to_3_bytes",
+ "fuse_3_bytes",
+ "fuse_2_bytes",
+ "pack_tx_frame_into",
+ "unpack_rx_frame_into",
+]
diff --git a/parol6/robot.py b/parol6/robot.py
new file mode 100644
index 0000000..ac444f5
--- /dev/null
+++ b/parol6/robot.py
@@ -0,0 +1,887 @@
+"""Unified PAROL6 robot — lifecycle, configuration, kinematics, and factories.
+
+Inherits from ``waldoctl.Robot`` ABC.
+All parol6-specific details (subprocess management, pinokin, IK solver, etc.)
+are encapsulated here.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import re
+import socket
+import subprocess
+import sys
+import threading
+import time
+from dataclasses import dataclass
+from importlib.resources import files as pkg_files
+from pathlib import Path
+from typing import Any, Literal
+
+import numpy as np
+from numpy.typing import NDArray
+from pinokin import Robot as PinokinRobot
+from pinokin import se3_from_rpy, so3_rpy
+from collections.abc import Callable
+from waldoctl import (
+ CartesianKinodynamicLimits,
+ ChannelDescriptor,
+ DryRunClient,
+ ElectricGripperTool,
+ HomePosition,
+ IKResult,
+ JointLimits,
+ JointsSpec,
+ KinodynamicLimits,
+ LinearAngularLimits,
+ PneumaticGripperTool,
+ PositionLimits,
+ Robot as _RobotABC,
+ ToolSpec,
+ ToolsSpec,
+ ToolStatus,
+ ToolType,
+)
+
+from parol6.client.async_client import AsyncRobotClient
+from parol6.client.dry_run_client import DryRunRobotClient
+from parol6.client.sync_client import RobotClient as SyncRobotClient
+from parol6.config import HOME_ANGLES_DEG, LIMITS
+from parol6.motion.trajectory import ProfileType
+from parol6.protocol.wire import CmdType, MsgType, decode, encode
+from parol6.tools import (
+ ElectricGripperConfig,
+ PneumaticGripperConfig,
+ get_registry,
+)
+from parol6.utils.ik import check_limits, solve_ik
+
+logger = logging.getLogger(__name__)
+
+# Precompiled regex for server log normalization
+_SIMPLE_FORMAT_RE = re.compile(
+ r"^\s*(\d{2}:\d{2}:\d{2})\s+(DEBUG|INFO|WARNING|ERROR|CRITICAL|TRACE)\s+([A-Za-z0-9_.-]+):\s+(.*)$"
+)
+
+
+# ===========================================================================
+# Server lifecycle (private)
+# ===========================================================================
+
+
+def _is_server_running(
+    host: str = "127.0.0.1",
+    port: int = 5001,
+    timeout: float = 1.0,
+) -> bool:
+    """Return True if a PAROL6 controller responds to UDP PING at host:port.
+
+    Args:
+        host: Controller address to probe.
+        port: Controller UDP port.
+        timeout: Socket receive timeout in seconds.
+
+    Returns:
+        True only when a datagram comes back that decodes to a sequence
+        tagged ``MsgType.RESPONSE``; any socket error or timeout yields False.
+    """
+    try:
+        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
+            sock.settimeout(timeout)
+            ping_msg = encode((CmdType.PING,))
+            sock.sendto(ping_msg, (host, port))
+            data, _ = sock.recvfrom(1024)
+            resp = decode(data)
+            return (
+                isinstance(resp, (list, tuple))
+                and len(resp) >= 1
+                and resp[0] == MsgType.RESPONSE
+            )
+    except (OSError, socket.timeout):
+        # socket.timeout is an alias of TimeoutError (an OSError subclass on
+        # modern Pythons) — listed explicitly for readability.
+        return False
+
+
+class _ServerManager:
+    """Manages the lifecycle of the PAROL6 controller subprocess.
+
+    Starts the controller via ``python -m parol6.server.cli``, forwards its
+    stdout to logging on a daemon reader thread, and tears it down with
+    terminate-then-kill semantics.
+    """
+
+    def __init__(self, normalize_logs: bool = False) -> None:
+        # Controller process handle; None until start_controller() succeeds.
+        self._proc: subprocess.Popen | None = None
+        # Daemon thread streaming controller stdout into logging.
+        self._reader_thread: threading.Thread | None = None
+        # Signals the reader thread to stop consuming output.
+        self._stop_reader = threading.Event()
+        # When True, controller output is re-parsed into log records;
+        # otherwise it is printed verbatim.
+        self.normalize_logs = normalize_logs
+
+    @property
+    def pid(self) -> int | None:
+        # PID of the live controller process, or None if not running.
+        return self._proc.pid if self._proc and self._proc.poll() is None else None
+
+    def is_running(self) -> bool:
+        # poll() returns None while the process is alive.
+        return self._proc is not None and self._proc.poll() is None
+
+    def start_controller(
+        self,
+        com_port: str | None = None,
+        no_autohome: bool = True,
+        extra_env: dict | None = None,
+        server_host: str | None = None,
+        server_port: int | None = None,
+    ) -> None:
+        """Start the controller if not already running.
+
+        Args:
+            com_port: Serial port passed as ``--serial=...`` when given.
+            no_autohome: When True, sets PAROL6_NOAUTOHOME=1 in the child env.
+            extra_env: Extra environment variables merged into the child env.
+            server_host: Overrides PAROL6_CONTROLLER_IP when given.
+            server_port: Overrides PAROL6_CONTROLLER_PORT when given.
+
+        Raises:
+            RuntimeError: If the subprocess fails to spawn.
+        """
+        if self.is_running():
+            return
+
+        # repo root: parol6/robot.py -> parents[1]
+        cwd = Path(__file__).resolve().parents[1]
+
+        env = os.environ.copy()
+        if no_autohome:
+            env["PAROL6_NOAUTOHOME"] = "1"
+        if extra_env:
+            env.update(extra_env)
+        if server_host:
+            env["PAROL6_CONTROLLER_IP"] = server_host
+        if server_port is not None:
+            env["PAROL6_CONTROLLER_PORT"] = str(server_port)
+
+        # Prepend the repo root so the child resolves the parol6 package.
+        existing_py_path = env.get("PYTHONPATH", "")
+        env["PYTHONPATH"] = (
+            f"{cwd}{os.pathsep}{existing_py_path}" if existing_py_path else str(cwd)
+        )
+
+        # -u: unbuffered stdout so the reader thread sees lines promptly.
+        args = [sys.executable, "-u", "-m", "parol6.server.cli"]
+
+        root_logger = logging.getLogger()
+        root_level = root_logger.level
+
+        # PAROL_TRACE=1 forces TRACE; otherwise mirror the root logger level.
+        parol_trace_flag = str(env.get("PAROL_TRACE", "0")).strip().lower()
+        if parol_trace_flag in ("1", "true", "yes", "on"):
+            level_name = "TRACE"
+        else:
+            level_name = logging.getLevelName(root_level)
+            # getLevelName returns "Level N" for unregistered numeric levels;
+            # map the custom numeric level 5 to TRACE, anything else to INFO.
+            if isinstance(level_name, str) and level_name.upper().startswith("LEVEL"):
+                if root_level == 5:
+                    level_name = "TRACE"
+                else:
+                    level_name = "INFO"
+
+        args.append(f"--log-level={level_name}")
+        if com_port:
+            args.append(f"--serial={com_port}")
+
+        try:
+            # stderr merged into stdout; text + line buffering for readline().
+            self._proc = subprocess.Popen(
+                args,
+                cwd=str(cwd),
+                env=env,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                text=True,
+                bufsize=1,
+            )
+        except Exception as e:
+            raise RuntimeError(f"Failed to start controller: {e}") from e
+
+        if self._proc and self._proc.stdout is not None:
+            self._stop_reader.clear()
+            self._reader_thread = threading.Thread(
+                target=self._stream_output,
+                args=(self._proc,),
+                name="ServerOutputReader",
+                daemon=True,
+            )
+            self._reader_thread.start()
+
+    def _stream_output(self, proc: subprocess.Popen) -> None:
+        """Read controller stdout and forward to logging.
+
+        With ``normalize_logs`` set, lines matching _SIMPLE_FORMAT_RE are
+        re-emitted through the named logger at the parsed level; unmatched
+        continuation lines reuse the last seen logger name.
+        """
+        try:
+            assert proc.stdout is not None
+            last_logger = "parol6.server"
+
+            for raw_line in iter(proc.stdout.readline, ""):
+                if self._stop_reader.is_set():
+                    break
+                line = raw_line.rstrip("\r\n")
+                if not line:
+                    continue
+
+                if self.normalize_logs:
+                    level = logging.INFO
+                    logger_name: str | None = None
+                    msg = line
+
+                    s = _SIMPLE_FORMAT_RE.match(line)
+                    if s:
+                        _, level_str, logger_name, actual_message = s.groups()
+                        logger_name = (logger_name or "").strip()
+                        msg = actual_message
+                        level = getattr(
+                            logging, (level_str or "INFO").upper(), logging.INFO
+                        )
+                    elif line.startswith("Traceback"):
+                        # Tracebacks have no level prefix; escalate to ERROR.
+                        level = logging.ERROR
+
+                    target_logger_name = logger_name or last_logger or "parol6.server"
+                    target_logger = logging.getLogger(target_logger_name)
+                    target_logger.log(level, msg)
+
+                    if logger_name:
+                        last_logger = logger_name
+                else:
+                    print(line)
+        except Exception as e:
+            logging.warning("_ServerManager: output reader stopped: %s", e)
+
+    def stop_controller(self, timeout: float = 2.0) -> None:
+        """Stop the controller process if running.
+
+        Stops the reader thread first, then SIGTERMs the process; if it is
+        still alive after ``timeout`` seconds, escalates to SIGKILL.
+        """
+        self._stop_reader.set()
+        if self._reader_thread and self._reader_thread.is_alive():
+            self._reader_thread.join(timeout=timeout)
+        self._reader_thread = None
+        if self._proc and self._proc.poll() is None:
+            logging.debug("Stopping Controller...")
+            try:
+                self._proc.terminate()
+                self._proc.wait(timeout=timeout)
+            except Exception as e:
+                logging.warning("stop_controller: terminate/wait failed: %s", e)
+
+            if self._proc and self._proc.poll() is None:
+                logging.warning(
+                    "Controller did not exit after SIGTERM within %.1fs, sending SIGKILL",
+                    timeout,
+                )
+                try:
+                    self._proc.kill()
+                    self._proc.wait(timeout=timeout)
+                except Exception as e:
+                    logging.warning("stop_controller: kill/wait failed: %s", e)
+        self._proc = None
+
+    def await_ready(
+        self,
+        host: str = "127.0.0.1",
+        port: int = 5001,
+        timeout: float = 10.0,
+        poll_interval: float = 0.2,
+    ) -> bool:
+        """Block until the controller responds to PING over UDP.
+
+        Returns:
+            True once a PING succeeds before the deadline, False on timeout.
+        """
+        deadline = time.monotonic() + max(0.0, float(timeout))
+        while time.monotonic() < deadline:
+            if _is_server_running(host, port, timeout=min(0.5, poll_interval)):
+                return True
+            remain = deadline - time.monotonic()
+            if remain <= 0:
+                return False
+            time.sleep(min(poll_interval, remain))
+        return False
+
+
+# ===========================================================================
+# Concrete tool implementations (inherit waldoctl ABCs)
+# ===========================================================================
+
+
+class _ToolBase:
+ """Dispatch infrastructure for concrete tool implementations.
+
+ Provides ``_execute`` callback binding and ``_cmd()`` dispatch.
+ The ``_execute`` callback is set by ``create_async_client()`` via
+ shallow copy to bind the tool to a client's ``tool_action`` method.
+ ``_get_status`` is bound to the client's private ``_tool_status`` query.
+ """
+
+ _execute: Callable[..., Any] | None = None
+ _get_status: Callable[..., Any] | None = None
+ key: str # provided by ToolSpec (mixed in by concrete subclasses)
+
+ async def _cmd(
+ self, action: str, params: list[Any] | None = None, **kwargs: object
+ ) -> int:
+ if self._execute is None:
+ raise RuntimeError("Tool not bound to a client. Access via client.tool.")
+ return await self._execute(self.key, action, params or [], **kwargs)
+
+ async def status(self) -> ToolStatus:
+ if self._get_status is None:
+ raise RuntimeError("Tool not bound to a client. Access via client.tool.")
+ return await self._get_status()
+
+
+class _ToolImpl(_ToolBase, ToolSpec):
+ """Concrete ToolSpec for passive/no-action tools."""
+
+ def __init__(self, *, tool_type: ToolType = ToolType.NONE, **kwargs: Any) -> None:
+ super().__init__(tool_type=tool_type, **kwargs)
+
+
+class _PneumaticGripperImpl(_ToolBase, PneumaticGripperTool):
+ """Concrete PneumaticGripperTool for PAROL6."""
+
+ def __init__(self, *, io_port: int = 1, **kwargs: Any) -> None:
+ super().__init__(io_port=io_port, **kwargs)
+
+ async def set_position(self, position: float, **kwargs: float | int) -> int:
+ """Binary position: < 0.5 opens, >= 0.5 closes."""
+ if position < 0.5:
+ return await self.open(**kwargs)
+ return await self.close(**kwargs)
+
+ async def open(self, **kwargs: float | int) -> int:
+ return await self._cmd("open", params=None, **kwargs)
+
+ async def close(self, **kwargs: float | int) -> int:
+ return await self._cmd("close", params=None, **kwargs)
+
+
+class _ElectricGripperImpl(_ToolBase, ElectricGripperTool):
+ """Concrete ElectricGripperTool for PAROL6."""
+
+ def __init__(
+ self,
+ *,
+ position_range: tuple[float, float] = (0.0, 1.0),
+ speed_range: tuple[float, float] = (0.0, 1.0),
+ current_range: tuple[int, int],
+ **kwargs: Any,
+ ) -> None:
+ kwargs.setdefault("action_r_labels", ("Calibrate", "Calibrate"))
+ kwargs.setdefault("action_r_icons", ("build", "build"))
+ super().__init__(
+ position_range=position_range,
+ speed_range=speed_range,
+ current_range=current_range,
+ **kwargs,
+ )
+
+ async def set_position(self, position: float, **kwargs: float | int) -> int:
+ speed = float(kwargs.get("speed", 0.5))
+ current = int(kwargs.get("current", self.current_range[0]))
+ return await self._cmd("move", [position, speed, current])
+
+ async def calibrate(self, **kwargs: object) -> int:
+ return await self._cmd("calibrate")
+
+ async def action_r(self, engaged: bool) -> None:
+ await self.calibrate()
+
+ async def open(self, **kwargs: float | int) -> int:
+ return await self.set_position(0.0, **kwargs)
+
+ async def close(self, **kwargs: float | int) -> int:
+ return await self.set_position(1.0, **kwargs)
+
+ @property
+ def adjust_step(self) -> int:
+ """Default current step: ~10% of range, rounded to nearest 10 mA."""
+ lo, hi = self.current_range
+ return max(10, round((hi - lo) / 10 / 10) * 10)
+
+ @property
+ def adjust_labels(self) -> tuple[str, str]:
+ return ("Less current", "More current")
+
+ @property
+ def adjust_icons(self) -> tuple[str, str]:
+ return ("remove", "add")
+
+ @property
+ def channel_descriptors(self) -> tuple[ChannelDescriptor, ...]:
+ return (
+ ChannelDescriptor(
+ name="Current", unit="mA", max=float(self.current_range[1])
+ ),
+ )
+
+
+class _ToolsCollection(ToolsSpec):
+ """Concrete ToolsSpec for PAROL6."""
+
+ def __init__(self, tools: tuple[ToolSpec, ...]) -> None:
+ self._tools = tools
+ self._by_key = {t.key: t for t in tools}
+
+ @property
+ def available(self) -> tuple[ToolSpec, ...]:
+ return self._tools
+
+ @property
+ def default(self) -> ToolSpec:
+ return self._by_key.get("NONE", self._tools[0])
+
+ def __getitem__(self, key: str) -> ToolSpec:
+ return self._by_key[key]
+
+ def __contains__(self, item: object) -> bool:
+ if isinstance(item, str):
+ return item in self._by_key
+ if isinstance(item, ToolType):
+ return any(t.tool_type == item for t in self._tools)
+ return False
+
+ def by_type(self, tool_type: ToolType) -> tuple[ToolSpec, ...]:
+ return tuple(t for t in self._tools if t.tool_type == tool_type)
+
+
+# ===========================================================================
+# Helper builders
+# ===========================================================================
+
+
+def _build_joints() -> JointsSpec:
+ """Build JointsSpec from parol6 LIMITS and HOME_ANGLES_DEG."""
+ home_deg = np.array(HOME_ANGLES_DEG, dtype=np.float64)
+ return JointsSpec(
+ count=6,
+ names=("Base", "Shoulder", "Elbow", "Wrist 1", "Wrist 2", "Wrist 3"),
+ limits=JointLimits(
+ position=PositionLimits(
+ deg=LIMITS.joint.position.deg,
+ rad=LIMITS.joint.position.rad,
+ ),
+ hard=KinodynamicLimits(
+ velocity=LIMITS.joint.hard.velocity,
+ acceleration=LIMITS.joint.hard.acceleration,
+ jerk=LIMITS.joint.hard.jerk,
+ ),
+ jog=KinodynamicLimits(
+ velocity=LIMITS.joint.jog.velocity,
+ acceleration=LIMITS.joint.jog.acceleration,
+ jerk=LIMITS.joint.jog.jerk,
+ ),
+ ),
+ home=HomePosition(
+ deg=home_deg,
+ rad=np.deg2rad(home_deg),
+ ),
+ )
+
+
+def _decompose_transform(
+ T: NDArray[np.float64],
+) -> tuple[tuple[float, float, float], tuple[float, float, float]]:
+ """Extract (origin_m, rpy_rad) from a 4x4 homogeneous transform."""
+ origin = (float(T[0, 3]), float(T[1, 3]), float(T[2, 3]))
+ rpy_buf = np.zeros(3, dtype=np.float64)
+ so3_rpy(T[:3, :3], rpy_buf)
+ rpy = (float(rpy_buf[0]), float(rpy_buf[1]), float(rpy_buf[2]))
+ return origin, rpy
+
+
+def _build_tools() -> _ToolsCollection:
+ """Build typed tool specs from the parol6 tool registry."""
+ tools: list[ToolSpec] = []
+ for key, cfg in get_registry().items():
+ origin, rpy = _decompose_transform(cfg.transform)
+ common = dict(
+ key=key,
+ display_name=cfg.name,
+ description=cfg.description,
+ tcp_origin=origin,
+ tcp_rpy=rpy,
+ meshes=cfg.meshes,
+ motions=cfg.motions,
+ variants=cfg.variants,
+ )
+
+ if isinstance(cfg, PneumaticGripperConfig):
+ tools.append(_PneumaticGripperImpl(**common, io_port=cfg.io_port))
+ elif isinstance(cfg, ElectricGripperConfig):
+ tools.append(
+ _ElectricGripperImpl(
+ **common,
+ position_range=cfg.position_range,
+ speed_range=cfg.speed_range,
+ current_range=cfg.current_range,
+ )
+ )
+ else:
+ tools.append(_ToolImpl(**common, tool_type=ToolType.NONE))
+
+ return _ToolsCollection(tuple(tools))
+
+
+def _resolve_urdf_path() -> str:
+ urdf_res = pkg_files("parol6") / "urdf_model" / "urdf" / "PAROL6.urdf"
+ return str(Path(str(urdf_res)).resolve())
+
+
+def _resolve_mesh_dir() -> str:
+ urdf = Path(_resolve_urdf_path())
+ return str(urdf.parent.parent)
+
+
+# ===========================================================================
+# IK result type (parol6-native, structurally satisfies the Protocol)
+# ===========================================================================
+
+
+@dataclass
+class Parol6IKResult:
+ """IK result — structurally compatible with the web commander's IKResult Protocol."""
+
+ q: NDArray[np.float64] # radians
+ success: bool
+ violations: str | None = None
+ iterations: int = 0
+ residual: float = 0.0
+
+
+# ===========================================================================
+# Robot class
+# ===========================================================================
+
+
+class Robot(_RobotABC):
+ """Unified PAROL6 robot — inherits from waldoctl.Robot ABC.
+
+ Combines identity, configuration, FK/IK kinematics, controller lifecycle,
+ and client factories. Supports both sync and async context managers::
+
+ # Sync
+ with Robot() as robot:
+ client = robot.create_sync_client()
+
+ # Async
+ async with Robot() as robot:
+ client = robot.create_async_client()
+ """
+
+ def __init__(
+ self,
+ *,
+ host: str = "127.0.0.1",
+ port: int = 5001,
+ timeout: float = 10.0,
+ normalize_logs: bool = False,
+ ) -> None:
+ self._host = host
+ self._port = port
+ self._timeout = timeout
+ self._manager = _ServerManager(normalize_logs=normalize_logs)
+
+ # Build configuration eagerly
+ self._joints = _build_joints()
+ self._tools = _build_tools()
+ self._urdf_path = _resolve_urdf_path()
+ self._mesh_dir = _resolve_mesh_dir()
+ self._motion_profiles = tuple(p.value.upper() for p in ProfileType)
+
+ cj = LIMITS.cart.jog
+ self._cartesian_limits = CartesianKinodynamicLimits(
+ velocity=LinearAngularLimits(
+ linear=cj.velocity.linear,
+ angular=cj.velocity.angular,
+ ),
+ acceleration=LinearAngularLimits(
+ linear=cj.acceleration.linear,
+ angular=cj.acceleration.angular,
+ ),
+ )
+
+ # Initialize pinokin for FK/IK
+ self._pinokin = PinokinRobot(self._urdf_path)
+
+ # Pre-allocated buffers for FK/IK
+ self._q_buf = np.zeros(self._pinokin.nq, dtype=np.float64)
+ self._T_buf = np.asfortranarray(np.zeros((4, 4), dtype=np.float64))
+ self._rpy_buf = np.zeros(3, dtype=np.float64)
+ self._T_target_buf = np.zeros((4, 4), dtype=np.float64)
+
+ # -- Identity -----------------------------------------------------------
+
+ @property
+ def name(self) -> str:
+ return "PAROL6"
+
+ # -- Structured sub-objects ---------------------------------------------
+
+ @property
+ def joints(self) -> JointsSpec:
+ return self._joints
+
+ @property
+ def tools(self) -> _ToolsCollection:
+ return self._tools
+
+ @property
+ def cartesian_limits(self) -> CartesianKinodynamicLimits:
+ return self._cartesian_limits
+
+ # -- Unit preferences ---------------------------------------------------
+
+ @property
+ def position_unit(self) -> Literal["mm", "m"]:
+ return "mm"
+
+ # -- Capability flags ---------------------------------------------------
+
+ @property
+ def has_force_torque(self) -> bool:
+ return False
+
+ @property
+ def has_freedrive(self) -> bool:
+ return False
+
+ @property
+ def digital_outputs(self) -> int:
+ return 2
+
+ @property
+ def digital_inputs(self) -> int:
+ return 2
+
+ # -- Visualization ------------------------------------------------------
+
+ @property
+ def urdf_path(self) -> str:
+ return self._urdf_path
+
+ @property
+ def mesh_dir(self) -> str:
+ return self._mesh_dir
+
+ @property
+ def joint_index_mapping(self) -> tuple[int, ...]:
+ return (0, 1, 2, 3, 4, 5)
+
+ # -- Motion configuration -----------------------------------------------
+
+ @property
+ def motion_profiles(self) -> tuple[str, ...]:
+ return self._motion_profiles
+
+ @property
+ def cartesian_frames(self) -> tuple[str, ...]:
+ return ("WRF", "TRF")
+
+ # -- Backend injection --------------------------------------------------
+
+ @property
+ def backend_package(self) -> str:
+ return "parol6"
+
+ @property
+ def sync_client_class(self) -> type:
+ return SyncRobotClient
+
+ @property
+ def async_client_class(self) -> type:
+ return AsyncRobotClient
+
+ # -- Kinematics ---------------------------------------------------------
+
+ def _load_q_buf(self, q_rad: NDArray[np.float64]) -> None:
+ """Copy joint radians into the padded pinokin q buffer."""
+ n = min(len(q_rad), self._pinokin.nq)
+ self._q_buf[:n] = q_rad[:n]
+ self._q_buf[n:] = 0.0
+
+ def set_active_tool(
+ self,
+ tool_key: str,
+ tcp_offset_m: tuple[float, float, float] | None = None,
+ variant_key: str | None = None,
+ ) -> None:
+ """Apply tool transform to the local FK/IK model.
+
+ When set, ``fk()`` returns TCP position instead of flange position.
+
+ *tcp_offset_m*: optional (x, y, z) user offset in meters, composed
+ on top of the tool's registered transform.
+ *variant_key*: optional variant whose TCP overrides the tool default.
+ """
+ from parol6.tools import get_tool_transform
+
+ T_tool = get_tool_transform(tool_key, variant_key=variant_key)
+
+ if tcp_offset_m is not None and any(v != 0 for v in tcp_offset_m):
+ T_offset = np.eye(4)
+ T_offset[0, 3] = tcp_offset_m[0]
+ T_offset[1, 3] = tcp_offset_m[1]
+ T_offset[2, 3] = tcp_offset_m[2]
+ T_tool = T_tool @ T_offset
+
+ if tool_key != "NONE" and not np.allclose(T_tool, np.eye(4)):
+ self._pinokin.set_tool_transform(T_tool)
+ else:
+ self._pinokin.clear_tool_transform()
+
+ def fk(
+ self, q_rad: NDArray[np.float64], out: NDArray[np.float64]
+ ) -> NDArray[np.float64]:
+ self._load_q_buf(q_rad)
+ self._pinokin.fkine_into(self._q_buf, self._T_buf)
+ so3_rpy(self._T_buf[:3, :3], self._rpy_buf)
+ out[0] = self._T_buf[0, 3]
+ out[1] = self._T_buf[1, 3]
+ out[2] = self._T_buf[2, 3]
+ out[3] = self._rpy_buf[0]
+ out[4] = self._rpy_buf[1]
+ out[5] = self._rpy_buf[2]
+ return out
+
+ def ik(
+ self, pose: NDArray[np.float64], q_seed_rad: NDArray[np.float64]
+ ) -> Parol6IKResult:
+ se3_from_rpy(
+ pose[0],
+ pose[1],
+ pose[2],
+ pose[3],
+ pose[4],
+ pose[5],
+ self._T_target_buf,
+ )
+ result = solve_ik(
+ robot=self._pinokin,
+ target_pose=self._T_target_buf,
+ current_q=q_seed_rad,
+ quiet_logging=True,
+ )
+ return Parol6IKResult(
+ q=result.q.copy(),
+ success=result.success,
+ violations=result.violations,
+ iterations=result.iterations,
+ residual=result.residual,
+ )
+
+ def check_limits(self, q_rad: NDArray[np.float64]) -> bool:
+ return check_limits(q_rad, log=False)
+
+ def fk_batch(self, joint_path_rad: NDArray[np.float64]) -> NDArray[np.float64]:
+ transforms = self._pinokin.batch_fk(joint_path_rad)
+ n = len(transforms)
+ result = np.empty((n, 6), dtype=np.float64)
+ rpy = self._rpy_buf
+ for i, T in enumerate(transforms):
+ result[i, 0] = T[0, 3]
+ result[i, 1] = T[1, 3]
+ result[i, 2] = T[2, 3]
+ so3_rpy(T[:3, :3], rpy)
+ result[i, 3] = rpy[0]
+ result[i, 4] = rpy[1]
+ result[i, 5] = rpy[2]
+ return result
+
+ def ik_batch(
+ self,
+ poses: NDArray[np.float64],
+ q_start_rad: NDArray[np.float64],
+ ) -> list[IKResult]:
+ results: list[IKResult] = []
+ q_current = q_start_rad.copy()
+ for i in range(poses.shape[0]):
+ p = poses[i]
+ se3_from_rpy(p[0], p[1], p[2], p[3], p[4], p[5], self._T_target_buf)
+ result = solve_ik(
+ robot=self._pinokin,
+ target_pose=self._T_target_buf,
+ current_q=q_current,
+ quiet_logging=True,
+ )
+ ik_result = Parol6IKResult(
+ q=result.q.copy(),
+ success=result.success,
+ violations=result.violations,
+ iterations=result.iterations,
+ residual=result.residual,
+ )
+ results.append(ik_result)
+ if result.success:
+ q_current[:] = result.q
+ return results
+
+ # -- Lifecycle ----------------------------------------------------------
+
+ def start(self, **kwargs: Any) -> None:
+ """Start the controller subprocess and block until ready.
+
+ Keyword args override constructor defaults:
+ host, port, timeout, com_port, extra_env
+ """
+ host: str = kwargs.get("host", self._host)
+ port: int = kwargs.get("port", self._port)
+ timeout: float = kwargs.get("timeout", self._timeout)
+ com_port: str | None = kwargs.get("com_port")
+ extra_env: dict[str, str] | None = kwargs.get("extra_env")
+
+ if _is_server_running(host, port):
+ raise RuntimeError(f"Server already running at {host}:{port}")
+
+ self._manager.start_controller(
+ com_port=com_port,
+ server_host=host,
+ server_port=port,
+ extra_env=extra_env,
+ )
+
+ if not self._manager.await_ready(host=host, port=port, timeout=timeout):
+ self._manager.stop_controller()
+ raise RuntimeError("Controller failed to become ready")
+
+ def stop(self) -> None:
+ """Stop the controller subprocess."""
+ self._manager.stop_controller()
+
+ def is_available(self, **kwargs: Any) -> bool:
+ """Check if a controller is reachable via UDP PING."""
+ host: str = kwargs.get("host", self._host)
+ port: int = kwargs.get("port", self._port)
+ return _is_server_running(host=host, port=port)
+
+ # -- Context managers ---------------------------------------------------
+
+ def __enter__(self) -> Robot:
+ self.start()
+ return self
+
+ def __exit__(self, *exc: object) -> None:
+ self.stop()
+
+ async def __aenter__(self) -> Robot:
+ await asyncio.to_thread(self.start)
+ return self
+
+ async def __aexit__(self, *exc: object) -> None:
+ self.stop()
+
+ # -- Factories ----------------------------------------------------------
+
+ def create_async_client(self, **kwargs: Any) -> AsyncRobotClient:
+ import copy
+
+ host: str = kwargs.get("host", self._host)
+ port: int = kwargs.get("port", self._port)
+ timeout: float = kwargs.get("timeout", 5.0)
+ client = AsyncRobotClient(host=host, port=port, timeout=timeout)
+ bound: dict[str, ToolSpec] = {}
+ for spec in self.tools.available:
+ bound_spec = copy.copy(spec)
+ bound_spec._execute = client.tool_action # type: ignore[attr-defined, ty:unresolved-attribute]
+ bound_spec._get_status = client._tool_status # type: ignore[attr-defined, ty:unresolved-attribute]
+ bound[spec.key] = bound_spec
+ client._bound_tools = bound
+ return client
+
+ def create_sync_client(self, **kwargs: Any) -> SyncRobotClient:
+ import copy
+
+ from parol6.client.sync_client import _run
+ from waldoctl.sync_tools import make_sync_tool
+
+ host: str = kwargs.get("host", self._host)
+ port: int = kwargs.get("port", self._port)
+ timeout: float = kwargs.get("timeout", 5.0)
+ client = SyncRobotClient(host=host, port=port, timeout=timeout)
+ # Bind async tools to the inner async client
+ async_bound: dict[str, ToolSpec] = {}
+ for spec in self.tools.available:
+ bound_spec = copy.copy(spec)
+ bound_spec._execute = client._inner.tool_action # type: ignore[attr-defined, ty:unresolved-attribute]
+ bound_spec._get_status = client._inner._tool_status # type: ignore[attr-defined, ty:unresolved-attribute]
+ async_bound[spec.key] = bound_spec
+ client._inner._bound_tools = async_bound
+ # Wrap async-bound tools in sync adapters
+ bound: dict[str, ToolSpec] = {}
+ for key, async_tool in async_bound.items():
+ bound[key] = make_sync_tool(async_tool, _run)
+ client._bound_tools = bound
+ return client
+
+ def create_dry_run_client(self, **kwargs: Any) -> DryRunClient | None:
+ initial_joints_deg: list[float] | None = kwargs.get("initial_joints_deg")
+ return DryRunRobotClient(initial_joints_deg=initial_joints_deg) # ty: ignore[invalid-return-type]
diff --git a/parol6/server/__init__.py b/parol6/server/__init__.py
index 8b1b3a8..0c40a2f 100644
--- a/parol6/server/__init__.py
+++ b/parol6/server/__init__.py
@@ -1 +1,45 @@
"""Server management modules."""
+
+import ctypes
+import multiprocessing
+import signal
+import sys
+
+# Use spawn method on all platforms to avoid fork issues with multi-threaded processes.
+# This must be done before any multiprocessing is used. On Windows/macOS this is already
+# the default, but on Linux it defaults to fork which causes warnings/deadlocks.
+if multiprocessing.get_start_method(allow_none=True) is None:
+ multiprocessing.set_start_method("spawn")
+
+
+def set_pdeathsig() -> None:
+ """Arrange for SIGTERM when the parent process exits.
+
+ Linux: instant kernel-level notification via prctl(PR_SET_PDEATHSIG).
+ macOS: daemon thread polls os.getppid() every second.
+ Windows: no-op (OS cleans up named shared memory when handles close).
+ """
+ import os
+
+ if sys.platform == "linux":
+ PR_SET_PDEATHSIG = 1
+ ctypes.CDLL("libc.so.6").prctl(PR_SET_PDEATHSIG, signal.SIGTERM)
+ return
+
+ if sys.platform == "win32":
+ return
+
+ # macOS: poll parent PID (changes from original to 1 when parent exits)
+ import threading
+ import time
+
+ parent_pid = os.getppid()
+
+ def _watch_parent() -> None:
+ while True:
+ time.sleep(1.0)
+ if os.getppid() != parent_pid:
+ break
+ os.kill(os.getpid(), signal.SIGTERM)
+
+ threading.Thread(target=_watch_parent, daemon=True).start()
diff --git a/parol6/server/async_logging.py b/parol6/server/async_logging.py
new file mode 100644
index 0000000..4f62358
--- /dev/null
+++ b/parol6/server/async_logging.py
@@ -0,0 +1,91 @@
+"""Async logging using QueueHandler to move I/O off the main thread."""
+
+import logging
+import queue
+from logging.handlers import QueueHandler, QueueListener
+
+
+class AsyncLogHandler:
+ """Non-blocking logging using QueueHandler + QueueListener.
+
+ Replaces synchronous logging with queue-based async logging.
+ Log records are queued immediately (non-blocking) and processed
+ by a background thread, preventing logging I/O from introducing
+ jitter in timing-critical loops.
+ """
+
+ def __init__(self, logger_name: str = "parol6.server.controller"):
+ """Initialize the async log handler.
+
+ Args:
+ logger_name: Name of the logger to wrap with async handling.
+ """
+ self._queue: queue.Queue[logging.LogRecord] = queue.Queue(-1)
+ self._listener: QueueListener | None = None
+ self._logger = logging.getLogger(logger_name)
+ self._original_handlers: list[logging.Handler] = []
+ self._started = False
+
+ def start(self) -> None:
+ """Replace logger handlers with queue-based async handler.
+
+ Call once at startup before entering the main loop.
+ """
+ if self._started:
+ return
+
+ # Get the root logger's handlers if this logger has none
+ if self._logger.handlers:
+ target_handlers = self._logger.handlers[:]
+ else:
+ # Walk up to find handlers (usually on root logger)
+ target_handlers = []
+ current: logging.Logger | None = self._logger
+ while current is not None:
+ if current.handlers:
+ target_handlers = current.handlers[:]
+ break
+ if not current.propagate:
+ break
+ current = current.parent
+
+ if not target_handlers:
+ # No handlers found, nothing to wrap
+ return
+
+ # Store original handlers
+ self._original_handlers = target_handlers
+
+ # Create QueueHandler
+ queue_handler = QueueHandler(self._queue)
+
+ # For the controller logger specifically, replace its handlers
+ # and stop propagation so we control all output
+ self._logger.handlers = [queue_handler]
+ self._logger.propagate = False
+
+ # Start listener thread to process queued records
+ self._listener = QueueListener(
+ self._queue, *self._original_handlers, respect_handler_level=True
+ )
+ self._listener.start()
+ self._started = True
+
+ def stop(self) -> None:
+ """Stop listener and restore original handlers.
+
+ Call at shutdown to ensure all queued messages are flushed.
+ """
+ if not self._started:
+ return
+
+ if self._listener:
+ self._listener.stop()
+ self._listener = None
+
+ # Restore propagation and clear our handler
+ self._logger.handlers = []
+ self._logger.propagate = True
+
+ self._original_handlers = []
+ self._started = False
diff --git a/parol6/server/cli.py b/parol6/server/cli.py
new file mode 100644
index 0000000..e979003
--- /dev/null
+++ b/parol6/server/cli.py
@@ -0,0 +1,145 @@
+"""Command-line interface for the PAROL6 controller."""
+
+import argparse
+import logging
+import os
+import signal
+
+import parol6.config as cfg
+from parol6.config import TRACE
+from parol6.server.controller import Controller, ControllerConfig
+
+logger = logging.getLogger("parol6.server.cli")
+
+
+def main() -> int:
+ """Main entry point for the controller."""
+ # Parse arguments first to get logging level
+ parser = argparse.ArgumentParser(description="PAROL6 Robot Controller")
+ parser.add_argument("--host", default="0.0.0.0", help="UDP host address")
+ parser.add_argument("--port", type=int, default=5001, help="UDP port")
+ parser.add_argument("--serial", help="Serial port (e.g., /dev/ttyUSB0 or COM3)")
+ parser.add_argument("--baudrate", type=int, default=3000000, help="Serial baudrate")
+
+ # Verbose logging options
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="count",
+ default=0,
+ help="Increase verbosity; -v=INFO, -vv=DEBUG, -vvv=TRACE",
+ )
+ parser.add_argument(
+ "-q",
+ "--quiet",
+ action="store_true",
+ help="Enable quiet logging (WARNING level)",
+ )
+ parser.add_argument(
+ "--log-level",
+ choices=["TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
+ help="Set specific log level",
+ )
+ args = parser.parse_args()
+
+ # Determine log level
+ # Precedence:
+ # 1) Explicit --log-level
+ # 2) Verbose / quiet flags
+ # 3) Environment-driven TRACE (PAROL_TRACE=1 via TRACE_ENABLED)
+ # 4) Default INFO
+ if args.log_level:
+ if args.log_level == "TRACE":
+ log_level = TRACE
+ cfg.TRACE_ENABLED = True
+ else:
+ log_level = getattr(logging, args.log_level)
+ elif args.verbose >= 3:
+ log_level = TRACE
+ cfg.TRACE_ENABLED = True
+ elif args.verbose >= 2:
+ log_level = logging.DEBUG
+ elif args.verbose == 1:
+ log_level = logging.INFO
+ elif args.quiet:
+ log_level = logging.WARNING
+ elif cfg.TRACE_ENABLED:
+ # Enable TRACE when PAROL_TRACE=1 and no CLI override is given
+ log_level = TRACE
+ else:
+ log_level = logging.INFO
+
+ logging.basicConfig(
+ level=log_level,
+ format="%(asctime)s %(levelname)s %(name)s: %(message)s",
+ datefmt="%H:%M:%S",
+ )
+
+ # At INFO and above, push noisy third-party libraries up to WARNING so a
+ # clean startup stays quiet. DEBUG/TRACE users still see everything.
+ quiet_level = (
+ max(log_level, logging.WARNING) if log_level >= logging.INFO else log_level
+ )
+ for name in ("toppra", "numba", "numba.core", "numba.cuda"):
+ logging.getLogger(name).setLevel(quiet_level)
+
+ # Pre-compile numba JIT functions to avoid mid-loop compilation stalls
+ from parol6.utils.warmup import warmup_jit
+
+ warmup_jit()
+
+ # Create configuration (env vars may override defaults)
+ env_host = os.getenv("PAROL6_CONTROLLER_IP")
+ env_port = os.getenv("PAROL6_CONTROLLER_PORT")
+ udp_host = env_host.strip() if env_host else args.host
+ try:
+ udp_port = int(env_port) if env_port else args.port
+ except (TypeError, ValueError):
+ udp_port = args.port
+
+ logger.debug(f"Controller bind: host={udp_host} port={udp_port}")
+
+ config = ControllerConfig(
+ udp_host=udp_host,
+ udp_port=udp_port,
+ serial_port=args.serial,
+ serial_baudrate=args.baudrate,
+ )
+
+ # Create and run controller
+ controller = None
+
+ def handle_sigterm(signum, frame):
+ """Handle SIGTERM signal for graceful shutdown."""
+ logger.info("Received SIGTERM, shutting down...")
+ if controller:
+ controller.stop()
+ raise SystemExit(0)
+
+ # Install signal handler for graceful shutdown on SIGTERM
+ signal.signal(signal.SIGTERM, handle_sigterm)
+
+ try:
+ controller = Controller(config)
+
+ if controller.is_initialized():
+ try:
+ controller.start()
+ except KeyboardInterrupt:
+ # Controller's "Controller stopped" line is the one summary;
+ # no need to announce the interrupt itself.
+ pass
+ finally:
+ controller.stop()
+ else:
+ logger.error("Controller not properly initialized")
+ return 1
+ except RuntimeError as e:
+ logger.error(f"Failed to create controller: {e}")
+ return 1
+
+ return 0
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/parol6/server/command_executor.py b/parol6/server/command_executor.py
new file mode 100644
index 0000000..bea8899
--- /dev/null
+++ b/parol6/server/command_executor.py
@@ -0,0 +1,383 @@
+"""Command queue management and execution."""
+
+import logging
+import re
+import time
+from collections import deque
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING
+
+from parol6.commands.base import (
+ CommandBase,
+ ExecutionStatusCode,
+ MotionCommand,
+)
+from parol6.config import MAX_COMMAND_QUEUE_SIZE, TRACE
+from parol6.protocol.wire import Command, decode_command
+from waldoctl import ActionState
+
+if TYPE_CHECKING:
+ from parol6.server.state import ControllerState, StateManager
+
+logger = logging.getLogger("parol6.server.command_executor")
+
+_LONG_FLOAT_RE = re.compile(r"-?\d+\.\d{3,}")
+_MAX_ACTION_PARAMS_LEN = 100
+
+
+def _format_cmd_params(p: object) -> str:
+    """Format wire command params for action log display.
+
+    Strips the class name prefix and rounds long floats to 2 decimal places.
+    """
+    s = repr(p)
+    i = s.find("(")  # locate start of "(field=..., ...)" in the repr
+    if i >= 0:
+        s = s[i:]  # drop the leading "ClassName", keep the parenthesized fields
+    return _LONG_FLOAT_RE.sub(lambda m: f"{float(m.group()):.2f}", s)[
+        :_MAX_ACTION_PARAMS_LEN  # truncate to keep action-log entries short
+    ]
+
+
+class QueueFullError(Exception):
+    """Raised by queue_command() when the queue is at MAX_COMMAND_QUEUE_SIZE."""
+
+
+@dataclass
+class QueuedCommand:
+    """Represents a command in the queue with metadata."""
+
+    command: CommandBase  # the command instance to execute
+    command_id: str | None = None  # optional client-supplied tracking ID
+    command_index: int = -1  # monotonic index assigned at queue time
+    address: tuple[str, int] | None = None  # (ip, port) for acknowledgments
+    queued_time: float = field(default_factory=time.time)  # wall-clock enqueue time
+    activated: bool = False  # True once setup() has run in the control loop
+    first_tick_logged: bool = False  # True after the first tick_start TRACE log
+
+
+class CommandExecutor:
+    """Manages the motion command queue and 100Hz execution loop.
+
+    Responsibilities:
+    - Queue management (queue_command, clear_queue, clear_streamable_commands)
+    - 100Hz tick loop (execute_active_command)
+    - Streaming fast-path (try_stream_fast_path)
+    - Cancel operations (cancel_active_command, cancel_active_streamable)
+
+    Immediate commands (system, query) are handled directly by the controller.
+    """
+
+    def __init__(self, state_manager: "StateManager"):
+        self._state_manager = state_manager
+
+        self.command_queue: deque[QueuedCommand] = deque(maxlen=MAX_COMMAND_QUEUE_SIZE)
+        self.active_command: QueuedCommand | None = None
+
+    def _update_queue_state(self, state: "ControllerState") -> None:
+        """Update queue snapshot and next action in state."""
+        # Reuse list to avoid allocation (clear + extend pattern)
+        state.queue_nonstreamable.clear()
+        for qc in self.command_queue:
+            if not (isinstance(qc.command, MotionCommand) and qc.command.streamable):
+                state.queue_nonstreamable.append(type(qc.command).__name__)
+        state.action_next = (
+            state.queue_nonstreamable[0] if state.queue_nonstreamable else ""
+        )
+
+    # ---- Streaming fast-path ----
+
+    def try_stream_fast_path(
+        self,
+        data: bytes,
+        state: "ControllerState",
+    ) -> bool | Command:
+        """Attempt stream fast-path for active streamable command.
+
+        When in stream mode with an active streamable command, this allows
+        updating the command's parameters without full command creation/queueing.
+
+        Returns:
+            True if command was handled via fast-path.
+            False if no decode was attempted (no active streamable, or decode failed).
+            A Command struct if decoded successfully but type didn't match active command.
+        """
+        if not self.active_command:
+            return False
+
+        active_inst = self.active_command.command
+        if not (isinstance(active_inst, MotionCommand) and active_inst.streamable):
+            return False
+
+        # Decode incoming command
+        try:
+            cmd_struct = decode_command(data)
+        except Exception as e:
+            logger.debug("Stream fast-path decode failed: %s", e)
+            return False
+
+        # Check if struct type matches active command's expected type
+        active_params_type = getattr(active_inst, "PARAMS_TYPE", None)
+        if active_params_type is None or type(cmd_struct) is not active_params_type:
+            return cmd_struct  # Return decoded struct for caller to reuse
+
+        logger.log(
+            TRACE,
+            "stream_fast_path active=%s incoming=%s",
+            type(active_inst).__name__,
+            type(cmd_struct).__name__,
+        )
+
+        # Assign new params (validation already done by decode)
+        active_inst.assign_params(cmd_struct)
+
+        # Re-setup with new params
+        try:
+            active_inst.setup(state)
+            logger.log(TRACE, "stream_fast_path applied")
+            return True
+        except Exception as e:
+            logger.error("Stream fast-path setup failed: %s", e)
+            return False
+
+    # ---- Command queueing ----
+
+    def queue_command(
+        self,
+        address: tuple[str, int] | None,
+        command: CommandBase,
+        command_id: str | None = None,
+    ) -> int:
+        """Add a command to the execution queue.
+
+        Args:
+            address: Optional (ip, port) for acknowledgments.
+            command: The command to queue.
+            command_id: Optional ID for tracking.
+
+        Returns:
+            The assigned command index.
+
+        Raises:
+            QueueFullError: If the queue is at capacity.
+        """
+        if len(self.command_queue) >= MAX_COMMAND_QUEUE_SIZE:
+            logger.warning("Command queue full (max %d)", MAX_COMMAND_QUEUE_SIZE)
+            raise QueueFullError("Queue full")
+
+        # Assign monotonic command index
+        state = self._state_manager.get_state()
+        cmd_index = state.next_command_index
+        state.next_command_index += 1
+
+        # Create queued command
+        queued_cmd = QueuedCommand(
+            command=command,
+            command_id=command_id,
+            command_index=cmd_index,
+            address=address,
+        )
+
+        self.command_queue.append(queued_cmd)
+
+        # Update queue snapshot
+        self._update_queue_state(state)
+
+        logger.log(
+            TRACE,
+            "Queued command: %s (ID: %s, index: %d)",
+            type(command).__name__,
+            command_id,
+            cmd_index,
+        )
+
+        return cmd_index
+
+    # ---- Active command execution (called every control loop tick) ----
+
+    def execute_active_command(self) -> None:
+        """Execute one step of the active command from the queue."""
+        if not self._activate_next():
+            return
+
+        ac = self.active_command
+        assert ac is not None  # _activate_next guarantees this
+
+        # Bind state before the try so the except handler below can always
+        # reference it (binding it inside the try risked NameError there).
+        state = self._state_manager.get_state()
+
+        try:
+            if not state.enabled:
+                self.cancel_active_command("Controller disabled")
+                return
+
+            # One-time setup on first activation
+            if not ac.activated:
+                self._setup_active(ac, state)
+                state.action_current = type(ac.command).__name__
+                state.action_params = _format_cmd_params(ac.command.p)
+                state.action_state = ActionState.EXECUTING
+                state.executing_command_index = ac.command_index
+                ac.activated = True
+                logger.log(
+                    TRACE,
+                    "Activated command: %s (id=%s, index=%d)",
+                    type(ac.command).__name__,
+                    ac.command_id,
+                    ac.command_index,
+                )
+
+            # Execute one tick
+            if not ac.first_tick_logged:
+                logger.log(TRACE, "tick_start name=%s", type(ac.command).__name__)
+                ac.first_tick_logged = True
+
+            code = ac.command.tick(state)
+            self._process_tick_result(ac, code, state)
+
+        except Exception as e:
+            logger.error("Command execution error: %s", e)
+            state.action_current = ""
+            state.action_params = ""
+            state.action_state = ActionState.IDLE
+            self._update_queue_state(state)
+            self.active_command = None
+
+    def _activate_next(self) -> bool:
+        """Promote next queued command to active.
+
+        Returns True if there is an active command (existing or newly promoted).
+        """
+        if self.active_command is not None:
+            return True
+        if not self.command_queue:
+            return False
+        self.active_command = self.command_queue.popleft()
+        self.active_command.activated = False
+        return True
+
+    def _setup_active(self, ac: QueuedCommand, state: "ControllerState") -> None:
+        """Run one-time setup for a command."""
+        ac.command.setup(state)
+
+    def _process_tick_result(
+        self,
+        ac: QueuedCommand,
+        code: ExecutionStatusCode,
+        state: "ControllerState",
+    ) -> None:
+        """Handle post-tick bookkeeping: completion, failure."""
+        if code == ExecutionStatusCode.COMPLETED:
+            logger.log(
+                TRACE,
+                "Command completed: %s (id=%s, index=%d) at t=%f",
+                type(ac.command).__name__,
+                ac.command_id,
+                ac.command_index,
+                time.time(),
+            )
+
+            state.action_current = ""
+            state.action_params = ""
+            state.action_state = ActionState.IDLE
+            state.completed_command_index = ac.command_index
+            self._update_queue_state(state)
+            self.active_command = None
+
+        elif code == ExecutionStatusCode.FAILED:
+            logger.debug(
+                "Command failed: %s (id=%s) - %s at t=%.6f",
+                type(ac.command).__name__,
+                ac.command_id,
+                ac.command.robot_error,
+                time.time(),
+            )
+
+            state.action_current = ""
+            state.action_params = ""
+            state.action_state = ActionState.IDLE
+
+            # Clear queued streamable commands on failure to prevent pileup
+            if isinstance(ac.command, MotionCommand) and ac.command.streamable:
+                removed = self.clear_streamable_commands(
+                    f"Active streamable command failed: {ac.command.robot_error}"
+                )
+                if removed > 0:
+                    logger.info(
+                        "Cleared %d queued streamable commands due to active command failure",
+                        removed,
+                    )
+
+            self._update_queue_state(state)
+            self.active_command = None
+
+    # ---- Cancellation and queue management ----
+
+    def cancel_active_command(self, reason: str = "Cancelled by user") -> None:
+        """Cancel the currently active command."""
+        if not self.active_command:
+            return
+
+        logger.info(
+            "Cancelling active command: %s - %s",
+            type(self.active_command.command).__name__,
+            reason,
+        )
+
+        state = self._state_manager.get_state()
+        state.action_current = ""
+        state.action_params = ""
+        state.action_state = ActionState.IDLE
+
+        self.active_command = None
+
+    def cancel_active_streamable(self) -> bool:
+        """Cancel active command if it's a streamable motion command.
+
+        Returns:
+            True if a command was cancelled.
+        """
+        ac = self.active_command
+        if ac and isinstance(ac.command, MotionCommand) and ac.command.streamable:
+            state = self._state_manager.get_state()
+            state.action_current = ""
+            state.action_params = ""
+            state.action_state = ActionState.IDLE
+            self.active_command = None
+            return True
+        return False
+
+    def clear_queue(self, reason: str = "Queue cleared") -> None:
+        """Clear all queued commands."""
+        count = len(self.command_queue)
+        self.command_queue.clear()
+
+        logger.info("Cleared %d commands from queue: %s", count, reason)
+
+        state = self._state_manager.get_state()
+        state.queue_nonstreamable.clear()
+        state.action_next = ""
+
+    def clear_streamable_commands(
+        self, reason: str = "Streamable commands cleared"
+    ) -> int:
+        """Clear all queued streamable motion commands.
+
+        Rebuilds the deque in a single pass instead of per-item
+        deque.remove() calls (O(n) each, O(n^2) overall).
+        Returns the number of commands removed; order of the
+        surviving commands is preserved.
+        """
+        kept = [
+            qc for qc in self.command_queue
+            if not (isinstance(qc.command, MotionCommand) and qc.command.streamable)
+        ]
+        removed_count = len(self.command_queue) - len(kept)
+        self.command_queue.clear()
+        self.command_queue.extend(kept)
+
+        if removed_count > 0:
+            logger.debug(
+                "Cleared %d streamable commands from queue: %s", removed_count, reason
+            )
+
+        return removed_count
diff --git a/parol6/server/command_registry.py b/parol6/server/command_registry.py
index 7641a11..fe40116 100644
--- a/parol6/server/command_registry.py
+++ b/parol6/server/command_registry.py
@@ -10,12 +10,29 @@
import logging
import pkgutil
-import time
from collections.abc import Callable
+from enum import Enum
from importlib import import_module
+from typing import TypeVar
-from parol6.commands.base import CommandBase
+import msgspec
+
+from parol6.commands.base import CommandBase, QueryCommand, SystemCommand
from parol6.config import TRACE
+from parol6.protocol.wire import (
+ CmdType,
+ Command,
+ decode_command,
+)
+
+
+class CommandCategory(Enum):
+ """Category of command, determining execution semantics."""
+
+ SYSTEM = "system" # Execute immediately, controller sends OK/error
+ QUERY = "query" # Execute immediately, controller sends query response
+ MOTION = "motion" # Queue for execution in control loop
+
logger = logging.getLogger(__name__)
@@ -27,12 +44,16 @@ class CommandRegistry:
Commands register themselves using the @register_command decorator.
The registry supports auto-discovery of decorated commands and
provides a centralized lookup mechanism.
+
+ Supports single-pass decode via msgspec typed structs.
"""
_instance: CommandRegistry | None = None
- _commands: dict[str, type[CommandBase]] = {}
- _class_to_name: dict[type[CommandBase], str] = {}
- _discovered: bool = False
+ _commands: dict[CmdType, type[CommandBase]]
+ _class_to_type: dict[type[CommandBase], CmdType]
+ _struct_to_command: dict[type[Command], type[CommandBase]]
+ _class_to_category: dict[type[CommandBase], CommandCategory]
+ _discovered: bool
def __new__(cls) -> CommandRegistry:
"""Ensure singleton pattern."""
@@ -44,40 +65,64 @@ def __init__(self):
"""Initialize the registry (only runs once due to singleton)."""
if not hasattr(self, "_initialized"):
self._commands = {}
- self._class_to_name = {}
+ self._class_to_type = {}
+ self._struct_to_command = {}
+ self._class_to_category = {}
self._discovered = False
self._initialized = True
- def register(self, name: str, command_class: type[CommandBase]) -> None:
+ @staticmethod
+ def _determine_category(command_class: type[CommandBase]) -> CommandCategory:
+ """Determine the category of a command class based on its inheritance."""
+ if issubclass(command_class, SystemCommand):
+ return CommandCategory.SYSTEM
+ elif issubclass(command_class, QueryCommand):
+ return CommandCategory.QUERY
+ else:
+ return CommandCategory.MOTION
+
+ def register(self, cmd_type: CmdType, command_class: type[CommandBase]) -> None:
"""
- Register a command class with the given name.
+ Register a command class with the given CmdType.
Args:
- name: The command name/identifier
+ cmd_type: The command type enum value
command_class: The command class to register
Raises:
- ValueError: If a command with the same name is already registered
+ ValueError: If a command with the same type is already registered
"""
- if name in self._commands:
- existing = self._commands[name]
+ if cmd_type in self._commands:
+ existing = self._commands[cmd_type]
if existing != command_class:
raise ValueError(
- f"Command '{name}' is already registered with class {existing.__name__}. "
+ f"Command {cmd_type.name} is already registered with class {existing.__name__}. "
f"Cannot register with {command_class.__name__}"
)
else:
- self._commands[name] = command_class
- # Maintain reverse mapping for fast class -> name lookup
- self._class_to_name[command_class] = name
- logger.debug(f"Registered command '{name}' -> {command_class.__name__}")
+ self._commands[cmd_type] = command_class
+ # Maintain reverse mapping for fast class -> type lookup
+ self._class_to_type[command_class] = cmd_type
+
+ # Determine and store category at registration time
+ category = self._determine_category(command_class)
+ self._class_to_category[command_class] = category
+
+ # Also register by struct type if PARAMS_TYPE is set
+ params_type = getattr(command_class, "PARAMS_TYPE", None)
+ if params_type is not None:
+ self._struct_to_command[params_type] = command_class
- def get_command_class(self, name: str) -> type[CommandBase] | None:
+ logger.debug(
+ f"Registered command {cmd_type.name} -> {command_class.__name__} ({category.value})"
+ )
+
+ def get_command_class(self, cmd_type: CmdType) -> type[CommandBase] | None:
"""
- Retrieve a command class by name.
+ Retrieve a command class by CmdType.
Args:
- name: The command name to look up
+ cmd_type: The command type to look up
Returns:
The command class if found, None otherwise
@@ -86,31 +131,31 @@ def get_command_class(self, name: str) -> type[CommandBase] | None:
if not self._discovered:
self.discover_commands()
- return self._commands.get(name)
+ return self._commands.get(cmd_type)
- def get_name_for_class(self, cls: type[CommandBase]) -> str | None:
+ def get_type_for_class(self, cls: type[CommandBase]) -> CmdType | None:
"""
- Retrieve the registered command name for a given command class.
+ Retrieve the registered CmdType for a given command class.
Returns None if the class is not registered.
"""
# Ensure commands are discovered
if not self._discovered:
self.discover_commands()
# Prefer explicit reverse map; fall back to class attribute set by decorator
- return self._class_to_name.get(cls) or getattr(cls, "_registered_name", None)
+ return self._class_to_type.get(cls) or getattr(cls, "_cmd_type", None)
- def list_registered_commands(self) -> list[str]:
+ def list_registered_commands(self) -> list[CmdType]:
"""
- Return a list of all registered command names.
+ Return a list of all registered command types.
Returns:
- List of command names (sorted)
+ List of CmdType values (sorted by int value)
"""
# Ensure commands are discovered
if not self._discovered:
self.discover_commands()
- return sorted(self._commands.keys())
+ return sorted(self._commands.keys(), key=lambda x: int(x))
def discover_commands(self) -> None:
"""
@@ -122,7 +167,7 @@ def discover_commands(self) -> None:
if self._discovered:
return
- logger.info("Discovering commands...")
+ logger.debug("Discovering commands...")
# Import parol6.commands package
try:
@@ -133,7 +178,7 @@ def discover_commands(self) -> None:
# Iterate through all modules in the commands package
package_path = commands_package.__path__
- for importer, modname, ispkg in pkgutil.iter_modules(package_path):
+ for _, modname, ispkg in pkgutil.iter_modules(package_path):
if ispkg:
continue # Skip subpackages
@@ -153,75 +198,80 @@ def discover_commands(self) -> None:
logger.error(f"Error loading {full_module_name}: {e}")
self._discovered = True
- logger.info(
+ logger.debug(
f"Command discovery complete. {len(self._commands)} commands registered."
)
- def create_command_from_parts(
- self, parts: list[str]
- ) -> tuple[CommandBase | None, str | None]:
+ def get_command_for_struct(
+ self, struct_type: type[Command]
+ ) -> type[CommandBase] | None:
+ """Return the command class registered for a given struct type.
+
+ Unlike create_command_from_struct(), this does not instantiate the
+ command or assign parameters — it just returns the class.
+ """
+ if not self._discovered:
+ self.discover_commands()
+ return self._struct_to_command.get(struct_type)
+
+ def get_category(self, command_class: type[CommandBase]) -> CommandCategory:
+ """Return the category for a registered command class."""
+ if not self._discovered:
+ self.discover_commands()
+ return self._class_to_category.get(command_class, CommandCategory.MOTION)
+
+ def create_command(
+ self, data: bytes
+ ) -> tuple[CommandBase | None, CommandCategory | None, str | None]:
"""
- Create a command instance from pre-split message parts.
+ Create a command instance from raw bytes via single-pass msgspec decode.
Args:
- parts: Pre-split message parts
+ data: Raw msgpack-encoded command bytes
Returns:
- A tuple of (command, error_message):
- - (command, None) if successful
- - (None, None) if command name not registered
- - (None, error_message) if command is recognized but has invalid parameters
+ A tuple of (command, category, error_message):
+ - (command, category, None) if successful
+ - (None, None, error_message) if decode fails or validation fails
"""
- # Ensure commands are discovered
- if not self._discovered:
- self.discover_commands()
+ try:
+ cmd_struct = decode_command(data)
+ except msgspec.ValidationError as e:
+ logger.log(TRACE, "decode_error err=%s", e)
+ return None, None, str(e)
+ except Exception as e:
+ logger.log(TRACE, "decode_error exc=%s", e)
+ return None, None, f"Decode error: {e}"
- if not parts:
- logger.debug("Empty message parts")
- return None, None
+ return self.create_command_from_struct(cmd_struct)
- command_name = parts[0].upper()
- start_t = time.perf_counter()
- logger.log(TRACE, "match_start name=%s parts=%d", command_name, len(parts))
+ def create_command_from_struct(
+ self, cmd_struct: Command
+ ) -> tuple[CommandBase | None, CommandCategory | None, str | None]:
+ """
+ Create a command instance from a pre-validated Command struct.
- # Direct O(1) lookup of command class
- command_class = self._commands.get(command_name)
+ Args:
+ cmd_struct: Pre-validated Command struct (e.g., MovePoseCmd, HomeCmd)
- if command_class is None:
- logger.log(TRACE, "match_unknown name=%s", command_name)
- logger.debug(f"No command registered for: {command_name}")
- return None, None
+ Returns:
+ A tuple of (command, category, error_message):
+ - (command, category, None) if successful
+ - (None, None, error_message) if no handler found
+ """
+ if not self._discovered:
+ self.discover_commands()
- try:
- # Create instance and let it parse parameters
- command = command_class()
- can_handle, error = command.match(parts) # Pass pre-split parts
-
- if can_handle:
- dur_ms = (time.perf_counter() - start_t) * 1000.0
- logger.log(TRACE, "match_ok name=%s dur_ms=%.2f", command_name, dur_ms)
- return command, None
- elif error:
- dur_ms = (time.perf_counter() - start_t) * 1000.0
- logger.log(
- TRACE,
- "match_error name=%s dur_ms=%.2f err=%s",
- command_name,
- dur_ms,
- error,
- )
- logger.warning(f"Command '{command_name}' rejected: {error}")
- return None, error
+ struct_type = type(cmd_struct)
+ command_class = self._struct_to_command.get(struct_type)
+ if command_class is None:
+ logger.log(TRACE, "no_handler struct=%s", struct_type.__name__)
+ return None, None, f"No handler for {struct_type.__name__}"
- except Exception as e:
- dur_ms = (time.perf_counter() - start_t) * 1000.0
- logger.log(
- TRACE, "match_error name=%s dur_ms=%.2f exc=%s", command_name, dur_ms, e
- )
- logger.error(f"Error creating command '{command_name}': {e}")
- return None, str(e)
+ command = command_class(cmd_struct)
+ category = self._class_to_category.get(command_class, CommandCategory.MOTION)
- return None, "Command validation failed"
+ return command, category, None
def clear(self) -> None:
"""
@@ -230,6 +280,9 @@ def clear(self) -> None:
This is mainly useful for testing.
"""
self._commands.clear()
+ self._class_to_type.clear()
+ self._struct_to_command.clear()
+ self._class_to_category.clear()
self._discovered = False
logger.debug("Command registry cleared")
@@ -238,32 +291,37 @@ def clear(self) -> None:
_registry = CommandRegistry()
-def register_command(name: str) -> Callable[[type[CommandBase]], type[CommandBase]]:
+_CmdT = TypeVar("_CmdT", bound=CommandBase)
+
+
+def register_command(
+ cmd_type: CmdType,
+) -> Callable[[type[_CmdT]], type[_CmdT]]:
"""
Decorator to register a command class.
Usage:
- @register_command("MoveJ")
- class MoveJointCommand(CommandBase):
+ @register_command(CmdType.MOVEJOINT)
+ class MoveJCommand(CommandBase):
...
Args:
- name: The command name/identifier
+ cmd_type: The command type enum value
Returns:
Decorator function that registers the class
"""
- def decorator(cls: type[CommandBase]) -> type[CommandBase]:
+ def decorator(cls: type[_CmdT]) -> type[_CmdT]:
# Verify it's a CommandBase subclass
if not issubclass(cls, CommandBase):
raise TypeError(f"Class {cls.__name__} must inherit from CommandBase")
# Register with the global registry
- _registry.register(name, cls)
+ _registry.register(cmd_type, cls)
- # Add the command name as a class attribute for reference
- cls._registered_name = name
+ # Add the command type as a class attribute for reference
+ cls._cmd_type = cmd_type
return cls
@@ -275,5 +333,7 @@ def decorator(cls: type[CommandBase]) -> type[CommandBase]:
list_registered_commands = _registry.list_registered_commands
discover_commands = _registry.discover_commands
clear_registry = _registry.clear
-create_command_from_parts = _registry.create_command_from_parts
-get_name_for_class = _registry.get_name_for_class
+create_command = _registry.create_command
+create_command_from_struct = _registry.create_command_from_struct
+get_type_for_class = _registry.get_type_for_class
+get_command_for_struct = _registry.get_command_for_struct
diff --git a/parol6/server/controller.py b/parol6/server/controller.py
index 33b54e1..899b75e 100644
--- a/parol6/server/controller.py
+++ b/parol6/server/controller.py
@@ -1,77 +1,79 @@
"""
Main controller for PAROL6 robot server.
+
+Runs the fixed-rate control loop, dispatches UDP commands to the command
+executor, and manages serial/simulator transport and status broadcasting.
"""
-import argparse
import logging
-import os
-import socket
+import signal
+import sys
import threading
import time
-from collections import deque
-from dataclasses import dataclass, field
+from dataclasses import dataclass
from typing import Any
-import numpy as np
from parol6.ack_policy import AckPolicy
from parol6.commands.base import (
CommandBase,
- CommandContext,
- ExecutionStatus,
ExecutionStatusCode,
MotionCommand,
QueryCommand,
SystemCommand,
)
-from parol6.gcode import GcodeInterpreter
-from parol6.protocol.wire import CommandCode, unpack_rx_frame_into
-from parol6.server.command_registry import create_command_from_parts, discover_commands
+from parol6.commands.system_commands import SelectProfileCommand
+from parol6.commands.utility_commands import ResetCommand
+from parol6.server.command_executor import CommandExecutor, QueueFullError
+from parol6.server.motion_planner import MotionPlanner, PlanCommand
+from parol6.server.segment_player import SegmentPlayer
+from parol6.protocol.wire import (
+ CommandCode,
+ ToolActionCmd,
+ pack_error,
+ pack_ok,
+ pack_ok_index,
+ unpack_rx_frame_into,
+)
+from parol6.utils.error_catalog import RobotError, extract_robot_error, make_error
+from parol6.utils.error_codes import ErrorCode
+from parol6.server.command_registry import (
+ CommandCategory,
+ create_command,
+ create_command_from_struct,
+ discover_commands,
+)
from parol6.server.state import ControllerState, StateManager
+from waldoctl import ActionState
from parol6.server.status_broadcast import StatusBroadcaster
-from parol6.server.status_cache import get_cache
-from parol6.server.transports import create_and_connect_transport, is_simulation_mode
+from parol6.server.async_logging import AsyncLogHandler
+from parol6.server.loop_timer import (
+ EventRateMetrics,
+ GCTracker,
+ LoopTimer,
+ PhaseTimer,
+ format_hz_summary,
+)
+from parol6.server.status_cache import close_cache, get_cache
+from parol6.server.transport_manager import TransportManager
from parol6.server.transports.mock_serial_transport import MockSerialTransport
-from parol6.server.transports.serial_transport import SerialTransport
from parol6.server.transports.udp_transport import UDPTransport
-import parol6.config as cfg
from parol6.config import (
TRACE,
INTERVAL_S,
+ MAX_POLL_COUNT,
MCAST_GROUP,
MCAST_PORT,
MCAST_IF,
MCAST_TTL,
STATUS_RATE_HZ,
STATUS_STALE_S,
- get_com_port_with_fallback,
+ STATUS_BROADCAST_INTERVAL,
)
-logger = logging.getLogger("parol6.server.controller")
-
+import psutil
-@dataclass
-class ExecutionContext:
- """Context passed to commands during execution."""
-
- udp_transport: UDPTransport | None
- serial_transport: SerialTransport | MockSerialTransport | None
- gcode_interpreter: GcodeInterpreter | None
- addr: tuple[str, int] | None
- state: ControllerState
-
-
-@dataclass
-class QueuedCommand:
- """Represents a command in the queue with metadata."""
-
- command: CommandBase
- command_id: str | None = None
- address: tuple[str, int] | None = None
- queued_time: float = field(default_factory=time.time)
- activated: bool = False
- initialized: bool = False
- first_tick_logged: bool = False
+logger = logging.getLogger("parol6.server.controller")
@dataclass
@@ -84,7 +86,6 @@ class ControllerConfig:
serial_baudrate: int = 3000000
loop_interval: float = INTERVAL_S
estop_recovery_delay: float = 1.0
- auto_home: bool = False
class Controller:
@@ -113,79 +114,53 @@ def __init__(self, config: ControllerConfig):
# Core components
self.state_manager = StateManager()
self.udp_transport: UDPTransport | None = None
- self.serial_transport: SerialTransport | MockSerialTransport | None = None
- # ACK management
- self.ack_socket: socket.socket | None = None
- try:
- self.ack_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- except Exception as e:
- logger.error(f"Failed to create ACK socket: {e}")
+ # E-stop state tracking (start as released to avoid false positive on first check)
+ self.estop_active: bool = False
- # Command queue and tracking (merged from CommandExecutor)
- self.command_queue: deque[QueuedCommand] = deque(maxlen=100)
- self.active_command: QueuedCommand | None = None
-
- # Command tracking
- self.current_command: CommandBase | None = None
- self.command_id_map: dict[str, Any] = {}
+ # Status multicast broadcaster
+ self._status_broadcaster: Any | None = None
- # E-stop recovery
- self.estop_active: bool | None = (
- None # None = unknown, True = active, False = released
+ # Helper classes
+ self._timer = LoopTimer(self.config.loop_interval)
+ self._phase_timer = PhaseTimer(
+ [
+ "read", # _read_from_firmware
+ "poll_cmd", # _poll_commands
+ "status", # _status_broadcaster.tick
+ "estop", # _handle_estop
+ "execute", # _execute_commands
+ "write", # _write_to_firmware
+ "sim", # tick_simulation
+ ]
+ )
+ self._cmd_rate = EventRateMetrics() # Command reception rate
+ self._gc_tracker = GCTracker() # GC frequency and duration tracking
+ self._ack_policy = AckPolicy()
+ self._async_log = AsyncLogHandler()
+ self._transport_mgr = TransportManager(
+ shutdown_event=self.shutdown_event,
+ serial_port=self.config.serial_port,
+ serial_baudrate=self.config.serial_baudrate,
+ )
+ self._executor = CommandExecutor(
+ state_manager=self.state_manager,
)
- self.first_frame_received = False # Track if we've received data from robot
- self._serial_last_version = 0 # Version of last decoded serial frame
-
- # TX coalescing state (reduce redundant serial writes)
- self._last_tx: dict[str, Any] | None = None
- self._tx_keepalive_s = float(os.getenv("PAROL6_TX_KEEPALIVE_S", "0.2"))
-
- # Thread for command processing
- self.command_thread: threading.Thread | None = None
- # GCODE interpreter
- self.gcode_interpreter = GcodeInterpreter()
+ # Motion pipeline: planner subprocess computes trajectories,
+ # segment player consumes them in the control loop
+ self._planner = MotionPlanner()
+ self._segment_player = SegmentPlayer(self._planner)
- # Status services (updater + multicast broadcaster)
- self._status_updater: Any | None = None
- self._status_broadcaster: Any | None = None
+ # Tool action side channel — runs concurrently with both streaming
+ # and trajectory execution (writes to gripper_hw, not Position_out)
+ self._tool_cmd: CommandBase | None = None
+ self._tool_cmd_activated: bool = False
+ self._tool_cmd_index: int = -1
# Initialize components on construction
self._initialize_components()
- def _send_ack(
- self, cmd_id: str, status: str, details: str, addr: tuple[str, int]
- ) -> None:
- """
- Send an acknowledgment message.
-
- Args:
- cmd_id: Command ID to acknowledge
- status: Status (QUEUED, EXECUTING, COMPLETED, FAILED, CANCELLED)
- details: Optional details message
- addr: Address tuple (host, port) to send to
- """
- if not cmd_id or not self.ack_socket:
- return
-
- # Debug/Trace log all outgoing ACKs
- logger.log(
- TRACE,
- "ack_send id=%s status=%s details=%s addr=%s",
- cmd_id,
- status,
- details,
- addr,
- )
-
- message = f"ACK|{cmd_id}|{status}|{details}".encode("ascii")
-
- try:
- self.ack_socket.sendto(message, addr)
- except Exception as e:
- logger.error(f"Failed to send ACK to {addr[0]}:{addr[1]} - {e}")
-
def _initialize_components(self) -> None:
"""
Initialize all components during construction.
@@ -198,7 +173,7 @@ def _initialize_components(self) -> None:
discover_commands()
# Initialize UDP transport
- logger.info(
+ logger.debug(
f"Starting UDP server on {self.config.udp_host}:{self.config.udp_port}"
)
self.udp_transport = UDPTransport(
@@ -207,78 +182,18 @@ def _initialize_components(self) -> None:
if not self.udp_transport.create_socket():
raise RuntimeError("Failed to create UDP socket")
- # Load persisted COM port if not provided
- try:
- if not self.config.serial_port:
- persisted = get_com_port_with_fallback()
- if persisted:
- self.config.serial_port = persisted
- logger.info(f"Using persisted serial port: {persisted}")
- except Exception as e:
- logger.debug(f"Failed to load persisted COM port: {e}")
-
- # Initialize Serial transport using factory
- if self.config.serial_port or is_simulation_mode():
- self.serial_transport = create_and_connect_transport(
- port=self.config.serial_port,
- baudrate=self.config.serial_baudrate,
- auto_find_port=True,
- )
-
- if self.serial_transport:
- # Only announce connected and start reader if actually connected
- if self.serial_transport.is_connected():
- logger.info(
- "Connected to transport: %s", self.serial_transport.port
- )
- try:
- self.serial_transport.start_reader(self.shutdown_event)
- logger.info("Serial reader thread started")
- except Exception as e:
- logger.warning("Failed to start serial reader: %s", e)
- else:
- logger.warning(
- "Serial transport not connected at startup (port=%s)",
- self.config.serial_port,
- )
- else:
- logger.warning(
- "No serial port configured. Waiting for SET_PORT via UDP or set --serial/PAROL6_COM_PORT/com_port.txt before connecting."
- )
-
# Initialize robot state
self.state_manager.reset_state()
- # Initialize TX coalescing state
- state = self.state_manager.get_state()
- self._last_tx = {
- "pos": np.empty_like(state.Position_out),
- "spd": np.empty_like(state.Speed_out),
- "cmd": None,
- "aff": np.empty_like(state.Affected_joint_out),
- "io": np.empty_like(state.InOut_out),
- "tout": None,
- "grip": np.empty_like(state.Gripper_data_out),
- "last_sent": 0.0,
- }
-
- # Optionally queue auto-home per policy (default OFF)
- if self.config.auto_home:
- try:
- home_cmd, _ = create_command_from_parts(["HOME"])
- if home_cmd:
- # Queue without address/id for auto-home
- self._queue_command(("127.0.0.1", 0), home_cmd, None)
- logger.info("Auto-home queued")
- except Exception as e:
- logger.warning(f"Failed to queue auto-home: {e}")
-
- # Start status updater and broadcaster (ASCII multicast at configured rate)
+ # Initialize serial transport via TransportManager
+ self._transport_mgr.initialize()
+
+ # Create status broadcaster
try:
- logger.info(
+ logger.debug(
f"StatusBroadcaster config: group={MCAST_GROUP} port={MCAST_PORT} ttl={MCAST_TTL} iface={MCAST_IF} rate_hz={STATUS_RATE_HZ} stale_s={STATUS_STALE_S}"
)
- broadcaster = StatusBroadcaster(
+ self._status_broadcaster = StatusBroadcaster(
state_mgr=self.state_manager,
group=MCAST_GROUP,
port=MCAST_PORT,
@@ -287,14 +202,12 @@ def _initialize_components(self) -> None:
rate_hz=STATUS_RATE_HZ,
stale_s=STATUS_STALE_S,
)
-
- broadcaster.start()
- logger.info("Status updater and broadcaster started")
+ logger.debug("StatusBroadcaster initialized")
except Exception as e:
- logger.warning(f"Failed to start status services: {e}")
+ logger.warning(f"Failed to create status broadcaster: {e}")
self._initialized = True
- logger.info("Controller initialized successfully")
+ logger.debug("Controller initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize controller: {e}")
@@ -311,1130 +224,642 @@ def start(self):
logger.warning("Controller already running")
return
+ self._priority_elevated = self._set_high_priority()
+ self._overbudget_times: list[float] = []
self.running = True
- # Start command processing thread
- self.command_thread = threading.Thread(target=self._command_processing_loop)
- self.command_thread.start()
+ # Start async logging to move I/O off the control loop thread
+ self._async_log.start()
+
+ # Start motion planner subprocess
+ self._planner.start()
+
+ # Disable automatic GC — collections are deferred to slack time
+ self._gc_tracker.take_control()
# Start main control loop
- logger.info("Starting main control loop")
+ logger.debug("Starting main control loop")
+ self._timer.metrics.mark_started(time.perf_counter())
+ logger.info(
+ "Controller ready on %s:%s",
+ self.config.udp_host,
+ self.config.udp_port,
+ )
self._main_control_loop()
def stop(self):
"""Stop the controller and clean up resources."""
- logger.info("Stopping controller...")
+ if not self.running:
+ return
+ logger.debug("Stopping controller...")
self.running = False
self.shutdown_event.set()
- # Wait for threads to finish
- if self.command_thread and self.command_thread.is_alive():
- self.command_thread.join(timeout=2.0)
+ # Stop motion planner subprocess
+ try:
+ self._planner.stop()
+ except Exception as e:
+ logger.debug("Error stopping motion planner: %s", e)
+
+ # Stop IK worker subprocess
+ try:
+ close_cache()
+ except Exception as e:
+ logger.debug("Error stopping IK worker: %s", e)
- # Stop status services
+ # Close status broadcaster
try:
if self._status_broadcaster:
- self._status_broadcaster.stop()
- if self._status_updater:
- self._status_updater.stop()
- except Exception:
- pass
+ self._status_broadcaster.close()
+ except Exception as e:
+ logger.debug("Error closing status broadcaster: %s", e)
# Clean up transports
if self.udp_transport:
self.udp_transport.close_socket()
- if self.serial_transport:
- self.serial_transport.disconnect()
+ self._transport_mgr.disconnect()
- logger.info("Controller stopped")
+ # Re-enable automatic GC and remove tracker callback
+ self._gc_tracker.shutdown()
- def _main_control_loop(self):
- """
- Main control loop that:
- 1. Reads from firmware (serial)
- 2. Handles E-stop and recovery
- 3. Executes active command or fetches from GCODE
- 4. Writes to firmware (serial)
- 5. Maintains timing
- """
+ # Stop async logging (flushes queued messages)
+ self._async_log.stop()
- tick = self.config.loop_interval
- next_t = time.perf_counter()
- prev_t = next_t # for period measurement
- prev_print = next_t
+ logger.info("Controller stopped")
- while self.running:
+ def _read_from_firmware(self, state: ControllerState) -> None:
+ """Phase 1: Poll serial for data, unpack frames, handle auto-reconnect."""
+ if self._transport_mgr.is_connected():
+ # Poll serial for new data
+ self._transport_mgr.poll_serial()
try:
- state = self.state_manager.get_state()
-
- # 1. Read from firmware
- if self.serial_transport and self.serial_transport.is_connected():
- try:
- mv, ver, ts = self.serial_transport.get_latest_frame_view()
- if mv is not None and ver != self._serial_last_version:
- ok = unpack_rx_frame_into(
- mv,
- pos_out=state.Position_in,
- spd_out=state.Speed_in,
- homed_out=state.Homed_in,
- io_out=state.InOut_in,
- temp_out=state.Temperature_error_in,
- poserr_out=state.Position_error_in,
- timing_out=state.Timing_data_in,
- grip_out=state.Gripper_data_in,
- )
- if ok:
- get_cache().mark_serial_observed()
- if not self.first_frame_received:
- self.first_frame_received = True
- logger.info("First frame received from robot")
- self._serial_last_version = ver
- except Exception as e:
- logger.warning(f"Error decoding latest serial frame: {e}")
-
- # Serial auto-reconnect when a port is known
- if self.serial_transport and not self.serial_transport.is_connected():
- if self.serial_transport.auto_reconnect():
- try:
- self.serial_transport.start_reader(self.shutdown_event)
- self.first_frame_received = False
- # Reset TX keepalive to force prompt write after reconnect
- if self._last_tx is not None:
- self._last_tx["last_sent"] = 0.0
- logger.info(
- "Serial reader thread started (after reconnect)"
- )
- except Exception as e:
- logger.warning(
- "Failed to start serial reader after reconnect: %s", e
- )
-
- # 2. Handle E-stop (only check when connected to robot and received first frame)
- if (
- self.serial_transport
- and self.serial_transport.is_connected()
- and self.first_frame_received
- ):
- if (
- state.InOut_in[4] == 0
- ): # E-stop pressed (0 = pressed, 1 = released)
- if not self.estop_active: # Not already in E-stop state
- logger.warning("E-STOP activated")
- self.estop_active = True
- # Cancel active command
- if self.active_command:
- self._cancel_active_command("E-Stop activated")
- # Clear queue
- self._clear_queue("E-Stop activated")
- # Stop robot
- state.Command_out = CommandCode.DISABLE
- state.Speed_out.fill(0)
- elif state.InOut_in[4] == 1: # E-stop released (1 = released)
- if self.estop_active: # Was in E-stop state
- # E-stop was released - automatic recovery
- logger.info("E-STOP released - automatic recovery")
- self.estop_active = False
- # Re-enable immediately per policy
- state.enabled = True
- state.disabled_reason = ""
- state.Command_out = CommandCode.IDLE
- state.Speed_out.fill(0)
-
- # 3. Execute commands if not in E-stop (or E-stop state unknown)
- if (
- not self.estop_active
- ): # Execute if E-stop is False or None (unknown)
- # Execute active command
- if self.active_command or self.command_queue:
- self._execute_active_command()
- # Check for GCODE commands if program is running
- elif self.gcode_interpreter.is_running:
- self._fetch_gcode_commands()
- else:
- # No commands - idle
- state.Command_out = CommandCode.IDLE
- state.Speed_out.fill(0)
- np.copyto(state.Position_out, state.Position_in, casting="no")
-
- # 4. Write to firmware
- if (
- self.serial_transport
- and self.serial_transport.is_connected()
- and self._last_tx is not None
- ):
- # Check if state has changed or keepalive needed
- now = time.perf_counter()
- dirty = (
- (state.Command_out.value != self._last_tx["cmd"])
- or (
- not np.array_equal(state.Position_out, self._last_tx["pos"])
- )
- or (not np.array_equal(state.Speed_out, self._last_tx["spd"]))
- or (
- not np.array_equal(
- state.Affected_joint_out, self._last_tx["aff"]
- )
- )
- or (not np.array_equal(state.InOut_out, self._last_tx["io"]))
- or (int(state.Timeout_out) != int(self._last_tx["tout"]))
- or (
- not np.array_equal(
- state.Gripper_data_out, self._last_tx["grip"]
- )
- )
- )
-
- # Write if dirty or keepalive timeout reached
- if dirty or (
- now - self._last_tx["last_sent"] >= self._tx_keepalive_s
- ):
- ok = self.serial_transport.write_frame(
- state.Position_out,
- state.Speed_out,
- state.Command_out.value,
- state.Affected_joint_out,
- state.InOut_out,
- state.Timeout_out,
- state.Gripper_data_out,
- )
- if ok:
- # Update last TX snapshot
- self._last_tx["cmd"] = state.Command_out.value
- np.copyto(self._last_tx["pos"], state.Position_out)
- np.copyto(self._last_tx["spd"], state.Speed_out)
- np.copyto(self._last_tx["aff"], state.Affected_joint_out)
- np.copyto(self._last_tx["io"], state.InOut_out)
- self._last_tx["tout"] = int(state.Timeout_out)
- np.copyto(self._last_tx["grip"], state.Gripper_data_out)
- self._last_tx["last_sent"] = now
-
- # Auto-reset one-shot gripper modes after successful send
- if state.Gripper_data_out[4] in (1, 2):
- state.Gripper_data_out[4] = 0
-
- # 5. Maintain loop timing using deadline scheduling + update loop metrics
- now = time.perf_counter()
- # Update period metrics
- period = now - prev_t
- prev_t = now
- state.loop_count += 1
- state.last_period_s = float(period)
- if state.ema_period_s <= 0.0:
- state.ema_period_s = float(period)
- else:
- # EMA with alpha=0.1
- state.ema_period_s = 0.1 * float(period) + 0.9 * float(
- state.ema_period_s
+ mv, ver, ts = self._transport_mgr.get_latest_frame()
+ if mv is not None and ver != self._transport_mgr._last_version:
+ ok = unpack_rx_frame_into(
+ mv,
+ pos_out=state.Position_in,
+ spd_out=state.Speed_in,
+ homed_out=state.Homed_in,
+ io_out=state.InOut_in,
+ temp_out=state.Temperature_error_in,
+ poserr_out=state.Position_error_in,
+ timing_out=state.Timing_data_in,
+ grip_out=state.Gripper_data_in,
)
-
- next_t += tick
- sleep = next_t - time.perf_counter()
- if sleep > 0:
- time.sleep(sleep)
- else:
- # Overrun; catch up
- state.overrun_count += 1
- next_t = time.perf_counter()
-
- if now - prev_print > 5:
- # Warn only if short-term average period degraded >10% vs target
- if state.ema_period_s > tick * 1.10:
- logger.warning(
- f"Control loop avg period degraded by +{((state.ema_period_s / tick) - 1.0) * 100.0:.0f}% "
- f"(avg={state.ema_period_s:.4f}s target={tick:.4f}s); latest overrun={-sleep:.4f}s"
- )
- prev_print = now
- # Calculate command frequency
- cmd_hz = 0.0
- if state.ema_command_period_s > 0.0:
- cmd_hz = 1.0 / state.ema_command_period_s
-
- # Calculate short-term command rate from recent timestamps
- short_term_cmd_hz = 0.0
- if len(state.command_timestamps) >= 2:
- # Calculate rate from first and last timestamp in window
- time_span = (
- state.command_timestamps[-1] - state.command_timestamps[0]
- )
- if time_span > 0:
- short_term_cmd_hz = (
- len(state.command_timestamps) - 1
- ) / time_span
-
- logger.debug(
- "loop=%.2fHz cmd=%.2fHz s=%.2f/%d q=%d ov=%d",
- (
- (1.0 / state.ema_period_s)
- if state.ema_period_s > 0.0
- else 0.0
- ),
- cmd_hz,
- short_term_cmd_hz,
- max(0, len(state.command_timestamps) - 1),
- state.command_count,
- state.overrun_count,
- )
-
- except KeyboardInterrupt:
- logger.info("Keyboard interrupt received")
- self.stop()
- break
+ if ok:
+ get_cache().mark_serial_observed()
+ if not self._transport_mgr.first_frame_received:
+ self._transport_mgr.first_frame_received = True
+ logger.debug("First frame received from robot")
+ self._transport_mgr._last_version = ver
except Exception as e:
- logger.error(f"Error in main control loop: {e}", exc_info=True)
- # Continue running despite errors
+ logger.warning(f"Error decoding latest serial frame: {e}")
- def _command_processing_loop(self):
- """
- Separate thread for processing incoming commands from UDP.
- """
- while self.running and self.udp_transport:
- try:
- # Check for new commands from UDP (blocking with short timeout)
- tup = self.udp_transport.receive_one()
- if tup is None:
- continue
- cmd_str, addr = tup
- try:
- _n = (
- cmd_str.split("|", 1)[0].upper()
- if isinstance(cmd_str, str)
- else "UNKNOWN"
- )
- except Exception:
- _n = "UNKNOWN"
- logger.log(TRACE, "cmd_received name=%s from=%s", _n, addr)
- state = self.state_manager.get_state()
-
- # Track command reception for frequency metrics
- now = time.time()
- if state.last_command_time > 0:
- # Calculate period between commands
- period = now - state.last_command_time
- state.last_command_period_s = period
-
- # Update EMA of command period (alpha=0.1)
- if state.ema_command_period_s <= 0.0:
- state.ema_command_period_s = period
- else:
- state.ema_command_period_s = (
- 0.1 * period + 0.9 * state.ema_command_period_s
- )
-
- state.last_command_time = now
- state.command_count += 1
- state.command_timestamps.append(now)
- # No command IDs on wire; treat entire datagram as the command
- cmd_parts = cmd_str.split("|")
- cmd_name = cmd_parts[0].upper()
- # Build server-side ack/wait policy
- policy = AckPolicy.from_env(lambda: state.stream_mode)
-
- # Stream fast-path: if an active streamable command of the same type exists,
- # reuse the instance by calling match/setup and skip object creation/queueing.
- if state.stream_mode and self.active_command:
- logger.log(
- TRACE,
- "stream_fast_path_considered active=%s incoming=%s",
- type(self.active_command.command).__name__,
- cmd_name,
- )
- active_inst = self.active_command.command
- if (
- isinstance(active_inst, MotionCommand)
- and active_inst.streamable
- ):
- active_name = active_inst._registered_name
- if active_name == cmd_name:
- can_handle, match_err = active_inst.match(cmd_parts)
- if can_handle:
- try:
- active_inst.bind(
- CommandContext(
- udp_transport=self.udp_transport,
- addr=addr,
- gcode_interpreter=self.gcode_interpreter,
- dt=self.config.loop_interval,
- )
- )
- active_inst.setup(state)
- except Exception as _e:
- logger.error(
- "Stream fast-path setup failed for %s: %s",
- active_name,
- _e,
- )
- else:
- logger.log(
- TRACE,
- "stream_fast_path_applied name=%s",
- active_name,
- )
- continue
- else:
- if match_err:
- if self.udp_transport and policy.requires_ack(
- cmd_str
- ):
- self.udp_transport.send_response(
- f"ERROR|{match_err}", addr
- )
- logger.log(
- TRACE,
- "Stream fast-path match failed for %s: %s",
- active_name,
- match_err,
- )
-
- # Create command instance from message
- command, error = create_command_from_parts(cmd_parts)
- if not command:
- if error:
- # Known command but invalid parameters
- logger.warning(
- f"Command validation failed: {cmd_str} - {error}"
- )
- if self.udp_transport:
- self.udp_transport.send_response(f"ERROR|{error}", addr)
- else:
- # Unknown command
- logger.warning(f"Unknown command: {cmd_str}")
- if self.udp_transport:
- self.udp_transport.send_response(
- "ERROR|Unknown command", addr
- )
- continue
-
- # Handle system commands (they can execute regardless of enable state)
- if isinstance(command, SystemCommand):
- # System commands execute immediately without queueing
- command.bind(
- CommandContext(
- udp_transport=self.udp_transport,
- addr=addr,
- gcode_interpreter=self.gcode_interpreter,
- dt=self.config.loop_interval,
- )
- )
- logger.log(
- TRACE, "syscmd_execute_start name=%s", type(command).__name__
- )
- command.setup(state)
- status = command.tick(state)
- logger.log(
- TRACE,
- "syscmd_execute_%s name=%s msg=%s",
- "ok" if status.code == ExecutionStatusCode.COMPLETED else "err",
- type(command).__name__,
- status.message,
- )
+ state.hardware_connected = (
+ not isinstance(self._transport_mgr.transport, MockSerialTransport)
+ and self._transport_mgr.is_connected()
+ and self._transport_mgr.first_frame_received
+ )
- # Handle side-effects for certain system commands (e.g., SET_PORT)
- try:
- if (
- status.details
- and isinstance(status.details, dict)
- and "serial_port" in status.details
- ):
- port = status.details.get("serial_port")
- if port:
- # Remember configured port
- self.config.serial_port = port
- try:
- # (Re)connect transport immediately using provided port
- self.serial_transport = (
- create_and_connect_transport(
- port=port,
- baudrate=self.config.serial_baudrate,
- auto_find_port=False,
- )
- )
- if self.serial_transport and hasattr(
- self.serial_transport, "start_reader"
- ):
- self.serial_transport.start_reader(
- self.shutdown_event
- )
- self.first_frame_received = False
- # Reset TX keepalive to force prompt write after reconnect
- if self._last_tx is not None:
- self._last_tx["last_sent"] = 0.0
- logger.info(
- "Serial reader thread started (after SET_PORT)"
- )
- except Exception as e:
- logger.warning(
- f"Failed to (re)connect serial on SET_PORT: {e}"
- )
-
- # Handle SIMULATOR toggle
- if (
- status.details
- and isinstance(status.details, dict)
- and "simulator_mode" in status.details
- ):
- mode = str(status.details.get("simulator_mode", "")).lower()
- try:
- # Update env to drive transport_factory.is_simulation_mode()
- os.environ["PAROL6_FAKE_SERIAL"] = (
- "1" if mode in ("on", "1", "true", "yes") else "0"
- )
-
- # Pre-switch safety: stop and clear motion before transport switch
- try:
- state = self.state_manager.get_state()
- # Immediately stop motion
- state.Command_out = CommandCode.IDLE
- state.Speed_out.fill(0)
- # Cancel active and clear queued streamable/non-streamable commands
- self._cancel_active_command("Simulator mode toggle")
- self._clear_queue("Simulator mode toggle")
- except Exception as _e:
- logger.debug(
- "Simulator toggle pre-switch safety failed: %s",
- _e,
- )
-
- # Disconnect any existing transport
- if self.serial_transport:
- try:
- self.serial_transport.disconnect()
- except Exception:
- pass
- # Recreate transport according to new mode; auto_find_port for real serial
- self.serial_transport = create_and_connect_transport(
- port=self.config.serial_port,
- baudrate=self.config.serial_baudrate,
- auto_find_port=True,
- )
- # If enabled, sync simulator to last known controller state so pose continuity is preserved
- try:
- if mode in (
- "on",
- "1",
- "true",
- "yes",
- ) and isinstance(
- self.serial_transport, MockSerialTransport
- ):
- self.serial_transport.sync_from_controller_state(
- state
- )
- except Exception as _e:
- logger.warning(
- "Failed to sync simulator from controller state: %s",
- _e,
- )
-
- if self.serial_transport:
- self.serial_transport.start_reader(
- self.shutdown_event
- )
- self.first_frame_received = False
- # Reset TX keepalive to force prompt write after transport switch
- if self._last_tx is not None:
- self._last_tx["last_sent"] = 0.0
- logger.info(
- "Serial reader thread started (after SIMULATOR toggle)"
- )
- except Exception as e:
- logger.warning(
- f"Failed to (re)configure transport on SIMULATOR: {e}"
- )
- except Exception as e:
- logger.debug(f"System command side-effect handling failed: {e}")
-
- # Always respond for system commands
- if self.udp_transport:
- if status.code == ExecutionStatusCode.COMPLETED:
- self.udp_transport.send_response("OK", addr)
- else:
- msg = status.message or "Failed"
- self.udp_transport.send_response(f"ERROR|{msg}", addr)
- continue
-
- # Check if controller is enabled for motion commands
- if isinstance(command, MotionCommand) and not state.enabled:
- # Inform client only if policy requires ACK for motion
- if self.udp_transport and policy.requires_ack(cmd_str):
- reason = state.disabled_reason or "Controller disabled"
- self.udp_transport.send_response(f"ERROR|{reason}", addr)
- logger.warning(
- f"Motion command rejected - controller disabled: {cmd_name}"
- )
- continue
-
- # Query commands execute immediately (bypass queue)
- if isinstance(command, QueryCommand):
- # Execute query immediately with context
- command.bind(
- CommandContext(
- udp_transport=self.udp_transport,
- addr=addr,
- gcode_interpreter=self.gcode_interpreter,
- dt=self.config.loop_interval,
- )
- )
- command.setup(state)
- _ = command.tick(state)
- # Query commands send their own responses
- continue
+ # Serial auto-reconnect when a port is known
+ if self._transport_mgr.auto_reconnect():
+ # Flush stale commands so the robot doesn't replay old moves
+ self._segment_player.cancel(state)
+ self._planner.cancel()
+ self._executor.cancel_active_command("Serial reconnect")
+ self._executor.clear_queue("Serial reconnect")
+
+ def _handle_estop(self, state: ControllerState) -> None:
+ """Phase 2: Handle E-stop activation and recovery."""
+ if not (
+ self._transport_mgr.is_connected()
+ and self._transport_mgr.first_frame_received
+ ):
+ return
- # Apply stream mode logic for streamable motion commands
+ if state.InOut_in[4] == 0: # E-stop pressed
+ if not self.estop_active:
+ logger.warning("E-STOP activated")
+ self.estop_active = True
+ self._segment_player.cancel(state)
+ self._planner.sync_tool(
+ state.current_tool,
+ variant_key=state.current_tool_variant,
+ tcp_offset_m=state.tcp_offset_m,
+ )
+ if self._executor.active_command:
+ self._executor.cancel_active_command("E-Stop activated")
+ self._executor.clear_queue("E-Stop activated")
+ state.Command_out = CommandCode.DISABLE
+ state.Speed_out.fill(0)
+ state.error = make_error(ErrorCode.SYS_ESTOP_ACTIVE)
+ elif state.InOut_in[4] == 1: # E-stop released
+ if self.estop_active:
+ logger.info("E-STOP released - automatic recovery")
+ self.estop_active = False
+ state.enabled = True
+ state.disabled_reason = ""
+ state.Command_out = CommandCode.IDLE
+ state.Speed_out.fill(0)
if (
- state.stream_mode
- and isinstance(command, MotionCommand)
- and command.streamable
+ state.error is not None
+ and state.error.code == ErrorCode.SYS_ESTOP_ACTIVE
):
- # Drain UDP buffer to discard stale commands before processing new one
- if self.udp_transport:
- drained = self.udp_transport.drain_buffer()
- if drained > 0:
- logger.log(TRACE, "udp_buffer_drained count=%d", drained)
-
- # Cancel any active streamable command and replace it (suppress per-command ACK to reduce UDP chatter)
- if (
- self.active_command
- and isinstance(self.active_command.command, MotionCommand)
- and getattr(self.active_command.command, "streamable", False)
- ):
- self.active_command = None
-
- # Clear any queued streamable commands without per-command ACKs to reduce UDP chatter
- removed = 0
- for queued_cmd in list(self.command_queue):
- if isinstance(queued_cmd.command, MotionCommand) and getattr(
- queued_cmd.command, "streamable", False
- ):
- self.command_queue.remove(queued_cmd)
- removed += 1
- if removed:
- logger.log(TRACE, "queued_streamables_removed count=%d", removed)
-
- # Queue the command
- status = self._queue_command(addr, command, None)
- logger.log(
- TRACE, "Command %s queued with status: %s", cmd_name, status.code
- )
+ state.error = None
- # For motion commands: ACK acceptance only if policy requires ACK
- if isinstance(command, MotionCommand) and self.udp_transport:
- if policy.requires_ack(cmd_str):
- if status.code == ExecutionStatusCode.QUEUED:
- self.udp_transport.send_response("OK", addr)
- else:
- msg = status.message or "Queue error"
- self.udp_transport.send_response(f"ERROR|{msg}", addr)
-
- # Start execution if no active command
- if not self.active_command:
- self._execute_active_command()
- except Exception as e:
- logger.error(f"Error in command processing: {e}", exc_info=True)
+ def _execute_commands(self, state: ControllerState) -> None:
+ """Phase 3: Execute active command."""
+ # Tool action side channel — ticks concurrently with everything
+ self._tick_tool_cmd(state)
- def _queue_command(
- self,
- address: tuple[str, int] | None,
- command: CommandBase,
- command_id: str | None = None,
- ) -> ExecutionStatus:
- """
- Add a command to the execution queue.
+ # Segment player handles trajectory + inline commands from planner
+ if self._segment_player.tick(state):
+ return
- Args:
- command: The command to queue
- command_id: Optional ID for tracking
- address: Optional (ip, port) for acknowledgments
- priority: Priority level (higher = executed first)
+ # Streaming command executor (jog/servo)
+ if self._executor.active_command or self._executor.command_queue:
+ self._executor.execute_active_command()
+ else:
+ state.Command_out = CommandCode.IDLE
+ state.Speed_out.fill(0)
- Returns:
- ExecutionStatus indicating queue status
- """
- # Check if queue is full
- if len(self.command_queue) >= 100: # max_queue_size
- logger.warning("Command queue full (max 100)")
- if command_id and address:
- self._send_ack(command_id, "FAILED", "Queue full", address)
- return ExecutionStatus.failed("Queue full")
-
- # Create queued command
- queued_cmd = QueuedCommand(
- command=command, command_id=command_id, address=address
- )
+ def _tick_tool_cmd(self, state: ControllerState) -> None:
+ """Tick tool action side channel (concurrent with motion)."""
+ if self._tool_cmd is None:
+ return
- # Bind dynamic context so the command can reply/inspect interpreter when executed
- command.bind(
- CommandContext(
- udp_transport=self.udp_transport,
- addr=address,
- gcode_interpreter=self.gcode_interpreter,
- dt=self.config.loop_interval,
- )
- )
+ if not self._tool_cmd_activated:
+ self._tool_cmd.setup(state)
+ self._tool_cmd_activated = True
- self.command_queue.append(queued_cmd)
+ code = self._tool_cmd.tick(state)
- # Update non-streamable queue snapshot
- state = self.state_manager.get_state()
- state.queue_nonstreamable = [
- type(qc.command).__name__
- for qc in self.command_queue
- if not (
- isinstance(qc.command, MotionCommand)
- and getattr(qc.command, "streamable", False)
+ if code == ExecutionStatusCode.COMPLETED:
+ state.completed_command_index = max(
+ state.completed_command_index, self._tool_cmd_index
)
- ]
-
- # Update next action
- if state.queue_nonstreamable:
- state.action_next = state.queue_nonstreamable[0]
- else:
- state.action_next = ""
-
- # Send acknowledgment
- if command_id and address:
- queue_pos = len(self.command_queue)
- self._send_ack(
- command_id, "QUEUED", f"Position {queue_pos} in queue", address
+ self._tool_cmd = None
+ self._tool_cmd_activated = False
+ elif code == ExecutionStatusCode.FAILED:
+ logger.error(
+ "Tool action failed: %s - %s",
+ type(self._tool_cmd).__name__,
+ self._tool_cmd.robot_error,
+ )
+ state.error = self._tool_cmd.robot_error or make_error(
+ ErrorCode.MOTN_TICK_FAILED, detail=type(self._tool_cmd).__name__
)
+ state.action_state = ActionState.ERROR
+ state.completed_command_index = max(
+ state.completed_command_index, self._tool_cmd_index
+ )
+ self._tool_cmd = None
+ self._tool_cmd_activated = False
+
+ def _write_to_firmware(self, state: ControllerState) -> None:
+ """Phase 4: Write state to serial transport."""
+ ok = self._transport_mgr.write_frame(
+ state.Position_out,
+ state.Speed_out,
+ state.Command_out.value,
+ state.Affected_joint_out,
+ state.InOut_out,
+ state.Timeout_out,
+ state.Gripper_data_out,
+ )
+ if ok:
+ # Auto-reset one-shot gripper modes after successful send
+ if state.Gripper_data_out[4] in (1, 2):
+ state.Gripper_data_out[4] = 0
+
+ def _sync_timer_metrics(self, state: ControllerState) -> None:
+ """Copy timing metrics from LoopTimer and PhaseTimer to controller state."""
+ m = self._timer.metrics
+
+ # Check if loop stats reset was requested
+ if state.loop_stats_reset_pending:
+ m.reset_stats(include_counters=True)
+ state.loop_stats_reset_pending = False
+ logger.debug("Loop stats reset completed")
+
+ state.loop_count = m.loop_count
+ state.overrun_count = m.overrun_count
+
+ # Only copy rolling stats when they were updated (every stats_interval loops)
+ if m.loop_count % self._timer._stats_interval == 0:
+ state.mean_period_s = m.mean_period_s
+ state.std_period_s = m.std_period_s
+ state.min_period_s = m.min_period_s
+ state.max_period_s = m.max_period_s
+ state.p95_period_s = m.p95_period_s
+ state.p99_period_s = m.p99_period_s
+
+ def _log_periodic_status(self, state: ControllerState) -> None:
+ """Log performance metrics every 3 seconds."""
+ now = time.perf_counter()
+ m = self._timer.metrics
+
+ # Rate-limited overbudget warning (grace period handled in LoopMetrics)
+ should_warn, pct = m.check_degraded(now, 0.25, 3.0)
+ if should_warn:
+ gc_dur = self._gc_tracker.recent_duration_ms()
+ # With elevated priority, overbudget is unexpected — always warn.
+ # Without priority, occasional overbudget is normal (OS scheduling);
+ # only escalate to warning if frequent (>3 in the last 30s).
+ if self._priority_elevated:
+ log = logger.warning
+ else:
+ self._overbudget_times.append(now)
+ cutoff = now - 60.0
+ while self._overbudget_times and self._overbudget_times[0] < cutoff:
+ self._overbudget_times.pop(0)
+ log = (
+ logger.warning if len(self._overbudget_times) > 3 else logger.debug
+ )
+ log(
+ "loop overbudget by +%.0f%% (%s) gc=%.2fms",
+ pct,
+ format_hz_summary(m),
+ gc_dur,
+ )
+
+ # Rate-limited debug log every 3s
+ if not m.should_log(now, 3.0):
+ return
- logger.log(
- TRACE, "Queued command: %s (ID: %s)", type(command).__name__, command_id
+ # Command rate from EventRateMetrics (decays to 0 when idle)
+ cmd_hz = self._cmd_rate.rate_hz(now, max_age_s=6.0)
+ gc_hz, gc_ms = self._gc_tracker.stats(now, max_age_s=6.0)
+
+ logger.debug(
+ "loop: %s cmd=%.1fHz ov=%d overshoot_p99=%.2fµs gc=%.1fHz/%.2fms",
+ format_hz_summary(m),
+ cmd_hz,
+ state.overrun_count,
+ m.p99_overshoot_s * 1_000_000,
+ gc_hz,
+ gc_ms,
)
- return ExecutionStatus(
- code=ExecutionStatusCode.QUEUED,
- message=f"Command queued at position {len(self.command_queue)}",
+ # Log phase breakdown (p99 values to catch spikes)
+ phases = self._phase_timer.phases
+ logger.debug(
+ "phases p99: read=%.2fms poll_cmd=%.2fms status=%.2fms estop=%.2fms exec=%.2fms write=%.2fms sim=%.2fms",
+ phases["read"].p99_s * 1000,
+ phases["poll_cmd"].p99_s * 1000,
+ phases["status"].p99_s * 1000,
+ phases["estop"].p99_s * 1000,
+ phases["execute"].p99_s * 1000,
+ phases["write"].p99_s * 1000,
+ phases["sim"].p99_s * 1000,
)
- def _execute_active_command(self) -> ExecutionStatus | None:
- """
- Execute one step of the active command from the queue.
+ def _main_control_loop(self):
+ """Main control loop with phase-based structure and precise timing."""
+ self._timer.start()
+ pt = self._phase_timer
+ tick_count = 0
+ broadcast_interval = STATUS_BROADCAST_INTERVAL
- Returns:
- ExecutionStatus of the execution, or None if no active command
- """
- # Check if we need to activate a new command from queue
- if self.active_command is None:
- if not self.command_queue:
- return None
- # Get next command from queue
- self.active_command = self.command_queue.popleft()
- # mark not yet activated
- self.active_command.activated = False
-
- # Execute active command step
- if self.active_command:
- ac = self.active_command
+ while self.running:
try:
state = self.state_manager.get_state()
-
- # Check if controller is enabled
- if state.enabled:
- # Perform setup and EXECUTING ACK only once
- if ac and not getattr(ac, "activated", False):
- ac.command.setup(state)
-
- # Update action tracking
- state.action_current = type(ac.command).__name__
- state.action_state = "EXECUTING"
-
- # Send executing acknowledgment once
- if ac.command_id and ac.address:
- self._send_ack(
- ac.command_id,
- "EXECUTING",
- f"Starting {type(ac.command).__name__}",
- ac.address,
- )
-
- ac.activated = True
- logger.log(
- TRACE,
- "Activated command: %s (id=%s)",
- type(ac.command).__name__,
- ac.command_id,
- )
-
- else:
- # Cancel command due to disabled controller
- self._cancel_active_command("Controller disabled")
- return ExecutionStatus(
- code=ExecutionStatusCode.CANCELLED,
- message="Controller disabled",
- )
-
- # Execute command step
- if not getattr(ac, "first_tick_logged", False):
- logger.log(TRACE, "tick_start name=%s", type(ac.command).__name__)
- ac.first_tick_logged = True
- status = ac.command.tick(state)
-
- # Enqueue any generated commands (e.g., from GCODE parsed in queued mode)
- if (
- status.details
- and isinstance(status.details, dict)
- and "enqueue" in status.details
- ):
- try:
- for robot_cmd_str in status.details["enqueue"]:
- cmd_obj, _ = create_command_from_parts(
- robot_cmd_str.split("|")
- )
- if cmd_obj:
- # Queue without address/id for generated commands
- self._queue_command(("127.0.0.1", 0), cmd_obj, None)
- except Exception as e:
- logger.error(f"Error enqueuing generated commands: {e}")
-
- # Check if command is finished
- if status.code == ExecutionStatusCode.COMPLETED:
- # Command completed successfully
- name = type(ac.command).__name__
- cid, addr = ac.command_id, ac.address
- logger.log(
- TRACE,
- "Command completed: %s (id=%s) at t=%f",
- name,
- cid,
- time.time(),
- )
-
- # Send completion acknowledgment
- if cid and addr:
- self._send_ack(cid, "COMPLETED", status.message, addr)
-
- # Update action tracking to idle
- state.action_current = ""
- state.action_state = "IDLE"
-
- # Update queue snapshot and next action
- state.queue_nonstreamable = [
- type(qc.command).__name__
- for qc in self.command_queue
- if not (
- isinstance(qc.command, MotionCommand)
- and getattr(qc.command, "streamable", False)
- )
- ]
- state.action_next = (
- state.queue_nonstreamable[0]
- if state.queue_nonstreamable
- else ""
+ tick_count += 1
+
+ with pt.phase("read"):
+ self._read_from_firmware(state)
+
+ with pt.phase("poll_cmd"):
+ self._poll_commands(state)
+
+ with pt.phase("estop"):
+ self._handle_estop(state)
+
+ if not self.estop_active:
+ with pt.phase("execute"):
+ self._execute_commands(state)
+
+ if tick_count % broadcast_interval == 0:
+ with pt.phase("status"):
+ if self._status_broadcaster:
+ self._status_broadcaster.tick()
+
+ with pt.phase("write"):
+ self._write_to_firmware(state)
+
+ with pt.phase("sim"):
+ # Pass tool teleport position if set by TeleportCommand
+ tool_tp = state.tool_teleport_pos
+ if tool_tp >= 0:
+ state.tool_teleport_pos = -1.0 # consume
+ # Cancel in-flight tool action so it doesn't re-arm the ramp
+ self._tool_cmd = None
+ self._tool_cmd_activated = False
+ self._transport_mgr.tick_simulation(
+ state.current_tool,
+ tool_teleport_pos=tool_tp,
)
- # Record and clear
- self.active_command = None
-
- elif status.code == ExecutionStatusCode.FAILED:
- # Command failed
- name = type(ac.command).__name__
- cid, addr = ac.command_id, ac.address
- logger.debug(
- f"Command failed: {name} (id={cid}) - {status.message} at t={time.time():.6f}"
- )
+ pt.tick()
+ self._sync_timer_metrics(state)
+ self._log_periodic_status(state)
+ self._gc_tracker.collect_deferred(
+ self._timer.time_to_next_deadline(), tick_count
+ )
+ self._timer.wait_for_next_tick()
- # Send failure acknowledgment
- if cid and addr:
- self._send_ack(cid, "FAILED", status.message, addr)
-
- # Update action tracking to idle
- state.action_current = ""
- state.action_state = "IDLE"
-
- # If this is a streamable command, clear all queued streamable commands
- # to prevent pileup when commands fail repeatedly (e.g., IK failures at limits)
- if isinstance(ac.command, MotionCommand) and getattr(
- ac.command, "streamable", False
- ):
- removed = self._clear_streamable_commands(
- f"Active streamable command failed: {status.message}"
- )
- if removed > 0:
- logger.info(
- f"Cleared {removed} queued streamable commands due to active command failure"
- )
-
- # Update queue snapshot and next action
- state.queue_nonstreamable = [
- type(qc.command).__name__
- for qc in self.command_queue
- if not (
- isinstance(qc.command, MotionCommand)
- and getattr(qc.command, "streamable", False)
- )
- ]
- state.action_next = (
- state.queue_nonstreamable[0]
- if state.queue_nonstreamable
- else ""
- )
+ except KeyboardInterrupt:
+ logger.debug("Keyboard interrupt received")
+ # Block SIGINT during shutdown so child processes aren't
+ # interrupted while we join them (avoids hang from numba's
+ # internal ProcessPoolExecutor workers catching SIGINT).
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ self.stop()
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ break
+ except Exception as e:
+ logger.error(f"Error in main control loop: {e}", exc_info=True)
+ state.Command_out = CommandCode.IDLE
+ state.Speed_out.fill(0)
+
+ def _poll_commands(self, state: ControllerState) -> None:
+ """Poll and process UDP commands (non-blocking)."""
+ assert self.udp_transport is not None
+
+ msgs = self.udp_transport.poll_receive_all(max_count=MAX_POLL_COUNT)
+ for data, addr in msgs:
+ self._process_command(data, addr, state)
+
+ def _reply_error(self, addr: tuple[str, int], error: RobotError) -> None:
+ """Send error response to client. Caller must ensure udp_transport is not None."""
+ assert self.udp_transport is not None
+ self.udp_transport.send(pack_error(error), addr)
+
+ def _reply_ok(self, addr: tuple[str, int]) -> None:
+ """Send OK response to client. Caller must ensure udp_transport is not None."""
+ assert self.udp_transport is not None
+ self.udp_transport.send(pack_ok(), addr)
+
+ def _reply_ok_index(self, addr: tuple[str, int], index: int) -> None:
+ """Send OK response with command index. Caller must ensure udp_transport is not None."""
+ assert self.udp_transport is not None
+ self.udp_transport.send(pack_ok_index(index), addr)
+
+ def _process_command(
+ self, data: bytes, addr: tuple[str, int], state: ControllerState
+ ) -> None:
+ """Process a single command from UDP.
- self.active_command = None
+ Args:
+ data: Raw msgpack-encoded command bytes
+ addr: Client address tuple (host, port)
+ state: Controller state
+ """
+ self._cmd_rate.record(time.perf_counter())
- return status
+ # Try stream fast-path first (avoids full command creation)
+ result = self._executor.try_stream_fast_path(data, state)
+ if result is True:
+ return
- except Exception as e:
- logger.error(f"Command execution error: {e}")
+ # If fast-path returned a decoded struct, reuse it; otherwise decode from bytes
+ if result is not False:
+ command, category, error = create_command_from_struct(result)
+ else:
+ command, category, error = create_command(data)
- # Handle execution error - save command info before clearing
- cid = ac.command_id if ac else None
- addr = ac.address if ac else None
+ if not command or category is None:
+ if error:
+ logger.warning(f"Command validation failed: {error}")
+ self._reply_error(
+ addr, make_error(ErrorCode.COMM_VALIDATION_ERROR, detail=error)
+ )
+ else:
+ logger.warning("Unknown command")
+ self._reply_error(addr, make_error(ErrorCode.COMM_UNKNOWN_COMMAND))
+ return
- if cid and addr:
- self._send_ack(cid, "FAILED", f"Execution error: {e!s}", addr)
- self.active_command = None
+ cmd_name = type(command).__name__
+ logger.log(TRACE, "cmd_received name=%s from=%s", cmd_name, addr)
- return ExecutionStatus.failed(f"Execution error: {e!s}", error=e)
+ # Dispatch by category (determined at registration time, no isinstance needed)
+ match category:
+ case CommandCategory.QUERY:
+ self._handle_query(command, state, addr) # type: ignore[arg-type, ty:invalid-argument-type]
+ case CommandCategory.SYSTEM:
+ self._handle_system_command(command, state, addr) # type: ignore[arg-type, ty:invalid-argument-type]
+ case CommandCategory.MOTION:
+ self._handle_motion_command(command, state, addr) # type: ignore[arg-type, ty:invalid-argument-type]
- return None
+ def _handle_motion_command(
+ self, command: MotionCommand, state: ControllerState, addr: tuple[str, int]
+ ) -> None:
+ """Queue motion command for execution."""
+ cmd_name = type(command).__name__
+
+ cmd_type = command._cmd_type
+ if not state.enabled:
+ if cmd_type and self._ack_policy.requires_ack(cmd_type):
+ reason = state.disabled_reason or "Controller disabled"
+ self._reply_error(
+ addr, make_error(ErrorCode.SYS_CONTROLLER_DISABLED, detail=reason)
+ )
+ logger.warning(
+ "Motion command rejected - controller disabled: %s", cmd_name
+ )
+ return
- def _cancel_active_command(self, reason: str = "Cancelled by user") -> None:
- """
- Cancel the currently active command.
+ # Streaming commands: cancel segment playback + existing streamable handling
+ if getattr(command, "streamable", False):
+ self._segment_player.cancel(state)
+ if self.udp_transport:
+ drained = self.udp_transport.drain_buffer()
+ if drained > 0:
+ logger.log(TRACE, "udp_buffer_drained count=%d", drained)
+ self._executor.cancel_active_streamable()
+ removed = self._executor.clear_streamable_commands(
+ "Streaming command prepare"
+ )
+ if removed:
+ logger.log(TRACE, "queued_streamables_removed count=%d", removed)
+ try:
+ cmd_index = self._executor.queue_command(addr, command, None)
+ logger.log(TRACE, "Command %s queued (index=%d)", cmd_name, cmd_index)
+ if cmd_type and self._ack_policy.requires_ack(cmd_type):
+ self._reply_ok_index(addr, cmd_index)
+ except QueueFullError:
+ if cmd_type and self._ack_policy.requires_ack(cmd_type):
+ self._reply_error(addr, make_error(ErrorCode.COMM_QUEUE_FULL))
+ return
- Args:
- reason: Reason for cancellation
- """
- if not self.active_command:
+ # Tool actions bypass planner — execute directly via side channel
+ # (writes to gripper_hw, not Position_out, so concurrent with everything)
+ if isinstance(command.p, ToolActionCmd):
+ cmd_obj, _, error_msg = create_command_from_struct(command.p)
+ if cmd_obj is None:
+ logger.error("Failed to create tool command: %s", error_msg)
+ if cmd_type and self._ack_policy.requires_ack(cmd_type):
+ self._reply_error(
+ addr,
+ make_error(ErrorCode.COMM_DECODE_ERROR, detail=error_msg or ""),
+ )
+ return
+ # New tool action replaces any in-flight one
+ self._tool_cmd = cmd_obj
+ self._tool_cmd_activated = False
+ cmd_index = self._assign_command_index(state)
+ self._tool_cmd_index = cmd_index
+ logger.log(
+ TRACE, "Command %s → tool side channel (index=%d)", cmd_name, cmd_index
+ )
+ if cmd_type and self._ack_policy.requires_ack(cmd_type):
+ self._reply_ok_index(addr, cmd_index)
return
- logger.info(
- f"Cancelling active command: {type(self.active_command.command).__name__} - {reason}"
+ # Non-streaming commands → planner
+ # Cancel active streaming command to avoid Position_in race
+ self._executor.cancel_active_streamable()
+
+ # Clear error state from previous pipeline failure
+ if state.error is not None:
+ state.error = None
+ state.action_state = ActionState.IDLE
+
+ cmd_index = self._assign_command_index(state)
+ # Only sync Position_in when segment player is idle — if segments are
+ # active/queued (e.g. homing), the planner's internal tracking is correct
+ # and Position_in may reflect a mid-motion position.
+ segment_idle = not self._segment_player.active
+ pos_snapshot = state.Position_in.copy() if segment_idle else None
+ self._planner.submit(
+ PlanCommand(
+ command_index=cmd_index,
+ params=command.p,
+ position_in=pos_snapshot,
+ )
)
+ if cmd_type and self._ack_policy.requires_ack(cmd_type):
+ self._reply_ok_index(addr, cmd_index)
- # Send cancellation acknowledgment
- if self.active_command.command_id and self.active_command.address:
- self._send_ack(
- self.active_command.command_id,
- "CANCELLED",
- reason,
- self.active_command.address,
+ def _handle_query(
+ self,
+ command: QueryCommand,
+ state: ControllerState,
+ addr: tuple[str, int],
+ ) -> None:
+ """Execute query command and send response directly."""
+ try:
+ command.setup(state)
+ response = command.compute(state)
+ assert self.udp_transport is not None
+ self.udp_transport.send(response, addr)
+ except Exception as e:
+ logger.error("Query error: %s", e)
+ self._reply_error(
+ addr, make_error(ErrorCode.COMM_DECODE_ERROR, detail=str(e))
)
- # Update action tracking to idle
- state = self.state_manager.get_state()
- state.action_current = ""
- state.action_state = "IDLE"
-
- # Record and clear
- self.active_command = None
-
- def _clear_queue(
- self, reason: str = "Queue cleared"
- ) -> list[tuple[str, ExecutionStatus]]:
- """
- Clear all queued commands.
-
- Args:
- reason: Reason for clearing the queue
-
- Returns:
- List of (command_id, status) for cleared commands
- """
- cleared = []
- # TODO: don't send out an ack for every queued command, just one signalling queues been cleared
- while self.command_queue:
- queued_cmd = self.command_queue.popleft()
-
- # Send cancellation acknowledgment
- if queued_cmd.command_id and queued_cmd.address:
- self._send_ack(
- queued_cmd.command_id, "CANCELLED", reason, queued_cmd.address
+ def _handle_system_command(
+ self,
+ command: SystemCommand,
+ state: ControllerState,
+ addr: tuple[str, int],
+ ) -> None:
+ """Execute system command, apply side effects, and send reply."""
+ try:
+ command.setup(state)
+ code = command.tick(state)
+
+ # Reset: cancel motion pipeline so stale segments don't play.
+ # Also sync the (now-cleared) tool state to the planner subprocess
+ # so its PAROL6_ROBOT singleton matches the controller's.
+ if isinstance(command, ResetCommand):
+ self._segment_player.cancel(state)
+ self._executor.cancel_active_command("Reset")
+ self._executor.clear_queue("Reset")
+ self._planner.sync_tool(
+ state.current_tool,
+ variant_key=state.current_tool_variant,
+ tcp_offset_m=state.tcp_offset_m,
)
- # Record cleared command
- if queued_cmd.command_id:
- status = ExecutionStatus(
- code=ExecutionStatusCode.CANCELLED, message=reason
+ # Infrastructure side effects (only 2-3 commands trigger these)
+ if command._switch_simulator is not None:
+ state.Command_out = CommandCode.IDLE
+ state.Speed_out.fill(0)
+ self._segment_player.cancel(state)
+ self._executor.cancel_active_command("Simulator mode toggle")
+ self._executor.clear_queue("Simulator mode toggle")
+ success, error = self._transport_mgr.switch_simulator_mode(
+ command._switch_simulator, sync_state=state
)
- cleared.append((queued_cmd.command_id, status))
-
- logger.info(f"Cleared {len(cleared)} commands from queue: {reason}")
-
- # Update action tracking
- state = self.state_manager.get_state()
- state.queue_nonstreamable = []
- state.action_next = ""
-
- return cleared
-
- def _clear_streamable_commands(
- self, reason: str = "Streamable commands cleared"
- ) -> int:
- """
- Clear all queued streamable motion commands.
-
- This is used to prevent command pileup when streamable commands fail repeatedly
- (e.g., IK failures when jogging at kinematic limits).
-
- Args:
- reason: Reason for clearing streamable commands
-
- Returns:
- Number of commands cleared
- """
- removed_count = 0
-
- # Iterate through a copy of the queue to safely remove items
- for queued_cmd in list(self.command_queue):
- if isinstance(queued_cmd.command, MotionCommand) and getattr(
- queued_cmd.command, "streamable", False
- ):
- self.command_queue.remove(queued_cmd)
- removed_count += 1
-
- # Send cancellation acknowledgment (though streamable commands typically don't have IDs)
- if queued_cmd.command_id and queued_cmd.address:
- self._send_ack(
- queued_cmd.command_id, "CANCELLED", reason, queued_cmd.address
- )
+ if not success:
+ raise RuntimeError(error or "Simulator toggle failed")
+ if command._switch_port is not None:
+ self._transport_mgr.switch_to_port(command._switch_port)
+ if command._sync_mock:
+ self._transport_mgr.sync_mock_from_state(state)
+
+ # Sync motion profile to planner (SelectProfile is a SystemCommand)
+ if isinstance(command, SelectProfileCommand):
+ self._planner.sync_profile(state.motion_profile)
+
+ if code == ExecutionStatusCode.COMPLETED:
+ self._reply_ok(addr)
+ else:
+ robot_error = command.robot_error or make_error(
+ ErrorCode.MOTN_TICK_FAILED, detail="System command failed"
+ )
+ self._reply_error(addr, robot_error)
- if removed_count > 0:
- logger.debug(
- f"Cleared {removed_count} streamable commands from queue: {reason}"
+ except Exception as e:
+ logger.error("System command error: %s", e)
+ self._reply_error(
+ addr, extract_robot_error(e, ErrorCode.MOTN_SETUP_FAILED, detail=str(e))
)
- return removed_count
+ def _assign_command_index(self, state: ControllerState) -> int:
+ """Assign a monotonically increasing command index."""
+ idx = state.next_command_index
+ state.next_command_index += 1
+ return idx
- def _fetch_gcode_commands(self):
- """
- Fetch next command from GCODE interpreter if program is running.
- Converts GCODE output to command objects and queues them.
- """
- if not self.gcode_interpreter.is_running:
- return
+ def _set_high_priority(self) -> bool:
+ """Set highest non-privileged process priority and pin to CPU core.
+ Returns True if priority was successfully elevated.
+ """
+ elevated = False
try:
- # Get next command from GCODE program
- next_gcode_cmd = self.gcode_interpreter.get_next_command()
- if not next_gcode_cmd:
- return
+ p = psutil.Process()
- # Use command registry to create command object
- command_obj, _ = create_command_from_parts(next_gcode_cmd.split("|"))
-
- if command_obj:
- # Queue without address/id for GCODE commands
- self._queue_command(("127.0.0.1", 0), command_obj, None)
- cmd_name = (
- next_gcode_cmd.split("|")[0]
- if "|" in next_gcode_cmd
- else next_gcode_cmd
- )
- logger.debug(f"Queued GCODE command: {cmd_name}")
+ # Set priority
+ if sys.platform == "win32":
+ p.nice(psutil.HIGH_PRIORITY_CLASS)
+ logger.debug("Set process priority to HIGH_PRIORITY_CLASS")
+ elevated = True
else:
- logger.warning(f"Unknown GCODE command generated: {next_gcode_cmd}")
+ try:
+ p.nice(-10)
+ logger.debug("Set process nice value to -10")
+ elevated = True
+ except psutil.AccessDenied:
+ logger.debug("Cannot set negative nice value without privileges")
+
+ # Pin to last CPU core (usually less contention from system tasks)
+ if hasattr(p, "cpu_affinity"):
+ try:
+ cpus = p.cpu_affinity()
+ if cpus and len(cpus) > 1:
+ target_core = cpus[-1]
+ p.cpu_affinity([target_core])
+ logger.debug(f"Pinned process to CPU core {target_core}")
+ except (AttributeError, NotImplementedError):
+ logger.debug("CPU affinity not supported on this platform")
+ except psutil.AccessDenied:
+ logger.debug("Cannot set CPU affinity without privileges")
except Exception as e:
- logger.error(f"Error fetching GCODE commands: {e}")
-
-
-def main():
- """Main entry point for the controller."""
- # Parse arguments first to get logging level
- parser = argparse.ArgumentParser(description="PAROL6 Robot Controller")
- parser.add_argument("--host", default="0.0.0.0", help="UDP host address")
- parser.add_argument("--port", type=int, default=5001, help="UDP port")
- parser.add_argument("--serial", help="Serial port (e.g., /dev/ttyUSB0 or COM3)")
- parser.add_argument("--baudrate", type=int, default=3000000, help="Serial baudrate")
- parser.add_argument(
- "--auto-home", action="store_true", help="Queue HOME on startup (default: off)"
- )
-
- # Verbose logging options (from controller.py)
- parser.add_argument(
- "-v",
- "--verbose",
- action="count",
- default=0,
- help="Increase verbosity; -v=INFO, -vv=DEBUG, -vvv=TRACE",
- )
- parser.add_argument(
- "-q",
- "--quiet",
- action="store_true",
- help="Enable quiet logging (WARNING level)",
- )
- parser.add_argument(
- "--log-level",
- choices=["TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
- help="Set specific log level",
- )
- args = parser.parse_args()
-
- # Determine log level
- # Precedence:
- # 1) Explicit --log-level
- # 2) Verbose / quiet flags
- # 3) Environment-driven TRACE (PAROL_TRACE=1 via TRACE_ENABLED)
- # 4) Default INFO
- if args.log_level:
- if args.log_level == "TRACE":
- log_level = TRACE
- cfg.TRACE_ENABLED = True
- else:
- log_level = getattr(logging, args.log_level)
- elif args.verbose >= 3:
- log_level = TRACE
- cfg.TRACE_ENABLED = True
- elif args.verbose >= 2:
- log_level = logging.DEBUG
- elif args.verbose == 1:
- log_level = logging.INFO
- elif args.quiet:
- log_level = logging.WARNING
- elif cfg.TRACE_ENABLED:
- # Enable TRACE when PAROL_TRACE=1 and no CLI override is given
- log_level = TRACE
- else:
- log_level = logging.INFO
-
- logging.basicConfig(
- level=log_level,
- format="%(asctime)s %(levelname)s %(name)s: %(message)s",
- datefmt="%H:%M:%S",
- )
-
- # Create configuration (env vars may override defaults)
- env_host = os.getenv("PAROL6_CONTROLLER_IP")
- env_port = os.getenv("PAROL6_CONTROLLER_PORT")
- udp_host = env_host.strip() if env_host else args.host
- try:
- udp_port = int(env_port) if env_port else args.port
- except (TypeError, ValueError):
- udp_port = args.port
-
- logger.info(f"Controller bind: host={udp_host} port={udp_port}")
-
- config = ControllerConfig(
- udp_host=udp_host,
- udp_port=udp_port,
- serial_port=args.serial,
- serial_baudrate=args.baudrate,
- auto_home=bool(args.auto_home),
- )
-
- # Create and run controller
- try:
- controller = Controller(config)
-
- if controller.is_initialized():
- try:
- controller.start()
- except KeyboardInterrupt:
- logger.info("Shutting down...")
- finally:
- controller.stop()
- else:
- logger.error("Controller not properly initialized")
- return 1
- except RuntimeError as e:
- logger.error(f"Failed to create controller: {e}")
- return 1
-
- return 0
-
-
-if __name__ == "__main__":
- exit(main())
+ logger.warning(f"Failed to set process priority/affinity: {e}")
+ return elevated
diff --git a/parol6/server/ik_layout.py b/parol6/server/ik_layout.py
new file mode 100644
index 0000000..ba6727b
--- /dev/null
+++ b/parol6/server/ik_layout.py
@@ -0,0 +1,40 @@
+"""Shared memory layout constants for the IK enablement pipeline.
+
+Both ``status_cache`` (writer) and ``ik_worker`` (reader) must agree on
+the byte offsets and sizes. Keeping them in one place avoids drift.
+"""
+
+import sys
+from multiprocessing.shared_memory import SharedMemory
+
+# ── Input buffer (status_cache → ik_worker) ──────────────────────
+IK_INPUT_Q_OFFSET = 0 # float64[6] = 48 bytes
+IK_INPUT_T_OFFSET = 48 # float64[16] = 128 bytes
+IK_INPUT_TOOL_OFFSET = 176 # float64[16] = 128 bytes (4x4 tool transform)
+IK_INPUT_SIZE = 304
+
+# ── Output buffer (ik_worker → status_cache) ─────────────────────
+IK_OUTPUT_JOINT_OFFSET = 0 # uint8[12] = 12 bytes
+IK_OUTPUT_CART_WRF_OFFSET = 12 # uint8[12] = 12 bytes
+IK_OUTPUT_CART_TRF_OFFSET = 24 # uint8[12] = 12 bytes
+IK_OUTPUT_VERSION_OFFSET = 36 # uint64 = 8 bytes
+IK_OUTPUT_SIZE = 44
+
+# ── SharedMemory tracking ────────────────────────────────────────
+# Python 3.13+ supports track=False to skip the resource_tracker daemon.
+# On pre-3.13, we must manually unregister to prevent orphaned tracker processes.
+SHM_EXTRA_KWARGS: dict = {"track": False} if sys.version_info >= (3, 13) else {}
+
+
+def unregister_shm(shm: SharedMemory) -> None:
+ """Unregister a SharedMemory segment from the resource_tracker (pre-3.13).
+
+ On Python 3.13+ with track=False this is a no-op. On older versions, this
+ prevents the resource_tracker daemon from lingering as an orphan process.
+ Skipped on Windows where the resource_tracker uses _posixsubprocess.
+ """
+ if sys.version_info >= (3, 13) or sys.platform == "win32":
+ return
+ from multiprocessing.resource_tracker import unregister
+
+ unregister("/" + shm.name, "shared_memory")
diff --git a/parol6/server/ik_worker.py b/parol6/server/ik_worker.py
new file mode 100644
index 0000000..d8d91a9
--- /dev/null
+++ b/parol6/server/ik_worker.py
@@ -0,0 +1,305 @@
+"""
+IK Worker subprocess.
+
+This module runs the computationally expensive IK enablement calculations
+in a separate process, communicating with the main process via shared memory.
+"""
+
+import logging
+import signal
+from multiprocessing.shared_memory import SharedMemory
+from multiprocessing.synchronize import Event
+
+import numpy as np
+from numba import njit
+
+from pinokin import se3_from_trans, se3_mul, se3_rx, se3_ry, se3_rz
+
+from parol6.server.ik_layout import (
+ IK_INPUT_Q_OFFSET,
+ IK_INPUT_T_OFFSET,
+ IK_INPUT_TOOL_OFFSET,
+ IK_OUTPUT_CART_TRF_OFFSET,
+ IK_OUTPUT_CART_WRF_OFFSET,
+ IK_OUTPUT_JOINT_OFFSET,
+ IK_OUTPUT_VERSION_OFFSET,
+ SHM_EXTRA_KWARGS,
+ unregister_shm,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def ik_enablement_worker_main(
+ input_shm_name: str,
+ output_shm_name: str,
+ shutdown_event: Event,
+ request_event: Event,
+) -> None:
+ """
+ Main entry point for IK enablement worker subprocess.
+
+ This worker waits for request signals, computes joint and cartesian
+ enablement, and writes results to the output shared memory.
+
+ Args:
+ input_shm_name: Name of input shared memory segment
+ output_shm_name: Name of output shared memory segment
+ shutdown_event: Event to signal shutdown
+ request_event: Event signaled when new request is available
+ """
+ # Ignore SIGINT in worker - main process handles shutdown via shutdown_event
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ from parol6.server import set_pdeathsig
+
+ set_pdeathsig()
+
+ # Attach to shared memory
+ input_shm = SharedMemory(name=input_shm_name, create=False, **SHM_EXTRA_KWARGS)
+ output_shm = SharedMemory(name=output_shm_name, create=False, **SHM_EXTRA_KWARGS)
+ unregister_shm(input_shm)
+ unregister_shm(output_shm)
+ assert input_shm.buf is not None
+ assert output_shm.buf is not None
+ input_mv = memoryview(input_shm.buf)
+ output_mv = memoryview(output_shm.buf)
+
+ # Zero-alloc input views: read directly from shared memory
+ q_rad = np.frombuffer(
+ input_shm.buf, dtype=np.float64, count=6, offset=IK_INPUT_Q_OFFSET
+ )
+ T_flat = np.frombuffer(
+ input_shm.buf, dtype=np.float64, count=16, offset=IK_INPUT_T_OFFSET
+ )
+ T_matrix = T_flat.reshape((4, 4)) # View, no copy
+
+ # Tool transform view for detecting tool changes
+ tool_T_flat = np.frombuffer(
+ input_shm.buf, dtype=np.float64, count=16, offset=IK_INPUT_TOOL_OFFSET
+ )
+ tool_T = tool_T_flat.reshape(4, 4)
+ last_tool_T = np.eye(4)
+ _eye4 = np.eye(4)
+
+ # Zero-alloc output views: write directly to shared memory
+ joint_en = np.frombuffer(
+ output_shm.buf, dtype=np.uint8, count=12, offset=IK_OUTPUT_JOINT_OFFSET
+ )
+ cart_en_wrf = np.frombuffer(
+ output_shm.buf, dtype=np.uint8, count=12, offset=IK_OUTPUT_CART_WRF_OFFSET
+ )
+ cart_en_trf = np.frombuffer(
+ output_shm.buf, dtype=np.uint8, count=12, offset=IK_OUTPUT_CART_TRF_OFFSET
+ )
+ version_view = np.frombuffer(
+ output_shm.buf, dtype=np.uint64, count=1, offset=IK_OUTPUT_VERSION_OFFSET
+ )
+
+ # Initialize outputs
+ joint_en[:] = 1
+ cart_en_wrf[:] = 1
+ cart_en_trf[:] = 1
+
+ # Initialize robot model in this process
+ import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+ from parol6.utils.ik import solve_ik
+
+ robot = PAROL6_ROBOT.robot
+ qlim = robot.qlim
+
+ response_version = 0
+
+ # Pre-allocate work array for cartesian targets
+ cart_targets = np.zeros((12, 4, 4), dtype=np.float64)
+
+ logger.debug("IK worker subprocess started")
+
+ try:
+ while not shutdown_event.is_set():
+ # Wait for request signal or timeout for shutdown check
+ if not request_event.wait(timeout=0.1):
+ continue # Timeout - loop back to check shutdown
+
+ request_event.clear()
+
+ # Apply tool transform if it changed since last request
+ if not np.array_equal(tool_T, last_tool_T):
+ last_tool_T[:] = tool_T
+ if np.allclose(tool_T, _eye4):
+ robot.clear_tool_transform()
+ else:
+ robot.set_tool_transform(tool_T.copy())
+ logger.info("IK worker: tool transform updated")
+
+ # Input data is already available via views (q_rad, T_matrix)
+
+ # Compute joint enablement
+ if qlim is not None:
+ _compute_joint_enable(q_rad, qlim, joint_en)
+ # else: joint_en stays all ones (pre-allocated default)
+
+ # Compute cartesian enablement for both frames
+ _compute_cart_enable(
+ T_matrix,
+ True,
+ q_rad,
+ robot,
+ solve_ik,
+ _AXIS_DIRS,
+ cart_targets,
+ cart_en_wrf,
+ )
+ _compute_cart_enable(
+ T_matrix,
+ False,
+ q_rad,
+ robot,
+ solve_ik,
+ _AXIS_DIRS,
+ cart_targets,
+ cart_en_trf,
+ )
+
+ # Output data written directly via views, just update version
+ response_version += 1
+ version_view[0] = response_version
+
+ except (EOFError, OSError, BrokenPipeError, KeyboardInterrupt):
+ # Expected when the parent shuts down: shared memory or the request
+ # event get torn down before our shutdown_event check fires.
+ pass
+ except Exception as e:
+ logger.exception("IK worker subprocess error: %s", e)
+ finally:
+ # Release numpy views before closing shared memory
+ del q_rad, T_flat, T_matrix, tool_T_flat, tool_T
+ del joint_en, cart_en_wrf, cart_en_trf, version_view
+
+ # Release memoryviews
+ try:
+ input_mv.release()
+ except BufferError:
+ pass
+ try:
+ output_mv.release()
+ except BufferError:
+ pass
+
+ input_shm.close()
+ output_shm.close()
+ logger.debug("IK worker subprocess exiting")
+
+
+@njit(cache=True)
+def _compute_joint_enable(
+ q_rad: np.ndarray,
+ qlim: np.ndarray,
+ out: np.ndarray,
+ delta_rad: float = np.radians(0.2),
+) -> None:
+ """
+ Compute per-joint +/- enable bits based on joint limits and a small delta.
+
+ Writes to out array (12 elements): [J1+, J1-, J2+, J2-, ..., J6+, J6-]
+ """
+ for i in range(6):
+ out[i * 2] = 1 if (q_rad[i] + delta_rad) <= qlim[1, i] else 0
+ out[i * 2 + 1] = 1 if (q_rad[i] - delta_rad) >= qlim[0, i] else 0
+
+
+# Axis directions: [dx, dy, dz, rx, ry, rz] for each of 12 axes
+# Order: X+, X-, Y+, Y-, Z+, Z-, RX+, RX-, RY+, RY-, RZ+, RZ-
+_AXIS_DIRS = np.array(
+ [
+ [1, 0, 0, 0, 0, 0], # X+
+ [-1, 0, 0, 0, 0, 0], # X-
+ [0, 1, 0, 0, 0, 0], # Y+
+ [0, -1, 0, 0, 0, 0], # Y-
+ [0, 0, 1, 0, 0, 0], # Z+
+ [0, 0, -1, 0, 0, 0], # Z-
+ [0, 0, 0, 1, 0, 0], # RX+
+ [0, 0, 0, -1, 0, 0], # RX-
+ [0, 0, 0, 0, 1, 0], # RY+
+ [0, 0, 0, 0, -1, 0], # RY-
+ [0, 0, 0, 0, 0, 1], # RZ+
+ [0, 0, 0, 0, 0, -1], # RZ-
+ ],
+ dtype=np.float64,
+)
+
+
+@njit(cache=True)
+def _compute_target_poses(
+ T: np.ndarray,
+ t_step: float,
+ r_step: float,
+ is_wrf: bool,
+ axis_dirs: np.ndarray,
+ targets: np.ndarray,
+) -> None:
+ """
+ Compute 12 target poses for cartesian enablement checking.
+
+ Args:
+ T: Current pose (4x4 matrix)
+ t_step: Translation step in meters
+ r_step: Rotation step in radians
+ is_wrf: True for world reference frame, False for tool reference frame
+ axis_dirs: (12, 6) array of axis directions [dx, dy, dz, rx, ry, rz]
+ targets: Output array (12, 4, 4) for target poses
+ """
+ dT = np.zeros((4, 4), dtype=np.float64)
+
+ for i in range(12):
+ d = axis_dirs[i]
+ dx, dy, dz = d[0] * t_step, d[1] * t_step, d[2] * t_step
+ rx, ry, rz = d[3] * r_step, d[4] * r_step, d[5] * r_step
+
+ # Build delta transform (only one of trans/rot is non-zero per axis)
+ if dx != 0.0 or dy != 0.0 or dz != 0.0:
+ se3_from_trans(dx, dy, dz, dT)
+ elif rx != 0.0:
+ se3_rx(rx, dT)
+ elif ry != 0.0:
+ se3_ry(ry, dT)
+ elif rz != 0.0:
+ se3_rz(rz, dT)
+
+ # Apply in specified frame
+ if is_wrf:
+ se3_mul(dT, T, targets[i])
+ else:
+ se3_mul(T, dT, targets[i])
+
+
+def _compute_cart_enable(
+ T: np.ndarray,
+ is_wrf: bool,
+ q_rad: np.ndarray,
+ robot,
+ solve_ik,
+ axis_dirs: np.ndarray,
+ targets: np.ndarray,
+ out: np.ndarray,
+ delta_mm: float = 0.5,
+ delta_deg: float = 0.5,
+) -> None:
+ """
+ Compute per-axis +/- enable bits for the given frame via small-step IK.
+
+ Writes to out array (12 elements) in axis order:
+ X+, X-, Y+, Y-, Z+, Z-, RX+, RX-, RY+, RY-, RZ+, RZ-
+ """
+ t_step = delta_mm / 1000.0
+ r_step = np.radians(delta_deg)
+
+ # Compute all 12 target poses in one numba call
+ _compute_target_poses(T, t_step, r_step, is_wrf, axis_dirs, targets)
+
+ # Check IK for each target
+ for i in range(12):
+ try:
+ ik = solve_ik(robot, targets[i], q_rad, quiet_logging=True)
+ out[i] = 1 if ik.success else 0
+ except Exception:
+ out[i] = 0
diff --git a/parol6/server/loop_timer.py b/parol6/server/loop_timer.py
new file mode 100644
index 0000000..adb3581
--- /dev/null
+++ b/parol6/server/loop_timer.py
@@ -0,0 +1,813 @@
+"""Loop timing with hybrid sleep + busy-loop for precise deadline scheduling."""
+
+import gc
+import time
+from typing import TYPE_CHECKING
+
+import numpy as np
+from numba import njit
+
+from parol6 import config as cfg
+
+if TYPE_CHECKING:
+ from typing import Self
+
+
# =============================================================================
# Constants for power-of-2 buffer operations
# =============================================================================
# ~5 seconds of data, rounded up to next power of 2 for fast modulo via bitmask
_TARGET_BUFFER_SECONDS = 5.0
_raw_size = int(cfg.CONTROL_RATE_HZ * _TARGET_BUFFER_SECONDS)
BUFFER_SIZE = 1 << (_raw_size - 1).bit_length()  # Next power of 2
# With a power-of-2 size, `idx & BUFFER_MASK` is equivalent to `idx % BUFFER_SIZE`.
BUFFER_MASK = BUFFER_SIZE - 1
+
+
+# =============================================================================
+# Numba-accelerated statistics functions (cached to disk)
+# =============================================================================
+
+
@njit(cache=True)
def _quickselect_partition(arr: np.ndarray, left: int, right: int) -> int:
    """Lomuto partition of arr[left:right+1] using arr[right] as the pivot.

    Elements <= pivot end up left of the returned index; the pivot is moved
    into its final sorted position, whose index is returned. Mutates ``arr``.
    """
    pivot_val = arr[right]
    store = left
    for scan in range(left, right):
        if arr[scan] <= pivot_val:
            arr[store], arr[scan] = arr[scan], arr[store]
            store += 1
    arr[store], arr[right] = arr[right], arr[store]
    return store
+
+
@njit(cache=True)
def _quickselect(arr: np.ndarray, k: int) -> float:
    """Return the k-th smallest element (0-based) of ``arr``.

    Iterative quickselect; partially reorders ``arr`` in place.
    """
    lo = 0
    hi = len(arr) - 1
    while lo < hi:
        p = _quickselect_partition(arr, lo, hi)
        if p == k:
            break
        if p < k:
            lo = p + 1
        else:
            hi = p - 1
    return arr[k]
+
+
@njit(cache=True)
def _compute_phase_stats(
    samples: np.ndarray, scratch: np.ndarray, n: int
) -> tuple[float, float, float]:
    """Compute phase stats: mean, max, p99. Uses pre-allocated scratch buffer.

    Args:
        samples: Buffer of duration samples (only the first n are valid).
        scratch: Scratch buffer (len >= n); samples are copied here so the
            quickselect does not reorder the caller's buffer.
        n: Number of valid samples.

    Returns:
        (mean, max, p99); all zeros when n == 0.
    """
    if n == 0:
        return 0.0, 0.0, 0.0

    # Compute mean and max in single pass
    total = 0.0
    max_val = samples[0]
    for i in range(n):
        total += samples[i]
        if samples[i] > max_val:
            max_val = samples[i]
    mean = total / n

    # p99 via quickselect (copy to scratch first to preserve original).
    # For small n a 99th percentile is not meaningful; fall back to max.
    if n >= 20:
        for i in range(n):
            scratch[i] = samples[i]
        k = int(n * 0.99)
        p99 = _quickselect(scratch[:n], k)
    else:
        p99 = max_val

    return mean, max_val, p99
+
+
@njit(cache=True)
def _compute_loop_stats(
    samples: np.ndarray, scratch: np.ndarray, n: int
) -> tuple[float, float, float, float, float, float]:
    """Compute loop stats using single-pass Welford's algorithm for mean+std.

    Uses pre-allocated scratch buffer for percentile computation.
    Only one copy to scratch (p99 first, then p95 on same data).

    Args:
        samples: Buffer of period samples (only the first n are valid).
        scratch: Scratch buffer (len >= n) used for quickselect.
        n: Number of valid samples.

    Returns:
        (mean, std, min, max, p95, p99); all zeros when n == 0.
    """
    if n == 0:
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0

    # Single-pass Welford's algorithm for mean and variance + min/max
    mean = 0.0
    m2 = 0.0  # Sum of squared differences
    min_val = samples[0]
    max_val = samples[0]

    for i in range(n):
        x = samples[i]
        # Welford's online algorithm
        delta = x - mean
        mean += delta / (i + 1)
        delta2 = x - mean
        m2 += delta * delta2
        # Track min/max
        if x < min_val:
            min_val = x
        if x > max_val:
            max_val = x

    # Population std. (The former `if n > 0` guard was dead code: n == 0
    # already returned above.)
    std = np.sqrt(m2 / n)

    # p95 and p99 via quickselect with single copy
    if n >= 20:
        # Copy to scratch once
        for i in range(n):
            scratch[i] = samples[i]

        # Compute p99 first (higher index)
        k99 = int(n * 0.99)
        p99 = _quickselect(scratch[:n], k99)

        # Compute p95 on same array: partitioning only permutes the values,
        # so a second selection over the same multiset is still correct.
        k95 = int(n * 0.95)
        p95 = _quickselect(scratch[:n], k95)
    else:
        p95 = max_val
        p99 = max_val

    return mean, std, min_val, max_val, p95, p99
+
+
+# =============================================================================
+# EventRateMetrics - track rate of sporadic events
+# =============================================================================
+
+
@njit(cache=True)
def _compute_event_rate(
    buffer: np.ndarray, count: int, now: float, max_age_s: float
) -> float:
    """Compute event rate from timestamps in buffer.

    Args:
        buffer: Circular buffer of event timestamps
        count: Number of valid entries in buffer
        now: Current timestamp
        max_age_s: Only count events within this window

    Returns:
        Events per second, or 0.0 if insufficient data.
    """
    if count < 2:
        return 0.0

    cutoff = now - max_age_s
    events_in_window = 0
    oldest = now
    newest = 0.0

    # Single pass: count events inside the window and find its extremes.
    for i in range(count):
        ts = buffer[i]
        if ts >= cutoff:
            events_in_window += 1
            if ts < oldest:
                oldest = ts
            if ts > newest:
                newest = ts

    if events_in_window < 2:
        return 0.0

    # Guard against a degenerate span (all events at ~the same instant).
    time_span = newest - oldest
    if time_span < 0.001:
        return 0.0

    # N events spanning time_span seconds => N-1 inter-event intervals.
    return (events_in_window - 1) / time_span
+
+
class EventRateMetrics:
    """Track rate of sporadic events (e.g., commands) with proper decay.

    Unlike LoopMetrics, which measures fixed-rate loop periods, this class
    handles events that may arrive in bursts or not at all: once no event
    has been seen within the staleness window, the reported rate is 0.

    Timestamps live in a power-of-2 circular buffer so the rate computation
    can be delegated to a numba-compiled helper.
    """

    __slots__ = (
        "_buffer",
        "_buffer_idx",
        "_buffer_count",
        "_buffer_mask",
        "_last_event_time",
        "event_count",
    )

    def __init__(self, buffer_size: int = 64) -> None:
        """Initialize with a timestamp buffer.

        Args:
            buffer_size: Number of timestamps to retain. Rounded up to power of 2.
        """
        # Grow to the next power of 2 so wrap-around is a cheap bitmask.
        capacity = 1
        while capacity < buffer_size:
            capacity <<= 1
        self._buffer = np.zeros(capacity, dtype=np.float64)
        self._buffer_mask = capacity - 1
        self._buffer_idx = 0
        self._buffer_count = 0
        self._last_event_time = 0.0
        self.event_count = 0

    def record(self, now: float) -> None:
        """Record an event at the given timestamp."""
        self._last_event_time = now
        self.event_count += 1
        idx = self._buffer_idx
        self._buffer[idx] = now
        self._buffer_idx = (idx + 1) & self._buffer_mask
        if self._buffer_count < len(self._buffer):
            self._buffer_count += 1

    def rate_hz(self, now: float, max_age_s: float = 3.0) -> float:
        """Calculate event rate from recent events.

        Args:
            now: Current timestamp (perf_counter)
            max_age_s: Only count events within this window. Also used to
                determine staleness - returns 0 if no events within max_age_s.

        Returns:
            Events per second, or 0.0 if no recent events.
        """
        # Cheap staleness/underflow check avoids the numba call entirely.
        stale = now - self._last_event_time > max_age_s
        if stale or self._buffer_count < 2:
            return 0.0
        return _compute_event_rate(self._buffer, self._buffer_count, now, max_age_s)

    def reset(self) -> None:
        """Reset all state."""
        self._buffer.fill(0.0)
        self._buffer_idx = 0
        self._buffer_count = 0
        self._last_event_time = 0.0
        self.event_count = 0
+
+
class GCTracker:
    """Track garbage collection frequency and duration, with optional deferred collection.

    Registers a callback with gc.callbacks to record GC events.
    Provides rate (collections/sec) and duration statistics.

    When ``take_control()`` is called, automatic GC is disabled and the caller
    is responsible for calling ``collect_deferred()`` each loop iteration.
    Collections are scheduled during slack time to avoid disrupting the hot path.
    """

    __slots__ = (
        "_timestamps",  # circular buffer: GC finish times
        "_durations",  # circular buffer: GC durations (parallel indices)
        "_idx",  # monotonically increasing write index
        "_count",  # valid entries (saturates at buffer size)
        "_buffer_mask",
        "_gc_start",  # perf_counter at last "start" callback
        "_last_duration",
        "_controlled",  # True while automatic GC is disabled
        "total_count",
        "total_time",
    )

    def __init__(self, buffer_size: int = 64) -> None:
        """Create the tracker and hook into ``gc.callbacks``.

        Args:
            buffer_size: Number of recent GC events to retain (rounded up
                to a power of 2).
        """
        # Round up to power of 2
        size = 1
        while size < buffer_size:
            size <<= 1
        self._timestamps = np.zeros(size, dtype=np.float64)
        self._durations = np.zeros(size, dtype=np.float64)
        self._buffer_mask = size - 1
        self._idx = 0
        self._count = 0
        self._gc_start = 0.0
        self._last_duration = 0.0
        self._controlled = False
        self.total_count = 0
        self.total_time = 0.0
        gc.callbacks.append(self._callback)

    def _callback(self, phase: str, info: dict) -> None:
        # Invoked by the interpreter around every collection.
        if phase == "start":
            self._gc_start = time.perf_counter()
        elif phase == "stop":
            now = time.perf_counter()
            duration = now - self._gc_start
            self._last_duration = duration
            self.total_count += 1
            self.total_time += duration
            idx = self._idx & self._buffer_mask
            self._timestamps[idx] = now
            self._durations[idx] = duration
            self._idx += 1
            if self._count < len(self._timestamps):
                self._count += 1

    def stats(self, now: float, max_age_s: float = 3.0) -> tuple[float, float]:
        """Get windowed GC stats: (rate_hz, mean_duration_ms).

        Both values are computed from events within max_age_s window.
        Returns (0.0, 0.0) if no recent GC events.

        NOTE(review): a single event inside the window also yields (0.0, 0.0)
        because a rate needs two events — confirm that is intended for the
        mean-duration half as well.
        """
        if self._count == 0:
            return 0.0, 0.0
        # Staleness fast path: most recent event is outside the window.
        last_ts = self._timestamps[(self._idx - 1) & self._buffer_mask]
        if now - last_ts > max_age_s:
            return 0.0, 0.0

        # Compute rate and windowed mean duration
        cutoff = now - max_age_s
        total_dur = 0.0
        count_in_window = 0
        for i in range(self._count):
            ts = self._timestamps[i]
            if ts >= cutoff:
                total_dur += self._durations[i]
                count_in_window += 1

        if count_in_window < 2:
            return 0.0, 0.0

        rate = _compute_event_rate(self._timestamps, self._count, now, max_age_s)
        mean_ms = (total_dur / count_in_window) * 1000.0
        return rate, mean_ms

    def recent_duration_ms(self) -> float:
        """Duration of most recent GC in milliseconds."""
        return self._last_duration * 1000.0

    def take_control(self) -> None:
        """Disable automatic GC. Caller must call collect_deferred() each iteration."""
        gc.disable()
        gc.collect()  # clean slate before entering controlled mode
        self._controlled = True

    def release_control(self) -> None:
        """Re-enable automatic GC and run a full collection."""
        if self._controlled:
            self._controlled = False
            gc.enable()
            gc.collect()

    def collect_deferred(self, slack_s: float, tick_count: int) -> None:
        """Run a deferred GC collection if there is enough slack time.

        Call once per loop iteration after all work phases complete.

        Strategy:
        - gen-0 every tick if slack > 0.5ms (~50-100µs typical)
        - gen-1 every 1000 ticks (~10s) if slack > 2ms
        - gen-2 every 10000 ticks (~100s) if slack > 3ms
        """
        # Not in controlled mode, or too little slack: do nothing this tick.
        if not self._controlled or slack_s < 0.0005:
            return
        if tick_count % 10000 == 0 and slack_s > 0.003:
            gc.collect(2)
        elif tick_count % 1000 == 0 and slack_s > 0.002:
            gc.collect(1)
        else:
            gc.collect(0)

    def shutdown(self) -> None:
        """Remove callback and re-enable GC on shutdown."""
        self.release_control()
        try:
            gc.callbacks.remove(self._callback)
        except ValueError:
            # Callback already removed (e.g. double shutdown) — harmless.
            pass
+
+
+# =============================================================================
+# PhaseMetrics - regular Python class (no jitclass overhead)
+# =============================================================================
+
+
class PhaseMetrics:
    """Rolling statistics for a single phase.

    Uses pre-allocated numpy arrays and calls @njit helper functions
    for the heavy computation.
    """

    __slots__ = (
        "_buffer",  # circular buffer of duration samples
        "_scratch",  # scratch space for percentile computation
        "_buffer_idx",  # next write position
        "_buffer_count",  # valid samples (saturates at BUFFER_SIZE)
        "last_s",  # most recent sample, seconds
        "mean_s",  # rolling mean, seconds
        "max_s",  # rolling max, seconds
        "p99_s",  # rolling 99th percentile, seconds
    )

    def __init__(self) -> None:
        self._buffer = np.zeros(BUFFER_SIZE, dtype=np.float64)
        self._scratch = np.zeros(BUFFER_SIZE, dtype=np.float64)
        self._buffer_idx = 0
        self._buffer_count = 0
        self.last_s = 0.0
        self.mean_s = 0.0
        self.max_s = 0.0
        self.p99_s = 0.0

    def record(self, duration: float) -> None:
        """Record a duration sample."""
        self.last_s = duration
        self._buffer[self._buffer_idx] = duration
        # Power-of-2 buffer: bitmask wrap instead of modulo.
        self._buffer_idx = (self._buffer_idx + 1) & BUFFER_MASK
        if self._buffer_count < BUFFER_SIZE:
            self._buffer_count += 1

    def compute_stats(self) -> None:
        """Compute statistics from buffer."""
        # No-op until at least one sample has been recorded.
        if self._buffer_count == 0:
            return
        mean, max_val, p99 = _compute_phase_stats(
            self._buffer, self._scratch, self._buffer_count
        )
        self.mean_s = mean
        self.max_s = max_val
        self.p99_s = p99
+
+
class PhaseTimer:
    """Timer for measuring durations of multiple named phases in a loop.

    Usage:
        timer = PhaseTimer(["read", "execute", "write"])
        while True:
            with timer.phase("read"):
                do_read()
            with timer.phase("execute"):
                do_execute()
            timer.tick()  # Compute stats periodically

    Note: only one phase is timed at a time; calling start() again before
    stop() discards the earlier, unfinished timing.
    """

    def __init__(self, phase_names: list[str], stats_interval: int = 50) -> None:
        """
        Args:
            phase_names: Names of the phases to track.
            stats_interval: Recompute rolling stats every N tick() calls.
        """
        self._phases: dict[str, PhaseMetrics] = {
            name: PhaseMetrics() for name in phase_names
        }
        self._stats_interval = stats_interval
        self._tick_count = 0
        self._current_phase: str | None = None
        self._phase_start: float = 0.0

    @property
    def phases(self) -> dict[str, PhaseMetrics]:
        """Access phase metrics by name."""
        return self._phases

    def start(self, phase: str) -> None:
        """Start timing a phase."""
        self._current_phase = phase
        self._phase_start = time.perf_counter()

    def stop(self) -> float:
        """Stop timing current phase and record duration. Returns duration."""
        # stop() without a matching start() records nothing.
        if self._current_phase is None:
            return 0.0
        duration = time.perf_counter() - self._phase_start
        self._phases[self._current_phase].record(duration)
        self._current_phase = None
        return duration

    def phase(self, name: str) -> "PhaseContext":
        """Context manager for timing a phase."""
        return PhaseContext(self, name)

    def tick(self) -> None:
        """Call once per loop iteration to compute stats periodically."""
        self._tick_count += 1
        if self._tick_count % self._stats_interval == 0:
            for p in self._phases.values():
                p.compute_stats()

    def get_summary(self) -> dict[str, dict[str, float]]:
        """Get summary of all phases as dict (values in milliseconds)."""
        return {
            name: {
                "mean_ms": p.mean_s * 1000,
                "max_ms": p.max_s * 1000,
                "p99_ms": p.p99_s * 1000,
            }
            for name, p in self._phases.items()
        }
+
+
class PhaseContext:
    """Context manager that times one named phase on a PhaseTimer.

    Entering starts the phase; exiting stops it and records the duration,
    whether or not the body raised.
    """

    __slots__ = ("_timer", "_phase")

    def __init__(self, timer: PhaseTimer, phase: str):
        self._phase = phase
        self._timer = timer

    def __enter__(self) -> "Self":
        # Begin timing as the with-block is entered.
        self._timer.start(self._phase)
        return self

    def __exit__(self, *args: object) -> None:
        # Always record the elapsed time, even on exception.
        self._timer.stop()
+
+
+# =============================================================================
+# LoopMetrics
+# =============================================================================
+
+
class LoopMetrics:
    """Metrics tracked by the loop timer with rolling statistics.

    Provides unified timing, logging, and degradation checking across subsystems.
    Use configure() to set target period, then call tick() each iteration.

    Uses pre-allocated numpy arrays and calls @njit helper functions
    for the heavy computation.
    """

    __slots__ = (
        "loop_count",
        "overrun_count",
        "mean_period_s",
        "std_period_s",
        "min_period_s",
        "max_period_s",
        "p95_period_s",
        "p99_period_s",
        # Overshoot tracking (how much we miss the deadline by)
        "mean_overshoot_s",
        "max_overshoot_s",
        "p99_overshoot_s",
        "_buffer",
        "_scratch",
        "_buffer_idx",
        "_buffer_count",
        "_overshoot_buffer",
        "_overshoot_idx",
        "_overshoot_count",
        "_target_period_s",
        "_prev_time",
        "_last_log_time",
        "_last_warn_time",
        "_stats_interval",
        "_start_time",
        "_grace_period_s",
    )

    def __init__(self) -> None:
        # Counters
        self.loop_count = 0
        self.overrun_count = 0
        # Rolling period statistics (seconds)
        self.mean_period_s = 0.0
        self.std_period_s = 0.0
        self.min_period_s = 0.0
        self.max_period_s = 0.0
        self.p95_period_s = 0.0
        self.p99_period_s = 0.0
        # Overshoot stats
        self.mean_overshoot_s = 0.0
        self.max_overshoot_s = 0.0
        self.p99_overshoot_s = 0.0
        self._buffer = np.zeros(BUFFER_SIZE, dtype=np.float64)
        self._scratch = np.zeros(BUFFER_SIZE, dtype=np.float64)
        self._buffer_idx = 0
        self._buffer_count = 0
        self._overshoot_buffer = np.zeros(BUFFER_SIZE, dtype=np.float64)
        self._overshoot_idx = 0
        self._overshoot_count = 0
        self._target_period_s = 0.0
        self._prev_time = 0.0
        self._last_log_time = 0.0
        self._last_warn_time = 0.0
        self._stats_interval = 50
        # NOTE(review): configure() defaults the grace period to 15.0s; this
        # 10.0s only applies when configure() is never called — confirm intended.
        self._grace_period_s = 10.0

    def configure(
        self, target_period_s: float, stats_interval: int, grace_period_s: float = 15.0
    ) -> None:
        """Configure target period, stats interval, and startup grace period.

        Args:
            target_period_s: Target loop period in seconds
            stats_interval: How often to compute rolling statistics (in loop iterations)
            grace_period_s: Duration after mark_started() during which overbudget
                warnings are suppressed. Defaults to 15s to allow for JIT warmup
                and process pool initialization.
        """
        self._target_period_s = target_period_s
        self._stats_interval = stats_interval
        self._grace_period_s = grace_period_s

    def mark_started(self, now: float) -> None:
        """Mark the start time for grace period calculation."""
        self._start_time = now

    def tick(self, now: float) -> None:
        """Call once per iteration. Auto-records period and computes stats periodically."""
        # First call only establishes the baseline timestamp.
        if self._prev_time > 0:
            self.record_period(now - self._prev_time)
        self._prev_time = now
        self.loop_count += 1
        if self.loop_count % self._stats_interval == 0:
            self.compute_stats()

    def should_log(self, now: float, interval: float) -> bool:
        """Returns True and updates timestamp if interval has passed."""
        if now - self._last_log_time >= interval:
            self._last_log_time = now
            return True
        return False

    def check_degraded(
        self, now: float, threshold: float, rate_limit: float
    ) -> tuple[bool, float]:
        """Check if p99 exceeds target by threshold. Returns (should_warn, degradation_pct).

        Rate-limited to once per rate_limit seconds. Suppressed during startup grace period.
        """
        # No target configured, or no stats computed yet.
        if self._target_period_s <= 0 or self.p99_period_s <= 0:
            return False, 0.0
        # Suppress warnings during startup grace period
        if self._start_time > 0 and (now - self._start_time) < self._grace_period_s:
            return False, 0.0
        if now - self._last_warn_time < rate_limit:
            return False, 0.0
        if self.p99_period_s > self._target_period_s * (1.0 + threshold):
            degradation = (self.p99_period_s / self._target_period_s - 1.0) * 100.0
            self._last_warn_time = now
            return True, degradation
        return False, 0.0

    def record_period(self, period: float) -> None:
        """Record a period sample into the circular buffer."""
        self._buffer[self._buffer_idx] = period
        self._buffer_idx = (self._buffer_idx + 1) & BUFFER_MASK
        if self._buffer_count < BUFFER_SIZE:
            self._buffer_count += 1

    def record_overshoot(self, overshoot: float) -> None:
        """Record busy-loop overshoot (time past deadline) into buffer."""
        self._overshoot_buffer[self._overshoot_idx] = overshoot
        self._overshoot_idx = (self._overshoot_idx + 1) & BUFFER_MASK
        if self._overshoot_count < BUFFER_SIZE:
            self._overshoot_count += 1

    def compute_stats(self) -> None:
        """Compute statistics from buffers."""
        if self._buffer_count > 0:
            mean, std, min_val, max_val, p95, p99 = _compute_loop_stats(
                self._buffer, self._scratch, self._buffer_count
            )
            self.mean_period_s = mean
            self.std_period_s = std
            self.min_period_s = min_val
            self.max_period_s = max_val
            self.p95_period_s = p95
            self.p99_period_s = p99

        # Compute overshoot stats using the simpler phase stats function
        # (the scratch buffer is safely reused — each call copies into it).
        if self._overshoot_count > 0:
            mean, max_val, p99 = _compute_phase_stats(
                self._overshoot_buffer, self._scratch, self._overshoot_count
            )
            self.mean_overshoot_s = mean
            self.max_overshoot_s = max_val
            self.p99_overshoot_s = p99

    def reset_stats(self, include_counters: bool = False) -> None:
        """Reset rolling statistics.

        Args:
            include_counters: If True, also reset overrun_count (loop_count is preserved).
        """
        self._buffer.fill(0.0)
        self._buffer_idx = 0
        self._buffer_count = 0
        self.mean_period_s = 0.0
        self.std_period_s = 0.0
        self.min_period_s = 0.0
        self.max_period_s = 0.0
        self.p95_period_s = 0.0
        self.p99_period_s = 0.0
        # Reset overshoot stats
        self._overshoot_buffer.fill(0.0)
        self._overshoot_idx = 0
        self._overshoot_count = 0
        self.mean_overshoot_s = 0.0
        self.max_overshoot_s = 0.0
        self.p99_overshoot_s = 0.0
        if include_counters:
            self.overrun_count = 0
+
def format_hz_summary(m: LoopMetrics) -> str:
    """Render metrics as 'XXX.XHz σ=X.XXms p99=X.XXms' for log output."""
    mean_s = m.mean_period_s
    if mean_s <= 0:
        # No samples yet — emit an all-zero placeholder.
        return "0.0Hz σ=0.00ms p99=0.00ms"
    rate_hz = 1.0 / mean_s
    std_ms = m.std_period_s * 1000
    p99_ms = m.p99_period_s * 1000
    return f"{rate_hz:.1f}Hz σ={std_ms:.2f}ms p99={p99_ms:.2f}ms"
+
+
class LoopTimer:
    """Deadline-based loop timing with hybrid sleep + busy-loop.

    Uses time.sleep() for most of the wait time to reduce CPU usage,
    then switches to a busy-loop for the final portion to achieve
    precise timing without OS scheduling jitter.
    """

    def __init__(
        self,
        interval_s: float,
        busy_threshold_s: float | None = None,
        stats_interval: int = 50,
    ) -> None:
        """Initialize the loop timer.

        Args:
            interval_s: Target loop interval in seconds.
            busy_threshold_s: Time before deadline to switch from sleep to busy-loop.
                Default from PAROL6_BUSY_THRESHOLD_MS env var (1ms).
            stats_interval: Compute stats every N loops (default 50 = 5Hz at 250Hz loop).
        """
        self._interval = interval_s
        self._busy_threshold = (
            busy_threshold_s
            if busy_threshold_s is not None
            else cfg.BUSY_THRESHOLD_MS / 1000.0
        )
        self._stats_interval = stats_interval
        self._next_deadline = 0.0
        self._prev_t = 0.0
        self.metrics = LoopMetrics()
        self.metrics.configure(interval_s, stats_interval)

    @property
    def interval(self) -> float:
        """Target loop interval in seconds."""
        return self._interval

    def start(self) -> None:
        """Initialize timing at loop start. Call once before entering the loop."""
        now = time.perf_counter()
        self._next_deadline = now
        self._prev_t = now

    def time_to_next_deadline(self) -> float:
        """Remaining seconds until the next tick deadline.

        Call after work phases complete, before ``wait_for_next_tick()``.
        Positive means ahead of schedule (slack time available).
        """
        # _next_deadline is the previous tick's deadline; wait_for_next_tick()
        # advances it by one interval on entry, so the upcoming deadline is
        # _next_deadline + _interval.
        return (self._next_deadline + self._interval) - time.perf_counter()

    def wait_for_next_tick(self) -> None:
        """Wait until next deadline using hybrid sleep + busy-loop.

        Updates metrics (loop_count, period, stats) and handles overruns.
        Call at the end of each loop iteration.
        """
        self.metrics.loop_count += 1

        # Compute stats periodically (not every loop)
        if self.metrics.loop_count % self._stats_interval == 0:
            self.metrics.compute_stats()

        # Advance deadline
        self._next_deadline += self._interval
        sleep_time = self._next_deadline - time.perf_counter()

        if sleep_time > self._busy_threshold:
            # Sleep for most of the time, leaving headroom for busy-loop
            time.sleep(sleep_time - self._busy_threshold)

        if sleep_time > 0:
            # Busy-loop for remaining time (precise timing)
            while time.perf_counter() < self._next_deadline:
                pass
            # Track how much we overshot the deadline
            now = time.perf_counter()
            self.metrics.record_overshoot(now - self._next_deadline)
        else:
            # Overrun - reset deadline to avoid perpetual catch-up
            self.metrics.overrun_count += 1
            now = time.perf_counter()
            self._next_deadline = now

        # Measure period from deadline-to-deadline (not affected by work jitter)
        if self._prev_t > 0:
            self.metrics.record_period(now - self._prev_t)
        self._prev_t = now
diff --git a/parol6/server/motion_planner.py b/parol6/server/motion_planner.py
new file mode 100644
index 0000000..82fe06f
--- /dev/null
+++ b/parol6/server/motion_planner.py
@@ -0,0 +1,704 @@
+"""Motion pipeline: async trajectory planning in a worker process.
+
+The MotionPlanner offloads trajectory computation (TOPPRA, IK chains) from
+the 100Hz control loop to a separate process. Commands flow in via
+``command_queue`` and computed segments flow back via ``segment_queue``.
+
+Non-trajectory motion commands (Home, SelectTool, Gripper, Checkpoint, Delay)
+are forwarded as ``InlineSegment`` tokens so that the SegmentPlayer can
+execute them in the control loop while preserving command ordering.
+
+TrajectoryPlanner holds the shared planning logic used by both the real-time
+PlannerWorker subprocess and the DryRunRobotClient (diagnostic mode).
+"""
+
+from __future__ import annotations
+
+import logging
+import multiprocessing
+import queue
+import signal
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Union, cast
+
+import numpy as np
+
+from parol6.protocol.wire import HomeCmd, SelectToolCmd, SetTcpOffsetCmd, ToolActionCmd
+from parol6.server.command_executor import _format_cmd_params
+from parol6.utils.error_catalog import RobotError, extract_robot_error
+from parol6.utils.error_codes import ErrorCode
+
+from parol6.server.state import ControllerState
+
+if TYPE_CHECKING:
+ from multiprocessing.synchronize import Event as EventType
+ from parol6.commands.base import TrajectoryMoveCommandBase
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Segment types (planner → player via segment_queue)
+# ---------------------------------------------------------------------------
+
+
@dataclass
class TrajectorySegment:
    """Pre-computed trajectory waypoints ready for 100Hz playback."""

    command_index: int  # index of the originating command
    trajectory_steps: np.ndarray  # (M, 6) int32
    duration: float  # planned duration of the segment
    command_name: str = ""  # originating command class name
    action_params: str = ""  # formatted parameter string for display
    # Indices of trailing commands merged into this blended segment.
    blend_consumed_indices: list[int] = field(default_factory=list)
+
+
@dataclass
class InlineSegment:
    """Forwarded non-trajectory command for execution in the control loop."""

    command_index: int  # index of the originating command
    params: object  # wire struct (msgspec.Struct — picklable)
+
+
@dataclass
class ErrorSegment:
    """Planning failure — surfaces error through the pipeline."""

    command_index: int  # index of the command whose planning failed
    error: RobotError
    # presumably populated only in diagnostic mode — confirm with _emit_error
    cartesian_path: np.ndarray | None = None  # (N, 6) full TCP path
    ik_valid: np.ndarray | None = None  # (N,) per-pose bool
+
+
# Everything the planner can emit on segment_queue, discriminated by type.
Segment = Union[TrajectorySegment, InlineSegment, ErrorSegment]
+
+# ---------------------------------------------------------------------------
+# Message types (main → planner via command_queue)
+# ---------------------------------------------------------------------------
+
+
@dataclass
class PlanCommand:
    """Submit a motion command for planning or forwarding."""

    command_index: int  # ordering index assigned by the sender
    params: object  # wire struct (MoveJCmd, SelectToolCmd, HomeCmd, …)
    position_in: np.ndarray | None = (
        None  # current Position_in (None = use planner internal)
    )
+
@dataclass
class SyncPosition:
    """Update the planner's internal position tracking."""

    position_in: np.ndarray  # joint position snapshot (steps, like PlannerState.Position_in)
+
@dataclass
class SyncProfile:
    """Update the planner's motion profile setting."""

    profile: str  # e.g. "TOPPRA" (the PlannerState.motion_profile default)
+
+
@dataclass
class SyncTool:
    """Update the planner's tool state (e.g. after E-stop cancel)."""

    tool_name: str  # tool identifier (e.g. "NONE")
    variant_key: str = ""  # tool variant selector; empty for default
    tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0)  # TCP offset (x, y, z)
+
+
@dataclass
class CancelAll:
    """Clear the planner's internal state and discard pending work."""

    # No payload — the message type itself is the signal.
+
+
# All message types accepted on the planner's command_queue.
PlannerMessage = Union[PlanCommand, SyncPosition, SyncProfile, SyncTool, CancelAll]
+
+
+# ---------------------------------------------------------------------------
+# Lightweight state for planner subprocess
+# ---------------------------------------------------------------------------
+
+
@dataclass
class PlannerState:
    """Minimal state for trajectory computation.

    Carries the fields that trajectory ``do_setup()`` reads: joint position,
    motion profile, and FK cache. Tool state is tracked as a string for
    change-detection; the actual tool transform lives on ``PAROL6_ROBOT.robot``.
    """

    # Joint position in steps (int32); advanced to each segment's final waypoint.
    Position_in: np.ndarray = field(default_factory=lambda: np.zeros(6, dtype=np.int32))
    motion_profile: str = "TOPPRA"
    current_tool: str = "NONE"
    current_tool_variant: str = ""
    tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0)
    # Set False in diagnostic mode so batch IK keeps solving past failures.
    stop_on_failure: bool = True

    # Forward kinematics cache (same layout as ControllerState — needed by
    # get_fkine_se3/ensure_fkine_updated called from cartesian do_setup)
    _fkine_last_pos_in: np.ndarray = field(
        default_factory=lambda: np.zeros(6, dtype=np.int32)
    )
    _fkine_last_tool_name: str = ""
    _fkine_last_tool_variant: str = ""
    _fkine_last_tcp_offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
    _fkine_mat: np.ndarray = field(
        default_factory=lambda: np.asfortranarray(np.eye(4, dtype=np.float64))
    )
    _fkine_flat_mm: np.ndarray = field(
        default_factory=lambda: np.zeros(16, dtype=np.float64)
    )
    _fkine_q_rad: np.ndarray = field(
        default_factory=lambda: np.zeros(6, dtype=np.float64)
    )
+
+
+# ---------------------------------------------------------------------------
+# TrajectoryPlanner — shared planning logic
+# ---------------------------------------------------------------------------
+
+
+class TrajectoryPlanner:
+ """Core trajectory planning logic shared by PlannerWorker and DryRunRobotClient.
+
+ Dispatches commands to trajectory or inline handlers, manages blend buffering,
+ and emits ErrorSegment on failure instead of raising.
+
+ Args:
+ diagnostic: If True, sets stop_on_failure=False on PlannerState so that
+ batch_ik solves all poses (needed for per-pose red/green visualization).
+ """
+
+ def __init__(self, diagnostic: bool = False) -> None:
+ import parol6.PAROL6_ROBOT as PAROL6_ROBOT # noqa: N811
+ from parol6.commands.base import TrajectoryMoveCommandBase
+ from parol6.config import MAX_BLEND_LOOKAHEAD, deg_to_steps
+ from parol6.server.command_registry import CommandRegistry
+
+ self.state = PlannerState()
+ if diagnostic:
+ self.state.stop_on_failure = False
+ self._diagnostic = diagnostic
+ self._registry = CommandRegistry()
+ self._trajectory_base: type[TrajectoryMoveCommandBase] = (
+ TrajectoryMoveCommandBase
+ )
+ self._max_blend_lookahead = MAX_BLEND_LOOKAHEAD
+ self._robot_module = PAROL6_ROBOT
+ self._blend_buffer: list[tuple[int, TrajectoryMoveCommandBase]] = []
+ self._output: list[Segment] = []
+
+ # Pre-compute home position in steps
+ self._home_steps = np.zeros(6, dtype=np.int32)
+ _home_deg = np.array(PAROL6_ROBOT.joint.standby_deg, dtype=np.float64)
+ deg_to_steps(_home_deg, self._home_steps)
+
+ def process(self, params: object, command_index: int = 0) -> list[Segment]:
+ """Plan a single command. Returns list of resulting segments."""
+ self._output.clear()
+
+ cmd_class = self._registry.get_command_for_struct(type(params))
+ if cmd_class is not None and issubclass(cmd_class, self._trajectory_base):
+ self._handle_trajectory(command_index, params, cmd_class) # type: ignore[invalid-argument-type, ty:invalid-argument-type]
+ else:
+ # Tool actions run concurrently with motion — don't flush blend
+ if not isinstance(params, ToolActionCmd) and self._blend_buffer:
+ self._flush_blend()
+ self._handle_inline(command_index, params)
+
+ return list(self._output)
+
+ def flush(self) -> list[Segment]:
+ """Flush any pending blend buffer. Returns resulting segments."""
+ self._output.clear()
+ if self._blend_buffer:
+ self._flush_blend()
+ return list(self._output)
+
+ def cancel(self) -> None:
+ """Clear blend buffer."""
+ self._blend_buffer.clear()
+
+ def sync_tool(
+ self,
+ tool_name: str,
+ variant_key: str = "",
+ tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0),
+ ) -> None:
+ """Sync tool state (e.g. after E-stop cancel)."""
+ self.state.current_tool = tool_name
+ self.state.current_tool_variant = variant_key
+ self.state.tcp_offset_m = tcp_offset_m
+ self._robot_module.apply_tool(
+ tool_name, variant_key=variant_key, tcp_offset_m=tcp_offset_m
+ )
+
+ # -- trajectory handling --
+
    def _handle_trajectory(
        self,
        command_index: int,
        params: object,
        cmd_class: type[TrajectoryMoveCommandBase],
    ) -> None:
        """Buffer for blending or compute trajectory immediately.

        Commands with a positive blend radius accumulate in the blend buffer
        (bounded by the lookahead limit). A non-blended command that arrives
        while the buffer is non-empty joins it as the final blend target and
        triggers a flush; otherwise it is planned on its own.
        """
        cmd = cmd_class(params)

        if cmd.blend_radius > 0:
            # Blended move: accumulate; flush once lookahead is exceeded.
            self._blend_buffer.append((command_index, cmd))
            if len(self._blend_buffer) > self._max_blend_lookahead:
                self._flush_blend()
            return

        if self._blend_buffer:
            # Non-blended move terminates a pending blend chain.
            self._blend_buffer.append((command_index, cmd))
            self._flush_blend()
            return

        # Single non-blended command
        state = cast(ControllerState, self.state)
        try:
            cmd.do_setup(state)
        except Exception as e:
            self._emit_error(command_index, cmd, e)
            return
        self._emit_trajectory(command_index, cmd, params)
+
    def _flush_blend(self) -> None:
        """Flush the blend buffer — either blend or single-command setup.

        A lone buffered command gets a plain setup. Two or more are blended:
        the head command consumes as many followers as the blend geometry
        allows (``do_setup_with_blend`` returns the count), and unconsumed
        tail commands are planned individually. On setup failure the normal
        mode drops the buffer and emits one error segment; diagnostic mode
        still plans every remaining command so the full program can be
        previewed.
        """
        buf = self._blend_buffer
        if not buf:
            return

        state = cast(ControllerState, self.state)
        head_idx, head_cmd = buf[0]

        if len(buf) == 1:
            # Lone buffered command — no blending partner, plain setup.
            try:
                head_cmd.do_setup(state)
            except Exception as e:
                buf.clear()
                self._emit_error(head_idx, head_cmd, e)
                return
            self._emit_trajectory(head_idx, head_cmd, head_cmd.p)
        else:
            rest_cmds = [cmd for _, cmd in buf[1:]]
            try:
                # consumed = how many of rest_cmds were folded into the
                # head's blended trajectory.
                consumed = head_cmd.do_setup_with_blend(state, rest_cmds)
            except Exception as e:
                if not self._diagnostic:
                    buf.clear()
                    self._emit_error(head_idx, head_cmd, e)
                    return
                # Diagnostic mode: emit error for head, process rest individually
                self._emit_error(head_idx, head_cmd, e)
                remaining = list(buf[1:])
                buf.clear()
                for uc_idx, uc_cmd in remaining:
                    try:
                        uc_cmd.do_setup(state)
                    except Exception as e2:
                        self._emit_error(uc_idx, uc_cmd, e2)
                        continue
                    self._emit_trajectory(uc_idx, uc_cmd, uc_cmd.p)
                return

            if consumed < len(rest_cmds):
                # Not fatal — the tail is planned as separate segments below.
                logger.warning(
                    "Blend zone degraded: requested %d segments, achieved %d",
                    len(rest_cmds),
                    consumed,
                )

            consumed_indices = [idx for idx, _ in buf[1 : 1 + consumed]]

            self._output.append(
                TrajectorySegment(
                    command_index=head_idx,
                    trajectory_steps=head_cmd.trajectory_steps.copy(),
                    duration=head_cmd._duration,
                    command_name=type(head_cmd).__name__,
                    action_params=_format_cmd_params(head_cmd.p),
                    blend_consumed_indices=consumed_indices,
                )
            )
            # Planner's current position becomes the blend's final waypoint.
            self.state.Position_in[:] = head_cmd.trajectory_steps[-1]

            # Unconsumed tail commands: compute individually
            for i in range(1 + consumed, len(buf)):
                uc_idx, uc_cmd = buf[i]
                try:
                    uc_cmd.do_setup(state)
                except Exception as e:
                    if not self._diagnostic:
                        buf.clear()
                        self._emit_error(uc_idx, uc_cmd, e)
                        return
                    self._emit_error(uc_idx, uc_cmd, e)
                    continue
                self._emit_trajectory(uc_idx, uc_cmd, uc_cmd.p)

        buf.clear()
+
+ def _emit_trajectory(
+ self,
+ command_index: int,
+ cmd: TrajectoryMoveCommandBase,
+ params: object | None = None,
+ ) -> None:
+ """Append a TrajectorySegment to output and advance position."""
+ self._output.append(
+ TrajectorySegment(
+ command_index=command_index,
+ trajectory_steps=cmd.trajectory_steps.copy(),
+ duration=cmd._duration,
+ command_name=type(cmd).__name__,
+ action_params=_format_cmd_params(params) if params is not None else "",
+ )
+ )
+ self.state.Position_in[:] = cmd.trajectory_steps[-1]
+
    def _emit_error(
        self, command_index: int, cmd: TrajectoryMoveCommandBase, exc: Exception
    ) -> None:
        """Append an ErrorSegment to output, with diagnostic data if available.

        Args:
            command_index: Queue index of the failed command.
            cmd: Command whose setup failed; may expose ``cartesian_diagnostic``
                data collected before the failure (diagnostic mode only).
            exc: Exception raised during setup; converted to a RobotError.
        """
        cartesian_path = None
        ik_valid = None
        if self._diagnostic:
            # Cartesian commands may have recorded per-waypoint diagnostics
            # before failing; surface them for preview/debug consumers.
            diag = getattr(cmd, "cartesian_diagnostic", None)
            if diag is not None:
                cartesian_path = diag.get("tcp_poses")
                ik_valid = diag.get("ik_valid")

        robot_error = extract_robot_error(
            exc, ErrorCode.MOTN_SETUP_FAILED, command_index, detail=str(exc)
        )
        self._output.append(
            ErrorSegment(
                command_index=command_index,
                error=robot_error,
                cartesian_path=cartesian_path,
                ik_valid=ik_valid,
            )
        )

        if self._diagnostic:
            # Keep previewing subsequent commands from the intended target.
            self._try_advance_past_error(cmd)
+
    def _try_advance_past_error(self, cmd: TrajectoryMoveCommandBase) -> None:
        """Best-effort advance Position_in to intended target after a failed command.

        Only used in diagnostic mode so subsequent commands start from the
        intended position even when the current command failed. Any failure
        while computing the target simply leaves Position_in unchanged.
        """
        # Function-scope imports — NOTE(review): presumably to avoid import
        # cycles with the command/config modules; confirm before hoisting.
        from parol6.commands.joint_commands import JointMoveCommandBase
        from parol6.config import rad_to_steps, steps_to_rad

        state = cast(ControllerState, self.state)
        q_rad = np.zeros(6, dtype=np.float64)
        steps_to_rad(state.Position_in, q_rad)

        try:
            if isinstance(cmd, JointMoveCommandBase):
                # Joint-space command knows its own target directly.
                target_rad = cmd._get_target_rad(state, q_rad)
            else:
                # Cartesian commands: try IK on just the endpoint.
                # Use best-effort solution even if IK reports failure —
                # for preview, an approximate position is better than
                # staying at the previous position.
                target_pose = getattr(cmd, "target_pose", None)
                if target_pose is None:
                    return
                from parol6.utils.ik import solve_ik

                ik_result = solve_ik(
                    self._robot_module.robot,
                    target_pose,
                    q_rad,
                    quiet_logging=True,
                )
                target_rad = ik_result.q
        except Exception:
            # Could not determine a target — keep the previous position.
            return

        target_steps = np.zeros(6, dtype=np.int32)
        rad_to_steps(target_rad, target_steps)
        self.state.Position_in[:] = target_steps
+
+ # -- inline command handling --
+
    def _handle_inline(self, command_index: int, params: object) -> None:
        """Emit an InlineSegment and predict state changes.

        The command itself executes later on the player side; here we only
        mirror the state effects that influence subsequent planning
        (active tool, TCP offset, home position).
        """
        self._output.append(
            InlineSegment(
                command_index=command_index,
                params=params,
            )
        )

        # Predict state for subsequent trajectory planning
        if isinstance(params, SelectToolCmd):
            # Selecting a tool resets any previously set TCP offset.
            self.state.current_tool = params.tool_name
            self.state.current_tool_variant = params.variant_key
            self.state.tcp_offset_m = (0.0, 0.0, 0.0)
            self._robot_module.apply_tool(
                params.tool_name, variant_key=params.variant_key
            )
        elif isinstance(params, SetTcpOffsetCmd):
            # Wire params are scaled /1000 here — mm on the wire, meters in state.
            offset_m = (params.x / 1000.0, params.y / 1000.0, params.z / 1000.0)
            self.state.tcp_offset_m = offset_m
            self._robot_module.apply_tool(
                self.state.current_tool,
                variant_key=self.state.current_tool_variant,
                tcp_offset_m=offset_m,
            )
        elif isinstance(params, HomeCmd):
            # After homing, planning continues from the configured home pose.
            self.state.Position_in[:] = self._home_steps
+
+
+# ---------------------------------------------------------------------------
+# PlannerWorker — thin subprocess wrapper around TrajectoryPlanner
+# ---------------------------------------------------------------------------
+
+
class PlannerWorker:
    """Thin adapter that runs a TrajectoryPlanner inside the planner subprocess.

    Translates incoming PlanCommand messages into planner calls and forwards
    every resulting segment, in order, onto the shared segment queue.
    """

    def __init__(self, segment_queue: multiprocessing.Queue) -> None:
        self._segment_queue = segment_queue
        self._planner = TrajectoryPlanner(diagnostic=False)

    @property
    def state(self) -> PlannerState:
        return self._planner.state

    def _emit(self, segments: list[Segment]) -> None:
        # Forward planned segments to the main process, preserving order.
        for segment in segments:
            self._segment_queue.put(segment)

    def process_command(self, msg: PlanCommand) -> None:
        """Route a PlanCommand through the planner and emit segments."""
        if msg.position_in is not None:
            self._planner.state.Position_in[:] = msg.position_in
        self._emit(self._planner.process(msg.params, msg.command_index))

    def flush_stale_blend(self) -> None:
        """Flush any pending blend buffer (called on queue timeout)."""
        self._emit(self._planner.flush())

    def cancel(self) -> None:
        """Clear blend buffer on CancelAll."""
        self._planner.cancel()

    def apply_tool(
        self,
        tool_name: str,
        variant_key: str = "",
        tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0),
    ) -> None:
        """Sync tool state (e.g. after E-stop)."""
        self._planner.sync_tool(
            tool_name, variant_key=variant_key, tcp_offset_m=tcp_offset_m
        )
+
+
+# ---------------------------------------------------------------------------
+# Worker process entry point
+# ---------------------------------------------------------------------------
+
+
def motion_planner_main(
    command_queue: multiprocessing.Queue,
    segment_queue: multiprocessing.Queue,
    shutdown_event: EventType,
) -> None:
    """Worker process main loop — compute trajectories and forward inline commands.

    Args:
        command_queue: Messages from the main process (PlanCommand, CancelAll,
            SyncPosition, SyncProfile, SyncTool).
        segment_queue: Planned segments sent back to the main process.
        shutdown_event: Set by the parent to request a clean exit.
    """
    # The parent owns Ctrl-C handling; the worker must not die on SIGINT.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    from parol6.server import set_pdeathsig

    set_pdeathsig()

    worker = PlannerWorker(segment_queue)

    logger.debug(
        "Motion planner subprocess started (PID %d)",
        multiprocessing.current_process().pid,
    )

    try:
        while not shutdown_event.is_set():
            try:
                msg = command_queue.get(timeout=0.1)
            except queue.Empty:
                # Idle for 100ms: flush any buffered blended moves so they
                # are not stalled waiting for a follower that never comes.
                worker.flush_stale_blend()
                continue

            if isinstance(msg, CancelAll):
                worker.cancel()
                continue

            if isinstance(msg, SyncPosition):
                worker.state.Position_in[:] = msg.position_in
                continue

            if isinstance(msg, SyncProfile):
                worker.state.motion_profile = msg.profile
                continue

            if isinstance(msg, SyncTool):
                worker.apply_tool(
                    msg.tool_name,
                    variant_key=msg.variant_key,
                    tcp_offset_m=msg.tcp_offset_m,
                )
                continue

            if isinstance(msg, PlanCommand):
                try:
                    worker.process_command(msg)
                except Exception as e:
                    logger.exception(
                        "Planner failed on command index=%d (%s)",
                        msg.command_index,
                        type(msg.params).__name__,
                    )
                    robot_error = extract_robot_error(
                        e,
                        ErrorCode.MOTN_SETUP_FAILED,
                        msg.command_index,
                        detail=str(e),
                    )
                    segment_queue.put(
                        ErrorSegment(
                            command_index=msg.command_index,
                            error=robot_error,
                        )
                    )
                    # Planning failed: drop buffered blends and everything
                    # still pending so execution halts at the failed index.
                    # NOTE(review): this also discards any queued Sync*
                    # messages — confirm that is intended.
                    worker.cancel()
                    _drain_queue(command_queue)

    except (EOFError, OSError, BrokenPipeError, KeyboardInterrupt):
        # Expected when the parent process is shutting down: the queue's
        # underlying pipe gets torn down before our shutdown_event check
        # fires. Nothing to log.
        pass
    except Exception:
        logger.exception("Motion planner subprocess error")
    finally:
        logger.debug("Motion planner subprocess exiting")
+
+
+# ---------------------------------------------------------------------------
+# MotionPlanner — main-process handle for the worker
+# ---------------------------------------------------------------------------
+
+
class MotionPlanner:
    """Main-process handle for the trajectory planner subprocess.

    Owns the command/segment queues and the worker process lifecycle, and
    exposes a non-blocking API for submitting work and polling results.
    """

    def __init__(self) -> None:
        self._command_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._segment_queue: multiprocessing.Queue = multiprocessing.Queue()
        self._shutdown_event: EventType = multiprocessing.Event()
        self._process: multiprocessing.Process | None = None

    # -- lifecycle --

    def start(self) -> None:
        """Start the planner subprocess (no-op if it is already running)."""
        proc = self._process
        if proc is not None and proc.is_alive():
            return
        self._shutdown_event.clear()
        proc = multiprocessing.Process(
            target=motion_planner_main,
            args=(self._command_queue, self._segment_queue, self._shutdown_event),
            daemon=True,
            name="MotionPlannerProcess",
        )
        self._process = proc
        proc.start()
        logger.debug("Motion planner started, PID: %s", proc.pid)

    def stop(self) -> None:
        """Shut down the planner subprocess gracefully."""
        self._shutdown_event.set()
        proc = self._process
        if proc is not None and proc.is_alive():
            proc.join(timeout=2.0)
            if proc.is_alive():
                logger.warning("Motion planner did not exit cleanly, terminating")
                proc.terminate()
                proc.join(timeout=1.0)
        # Drain queues to avoid BrokenPipeError on GC
        _drain_queue(self._command_queue)
        _drain_queue(self._segment_queue)
        logger.debug("Motion planner stopped")

    @property
    def alive(self) -> bool:
        proc = self._process
        return proc is not None and proc.is_alive()

    # -- main → planner --

    def submit(self, msg: PlannerMessage) -> None:
        """Send a message to the planner (non-blocking)."""
        self._command_queue.put_nowait(msg)

    def sync_position(self, position_in: np.ndarray) -> None:
        """Update the planner's position tracking."""
        self.submit(SyncPosition(position_in=position_in))

    def sync_profile(self, profile: str) -> None:
        """Update the planner's motion profile."""
        self.submit(SyncProfile(profile=profile))

    def sync_tool(
        self,
        tool_name: str,
        variant_key: str = "",
        tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0),
    ) -> None:
        """Update the planner's tool state."""
        message = SyncTool(
            tool_name=tool_name,
            variant_key=variant_key,
            tcp_offset_m=tcp_offset_m,
        )
        self.submit(message)

    def cancel(self) -> None:
        """Cancel all pending work in the planner."""
        self.submit(CancelAll())

    # -- planner → main --

    def poll_segment(self) -> Segment | None:
        """Non-blocking poll for a computed segment. Returns None if empty."""
        try:
            return self._segment_queue.get_nowait()
        except queue.Empty:
            return None
+
+
+def _drain_queue(q: multiprocessing.Queue) -> None:
+ """Drain a queue, discarding all items."""
+ try:
+ while True:
+ q.get_nowait()
+ except queue.Empty:
+ pass
diff --git a/parol6/server/segment_player.py b/parol6/server/segment_player.py
new file mode 100644
index 0000000..e01b131
--- /dev/null
+++ b/parol6/server/segment_player.py
@@ -0,0 +1,261 @@
+"""Segment player — consumes planned segments in the 100Hz control loop.
+
+The SegmentPlayer is the execution-side counterpart of MotionPlanner.
+It receives ``TrajectorySegment`` and ``InlineSegment`` objects from the
+planner's output queue and executes them in order:
+
+- **TrajectorySegment**: index into pre-computed waypoints at 100Hz
+ (zero-allocation hot path, identical to the old execute_step()).
+- **InlineSegment**: create the command object from its wire params and
+ tick it in the control loop until completion (Home, Gripper, etc.).
+"""
+
+from __future__ import annotations
+
+import logging
+from collections import deque
+from typing import TYPE_CHECKING
+
+from pinokin import arrays_equal_n
+
+from parol6.commands.base import CommandBase, ExecutionStatusCode
+from parol6.config import SETTLE_MAX_TICKS
+from parol6.protocol.wire import CommandCode
+from parol6.server.command_executor import _format_cmd_params
+from parol6.server.command_registry import create_command_from_struct
+from parol6.server.motion_planner import (
+ ErrorSegment,
+ InlineSegment,
+ MotionPlanner,
+ Segment,
+ TrajectorySegment,
+)
+from parol6.utils.error_catalog import RobotError, make_error
+from parol6.utils.error_codes import ErrorCode
+from waldoctl import ActionState
+
+if TYPE_CHECKING:
+ from parol6.server.state import ControllerState
+
+logger = logging.getLogger(__name__)
+
+
class SegmentPlayer:
    """Consumes segments from the planner and executes them at 100Hz.

    Handles both trajectory playback (zero-alloc waypoint indexing) and
    inline command execution (setup/tick lifecycle) while maintaining
    strict ordering. An ErrorSegment halts playback and discards all
    remaining planned work.
    """

    __slots__ = (
        "_planner",
        "_active",
        "_step",
        "_buffer",
        "_inline_cmd",
        "_inline_activated",
        "_settling",
        "_settle_ticks",
    )

    def __init__(self, planner: MotionPlanner) -> None:
        self._planner = planner
        # Segment currently being executed (None when idle).
        self._active: Segment | None = None
        # Waypoint cursor within the active trajectory segment.
        self._step: int = 0
        # Locally buffered segments drained from the planner queue.
        self._buffer: deque[Segment] = deque()
        # Lazily created command object for the active inline segment.
        self._inline_cmd: CommandBase | None = None
        self._inline_activated: bool = False
        # Settle phase: all waypoints sent, waiting for Position_in to converge.
        self._settling: bool = False
        self._settle_ticks: int = 0

    @property
    def active(self) -> bool:
        """True if playing a segment or has buffered segments."""
        return self._active is not None or bool(self._buffer)

    def tick(self, state: ControllerState) -> bool:
        """Execute one tick. Returns True if actively playing/executing.

        Called from the 100Hz control loop. For trajectory segments this is
        a zero-allocation hot path (array index + copy).
        """
        # Drain planner's output queue into local buffer (non-blocking)
        seg = self._planner.poll_segment()
        while seg is not None:
            self._buffer.append(seg)
            state.queued_segments += 1
            if isinstance(seg, TrajectorySegment):
                state.queued_duration += seg.duration
            seg = self._planner.poll_segment()

        # Process active segment or activate next.
        # Bounded loop: instant inline completions chain to the next segment
        # within the same tick, but never more than max_immediate times.
        max_immediate = 8
        for _ in range(max_immediate):
            # Activate next segment if idle
            if self._active is None:
                if not self._buffer:
                    return False
                self._activate_next(state)

            active = self._active

            # --- Trajectory segment: index into waypoints ---
            if isinstance(active, TrajectorySegment):
                if self._step < len(active.trajectory_steps):
                    state.Position_out[:] = active.trajectory_steps[self._step]
                    state.Command_out = CommandCode.MOVE
                    self._step += 1
                    self._settling = False
                    return True
                # All waypoints sent — hold MOVE at target until Position_in converges
                target = active.trajectory_steps[-1]
                if not self._settling:
                    # First settle tick: start the timeout counter.
                    self._settling = True
                    self._settle_ticks = 0
                self._settle_ticks += 1
                if (
                    arrays_equal_n(state.Position_in[:6], target[:6])
                    or self._settle_ticks > SETTLE_MAX_TICKS
                ):
                    # Converged (or timed out) — complete and try the next
                    # segment in the same tick.
                    self._settling = False
                    self._complete_segment(active, state)
                    continue
                # Keep commanding the target so firmware continues tracking
                state.Position_out[:] = target
                state.Command_out = CommandCode.MOVE
                return True

            # --- Inline segment: tick the command ---
            if isinstance(active, InlineSegment):
                result = self._tick_inline(active, state)
                if result is None:
                    # Instant completion — try next immediately
                    continue
                return result

            # --- Error segment: halt playback and abandon the remaining run ---
            if isinstance(active, ErrorSegment):
                logger.error(
                    "Command %d failed: %s", active.command_index, active.error
                )
                state.error = active.error
                state.action_state = ActionState.ERROR
                state.action_current = ""
                state.action_params = ""
                self._active = None
                # Halt: cancel all remaining planned work
                self._buffer.clear()
                self._planner.cancel()
                self._drain_planner_queue(state)
                return False

            # Unknown segment type
            logger.error("Unknown segment type: %s", type(active).__name__)
            self._active = None
            continue

        # Exhausted immediate iterations (unlikely)
        return self._active is not None

    def _activate_next(self, state: ControllerState) -> None:
        """Promote next buffered segment to active."""
        self._active = self._buffer.popleft()
        self._step = 0
        self._inline_cmd = None
        self._inline_activated = False
        state.executing_command_index = self._active.command_index
        state.action_state = ActionState.EXECUTING
        # Populate action info for trajectory segments (inline segments set these later)
        if isinstance(self._active, TrajectorySegment):
            state.action_current = self._active.command_name
            state.action_params = self._active.action_params

    def _tick_inline(self, seg: InlineSegment, state: ControllerState) -> bool | None:
        """Tick an inline command. Returns True (executing), False (failed), or None (completed)."""
        if self._inline_cmd is None:
            # First tick for this segment: build the command from wire params.
            cmd, _, error_msg = create_command_from_struct(seg.params)
            if cmd is None:
                logger.error("Failed to create inline command: %s", error_msg)
                error = make_error(
                    ErrorCode.COMM_DECODE_ERROR,
                    seg.command_index,
                    detail=error_msg or "unknown command",
                )
                self._on_failure(seg, error, state)
                return False

            self._inline_cmd = cmd

        cmd = self._inline_cmd
        if not self._inline_activated:
            # One-time setup before the first tick.
            cmd.setup(state)
            state.action_current = type(cmd).__name__
            state.action_params = _format_cmd_params(seg.params)
            self._inline_activated = True

        code = cmd.tick(state)

        if code == ExecutionStatusCode.COMPLETED:
            self._complete_segment(seg, state)
            return None  # signal caller to try next immediately

        if code == ExecutionStatusCode.FAILED:
            logger.error(
                "Inline command failed: %s - %s",
                type(cmd).__name__,
                cmd.robot_error,
            )
            error = cmd.robot_error or make_error(
                ErrorCode.MOTN_TICK_FAILED, seg.command_index, detail=type(cmd).__name__
            )
            self._on_failure(seg, error, state)
            return False

        return True  # EXECUTING — continue next tick

    def _complete_segment(self, seg: Segment, state: ControllerState) -> None:
        """Mark segment as completed and update tracking indices."""
        final_idx = seg.command_index
        if isinstance(seg, TrajectorySegment):
            # A blended segment completes every command it consumed; report
            # the highest index so progress tracking skips past all of them.
            for idx in seg.blend_consumed_indices:
                if idx > final_idx:
                    final_idx = idx
            state.queued_duration -= seg.duration
        state.queued_segments -= 1
        state.completed_command_index = final_idx
        state.action_current = ""
        state.action_params = ""
        state.action_state = ActionState.IDLE
        self._active = None

    def _on_failure(
        self, seg: Segment, error: RobotError, state: ControllerState
    ) -> None:
        """Handle inline command failure: set error state, clear buffer, cancel planner."""
        state.error = error
        state.action_current = ""
        state.action_params = ""
        state.action_state = ActionState.ERROR
        self._active = None
        self._buffer.clear()
        self._planner.cancel()
        self._drain_planner_queue(state)

    def cancel(self, state: ControllerState) -> None:
        """Clear buffer, drain stale segments, and stop playback."""
        self._active = None
        self._step = 0
        self._inline_cmd = None
        self._inline_activated = False
        self._buffer.clear()
        self._planner.cancel()
        # Drain stale segments from planner output queue
        self._drain_planner_queue(state)

    def _drain_planner_queue(self, state: ControllerState) -> None:
        """Drain any remaining segments from the planner's output queue."""
        while self._planner.poll_segment() is not None:
            pass
        state.queued_segments = 0
        state.queued_duration = 0.0
diff --git a/parol6/server/state.py b/parol6/server/state.py
index 79ad91d..a45ca5f 100644
--- a/parol6/server/state.py
+++ b/parol6/server/state.py
@@ -1,16 +1,140 @@
from __future__ import annotations
+import atexit
import logging
-import threading
-from collections import deque
from dataclasses import dataclass, field
-from typing import Any, cast
+from typing import Any
import numpy as np
-from spatialmath import SE3
import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from pinokin import arrays_equal_6
+from parol6.config import CONTROL_RATE_HZ, steps_to_rad
+from parol6.motion import CartesianStreamingExecutor, StreamingExecutor
from parol6.protocol.wire import CommandCode
+from parol6.utils.error_catalog import RobotError
+from waldoctl import ActionState
+
+
class GripperHWState:
    """Named wrapper over the raw gripper numpy arrays.

    Provides human-readable property access while keeping the underlying
    int32 arrays intact for serial frame packing (zero-copy).

    ``data_out`` layout: [target_position, target_speed, target_current,
    command_bits, mode, device_id]
    ``data_in`` layout: [device_id, feedback_position, feedback_speed,
    feedback_current, status_byte, object_detection]
    """

    __slots__ = ("_out", "_in")

    def __init__(self, data_out: np.ndarray, data_in: np.ndarray) -> None:
        # Stores references to the shared arrays — no copies are made, so
        # writes through the properties are visible to the serial packer.
        self._out = data_out
        self._in = data_in

    # -- Output (commands to gripper) --

    @property
    def target_position(self) -> int:
        return int(self._out[0])

    @target_position.setter
    def target_position(self, v: int) -> None:
        self._out[0] = v

    @property
    def target_speed(self) -> int:
        return int(self._out[1])

    @target_speed.setter
    def target_speed(self, v: int) -> None:
        self._out[1] = v

    @property
    def target_current(self) -> int:
        return int(self._out[2])

    @target_current.setter
    def target_current(self, v: int) -> None:
        self._out[2] = v

    @property
    def command_bits(self) -> int:
        return int(self._out[3])

    @command_bits.setter
    def command_bits(self, v: int) -> None:
        self._out[3] = v

    # -- Command bit helpers (MSB-first 8-bit field) --
    #
    # Bit layout: [enable, move_active, estop, grip_enable, 0, 0, 0, 0]
    #   bit 7 (0x80): enable — always 1
    #   bit 6 (0x40): move_active — 1 while moving, 0 when idle
    #   bit 5 (0x20): estop — inverted e-stop sense
    #   bit 4 (0x10): grip_enable — always 1
    #   bits 3-0: reserved (0)

    _CMD_ENABLE: int = 0x80
    _CMD_MOVE_ACTIVE: int = 0x40
    _CMD_ESTOP: int = 0x20
    _CMD_GRIP_ENABLE: int = 0x10

    def set_command_bits(self, *, move_active: bool, estop: bool) -> None:
        """Pack and write the command bits byte.

        ``enable`` and ``grip_enable`` are always set.

        Args:
            move_active: True while a gripper move is in progress.
            estop: E-stop flag (inverted sense per the bit layout above).
        """
        v = self._CMD_ENABLE | self._CMD_GRIP_ENABLE
        if move_active:
            v |= self._CMD_MOVE_ACTIVE
        if estop:
            v |= self._CMD_ESTOP
        self._out[3] = v

    @property
    def mode(self) -> int:
        return int(self._out[4])

    @mode.setter
    def mode(self, v: int) -> None:
        self._out[4] = v

    @property
    def device_id_out(self) -> int:
        return int(self._out[5])

    @device_id_out.setter
    def device_id_out(self, v: int) -> None:
        self._out[5] = v

    # -- Input (feedback from gripper) -- read-only views

    @property
    def device_id(self) -> int:
        return int(self._in[0])

    @property
    def feedback_position(self) -> int:
        return int(self._in[1])

    @property
    def feedback_speed(self) -> int:
        return int(self._in[2])

    @property
    def feedback_current(self) -> int:
        return int(self._in[3])

    @property
    def status_byte(self) -> int:
        return int(self._in[4])

    @property
    def object_detection(self) -> int:
        return int(self._in[5])
@dataclass
@@ -31,6 +155,7 @@ class ControllerState:
# Serial/transport
ser: Any = None
+ hardware_connected: bool = False
last_reconnect_attempt: float = 0.0
# Safety and control flags
@@ -38,20 +163,23 @@ class ControllerState:
soft_error: bool = False
disabled_reason: str = ""
e_stop_active: bool = False
- stream_mode: bool = False
+
+ # Motion profile for all moves (TOPPRA, RUCKIG, QUINTIC, TRAPEZOID, LINEAR)
+ # Note: RUCKIG is point-to-point only; Cartesian moves fall back to TOPPRA
+ motion_profile: str = "TOPPRA"
+
+ # Streaming executors for online motion (jogging/streaming)
+ streaming_executor: StreamingExecutor = field(
+ default_factory=lambda: StreamingExecutor(num_dofs=6, dt=1.0 / CONTROL_RATE_HZ)
+ )
+ cartesian_streaming_executor: CartesianStreamingExecutor = field(
+ default_factory=lambda: CartesianStreamingExecutor(dt=1.0 / CONTROL_RATE_HZ)
+ )
# Tool configuration (affects kinematics and visualization)
_current_tool: str = "NONE"
-
- # I/O buffers and protocol tracking (serial frame parsing state)
- input_byte: int = 0
- start_cond1: int = 0
- start_cond2: int = 0
- start_cond3: int = 0
- good_start: int = 0
- data_len: int = 0
- data_buffer: list[bytes] = field(default_factory=lambda: [b""] * 255)
- data_counter: int = 0
+ _current_tool_variant: str = ""
+ _tcp_offset_m: tuple[float, float, float] = (0.0, 0.0, 0.0)
# Robot telemetry and command buffers - using ndarray for efficiency
Command_out: CommandCode = CommandCode.IDLE # The command code to send to firmware
@@ -78,6 +206,9 @@ class ControllerState:
default_factory=lambda: np.zeros((6,), dtype=np.int32)
)
+ # Tool teleport: when >= 0, snap gripper to this position (0-255) on next tick
+ tool_teleport_pos: float = -1.0
+
# uint8 flag/bitfield buffers
Affected_joint_out: np.ndarray = field(
default_factory=lambda: np.zeros((8,), dtype=np.uint8)
@@ -97,22 +228,29 @@ class ControllerState:
Timeout_out: int = 0
XTR_data: int = 0
- # Command queueing and tracking
- command_queue: deque[Any] = field(default_factory=deque)
- incoming_command_buffer: deque[tuple[str, tuple[str, int]]] = field(
- default_factory=deque
- )
- command_id_map: dict[Any, tuple[str, tuple[str, int]]] = field(default_factory=dict)
- active_command: Any = None
- active_command_id: str | None = None
- last_command_time: float = 0.0
-
# Action tracking for status broadcast and queries
action_current: str = ""
- action_state: str = "IDLE" # IDLE, EXECUTING, COMPLETED, FAILED
+ action_params: str = ""
+ action_state: ActionState = ActionState.IDLE # IDLE, EXECUTING, ERROR
action_next: str = ""
queue_nonstreamable: list[str] = field(default_factory=list)
+ # Queue progress tracking (monotonically increasing command indices)
+ next_command_index: int = 0
+ executing_command_index: int = -1
+ completed_command_index: int = -1
+ last_checkpoint: str = ""
+
+ # Planning behavior (stop on first IK failure vs solve all for diagnostic)
+ stop_on_failure: bool = True
+
+ # Error state (set by segment player on planning failure)
+ error: RobotError | None = None
+
+ # Pipeline depth (maintained by segment player)
+ queued_segments: int = 0
+ queued_duration: float = 0.0
+
# Network setup and uptime
ip: str = "127.0.0.1"
port: int = 5001
@@ -125,107 +263,204 @@ class ControllerState:
# Control loop runtime metrics (used by benchmarks/monitoring)
loop_count: int = 0
overrun_count: int = 0
- last_period_s: float = 0.0
- ema_period_s: float = 0.0
- # Command frequency metrics
- command_count: int = 0
- last_command_period_s: float = 0.0
- ema_command_period_s: float = 0.0
- command_timestamps: deque[float] = field(default_factory=lambda: deque(maxlen=10))
+ # Rolling statistics from loop timer
+ mean_period_s: float = 0.0
+ std_period_s: float = 0.0
+ min_period_s: float = 0.0
+ max_period_s: float = 0.0
+ p95_period_s: float = 0.0
+ p99_period_s: float = 0.0
+
+ # Flag to signal loop stats reset (picked up by controller)
+ loop_stats_reset_pending: bool = False
# Forward kinematics cache (invalidated when Position_in or current_tool changes)
_fkine_last_pos_in: np.ndarray = field(
default_factory=lambda: np.zeros((6,), dtype=np.int32)
)
- _fkine_last_tool: str = ""
- _fkine_se3: Any = None # SE3 instance from spatialmath
- _fkine_mat: np.ndarray = field(default_factory=lambda: np.eye(4, dtype=np.float64))
+ _fkine_last_tool_name: str = ""
+ _fkine_last_tool_variant: str = ""
+ _fkine_last_tcp_offset: tuple[float, float, float] = (0.0, 0.0, 0.0)
+ _fkine_mat: np.ndarray = field(
+ default_factory=lambda: np.asfortranarray(np.eye(4, dtype=np.float64))
+ )
_fkine_flat_mm: np.ndarray = field(
default_factory=lambda: np.zeros((16,), dtype=np.float64)
)
+ _fkine_q_rad: np.ndarray = field(
+ default_factory=lambda: np.zeros((6,), dtype=np.float64)
+ )
+
+ # Named wrapper over raw gripper arrays (initialized in __post_init__)
+ gripper_hw: GripperHWState = field(init=False, repr=False)
+
    def __post_init__(self) -> None:
        """Initialize E-stop to released state and named gripper wrapper.

        The wrapper shares the raw gripper arrays (no copies), so reads and
        writes through it stay in sync with the serial frame buffers.
        """
        self.InOut_in[4] = 1  # E-STOP released (0=pressed, 1=released)
        self.gripper_hw = GripperHWState(self.Gripper_data_out, self.Gripper_data_in)
+
    def reset(self) -> None:
        """
        Reset robot state to initial values without losing connection state.

        Preserves (not reassigned here): ser, hardware_connected,
        last_reconnect_attempt, ip, port, start_time
        Resets: positions, speeds, I/O, queues, tool, errors, etc.
        """
        # Safety and control flags
        self.enabled = True
        self.soft_error = False
        self.disabled_reason = ""
        self.e_stop_active = False
        self.motion_profile = "TOPPRA"

        # Tool back to none
        self._current_tool = "NONE"
        self._current_tool_variant = ""
        self._tcp_offset_m = (0.0, 0.0, 0.0)
        PAROL6_ROBOT.apply_tool("NONE")

        # Command and telemetry buffers - zero out
        self.Command_out = CommandCode.IDLE
        self.Position_out.fill(0)
        self.Speed_out.fill(0)
        self.Gripper_data_out.fill(0)
        self.Position_in.fill(0)
        self.Speed_in.fill(0)
        self.Timing_data_in.fill(0)
        self.Gripper_data_in.fill(0)
        self.Affected_joint_out.fill(0)
        self.InOut_out.fill(0)
        self.InOut_in.fill(0)
        self.InOut_in[4] = 1  # E-STOP released (0=pressed, 1=released)
        self.Homed_in.fill(0)
        self.Temperature_error_in.fill(0)
        self.Position_error_in.fill(0)
        self.Timeout_out = 0
        self.XTR_data = 0

        # Action tracking
        self.action_current = ""
        self.action_params = ""
        self.action_state = ActionState.IDLE
        self.action_next = ""
        self.queue_nonstreamable.clear()

        # Queue progress tracking
        self.next_command_index = 0
        self.executing_command_index = -1
        self.completed_command_index = -1
        self.last_checkpoint = ""

        # Error and pipeline depth
        self.error = None
        self.queued_segments = 0
        self.queued_duration = 0.0

        # Gripper mode tracker — replaced with a fresh instance.
        # NOTE(review): GripperModeResetTracker is defined outside this view;
        # confirm the corresponding field exists on this class.
        self.gripper_mode_tracker = GripperModeResetTracker()

        # Invalidate fkine cache by resetting its tracking keys; the matrix
        # buffers themselves are reused in place.
        self._fkine_last_pos_in.fill(0)
        self._fkine_last_tool_name = ""
        self._fkine_last_tool_variant = ""

        # Reset streaming executors (clears reference_pose and Ruckig state)
        self.streaming_executor.reset()
        self.cartesian_streaming_executor.reset()

        logger.debug("Controller state reset (preserving connection)")
@property
def current_tool(self) -> str:
"""Get the current tool name."""
return self._current_tool
- @current_tool.setter
- def current_tool(self, tool_name: str) -> None:
- """Set the current tool and apply it to the robot model."""
- if tool_name != self._current_tool:
+ @property
+ def current_tool_variant(self) -> str:
+ """Get the current tool variant key."""
+ return self._current_tool_variant
+
+ def set_tool(self, tool_name: str, variant_key: str = "") -> None:
+ """Set the current tool and apply it to the robot model.
+
+ Resets TCP offset to zero (changing tools invalidates any prior offset).
+ """
+ if tool_name != self._current_tool or variant_key != self._current_tool_variant:
self._current_tool = tool_name
- # Apply tool to robot model (rebuilds with tool as final link)
- PAROL6_ROBOT.apply_tool(tool_name)
- # Invalidate cache
- self._fkine_se3 = None
- logger.info(f"Tool changed to {tool_name}, fkine cache invalidated")
+ self._current_tool_variant = variant_key
+ self._tcp_offset_m = (0.0, 0.0, 0.0)
+ PAROL6_ROBOT.apply_tool(tool_name, variant_key=variant_key)
+ label = f"{tool_name}:{variant_key}" if variant_key else tool_name
+ logger.info(f"Tool changed to {label}")
+
+ @property
+ def tcp_offset_m(self) -> tuple[float, float, float]:
+ """Current TCP offset in meters (tool-local frame)."""
+ return self._tcp_offset_m
+
+ def set_tcp_offset(self, offset_m: tuple[float, float, float]) -> None:
+ """Set TCP offset and reapply tool transform with the composed offset."""
+ self._tcp_offset_m = offset_m
+ PAROL6_ROBOT.apply_tool(
+ self._current_tool,
+ variant_key=self._current_tool_variant,
+ tcp_offset_m=offset_m,
+ )
+ logger.debug(
+ "TCP offset set to (%.1f, %.1f, %.1f) mm",
+ offset_m[0] * 1000,
+ offset_m[1] * 1000,
+ offset_m[2] * 1000,
+ )
logger = logging.getLogger(__name__)
class StateManager:
- """
- Singleton manager for ControllerState with thread-safe operations.
-
- This class ensures that all state access is synchronized and provides
- convenience methods for common state operations.
- """
+ """Manager for ControllerState."""
- _instance: StateManager | None = None
- _lock: threading.Lock = threading.Lock()
_state: ControllerState | None = None
- def __new__(cls) -> StateManager:
- """Ensure singleton pattern with thread safety."""
- if cls._instance is None:
- with cls._lock:
- # Double-check locking pattern
- if cls._instance is None:
- cls._instance = super().__new__(cls)
- return cls._instance
-
def __init__(self):
- """Initialize the state manager (only runs once due to singleton)."""
- if not hasattr(self, "_initialized"):
- self._state = ControllerState()
- self._state_lock = threading.RLock() # Use RLock for re-entrant locking
- self._initialized = True
- logger.info("StateManager initialized with NumPy buffers")
+ """Initialize the state manager."""
+ self._state = ControllerState()
def get_state(self) -> ControllerState:
"""
Get the current controller state.
- Note: This returns the actual state object. Modifications to it
- should be done through StateManager methods to ensure thread safety.
-
Returns:
The current ControllerState instance
"""
- with self._state_lock:
- if self._state is None:
- self._state = ControllerState()
- return self._state
+ if self._state is None:
+ self._state = ControllerState()
+ return self._state
def reset_state(self) -> None:
"""
Reset the controller state to a fresh instance.
This is useful at controller startup to ensure buffers are initialized
- to known defaults. Callers must ensure they hold appropriate locks in
- higher layers if concurrent access is possible.
+ to known defaults.
"""
- with self._state_lock:
- self._state = ControllerState()
- logger.info("Controller state reset")
+ self._state = ControllerState()
+ logger.debug("Controller state reset")
# Global singleton instance accessor
_state_manager: StateManager | None = None
+@atexit.register
+def _cleanup_state_manager() -> None:
+ global _state_manager
+ if _state_manager is not None:
+ _state_manager._state = None
+ _state_manager = None
+
+
def get_instance() -> StateManager:
"""
Get the global StateManager instance.
@@ -248,14 +483,20 @@ def get_state() -> ControllerState:
# -----------------------------
-def invalidate_fkine_cache() -> None:
+def invalidate_fkine_cache(state: ControllerState | None = None) -> None:
"""
Invalidate the fkine cache, forcing recomputation on next access.
- Call this when the robot model changes (e.g., tool change).
+ Called when the robot model changes (e.g., tool change).
+
+ Parameters
+ ----------
+ state : ControllerState, optional
+ The controller state to invalidate. If not provided, uses the global state.
"""
- state = get_state()
- state._fkine_se3 = None
- state._fkine_last_tool = ""
+ if state is None:
+ state = get_state()
+ state._fkine_last_tool_name = ""
+ state._fkine_last_tool_variant = ""
logger.debug("fkine cache invalidated")
@@ -264,68 +505,44 @@ def ensure_fkine_updated(state: ControllerState) -> None:
Ensure the fkine cache is up to date with current Position_in and tool.
If Position_in or current_tool has changed, recalculate fkine and update cache.
- This function is thread-safe when called with state from get_state().
-
Parameters
----------
state : ControllerState
The controller state to update
"""
- # Check if cache is valid
- pos_changed = not np.array_equal(state.Position_in, state._fkine_last_pos_in)
- tool_changed = state.current_tool != state._fkine_last_tool
-
- if pos_changed or tool_changed or state._fkine_se3 is None:
- # Recompute fkine
- q = PAROL6_ROBOT.ops.steps_to_rad(state.Position_in)
- assert PAROL6_ROBOT.robot is not None
- T = cast(SE3, cast(Any, PAROL6_ROBOT.robot).fkine(q))
-
- # Cache SE3 object
- state._fkine_se3 = T
-
- # Cache as 4x4 matrix
- mat = np.asarray(T.A, dtype=np.float64).copy()
- np.copyto(state._fkine_mat, mat)
-
- # Cache as flattened 16-vector with mm translation
- flat = mat.reshape(-1).copy()
- flat[3] *= 1000.0 # X translation to mm
- flat[7] *= 1000.0 # Y translation to mm
- flat[11] *= 1000.0 # Z translation to mm
- np.copyto(state._fkine_flat_mm, flat)
-
- # Update cache tracking
- np.copyto(state._fkine_last_pos_in, state.Position_in)
- state._fkine_last_tool = state.current_tool
+ pos_changed = not arrays_equal_6(state.Position_in, state._fkine_last_pos_in)
+ tool_changed = (
+ state.current_tool != state._fkine_last_tool_name
+ or state.current_tool_variant != state._fkine_last_tool_variant
+ or state.tcp_offset_m != state._fkine_last_tcp_offset
+ )
+ if pos_changed or tool_changed:
+ steps_to_rad(state.Position_in, state._fkine_q_rad)
+ PAROL6_ROBOT.robot.fkine_into(state._fkine_q_rad, state._fkine_mat)
-def get_fkine_se3(state: ControllerState | None = None) -> SE3:
- """
- Get the current end-effector pose as an SE3 object.
- Automatically updates cache if needed.
+ # Cache as flattened 16-vector with mm translation (zero-allocation)
+ state._fkine_flat_mm.reshape(4, 4)[:] = state._fkine_mat
+ state._fkine_flat_mm[3] *= 1000.0 # X translation to mm
+ state._fkine_flat_mm[7] *= 1000.0 # Y translation to mm
+ state._fkine_flat_mm[11] *= 1000.0 # Z translation to mm
- Returns
- -------
- SE3
- Current end-effector pose
- """
- if state is None:
- state = get_state()
- ensure_fkine_updated(state)
- return state._fkine_se3
+ # Update cache tracking
+ state._fkine_last_pos_in[:] = state.Position_in
+ state._fkine_last_tool_name = state.current_tool
+ state._fkine_last_tool_variant = state.current_tool_variant
+ state._fkine_last_tcp_offset = state.tcp_offset_m
-def get_fkine_matrix(state: ControllerState | None = None) -> np.ndarray:
+def get_fkine_se3(state: ControllerState | None = None) -> np.ndarray:
"""
- Get the current end-effector pose as a 4x4 homogeneous transformation matrix.
+ Get the current end-effector pose as a 4x4 SE3 transformation matrix.
Automatically updates cache if needed.
- Translation is in meters.
Returns
-------
np.ndarray
- 4x4 transformation matrix (translation in meters)
+ 4x4 SE3 transformation matrix (translation in meters)
"""
if state is None:
state = get_state()
diff --git a/parol6/server/status_broadcast.py b/parol6/server/status_broadcast.py
index 3104acd..c2828cb 100644
--- a/parol6/server/status_broadcast.py
+++ b/parol6/server/status_broadcast.py
@@ -2,7 +2,7 @@
import logging
import socket
-import threading
+import sys
import time
from parol6 import config as cfg
@@ -12,9 +12,9 @@
logger = logging.getLogger(__name__)
-class StatusBroadcaster(threading.Thread):
+class StatusBroadcaster:
"""
- Broadcasts ASCII STATUS frames via UDP.
+ Broadcasts binary msgpack STATUS frames via UDP. Called from main loop.
Transport:
- cfg.STATUS_TRANSPORT: "MULTICAST" (default) or "UNICAST"
@@ -36,45 +36,40 @@ class StatusBroadcaster(threading.Thread):
def __init__(
self,
state_mgr: StateManager,
- group: str = "239.255.0.101",
- port: int = 50510,
- ttl: int = 1,
- iface_ip: str = "127.0.0.1",
+ group: str = cfg.MCAST_GROUP,
+ port: int = cfg.MCAST_PORT,
+ ttl: int = cfg.MCAST_TTL,
+ iface_ip: str = cfg.MCAST_IF,
rate_hz: float = cfg.STATUS_RATE_HZ,
stale_s: float = cfg.STATUS_STALE_S,
) -> None:
- super().__init__(daemon=True)
self._state_mgr = state_mgr
self.group = group
self.port = port
self.ttl = ttl
self.iface_ip = iface_ip
- self._period = 1.0 / max(rate_hz, 1.0)
self._stale_s = stale_s
# Negotiated transport (can be forced via env or auto-fallback at runtime)
self._use_unicast: bool = cfg.STATUS_TRANSPORT == "UNICAST"
self._sock: socket.socket | None = None
- self._running = threading.Event()
- self._running.set()
-
- # EMA rate tracking for TX
- self._tx_count = 0
- self._tx_last_time = time.monotonic()
- self._tx_ema_period = 1.0 / rate_hz # Initialize with expected period
- self._tx_last_log_time = time.monotonic() # For 3-second logging interval
# Failure tracking for runtime fallback
self._send_failures = 0
self._max_send_failures = 3
+ self._last_fail_log_time = 0.0
+
+ # Setup socket on construction
+ self._setup_socket()
def _detect_primary_ip(self) -> str:
tmp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
tmp.connect(("1.1.1.1", 80))
return tmp.getsockname()[0]
- except Exception:
+ except Exception as e:
+ logger.debug("IP detect: %s", e)
return "127.0.0.1"
finally:
try:
@@ -139,6 +134,7 @@ def _setup_socket(self) -> None:
if self._use_unicast:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1 << 20)
+ sock.setblocking(False)
self._sock = sock
logger.info(
f"StatusBroadcaster (UNICAST) -> dest={cfg.STATUS_UNICAST_HOST}:{self.port}"
@@ -148,7 +144,10 @@ def _setup_socket(self) -> None:
# MULTICAST: configure multicast TTL/IF with verification and fallback
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, self.ttl)
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
+ # macOS requires loopback enabled for multicast to work on localhost
+ if sys.platform == "darwin":
+ sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
+ sock.setblocking(False)
try:
sock.setsockopt(
@@ -194,90 +193,59 @@ def _setup_socket(self) -> None:
f"StatusBroadcaster (MULTICAST) -> group={self.group} port={self.port} iface={self.iface_ip} ttl={self.ttl}"
)
- def run(self) -> None:
- self._setup_socket()
+ def tick(self) -> None:
+ """Broadcast status if cache is fresh. Called from main loop."""
cache = get_cache()
-
- # Validate socket exists
- if self._sock is None:
- logger.error("StatusBroadcaster socket not initialized")
+ try:
+ state = self._state_mgr.get_state()
+ cache.update_from_state(state)
+ except Exception as e:
+ logger.debug("StatusBroadcaster cache refresh failed: %s", e)
return
- # Deadline-based timing to maintain consistent rate
- next_deadline = time.monotonic() + self._period
-
- while self._running.is_set():
- # Always refresh cache from latest state before considering broadcast
- try:
- state = self._state_mgr.get_state()
- cache.update_from_state(state)
- except Exception as e:
- logger.debug("StatusBroadcaster cache refresh failed: %s", e)
-
- # Skip broadcast if cache is stale (e.g., serial disconnected)
- if cache.age_s() <= self._stale_s:
- payload = cache.to_ascii().encode("ascii", errors="ignore")
- # Refresh socket and destination each loop in case we switched transports
- sock = self._sock
- if sock is None:
- # Socket disappeared unexpectedly; try to switch to unicast and continue
- self._switch_to_unicast()
- sock = self._sock
- dest = (
- (cfg.STATUS_UNICAST_HOST, self.port)
- if self._use_unicast
- else (self.group, self.port)
- )
- try:
- sock.sendto(memoryview(payload), dest) # type: ignore[arg-type]
- except OSError as e:
- self._send_failures += 1
- # Log occasionally to avoid flooding
- if time.monotonic() - self._tx_last_log_time >= 5.0:
- logger.warning(f"StatusBroadcaster send failed: {e}")
- self._tx_last_log_time = time.monotonic()
- # If too many failures and we are on multicast, fall back to unicast
- if (
- not self._use_unicast
- and self._send_failures >= self._max_send_failures
- ):
- logger.info(
- f"StatusBroadcaster: {self._send_failures} consecutive send errors; switching to UNICAST"
- )
- self._switch_to_unicast()
- else:
- # Reset failure count on success
- self._send_failures = 0
- # Track TX rate with EMA
- now = time.monotonic()
- if self._tx_count > 0: # Skip first sample for period calculation
- period = now - self._tx_last_time
- if period > 0:
- # EMA update: 0.1 * new + 0.9 * old
- self._tx_ema_period = (
- 0.1 * period + 0.9 * self._tx_ema_period
- )
- self._tx_last_time = now
- self._tx_count += 1
-
- # Log rate every 3 seconds
- if now - self._tx_last_log_time >= 3.0 and self._tx_ema_period > 0:
- tx_hz = 1.0 / self._tx_ema_period
- logger.debug(
- f"Status TX: {tx_hz:.1f} Hz (count={self._tx_count})"
- )
- self._tx_last_log_time = now
+ if cache.age_s() > self._stale_s:
+ return
- # Sleep until next deadline (compensates for work time)
- sleep_time = next_deadline - time.monotonic()
- if sleep_time > 0:
- time.sleep(sleep_time)
- next_deadline += self._period
+ payload = cache.to_binary()
+ sock = self._sock
+ if sock is None:
+ self._switch_to_unicast()
+ sock = self._sock
+ assert sock is not None # _switch_to_unicast always sets _sock
+
+ dest = (
+ (cfg.STATUS_UNICAST_HOST, self.port)
+ if self._use_unicast
+ else (self.group, self.port)
+ )
- def stop(self) -> None:
- self._running.clear()
try:
- if self._sock:
+ sock.sendto(payload, dest)
+ self._send_failures = 0
+
+ except BlockingIOError:
+ pass # Transient kernel buffer pressure — not a multicast failure
+ except OSError as e:
+ self._handle_send_failure(e)
+
+ def _handle_send_failure(self, e: OSError) -> None:
+ """Handle send failure with logging and fallback."""
+ self._send_failures += 1
+ now = time.monotonic()
+ if now - self._last_fail_log_time >= 5.0:
+ logger.warning(f"StatusBroadcaster send failed: {e}")
+ self._last_fail_log_time = now
+ if not self._use_unicast and self._send_failures >= self._max_send_failures:
+ logger.info(
+ f"StatusBroadcaster: {self._send_failures} consecutive send errors; switching to UNICAST"
+ )
+ self._switch_to_unicast()
+
+ def close(self) -> None:
+ """Close socket."""
+ if self._sock:
+ try:
self._sock.close()
- except Exception:
- pass
+ except Exception:
+ pass
+ self._sock = None
diff --git a/parol6/server/status_cache.py b/parol6/server/status_cache.py
index 2ab6a43..5adbda2 100644
--- a/parol6/server/status_cache.py
+++ b/parol6/server/status_cache.py
@@ -1,277 +1,532 @@
-import threading
+"""
+Cache of the aggregate STATUS payload for binary msgpack broadcasting.
+
+The heavy IK enablement computations are delegated to a separate subprocess
+for true CPU parallelism, communicating via shared memory.
+"""
+
+import logging
+import multiprocessing
import time
+from multiprocessing import Process
+from multiprocessing.shared_memory import SharedMemory
+from multiprocessing.synchronize import Event
import numpy as np
-from numpy.typing import ArrayLike
-
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from numba import njit
+from pinokin import arrays_equal_6
+from waldoctl import ActionState, ToolStatus
+
+from parol6.config import speed_steps_to_rad, steps_to_deg, steps_to_rad
+from parol6.protocol.wire import pack_status
+from parol6.tools import get_registry
+from parol6.utils.error_catalog import RobotError
+from parol6.server.ik_layout import (
+ IK_INPUT_Q_OFFSET,
+ IK_INPUT_SIZE,
+ IK_INPUT_T_OFFSET,
+ IK_INPUT_TOOL_OFFSET,
+ IK_OUTPUT_SIZE,
+ SHM_EXTRA_KWARGS,
+ unregister_shm,
+)
+from parol6.server.ik_worker import ik_enablement_worker_main
from parol6.server.state import ControllerState, get_fkine_flat_mm, get_fkine_se3
-from parol6.utils.ik import AXIS_MAP, solve_ik
-from spatialmath import SE3
-from typing import Any
-import math
+from parol6.tools import get_tool_transform
+from parol6 import config as _cfg
+
+logger = logging.getLogger(__name__)
+
+
+def _cleanup_shm(shm: SharedMemory | None) -> None:
+ """Safely close and unlink a shared memory segment."""
+ if shm is None:
+ return
+ try:
+ shm.close()
+ except Exception:
+ pass
+ try:
+ shm.unlink()
+ except Exception:
+ pass
+
+
+@njit(cache=True)
+def _unpack_ik_response_into(
+ buf_arr: np.ndarray,
+ last_version: int,
+ joint_en: np.ndarray,
+ cart_en_wrf: np.ndarray,
+ cart_en_trf: np.ndarray,
+) -> int:
+ """
+ Check version and copy IK response if changed (zero-alloc hot path).
+
+ Returns new version if data was copied, 0 if unchanged.
+ """
+ # Version is at offset 36, little-endian uint64
+ version = np.uint64(0)
+ for i in range(8):
+ version |= np.uint64(buf_arr[36 + i]) << np.uint64(i * 8)
+
+ if version == last_version or version == 0:
+ return 0
+
+ for i in range(12):
+ joint_en[i] = buf_arr[i]
+ cart_en_wrf[i] = buf_arr[12 + i]
+ cart_en_trf[i] = buf_arr[24 + i]
+
+ return int(version)
+
+
+# Export for warmup.py
+unpack_ik_response_into = _unpack_ik_response_into
+
+
+@njit(cache=True)
+def _update_arrays(
+ pos_in: np.ndarray,
+ io_in: np.ndarray,
+ spd_in: np.ndarray,
+ pos_last: np.ndarray,
+ angles_deg: np.ndarray,
+ q_rad_buf: np.ndarray,
+ io_cached: np.ndarray,
+ spd_cached: np.ndarray,
+) -> tuple[bool, bool, bool]:
+ """
+ Check for changes and update cached arrays.
+ Returns (pos_changed, io_changed, spd_changed).
+ """
+ pos_changed = not np.array_equal(pos_in, pos_last)
+ io_changed = not np.array_equal(io_in, io_cached)
+ spd_changed = not np.array_equal(spd_in, spd_cached)
+
+ if pos_changed:
+ pos_last[:] = pos_in
+ steps_to_deg(pos_in, angles_deg)
+ steps_to_rad(pos_in, q_rad_buf)
+ if io_changed:
+ io_cached[:] = io_in
+ if spd_changed:
+ spd_cached[:] = spd_in
+
+ return pos_changed, io_changed, spd_changed
class StatusCache:
"""
- Thread-safe cache of the aggregate STATUS payload components and formatted ASCII.
+ Cache of the aggregate STATUS payload components.
Fields:
- angles_deg: 6 floats
- speeds: 6 ints (steps/sec)
- io: 5 ints [in1,in2,out1,out2,estop]
- - gripper: >=6 ints [id,pos,spd,cur,status,obj]
+ - tool_status: pre-allocated ToolStatus (mutated in-place)
- pose: 16 floats (flattened transform)
- - last_update_s: wall clock time of last cache update
+    - last_serial_s: wall clock time of the last fresh serial frame observed
"""
def __init__(self) -> None:
- self._lock = threading.RLock()
-
# Public snapshots (materialized only when they change)
self.angles_deg: np.ndarray = np.zeros((6,), dtype=np.float64)
self.speeds: np.ndarray = np.zeros((6,), dtype=np.int32)
+ self.speeds_rad_s: np.ndarray = np.zeros((6,), dtype=np.float64)
self.io: np.ndarray = np.zeros((5,), dtype=np.uint8)
- self.gripper: np.ndarray = np.zeros((6,), dtype=np.int32)
self.pose: np.ndarray = np.zeros((16,), dtype=np.float64)
+ self.tcp_speed: float = 0.0 # TCP linear velocity in mm/s
+
+ # Pre-allocated ToolStatus — mutated in-place by populate_status()
+ self.tool_status: ToolStatus = ToolStatus()
- self.last_update_s: float = 0.0 # last cache build (any section)
self.last_serial_s: float = 0.0 # last time a fresh serial frame was observed
self._last_tool_name: str = "NONE" # Track tool changes
-
- # Cached ASCII fragments to reduce allocations
- self._angles_ascii: str = "0,0,0,0,0,0"
- self._speeds_ascii: str = "0,0,0,0,0,0"
- self._io_ascii: str = "0,0,0,0,0"
- self._gripper_ascii: str = "0,0,0,0,0,0"
- self._pose_ascii: str = ",".join("0" for _ in range(16))
+ self._last_tool_variant: str = "" # Track variant changes
+ self._last_tool_positions: tuple[float, ...] = () # Track tool DOF changes
# Action tracking fields
self._action_current: str = ""
- self._action_state: str = "IDLE"
-
- # Enablement arrays (12 ints each)
- self.joint_en = np.ones((12,), dtype=np.uint8)
- self.cart_en_wrf = np.ones((12,), dtype=np.uint8)
- self.cart_en_trf = np.ones((12,), dtype=np.uint8)
- self._joint_en_ascii: str = ",".join(str(int(v)) for v in self.joint_en)
- self._cart_en_wrf_ascii: str = ",".join(str(int(v)) for v in self.cart_en_wrf)
- self._cart_en_trf_ascii: str = ",".join(str(int(v)) for v in self.cart_en_trf)
-
- self._ascii_full: str = (
- f"STATUS|POSE={self._pose_ascii}"
- f"|ANGLES={self._angles_ascii}"
- f"|SPEEDS={self._speeds_ascii}"
- f"|IO={self._io_ascii}"
- f"|GRIPPER={self._gripper_ascii}"
- f"|ACTION_CURRENT={self._action_current}"
- f"|ACTION_STATE={self._action_state}"
- f"|JOINT_EN={self._joint_en_ascii}"
- f"|CART_EN_WRF={self._cart_en_wrf_ascii}"
- f"|CART_EN_TRF={self._cart_en_trf_ascii}"
- )
+ self._action_params: str = ""
+ self._action_state: ActionState = ActionState.IDLE
+
+ # Queue tracking fields
+ self._executing_index: int = -1
+ self._completed_index: int = -1
+ self._last_checkpoint: str = ""
+
+ # Error tracking field
+ self._error: RobotError | None = None
+
+ # Pipeline depth fields
+ self._queued_segments: int = 0
+ self._queued_duration: float = 0.0
+
+ # Binary cache
+ self._binary_cache: bytes = b""
+ self._binary_dirty: bool = True
# Change-detection caches to avoid expensive recomputation when inputs unchanged
self._last_pos_in: np.ndarray = np.zeros((6,), dtype=np.int32)
+ self._last_io_buf: np.ndarray = np.zeros((5,), dtype=np.uint8)
+
+ # Pre-allocated buffer for IK request (avoids allocation per position change)
+ self._q_rad_buf: np.ndarray = np.zeros(6, dtype=np.float64)
+ # Dirty-check: last q_rad submitted to the IK worker
+ self._ik_last_q_rad: np.ndarray = np.full(6, np.nan, dtype=np.float64)
+
+ # TCP speed computation state
+ self._prev_tcp_pos: np.ndarray = np.zeros(3, dtype=np.float64)
+ self._tcp_pos_buf: np.ndarray = np.zeros(3, dtype=np.float64)
+ self._tcp_pos_initialized: bool = False
+
+ self._status_rate_hz: float = _cfg.STATUS_RATE_HZ
+
+ # IK enablement results (pre-allocated for zero-alloc reads)
+ self._joint_en = np.ones(12, dtype=np.uint8)
+ self._cart_en_wrf = np.ones(12, dtype=np.uint8)
+ self._cart_en_trf = np.ones(12, dtype=np.uint8)
+
+ # IK worker subprocess state
+ self._ik_stopped = False
+ self._ik_last_version = 0
+ shm_suffix = f"_{id(self)}"
+ input_name = f"parol6_ik_in{shm_suffix}"
+ output_name = f"parol6_ik_out{shm_suffix}"
+
+ # Create shared memory segments
+ self._ik_input_shm = SharedMemory(
+ name=input_name, create=True, size=IK_INPUT_SIZE, **SHM_EXTRA_KWARGS
+ )
+ self._ik_output_shm = SharedMemory(
+ name=output_name, create=True, size=IK_OUTPUT_SIZE, **SHM_EXTRA_KWARGS
+ )
+ unregister_shm(self._ik_input_shm)
+ unregister_shm(self._ik_output_shm)
+
+ # SharedMemory.buf is always non-None after successful __init__
+ input_buf = self._ik_input_shm.buf
+ output_buf = self._ik_output_shm.buf
+ assert input_buf is not None
+ assert output_buf is not None
+
+ # Initialize with zeros
+ np.frombuffer(input_buf, dtype=np.uint8)[:] = 0
+ np.frombuffer(output_buf, dtype=np.uint8)[:] = 0
+
+ # Memoryviews for cleanup
+ self._ik_input_mv = memoryview(input_buf)
+ self._ik_output_mv = memoryview(output_buf)
+
+ # Zero-alloc input views: write directly into shared memory
+ self._ik_input_q_view = np.frombuffer(
+ input_buf,
+ dtype=np.float64,
+ count=6,
+ offset=IK_INPUT_Q_OFFSET,
+ )
+ self._ik_input_T_view = np.frombuffer(
+ input_buf,
+ dtype=np.float64,
+ count=16,
+ offset=IK_INPUT_T_OFFSET,
+ )
+ # Tool transform view for syncing tool changes to IK worker
+ self._ik_input_tool_view = np.frombuffer(
+ input_buf,
+ dtype=np.float64,
+ count=16,
+ offset=IK_INPUT_TOOL_OFFSET,
+ )
+ # Initialize to identity (no tool)
+ self._ik_input_tool_view.reshape(4, 4)[:] = np.eye(4)
+
+ # Zero-alloc output view for numba reader
+ self._ik_output_arr = np.frombuffer(output_buf, dtype=np.uint8)
+
+ # Spawn subprocess
+ self._ik_shutdown_event: Event = multiprocessing.Event()
+ self._ik_request_event: Event = multiprocessing.Event()
+ self._ik_process: Process = Process(
+ target=ik_enablement_worker_main,
+ args=(
+ input_name,
+ output_name,
+ self._ik_shutdown_event,
+ self._ik_request_event,
+ ),
+ daemon=True,
+ name="IKWorkerProcess",
+ )
+ self._ik_process.start()
+ logger.debug(f"IK worker started, PID: {self._ik_process.pid}")
- def _format_csv_from_list(self, vals: ArrayLike) -> str:
- # Using str() on each value preserves prior formatting semantics
- return ",".join(str(v) for v in vals) # type: ignore
-
- def _compute_joint_enable(
- self, q_rad: np.ndarray, delta_rad: float = math.radians(0.2)
- ) -> None:
- """Compute per-joint +/- enable bits based on joint limits and a small delta."""
- # Be robust to uninitialized robot in type-checked context
- robot: Any = getattr(PAROL6_ROBOT, "robot", None)
- if robot is None:
- self.joint_en[:] = 1
+ def _stop_ik_worker(self) -> None:
+ """Shut down the IK worker subprocess and release resources."""
+ if self._ik_stopped:
return
- qlim = getattr(robot, "qlim", None)
- if qlim is None:
- self.joint_en[:] = 1
+ self._ik_stopped = True
+
+ # Signal shutdown
+ self._ik_shutdown_event.set()
+
+ # Wait for process to exit
+ if self._ik_process.is_alive():
+ self._ik_process.join(timeout=2.0)
+ if self._ik_process.is_alive():
+ logger.warning("IK worker did not exit cleanly, terminating")
+ self._ik_process.terminate()
+ self._ik_process.join(timeout=1.0)
+
+ # Wait for exitcode to be set (subprocess's finally block completed)
+ deadline = time.time() + 1.0
+ while self._ik_process.exitcode is None and time.time() < deadline:
+ time.sleep(0.01)
+
+ # Release numpy views before closing shared memory
+ del self._ik_input_q_view
+ del self._ik_input_T_view
+ del self._ik_input_tool_view
+ del self._ik_output_arr
+
+ # Release memoryviews
+ try:
+ self._ik_input_mv.release()
+ except BufferError:
+ pass
+ try:
+ self._ik_output_mv.release()
+ except BufferError:
+ pass
+
+ # Clean up shared memory
+ _cleanup_shm(self._ik_input_shm)
+ _cleanup_shm(self._ik_output_shm)
+ logger.debug("IK worker stopped")
+
+ def _submit_ik_request(self, q_rad: np.ndarray, T_matrix: np.ndarray) -> None:
+ """Submit an IK enablement request (non-blocking, zero-alloc).
+
+ Skips submission if q_rad hasn't changed since the last request.
+ """
+ if self._ik_stopped:
+ return
+ if arrays_equal_6(q_rad[:6], self._ik_last_q_rad):
return
- allow_plus = (q_rad + delta_rad) <= qlim[1, :]
- allow_minus = (q_rad - delta_rad) >= qlim[0, :]
- # Pack into [J1+,J1-,J2+,J2-,...,J6+,J6-]
- bits = []
- for i in range(6):
- bits.append(1 if allow_plus[i] else 0)
- bits.append(1 if allow_minus[i] else 0)
- self.joint_en[:] = np.asarray(bits, dtype=np.uint8)
- self._joint_en_ascii = self._format_csv_from_list(self.joint_en.tolist())
-
- def _compute_cart_enable(
- self,
- T: SE3,
- frame: str,
- q_rad: np.ndarray,
- delta_mm: float = 0.5,
- delta_deg: float = 0.5,
- ) -> None:
- """Compute per-axis +/- enable bits for the given frame (WRF/TRF) via small-step IK."""
- bits = []
- # Build small delta transforms
- t_step_m = delta_mm / 1000.0
- r_step_rad = math.radians(delta_deg)
- for axis, (v_lin, v_rot) in AXIS_MAP.items():
- # Compose delta SE3 for this axis
- dT = SE3()
- # Translation
- dx = v_lin[0] * t_step_m
- dy = v_lin[1] * t_step_m
- dz = v_lin[2] * t_step_m
- if abs(dx) > 0 or abs(dy) > 0 or abs(dz) > 0:
- dT = dT * SE3(dx, dy, dz)
- # Rotation
- rx = v_rot[0] * r_step_rad
- ry = v_rot[1] * r_step_rad
- rz = v_rot[2] * r_step_rad
- if abs(rx) > 0:
- dT = dT * SE3.Rx(rx)
- if abs(ry) > 0:
- dT = dT * SE3.Ry(ry)
- if abs(rz) > 0:
- dT = dT * SE3.Rz(rz)
-
- # Apply in specified frame
- if frame == "WRF":
- T_target = dT * T
- else: # TRF
- T_target = T * dT
+ self._ik_last_q_rad[:] = q_rad[:6]
+ self._ik_input_q_view[:] = q_rad[:6]
+ self._ik_input_T_view[:] = T_matrix.flat[:16]
+ self._ik_request_event.set()
+
+ def _poll_ik_results(self) -> bool:
+ """Check for new IK results (non-blocking, zero-alloc). Returns True if updated."""
+ if self._ik_stopped:
+ return False
+ new_version = _unpack_ik_response_into(
+ self._ik_output_arr,
+ self._ik_last_version,
+ self._joint_en,
+ self._cart_en_wrf,
+ self._cart_en_trf,
+ )
+ if new_version > 0:
+ self._ik_last_version = new_version
+ return True
+ return False
- try:
- ik = solve_ik(
- PAROL6_ROBOT.robot,
- T_target,
- q_rad,
- jogging=True,
- quiet_logging=True,
- )
- bits.append(1 if ik.success else 0)
- except Exception:
- bits.append(0)
-
- arr = np.asarray(bits, dtype=np.uint8)
- if frame == "WRF":
- self.cart_en_wrf[:] = arr
- self._cart_en_wrf_ascii = self._format_csv_from_list(arr.tolist())
- else:
- self.cart_en_trf[:] = arr
- self._cart_en_trf_ascii = self._format_csv_from_list(arr.tolist())
+ def close(self) -> None:
+ """Shut down the IK worker subprocess and release resources."""
+ self._stop_ik_worker()
+
+ def __del__(self) -> None:
+ """Safety net: ensure IK worker is stopped if close() was not called."""
+ self.close()
def update_from_state(self, state: ControllerState) -> None:
"""
Update cache from current controller state with change gating:
- - Only recompute angles/pose when Position_in changes (and beyond optional deadband)
- - Only refresh IO/speeds/gripper when their inputs actually change
+ - Only recompute angles/pose when Position_in changes
+ - Only refresh IO/speeds when their inputs actually change
+ - Tool status populated via tool config's populate_status()
+ - IK enablement is computed asynchronously in a subprocess
"""
- now = time.time()
- changed_any = False
-
- with self._lock:
- # Check if position or tool changed
- tool_changed = state.current_tool != self._last_tool_name
- pos_changed = self._last_pos_in is None or not np.array_equal(
- state.Position_in, self._last_pos_in
+ # Do change detection
+ self._last_io_buf[:] = state.InOut_in[:5]
+ pos_changed, io_changed, spd_changed = _update_arrays(
+ state.Position_in,
+ self._last_io_buf,
+ state.Speed_in,
+ self._last_pos_in,
+ self.angles_deg,
+ self._q_rad_buf,
+ self.io,
+ self.speeds,
+ )
+ tool_changed = (
+ state.current_tool != self._last_tool_name
+ or state.current_tool_variant != self._last_tool_variant
+ )
+
+ # Convert speeds from steps/s to rad/s when they change
+ if spd_changed:
+ speed_steps_to_rad(self.speeds, self.speeds_rad_s)
+
+ if tool_changed:
+ self._last_tool_name = state.current_tool
+ self._last_tool_variant = state.current_tool_variant
+ self._tcp_pos_initialized = (
+ False # avoid speed spike from TCP offset change
)
+ # Sync tool transform to IK worker
+ T_tool = get_tool_transform(
+ state.current_tool, variant_key=state.current_tool_variant
+ )
+ self._ik_input_tool_view.reshape(4, 4)[:] = T_tool
+
+ if pos_changed or tool_changed:
+ self.pose[:] = get_fkine_flat_mm(state)
+
+ # Compute TCP speed from consecutive FK positions (mm/s)
+ # pose is row-major 4x4: translation at indices 3,7,11
+ self._tcp_pos_buf[0] = self.pose[3]
+ self._tcp_pos_buf[1] = self.pose[7]
+ self._tcp_pos_buf[2] = self.pose[11]
+ if self._tcp_pos_initialized:
+ dt = 1.0 / self._status_rate_hz
+ dx = self._tcp_pos_buf[0] - self._prev_tcp_pos[0]
+ dy = self._tcp_pos_buf[1] - self._prev_tcp_pos[1]
+ dz = self._tcp_pos_buf[2] - self._prev_tcp_pos[2]
+ self.tcp_speed = (dx * dx + dy * dy + dz * dz) ** 0.5 / dt
+ else:
+ self._tcp_pos_initialized = True
+ self._prev_tcp_pos[:] = self._tcp_pos_buf
+ else:
+ # Robot not moving — reset TCP speed to zero
+ self.tcp_speed = 0.0
- if pos_changed or tool_changed:
- if pos_changed:
- np.copyto(self._last_pos_in, state.Position_in)
- if tool_changed:
- self._last_tool_name = state.current_tool
-
- # Vectorized steps->deg
- self.angles_deg = np.asarray(
- PAROL6_ROBOT.ops.steps_to_deg(state.Position_in)
- ) # float64, shape (6,)
- # Publish angles list and ASCII
- self._angles_ascii = self._format_csv_from_list(self.angles_deg)
- changed_any = True
-
- # Get cached fkine (automatically updates if needed)
- pose_flat_mm = get_fkine_flat_mm(state) # Already in mm for translation
- np.copyto(self.pose, pose_flat_mm)
- self._pose_ascii = self._format_csv_from_list(self.pose)
- changed_any = True
-
- # Compute enablement arrays at 50 Hz when pose/angles change
- try:
- q_rad = np.asarray(
- PAROL6_ROBOT.ops.steps_to_rad(state.Position_in), dtype=float
- )
- except Exception:
- q_rad = np.zeros((6,), dtype=float)
- try:
- T = get_fkine_se3(state)
- except Exception:
- T = SE3()
- # JOINT_EN
- self._compute_joint_enable(q_rad)
- # CART_EN for both frames
- self._compute_cart_enable(T, "WRF", q_rad)
- self._compute_cart_enable(T, "TRF", q_rad)
-
- # 2) IO (first 5)
- if not np.array_equal(self.io, state.InOut_in[:5]):
- np.copyto(self.io, state.InOut_in[:5])
- self._io_ascii = self._format_csv_from_list(self.io)
- changed_any = True
-
- # 3) Speeds (steps/sec from Speed_in)
- if not np.array_equal(self.speeds, state.Speed_in):
- np.copyto(self.speeds, state.Speed_in)
- self._speeds_ascii = self._format_csv_from_list(self.speeds)
- changed_any = True
-
- # 4) Gripper
- if not np.array_equal(self.gripper, state.Gripper_data_in):
- np.copyto(self.gripper, state.Gripper_data_in)
- self._gripper_ascii = self._format_csv_from_list(self.gripper)
- changed_any = True
-
- # 5) Action tracking
- if (
- self._action_current != state.action_current
- or self._action_state != state.action_state
- ):
- self._action_current = state.action_current
- self._action_state = state.action_state
- changed_any = True
-
- # 6) Assemble full ASCII only if any section changed
- if changed_any:
- self._ascii_full = (
- f"STATUS|POSE={self._pose_ascii}"
- f"|ANGLES={self._angles_ascii}"
- f"|SPEEDS={self._speeds_ascii}"
- f"|IO={self._io_ascii}"
- f"|GRIPPER={self._gripper_ascii}"
- f"|ACTION_CURRENT={self._action_current}"
- f"|ACTION_STATE={self._action_state}"
- f"|JOINT_EN={self._joint_en_ascii}"
- f"|CART_EN_WRF={self._cart_en_wrf_ascii}"
- f"|CART_EN_TRF={self._cart_en_trf_ascii}"
- )
- self.last_update_s = now
-
- def to_ascii(self) -> str:
- """Return the full ASCII STATUS payload."""
- with self._lock:
- return self._ascii_full
+ # Submit IK request asynchronously
+ try:
+ T_matrix = get_fkine_se3(state)
+ self._submit_ik_request(self._q_rad_buf, T_matrix)
+ except (ValueError, OSError):
+ pass
+
+ # Populate tool status from hardware state via the tool config
+ ts = self.tool_status
+ ts.key = state.current_tool
+ cfg = get_registry().get(state.current_tool)
+ if cfg is not None:
+ cfg.populate_status(state, ts)
+
+ # Track tool DOF position changes (gripper jaw, spindle, etc.)
+ tool_status_changed = ts.positions != self._last_tool_positions
+ if tool_status_changed:
+ self._last_tool_positions = ts.positions
+
+ # Poll for async IK results (non-blocking, zero-alloc)
+ ik_changed = self._poll_ik_results()
+
+ action_changed = (
+ self._action_current != state.action_current
+ or self._action_params != state.action_params
+ or self._action_state != state.action_state
+ )
+ if action_changed:
+ self._action_current = state.action_current
+ self._action_params = state.action_params
+ self._action_state = state.action_state
+
+ queue_changed = (
+ self._executing_index != state.executing_command_index
+ or self._completed_index != state.completed_command_index
+ or self._last_checkpoint != state.last_checkpoint
+ )
+ if queue_changed:
+ self._executing_index = state.executing_command_index
+ self._completed_index = state.completed_command_index
+ self._last_checkpoint = state.last_checkpoint
+
+ error_changed = self._error != state.error
+ if error_changed:
+ self._error = state.error
+
+ depth_changed = (
+ self._queued_segments != state.queued_segments
+ or self._queued_duration != state.queued_duration
+ )
+ if depth_changed:
+ self._queued_segments = state.queued_segments
+ self._queued_duration = state.queued_duration
+
+ # Mark binary cache dirty if anything changed
+ if (
+ pos_changed
+ or tool_changed
+ or tool_status_changed
+ or io_changed
+ or spd_changed
+ or ik_changed
+ or action_changed
+ or queue_changed
+ or error_changed
+ or depth_changed
+ ):
+ self._binary_dirty = True
+
+ def to_binary(self) -> bytes:
+ """Return the msgpack-encoded STATUS payload."""
+ if self._binary_dirty:
+ from parol6.server.transports.transport_factory import is_simulation_mode
+
+ self._binary_cache = pack_status(
+ self.pose,
+ self.angles_deg,
+ self.speeds_rad_s,
+ self.io,
+ self._action_current,
+ self._action_state,
+ self._joint_en,
+ self._cart_en_wrf,
+ self._cart_en_trf,
+ self._executing_index,
+ self._completed_index,
+ self._last_checkpoint,
+ self._error,
+ self._queued_segments,
+ self._queued_duration,
+ self._action_params,
+ self.tool_status,
+ self.tcp_speed,
+ simulator_active=is_simulation_mode(),
+ )
+ self._binary_dirty = False
+ return self._binary_cache
def mark_serial_observed(self) -> None:
"""Mark that a fresh serial frame was observed just now."""
- with self._lock:
- self.last_serial_s = time.time()
+ self.last_serial_s = time.monotonic()
def age_s(self) -> float:
"""Seconds since last fresh serial observation (used to gate broadcasting)."""
- with self._lock:
- if self.last_serial_s <= 0:
- return 1e9
- return time.time() - self.last_serial_s
+ if self.last_serial_s <= 0:
+ return 1e9
+ return time.monotonic() - self.last_serial_s
+
+ @property
+ def joint_en(self) -> np.ndarray:
+ """Joint enablement flags (12 elements)."""
+ return self._joint_en
+
+ @property
+ def cart_en_wrf(self) -> np.ndarray:
+ """Cartesian enablement flags in world reference frame (12 elements)."""
+ return self._cart_en_wrf
+
+ @property
+ def cart_en_trf(self) -> np.ndarray:
+ """Cartesian enablement flags in tool reference frame (12 elements)."""
+ return self._cart_en_trf
# Module-level singleton
@@ -283,3 +538,11 @@ def get_cache() -> StatusCache:
if _status_cache is None:
_status_cache = StatusCache()
return _status_cache
+
+
+def close_cache() -> None:
+ """Shut down the global cache if it exists."""
+ global _status_cache
+ if _status_cache is not None:
+ _status_cache.close()
+ _status_cache = None
diff --git a/parol6/server/transport_manager.py b/parol6/server/transport_manager.py
new file mode 100644
index 0000000..31877a3
--- /dev/null
+++ b/parol6/server/transport_manager.py
@@ -0,0 +1,328 @@
+"""Transport lifecycle management for serial and mock transports."""
+
+import logging
+import os
+import threading
+import time
+from typing import Any
+
+import numpy as np
+
+from parol6.config import get_com_port_with_fallback
+from parol6.server.transports import create_and_connect_transport, is_simulation_mode
+from parol6.server.transports.mock_serial_transport import MockSerialTransport
+from parol6.server.transports.serial_transport import SerialTransport
+
+logger = logging.getLogger("parol6.server.transport_manager")
+
+
+class TransportManager:
+ """Manages serial transport lifecycle (connect, disconnect, reconnect, switching).
+
+ Encapsulates transport creation, connection management, and switching between
+ real serial and simulator transports.
+ """
+
+ def __init__(
+ self,
+ shutdown_event: threading.Event,
+ serial_port: str | None = None,
+ serial_baudrate: int = 3000000,
+ ):
+ """Initialize the transport manager.
+
+ Args:
+ shutdown_event: Event to signal shutdown to transport threads.
+ serial_port: Initial serial port (or None for auto-detect).
+ serial_baudrate: Serial baud rate.
+ """
+ self._shutdown_event = shutdown_event
+ self.serial_port = serial_port
+ self.serial_baudrate = serial_baudrate
+
+ self.transport: SerialTransport | MockSerialTransport | None = None
+ self.first_frame_received = False
+ self._last_version = 0
+
+ def initialize(self) -> bool:
+ """Create and connect initial transport.
+
+ Returns:
+ True if transport was created (may not be connected yet).
+ """
+ # Load persisted COM port if not provided
+ try:
+ if not self.serial_port:
+ persisted = get_com_port_with_fallback()
+ if persisted:
+ self.serial_port = persisted
+ logger.debug("Using persisted serial port: %s", persisted)
+ except Exception as e:
+ logger.debug("Failed to load persisted COM port: %s", e)
+
+ # Create transport
+ if self.serial_port or is_simulation_mode():
+ self.transport = create_and_connect_transport(
+ port=self.serial_port,
+ baudrate=self.serial_baudrate,
+ auto_find_port=True,
+ )
+
+ if self.transport:
+ if self.transport.is_connected():
+ logger.debug("Connected to transport: %s", self.transport.port)
+ else:
+ logger.warning(
+ "Serial transport not connected at startup (port=%s)",
+ self.serial_port,
+ )
+ return True
+ else:
+ logger.warning(
+ "No serial port configured. Waiting for CONNECT_HARDWARE via UDP or set --serial/PAROL6_COM_PORT/com_port.txt before connecting."
+ )
+
+ return False
+
+ def is_connected(self) -> bool:
+ """Check if transport is connected."""
+ return self.transport is not None and self.transport.is_connected()
+
+ def poll_serial(self) -> bool:
+ """Poll serial for new data. Called each tick."""
+ if not self.transport or not self.transport.is_connected():
+ return False
+ return self.transport.poll_read()
+
+ def auto_reconnect(self) -> bool:
+ """Attempt reconnection if disconnected.
+
+ Returns:
+ True if reconnection was successful.
+ """
+ if not self.transport or self.transport.is_connected():
+ return False
+
+ if self.transport.auto_reconnect():
+ self.first_frame_received = False
+ logger.info("Serial reconnected")
+ return True
+
+ return False
+
+ def switch_to_port(self, port: str) -> bool:
+ """Switch to a new serial port (handles CONNECT_HARDWARE command).
+
+ Args:
+ port: New serial port path.
+
+ Returns:
+ True if switch was successful.
+ """
+ if self.transport is not None:
+ try:
+ self.transport.disconnect()
+ except Exception as e:
+ logger.debug("Error disconnecting transport before switch: %s", e)
+
+ self.serial_port = port
+
+ try:
+ self.transport = create_and_connect_transport(
+ port=port,
+ baudrate=self.serial_baudrate,
+ auto_find_port=False,
+ )
+ if self.transport and self.transport.is_connected():
+ self.first_frame_received = False
+ logger.info("Serial switched to port %s", port)
+ return True
+ except Exception as e:
+ logger.warning("Failed to (re)connect serial on CONNECT_HARDWARE: %s", e)
+
+ return False
+
+ def switch_simulator_mode(
+ self, enable: bool, sync_state: Any | None = None
+ ) -> tuple[bool, str | None]:
+ """Switch between real serial and simulator transport.
+
+ Args:
+ enable: True to enable simulator, False for real serial.
+ sync_state: Optional ControllerState to sync simulator to.
+
+ Returns:
+ Tuple of (success, error_message).
+ """
+ mode_str = "on" if enable else "off"
+
+ # Skip if already in the desired mode
+ already_simulator = isinstance(self.transport, MockSerialTransport)
+ if enable == already_simulator and self.transport is not None:
+ logger.debug("Already in simulator mode=%s, skipping switch", mode_str)
+ return True, None
+
+ try:
+ # Update env to drive transport_factory.is_simulation_mode()
+ os.environ["PAROL6_FAKE_SERIAL"] = "1" if enable else "0"
+
+ # Disconnect existing transport
+ if self.transport:
+ try:
+ self.transport.disconnect()
+ except Exception as e:
+ logger.debug("Transport disconnect: %s", e)
+
+ # Recreate transport according to new mode
+ self.transport = create_and_connect_transport(
+ port=self.serial_port,
+ baudrate=self.serial_baudrate,
+ auto_find_port=True,
+ )
+
+ # If enabling simulator, sync to last known controller state
+ if (
+ enable
+ and sync_state is not None
+ and isinstance(self.transport, MockSerialTransport)
+ ):
+ try:
+ self.transport.sync_from_controller_state(sync_state)
+ except Exception as e:
+ logger.warning(
+ "Failed to sync simulator from controller state: %s", e
+ )
+
+ if self.transport:
+ self.first_frame_received = False
+ logger.info("Simulator mode toggled to %s", "on" if enable else "off")
+
+ # Wait for first frame with timeout
+ if not self._wait_for_first_frame(timeout=0.5):
+ logger.warning(
+ "Timeout waiting for first frame after SIMULATOR toggle"
+ )
+
+ return True, None
+
+ return False, "Failed to create transport"
+
+ except Exception as e:
+ logger.warning("Failed to (re)configure transport on SIMULATOR: %s", e)
+ return False, f"Transport switch failed: {e}"
+
+ def get_latest_frame(self) -> tuple[memoryview | None, int, float]:
+ """Get latest frame from transport.
+
+ Returns:
+ Tuple of (memoryview, version, timestamp) or (None, 0, 0.0) if unavailable.
+ """
+ if not self.transport or not self.transport.is_connected():
+ return None, 0, 0.0
+
+ try:
+ return self.transport.get_latest_frame_view()
+ except Exception as e:
+ logger.warning("Error getting latest frame: %s", e)
+ return None, 0, 0.0
+
+ def write_frame(
+ self,
+ position_out: np.ndarray,
+ speed_out: np.ndarray,
+ command_value: int,
+ affected_joint_out: np.ndarray,
+ inout_out: np.ndarray,
+ timeout_out: int,
+ gripper_data_out: np.ndarray,
+ ) -> bool:
+ """Write frame to transport every tick.
+
+ Args:
+ position_out: Position output array.
+ speed_out: Speed output array.
+ command_value: Command code value.
+ affected_joint_out: Affected joint array.
+ inout_out: I/O output array.
+ timeout_out: Timeout value.
+ gripper_data_out: Gripper data array.
+
+ Returns:
+ True if frame was written successfully.
+ """
+ if not self.transport or not self.transport.is_connected():
+ return False
+
+ try:
+ return self.transport.write_frame(
+ position_out,
+ speed_out,
+ command_value,
+ affected_joint_out,
+ inout_out,
+ timeout_out,
+ gripper_data_out,
+ )
+ except Exception as e:
+ logger.warning("Error writing frame: %s", e)
+ return False
+
+ def disconnect(self) -> None:
+ """Disconnect transport."""
+ if self.transport:
+ try:
+ self.transport.disconnect()
+ except Exception as e:
+ logger.debug("Error disconnecting transport: %s", e)
+ self.transport = None
+
+ def sync_mock_from_state(self, state: Any) -> None:
+ """Sync mock transport from controller state after RESET.
+
+ Args:
+ state: ControllerState to sync from.
+ """
+ if isinstance(self.transport, MockSerialTransport):
+ self.transport.sync_from_controller_state(state)
+ # Skip stale frames
+ _, ver, _ = self.transport.get_latest_frame_view()
+ self._last_version = ver
+
+ def tick_simulation(
+ self,
+ tool_name: str = "NONE",
+ tool_teleport_pos: float = -1.0,
+ ) -> None:
+ """Tick mock transport simulation if using MockSerialTransport.
+
+ Called by controller each loop iteration for lockstep simulation.
+ No-op for real serial transport.
+ """
+ if isinstance(self.transport, MockSerialTransport):
+ self.transport.tick_simulation(tool_name, tool_teleport_pos)
+
+ def _wait_for_first_frame(self, timeout: float = 0.5) -> bool:
+ """Wait for first frame with timeout.
+
+ Args:
+ timeout: Maximum wait time in seconds.
+
+ Returns:
+ True if frame was received.
+ """
+ if not self.transport:
+ return False
+
+ wait_start = time.perf_counter()
+ while time.perf_counter() - wait_start < timeout:
+ self.transport.poll_read()
+ mv, ver, _ = self.transport.get_latest_frame_view()
+ if mv is not None and ver > 0:
+ self.first_frame_received = True
+ logger.info(
+ "First frame received (%.3fs)", time.perf_counter() - wait_start
+ )
+ return True
+ time.sleep(0.01)
+
+ return False
diff --git a/parol6/server/transports/mock_serial_transport.py b/parol6/server/transports/mock_serial_transport.py
index 0bbc7d8..4cb4c38 100644
--- a/parol6/server/transports/mock_serial_transport.py
+++ b/parol6/server/transports/mock_serial_transport.py
@@ -8,19 +8,290 @@
"""
import logging
-import threading
import time
from dataclasses import dataclass, field
+from typing import TYPE_CHECKING
import numpy as np
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
from parol6 import config as cfg
-from parol6.protocol.wire import CommandCode, split_to_3_bytes
+from parol6.config import LIMITS
+from numba import njit
+
+from parol6.protocol.wire import (
+ CommandCode,
+ _pack_bitfield,
+ _pack_positions,
+)
from parol6.server.state import ControllerState
+if TYPE_CHECKING:
+ from parol6.tools import ToolSimulator
+
logger = logging.getLogger(__name__)
+# Gripper ramp array indices and flag values (used in @njit functions)
+_RAMP_TARGET = 0 # target position byte (0–255)
+_RAMP_SPEED = 1 # speed byte (0–255)
+_RAMP_ACTIVE = 2 # active flag: _RAMP_ON or _RAMP_OFF
+_RAMP_ON = 1.0
+_RAMP_OFF = 0.0
+
+
+@njit(cache=True)
+def _simulate_motion_jit(
+ position_f: np.ndarray,
+ position_in: np.ndarray,
+ speed_in: np.ndarray,
+ speed_out: np.ndarray,
+ position_out: np.ndarray,
+ homed_in: np.ndarray,
+ io_in: np.ndarray,
+ prev_pos_f: np.ndarray,
+ vmax_f: np.ndarray,
+ jmin_f: np.ndarray,
+ jmax_f: np.ndarray,
+ home_angles_deg: np.ndarray,
+ command_out: int,
+ dt: float,
+ homing_countdown: int,
+) -> tuple[int, int]:
+ """JIT-compiled motion simulation. Returns (new_homing_countdown, new_command_out).
+
+ Note: CommandCode enums are used directly inside the function (resolved at compile time).
+ Passing enums as arguments would add ~90µs overhead per call.
+ """
+ # Handle homing countdown
+ if homing_countdown > 0:
+ homing_countdown -= 1
+ if homing_countdown == 0:
+ # Homing complete
+ homed_in.fill(1)
+ for i in range(6):
+ steps = cfg.deg_to_steps_scalar(home_angles_deg[i], i)
+ position_in[i] = steps
+ position_f[i] = float(steps)
+ speed_in[i] = 0
+ command_out = CommandCode.IDLE
+
+ # Ensure E-stop stays released
+ io_in[4] = 1
+
+ # Simulate motion based on command type
+ if command_out == CommandCode.HOME:
+ if homing_countdown == 0:
+ for i in range(6):
+ homed_in[i] = 0
+ homing_countdown = max(1, int(0.2 / cfg.INTERVAL_S + 0.5))
+ for i in range(6):
+ speed_in[i] = 0
+
+ elif command_out == CommandCode.JOG or command_out == 123:
+ prev_pos_f[:] = position_f
+
+ # Scalar loop avoids allocations from np.clip().astype() and array arithmetic
+ for i in range(6):
+ v = float(speed_out[i])
+ vmax = vmax_f[i]
+ if v > vmax:
+ v = vmax
+ elif v < -vmax:
+ v = -vmax
+
+ new_pos = position_f[i] + v * dt
+ if new_pos < jmin_f[i]:
+ new_pos = jmin_f[i]
+ elif new_pos > jmax_f[i]:
+ new_pos = jmax_f[i]
+ position_f[i] = new_pos
+
+ if dt > 0:
+ inv_dt = 1.0 / dt
+ for i in range(6):
+ v = round((position_f[i] - prev_pos_f[i]) * inv_dt)
+ vmax = vmax_f[i]
+ if v > vmax:
+ v = vmax
+ elif v < -vmax:
+ v = -vmax
+ speed_in[i] = int(v)
+ else:
+ speed_in.fill(0)
+
+ elif command_out == CommandCode.MOVE or command_out == 156:
+ prev_pos_f[:] = position_f
+
+ for i in range(6):
+ target = float(position_out[i])
+ current_f = position_f[i]
+ err_f = target - current_f
+
+ max_step_f = vmax_f[i] * dt
+ if max_step_f < 1.0:
+ max_step_f = 1.0
+
+ move = err_f
+ if move > max_step_f:
+ move = max_step_f
+ elif move < -max_step_f:
+ move = -max_step_f
+
+ pos_f = current_f + move
+ if pos_f < jmin_f[i]:
+ pos_f = jmin_f[i]
+ elif pos_f > jmax_f[i]:
+ pos_f = jmax_f[i]
+ position_f[i] = pos_f
+
+ if dt > 0:
+ inv_dt = 1.0 / dt
+ for i in range(6):
+ v = round((position_f[i] - prev_pos_f[i]) * inv_dt)
+ vmax = vmax_f[i]
+ if v > vmax:
+ v = vmax
+ elif v < -vmax:
+ v = -vmax
+ speed_in[i] = int(v)
+ else:
+ speed_in.fill(0)
+
+ elif command_out == CommandCode.TELEPORT:
+ # Instant position set — no ramping
+ for i in range(6):
+ position_in[i] = position_out[i]
+ position_f[i] = float(position_out[i])
+ speed_in[i] = 0
+ command_out = CommandCode.IDLE
+
+ else:
+ for i in range(6):
+ speed_in[i] = 0
+
+ # Sync integer position from float accumulator
+ for i in range(6):
+ position_in[i] = int(round(position_f[i]))
+
+ return homing_countdown, command_out
+
+
+@njit(cache=True)
+def _write_frame_jit(
+ state_position_out: np.ndarray,
+ state_speed_out: np.ndarray,
+ state_gripper_data_in: np.ndarray,
+ position_out: np.ndarray,
+ speed_out: np.ndarray,
+ gripper_data_out: np.ndarray,
+ gripper_ramp: np.ndarray,
+) -> None:
+ """JIT-compiled frame write processing."""
+ state_position_out[:] = position_out
+ state_speed_out[:] = speed_out
+
+ # Simulate gripper state updates
+ if gripper_data_out[4] == 1: # Calibration mode
+ state_gripper_data_in[0] = gripper_data_out[5]
+ state_gripper_data_in[4] = 0x40
+ elif gripper_data_out[4] == 2: # Error clear mode
+ state_gripper_data_in[4] &= ~0x20
+
+ if gripper_data_out[3] != 0:
+ new_target = float(gripper_data_out[0])
+ new_speed = float(gripper_data_out[1])
+ # Only (re-)activate ramp when target or speed changes, or ramp is idle.
+ # ElectricGripperCommand sends command_bits every tick — without this
+ # guard, the ramp would be re-activated every tick with the same target.
+ if (
+ gripper_ramp[_RAMP_ACTIVE] < _RAMP_ON
+ or gripper_ramp[_RAMP_TARGET] != new_target
+ or gripper_ramp[_RAMP_SPEED] != new_speed
+ ):
+ gripper_ramp[_RAMP_TARGET] = new_target
+ gripper_ramp[_RAMP_SPEED] = new_speed
+ gripper_ramp[_RAMP_ACTIVE] = _RAMP_ON
+ # Echo speed to feedback (not position — ramp handles that)
+ state_gripper_data_in[2] = gripper_data_out[1]
+ # Current: only drawn while ramp is actively moving
+ if gripper_ramp[_RAMP_ACTIVE] >= _RAMP_ON:
+ state_gripper_data_in[3] = gripper_data_out[2]
+ else:
+ state_gripper_data_in[3] = 0
+
+
+@njit(cache=True)
+def _simulate_gripper_ramp_jit(
+ gripper_ramp: np.ndarray,
+ gripper_data_in: np.ndarray,
+ gripper_pos_f: float,
+ dt: float,
+ tick_range: float,
+ min_speed: float,
+ max_speed: float,
+) -> float:
+ """Ramp gripper feedback_position toward target at speed-dependent rate.
+
+ Returns updated gripper_pos_f. Zero heap allocations.
+ """
+ if gripper_ramp[_RAMP_ACTIVE] < _RAMP_ON:
+ return gripper_pos_f
+ if tick_range == 0.0:
+ return gripper_pos_f
+ target = gripper_ramp[_RAMP_TARGET]
+ speed_byte = gripper_ramp[_RAMP_SPEED]
+ velocity_tps = min_speed + (speed_byte / 255.0) * (max_speed - min_speed)
+ pos_delta = (velocity_tps / tick_range) * 255.0 * dt
+ error = target - gripper_pos_f
+ if abs(error) <= pos_delta:
+ gripper_pos_f = target
+ gripper_ramp[_RAMP_ACTIVE] = _RAMP_OFF
+ elif error > 0:
+ gripper_pos_f += pos_delta
+ else:
+ gripper_pos_f -= pos_delta
+ gripper_data_in[1] = int(gripper_pos_f + 0.5) # round to int
+ return gripper_pos_f
+
+
+@njit(cache=True)
+def _encode_payload_jit(
+ out: memoryview,
+ position_in: np.ndarray,
+ speed_in: np.ndarray,
+ homed_in: np.ndarray,
+ io_in: np.ndarray,
+ temp_err_in: np.ndarray,
+ pos_err_in: np.ndarray,
+ timing_in: np.ndarray,
+ gripper_in: np.ndarray,
+) -> None:
+ """JIT-compiled payload encoding."""
+ _pack_positions(out, position_in, 0)
+ _pack_positions(out, speed_in, 18)
+
+ out[36] = _pack_bitfield(homed_in)
+ out[37] = _pack_bitfield(io_in)
+ out[38] = _pack_bitfield(temp_err_in)
+ out[39] = _pack_bitfield(pos_err_in)
+
+ t = int(timing_in[0])
+ out[40] = (t >> 8) & 0xFF
+ out[41] = t & 0xFF
+ out[42] = 0
+ out[43] = 0
+
+ out[44] = int(gripper_in[0]) & 0xFF
+ pos = int(gripper_in[1]) & 0xFFFF
+ spd = int(gripper_in[2]) & 0xFFFF
+ cur = int(gripper_in[3]) & 0xFFFF
+ out[45] = (pos >> 8) & 0xFF
+ out[46] = pos & 0xFF
+ out[47] = (spd >> 8) & 0xFF
+ out[48] = spd & 0xFF
+ out[49] = (cur >> 8) & 0xFF
+ out[50] = cur & 0xFF
+ out[51] = int(gripper_in[4]) & 0xFF
+
@dataclass
class MockRobotState:
@@ -42,6 +313,9 @@ class MockRobotState:
io_in: np.ndarray = field(
default_factory=lambda: np.zeros((8,), dtype=np.uint8)
) # E-stop released
+ io_out: np.ndarray = field(
+ default_factory=lambda: np.zeros((8,), dtype=np.uint8)
+ ) # Commanded outputs (echoed from controller)
# Error states
temperature_error_in: np.ndarray = field(
default_factory=lambda: np.zeros((8,), dtype=np.uint8)
@@ -53,6 +327,15 @@ class MockRobotState:
gripper_data_in: np.ndarray = field(
default_factory=lambda: np.zeros((6,), dtype=np.int32)
)
+ # Gripper ramp state: [target_pos_byte, speed_byte, active_flag(0.0|1.0)]
+ gripper_ramp: np.ndarray = field(
+ default_factory=lambda: np.zeros((3,), dtype=np.float64)
+ )
+ gripper_pos_f: float = 0.0 # float accumulator for smooth ramp (0.0–255.0)
+
+ # Generalized tool ramp for binary-activation tools (pneumatic grippers, etc.)
+ tool_ramp_target: float = 0.0 # target normalized position (0..1)
+ tool_ramp_current: float = 0.0 # current ramp progress (0..1)
# Timing data
timing_data_in: np.ndarray = field(
default_factory=lambda: np.zeros((1,), dtype=np.int32)
@@ -60,7 +343,7 @@ class MockRobotState:
# Simulation parameters
update_rate: float = cfg.INTERVAL_S # match control loop cadence
- last_update: float = field(default_factory=time.time)
+ last_update: float = field(default_factory=time.perf_counter)
homing_countdown: int = 0
# Command state from controller
@@ -76,8 +359,8 @@ def __post_init__(self):
"""Initialize robot to standby position."""
# Set initial positions to standby position for better IK
for i in range(6):
- deg = float(PAROL6_ROBOT.joint.standby.deg[i])
- steps = int(PAROL6_ROBOT.ops.deg_to_steps(deg, i))
+ deg = float(cfg.STANDBY_ANGLES_DEG[i])
+ steps = int(cfg.deg_to_steps_scalar(deg, i))
self.position_in[i] = steps
# Initialize float accumulator from integer steps
self.position_f = self.position_in.astype(np.float64)
@@ -115,15 +398,12 @@ def __init__(
self._state = MockRobotState()
# Frame generation tracking
- self._frames_to_send: list[bytes] = []
- self._last_frame_time = time.time()
self._frame_interval = cfg.INTERVAL_S # match control loop cadence
# Connection state
self._connected = False
# Statistics
- self._frames_sent = 0
self._frames_received = 0
# Latest-frame infrastructure (simulation publishes into this buffer)
@@ -131,22 +411,29 @@ def __init__(
self._frame_mv = memoryview(self._frame_buf)[:52]
self._frame_version = 0
self._frame_ts = 0.0
- self._reader_thread: threading.Thread | None = None
- self._reader_running = False
- # Precompute motion simulation constants
- self._vmax_f = PAROL6_ROBOT.joint.speed.max.astype(np.float64, copy=False)
- self._vmax_i32 = PAROL6_ROBOT.joint.speed.max.astype(np.int32, copy=False)
- lims = np.asarray(PAROL6_ROBOT.joint.limits.steps, dtype=np.int64)
+ # Precompute motion simulation constants from LIMITS
+ self._vmax_f = LIMITS.joint.hard.velocity_steps.astype(np.float64, copy=False)
+ lims = np.asarray(LIMITS.joint.position.steps, dtype=np.int64)
self._jmin_f = lims[:, 0].astype(np.float64, copy=False)
self._jmax_f = lims[:, 1].astype(np.float64, copy=False)
+ self._home_angles_deg = np.array(cfg.HOME_ANGLES_DEG, dtype=np.float64)
- # Scratch buffers for motion simulation
+ # Scratch buffer for motion simulation (stores previous position)
self._prev_pos_f = np.zeros((6,), dtype=np.float64)
+ # Tool simulator (resolved on tool change, not per-tick)
+ self._simulator: ToolSimulator | None = None
+ self._simulator_tool_name: str = ""
+
self._state.last_update = time.perf_counter()
- logger.info("MockSerialTransport initialized - simulation mode active")
+ # Write initial frame so first read returns valid data
+ self._encode_payload_into(self._frame_mv)
+ self._frame_version = 1
+ self._frame_ts = time.time()
+
+ logger.debug("MockSerialTransport initialized - simulation mode active")
def connect(self, port: str | None = None) -> bool:
"""
@@ -163,9 +450,11 @@ def connect(self, port: str | None = None) -> bool:
self._connected = True
self._state = MockRobotState() # Reset state on connect
+ self._simulator = None # Force re-resolve on next tick
+ self._simulator_tool_name = ""
# Initialize time base to perf_counter for consistent scheduling
self._state.last_update = time.perf_counter()
- logger.info(f"MockSerialTransport connected to simulated port: {self.port}")
+ logger.debug(f"MockSerialTransport connected to simulated port: {self.port}")
return True
# Allow controller to sync the simulator pose/homing from live controller state
@@ -186,7 +475,7 @@ def sync_from_controller_state(self, state: ControllerState) -> None:
# Clear speeds and hold position
self._state.speed_in = state.Speed_in.copy()
self._state.command_out = CommandCode.IDLE
- logger.info("MockSerialTransport: state synchronized from controller")
+ logger.debug("MockSerialTransport: state synchronized from controller")
except Exception as e:
logger.warning(
"MockSerialTransport: failed to sync from controller state: %s", e
@@ -195,7 +484,7 @@ def sync_from_controller_state(self, state: ControllerState) -> None:
def disconnect(self) -> None:
"""Simulate serial port disconnection."""
self._connected = False
- logger.info(f"MockSerialTransport disconnected from: {self.port}")
+ logger.debug(f"MockSerialTransport disconnected from: {self.port}")
def is_connected(self) -> bool:
"""
@@ -227,279 +516,131 @@ def write_frame(
timeout_out: int,
gripper_data_out: np.ndarray,
) -> bool:
- """
- Process a command frame from the controller.
-
- Instead of writing to serial, this updates the internal
- simulation state.
-
- Args:
- position_out: Target positions
- speed_out: Speed commands
- command_out: Command code
- affected_joint_out: Affected joint flags
- inout_out: I/O commands
- timeout_out: Timeout value
- gripper_data_out: Gripper commands
-
- Returns:
- True if processed successfully
- """
+ """Process a command frame from the controller."""
if not self._connected:
return False
- # Update simulation state with command
self._state.command_out = command_out
- self._state.position_out = np.array(position_out, dtype=np.int32, copy=False)
- self._state.speed_out = np.array(speed_out, dtype=np.float64, copy=False)
-
- # Track frame reception
self._frames_received += 1
-
- # Simulate gripper state updates
- if gripper_data_out[4] == 1: # Calibration mode
- # Simulate gripper calibration
- self._state.gripper_data_in[0] = gripper_data_out[5] # Set device ID
- self._state.gripper_data_in[4] = 0x40 # Set calibrated bit
- elif gripper_data_out[4] == 2: # Error clear mode
- # Clear gripper errors
- self._state.gripper_data_in[4] &= ~0x20 # Clear error bit
-
- # Update gripper position/speed/current if commanded
- if gripper_data_out[3] != 0: # Gripper command active
- self._state.gripper_data_in[1] = gripper_data_out[0] # Position
- self._state.gripper_data_in[2] = gripper_data_out[1] # Speed
- self._state.gripper_data_in[3] = gripper_data_out[2] # Current
-
+ _write_frame_jit(
+ self._state.position_out,
+ self._state.speed_out,
+ self._state.gripper_data_in,
+ position_out,
+ speed_out,
+ gripper_data_out,
+ self._state.gripper_ramp,
+ )
+ n = min(len(inout_out), len(self._state.io_out))
+ self._state.io_out[:n] = inout_out[:n]
return True
- def _simulate_motion(self, dt: float) -> None:
+ def tick_simulation(
+ self,
+ tool_name: str = "NONE",
+ tool_teleport_pos: float = -1.0,
+ ) -> None:
"""
- Simulate one step of robot motion.
+ Run one physics simulation step. Called by controller each tick.
- Args:
- dt: Time delta since last update
+ This advances the simulation by the elapsed time since last update,
+ encodes the new state into the frame buffer, and increments the
+ frame version for change detection.
"""
- state = self._state
-
- # Handle homing countdown
- if state.homing_countdown > 0:
- state.homing_countdown -= 1
- if state.homing_countdown == 0:
- # Homing complete - mark all joints as homed and move to configured home posture
- target_deg = cfg.HOME_ANGLES_DEG
- # Mark all 8 homed bits as 1 to satisfy status bitfield expectations
- state.homed_in.fill(1)
- for i in range(6):
- steps = int(PAROL6_ROBOT.ops.deg_to_steps(float(target_deg[i]), i))
- state.position_in[i] = steps
- state.position_f[i] = float(steps)
- state.speed_in[i] = 0
- # Clear HOME command to avoid immediately restarting homing
- state.command_out = CommandCode.IDLE
-
- # Ensure E-stop stays released in simulation
- state.io_in[4] = 1
-
- # Simulate motion based on command type
- if state.command_out == CommandCode.HOME:
- # Start homing sequence
- if state.homing_countdown == 0:
- for i in range(6):
- state.homed_in[i] = 0 # Mark as not homed
- # Schedule homing completion after ~0.2s (use fixed frame interval for determinism)
- state.homing_countdown = max(1, int(0.2 / self._frame_interval + 0.5))
- # Zero speeds during homing
- for i in range(6):
- state.speed_in[i] = 0
-
- elif state.command_out == CommandCode.JOG or state.command_out == 123:
- # Speed control mode (vectorized float-accumulated integration)
- np.copyto(self._prev_pos_f, state.position_f)
-
- # Clip commanded speeds to joint limits
- v_cmd = np.clip(
- state.speed_out.astype(np.float64, copy=False),
- -self._vmax_f,
+ if not self._connected:
+ return
+
+ now = time.perf_counter()
+ dt = now - self._state.last_update
+ self._state.last_update = now
+
+ # Snap gripper position if teleport requested
+ if tool_teleport_pos >= 0:
+ self._state.gripper_pos_f = tool_teleport_pos
+ self._state.gripper_data_in[1] = int(tool_teleport_pos + 0.5)
+ self._state.gripper_ramp[_RAMP_TARGET] = tool_teleport_pos
+ self._state.gripper_ramp[_RAMP_ACTIVE] = _RAMP_OFF
+ # Also snap the pneumatic/generic ramp so it doesn't overwrite
+ frac = tool_teleport_pos / 255.0
+ self._state.tool_ramp_current = frac
+ self._state.tool_ramp_target = frac
+
+ if dt > 0:
+ state = self._state
+ # CommandCode enums are resolved at compile time inside the JIT function.
+ # Passing enums as arguments would add ~90µs overhead per call.
+ state.homing_countdown, state.command_out = _simulate_motion_jit(
+ state.position_f,
+ state.position_in,
+ state.speed_in,
+ state.speed_out,
+ state.position_out,
+ state.homed_in,
+ state.io_in,
+ self._prev_pos_f,
self._vmax_f,
+ self._jmin_f,
+ self._jmax_f,
+ self._home_angles_deg,
+ int(state.command_out),
+ dt,
+ state.homing_countdown,
)
- # Integrate position
- new_pos_f = state.position_f + v_cmd * dt
+ # Tool simulation: resolve params on change, then tick
+ if tool_name != self._simulator_tool_name:
+ self._simulator_tool_name = tool_name
+ # Clear stale ramp state from the previous tool
+ state.gripper_ramp[_RAMP_ACTIVE] = _RAMP_OFF
+ state.tool_ramp_current = 0.0
+ state.tool_ramp_target = 0.0
- # Apply joint limits
- np.clip(new_pos_f, self._jmin_f, self._jmax_f, out=state.position_f)
+ from parol6.tools import get_registry
- # Report actual velocity based on realized motion
- if dt > 0:
- realized_v = np.rint((state.position_f - self._prev_pos_f) / dt).astype(
- np.int32
- )
- np.clip(realized_v, -self._vmax_i32, self._vmax_i32, out=state.speed_in)
- else:
- state.speed_in.fill(0)
+ tool_cfg = get_registry().get(tool_name)
+ if tool_cfg is not None:
+ self._simulator = tool_cfg.create_simulator()
+ if self._simulator is not None:
+ self._simulator.resolve_params(tool_cfg)
+ else:
+ self._simulator = None
- elif state.command_out == CommandCode.MOVE or state.command_out == 156:
- # Position control mode (float-accumulated and per-tick speed clamp)
- prev_pos_f = state.position_f.copy()
- for i in range(6):
- target = float(state.position_out[i])
- current_f = float(state.position_f[i])
- err_f = target - current_f
-
- # Calculate max move this tick from per-joint max speed
- max_step_f = float(PAROL6_ROBOT.joint.speed.max[i]) * float(dt)
- if max_step_f < 1.0:
- # ensure some progress at very small dt
- max_step_f = 1.0
-
- move = float(err_f)
- if move > max_step_f:
- move = max_step_f
- elif move < -max_step_f:
- move = -max_step_f
-
- new_pos_f = current_f + move
-
- # Apply joint limits
- jmin, jmax = PAROL6_ROBOT.joint.limits.steps[i]
- if new_pos_f < float(jmin):
- new_pos_f = float(jmin)
- elif new_pos_f > float(jmax):
- new_pos_f = float(jmax)
-
- state.position_f[i] = new_pos_f
-
- # Report actual velocity based on realized motion
- if dt > 0:
- realized_v = np.rint((state.position_f - prev_pos_f) / dt).astype(
- np.int32
- )
- else:
- realized_v = np.zeros(6, dtype=np.int32)
- vmax = PAROL6_ROBOT.joint.speed.max.astype(np.int32)
- state.speed_in[:] = np.clip(realized_v, -vmax, vmax)
+ if self._simulator is not None:
+ self._simulator.tick(state, dt)
- else:
- # Idle or unknown command - hold position
- for i in range(6):
- state.speed_in[i] = 0
+ # Echo commanded outputs into io_in (firmware packs
+ # IO_var[] = {In1, In2, Out1, Out2, Estop, ...})
+ state.io_in[2] = state.io_out[2]
+ state.io_in[3] = state.io_out[3]
- # Sync integer telemetry from high-fidelity accumulator
- state.position_in[:] = np.rint(state.position_f).astype(np.int32)
+ self._encode_payload_into(self._frame_mv)
+ self._frame_version += 1
+ self._frame_ts = time.time()
# ================================
# Latest-frame API (reduced-copy)
# ================================
- def start_reader(self, shutdown_event: threading.Event) -> threading.Thread:
+ def poll_read(self) -> bool:
"""
- Start simulated latest-frame publisher thread.
+ No-op for mock transport. Simulation is driven by tick_simulation().
+ Returns True if connected (frame data is always available).
"""
- if self._reader_thread and self._reader_thread.is_alive():
- return self._reader_thread
-
- def _run():
- self._reader_running = True
- period = self._frame_interval
- next_deadline = time.perf_counter()
-
- try:
- while not shutdown_event.is_set():
- if not self._connected:
- time.sleep(0.05)
- continue
-
- now = time.perf_counter()
- if now >= next_deadline:
- # Advance simulation before publishing a new frame
- dt = now - self._state.last_update
- if dt > 0:
- self._simulate_motion(dt)
- self._state.last_update = now
-
- self._encode_payload_into(self._frame_mv)
- self._frame_version += 1
- self._frame_ts = time.time()
-
- # Advance deadline
- next_deadline += period
- # If we fell far behind, resync to avoid tight catch-up loop
- if next_deadline < now - period:
- next_deadline = now + period
- else:
- # Sleep until next deadline (or at most 2ms to stay responsive)
- sleep_time = min(next_deadline - now, 0.002)
- if sleep_time > 0:
- time.sleep(sleep_time)
- finally:
- self._reader_running = False
-
- t = threading.Thread(target=_run, name="MockSerialReader", daemon=True)
- self._reader_thread = t
- t.start()
- return t
+ return self._connected
def _encode_payload_into(self, out_mv: memoryview) -> None:
- """
- Build a 52-byte payload per firmware layout from the simulated state directly into memoryview.
- Zero-allocation version for use in the reader loop.
- """
+ """Build a 52-byte payload per firmware layout from simulated state."""
st = self._state
- out = out_mv
- # Positions (6 * 3 bytes)
- off = 0
- for i in range(6):
- b0, b1, b2 = split_to_3_bytes(int(st.position_in[i]))
- out[off] = b0
- out[off + 1] = b1
- out[off + 2] = b2
- off += 3
- # Speeds (6 * 3 bytes)
- off = 18
- for i in range(6):
- b0, b1, b2 = split_to_3_bytes(int(st.speed_in[i]))
- out[off] = b0
- out[off + 1] = b1
- out[off + 2] = b2
- off += 3
-
- def bits_to_byte(bits: np.ndarray) -> int:
- val = 0
- for b in bits[:8]:
- val = (val << 1) | (1 if b else 0)
- return val & 0xFF
-
- # Bitfields
- out[36] = bits_to_byte(st.homed_in)
- out[37] = bits_to_byte(st.io_in)
- out[38] = bits_to_byte(st.temperature_error_in)
- out[39] = bits_to_byte(st.position_error_in)
-
- # Timing (two bytes)
- t = int(st.timing_data_in[0]) if st.timing_data_in else 0
- out[40] = (t >> 8) & 0xFF
- out[41] = t & 0xFF
-
- # Reserved
- out[42] = 0
- out[43] = 0
-
- # Gripper
- gd = st.gripper_data_in
- dev_id = int(gd[0]) if gd.all() else 0
- pos = int(gd[1]) & 0xFFFF
- spd = int(gd[2]) & 0xFFFF
- cur = int(gd[3]) & 0xFFFF
- status = int(gd[4]) & 0xFF if gd.all() else 0
-
- out[44] = dev_id & 0xFF
- out[45] = (pos >> 8) & 0xFF
- out[46] = pos & 0xFF
- out[47] = (spd >> 8) & 0xFF
- out[48] = spd & 0xFF
- out[49] = (cur >> 8) & 0xFF
- out[50] = cur & 0xFF
- out[51] = status & 0xFF
+ _encode_payload_jit(
+ out_mv,
+ st.position_in,
+ st.speed_in,
+ st.homed_in,
+ st.io_in,
+ st.temperature_error_in,
+ st.position_error_in,
+ st.timing_data_in,
+ st.gripper_data_in,
+ )
def get_latest_frame_view(self) -> tuple[memoryview | None, int, float]:
"""
@@ -507,39 +648,3 @@ def get_latest_frame_view(self) -> tuple[memoryview | None, int, float]:
"""
mv = self._frame_mv if self._frame_version > 0 else None
return (mv, self._frame_version, self._frame_ts)
-
- def get_info(self) -> dict:
- """
- Get information about the mock transport.
-
- Returns:
- Dictionary with transport information
- """
- return {
- "port": self.port,
- "baudrate": self.baudrate,
- "connected": self._connected,
- "timeout": self.timeout,
- "mode": "MOCK_SERIAL",
- "frames_sent": self._frames_sent,
- "frames_received": self._frames_received,
- "simulation_rate_hz": int(1.0 / self._frame_interval),
- "robot_state": {
- "homed": all(self._state.homed_in[i] == 1 for i in range(6)),
- "estop": self._state.io_in[4] == 0,
- "command": self._state.command_out,
- },
- }
-
-
-def create_mock_serial_transport() -> MockSerialTransport:
- """
- Factory function to create a mock serial transport.
-
- Returns:
- Configured MockSerialTransport instance
- """
- transport = MockSerialTransport()
- transport.connect()
- logger.info("Mock serial transport created and connected")
- return transport
diff --git a/parol6/server/transports/serial_transport.py b/parol6/server/transports/serial_transport.py
index 86169be..22ab705 100644
--- a/parol6/server/transports/serial_transport.py
+++ b/parol6/server/transports/serial_transport.py
@@ -7,18 +7,112 @@
import logging
import os
-import threading
import time
+from typing import cast
+import numba
import numpy as np
import serial
-from parol6.config import INTERVAL_S, get_com_port_with_fallback
+from parol6.config import SERIAL_RX_RING_DEFAULT
from parol6.protocol.wire import pack_tx_frame_into
logger = logging.getLogger(__name__)
+@numba.njit(cache=True)
+def _append_to_ring_numba(
+ rb: np.ndarray, src: np.ndarray, n: int, cap: int, head: int, tail: int
+) -> tuple[int, int]:
+ """JIT-compiled ring buffer append. Returns (new_head, new_tail)."""
+ avail = (head - tail + cap) % cap
+ free = cap - 1 - avail
+ over = max(0, n - free)
+ if over:
+ tail = (tail + over) % cap
+
+ first = min(n, cap - head)
+ for i in range(first):
+ rb[head + i] = src[i]
+ remain = n - first
+ for i in range(remain):
+ rb[i] = src[first + i]
+ head = (head + n) % cap
+
+ return head, tail
+
+
+@numba.njit(cache=True)
+def _parse_frames_njit(
+ rb: np.ndarray,
+ head: int,
+ tail: int,
+ cap: int,
+ frame_buf: np.ndarray,
+) -> tuple[int, int, bool]:
+ """
+ JIT-compiled frame parser. Scans for 0xFF 0xFF 0xFF start sequence,
+ validates end markers 0x01 0x02, extracts 52-byte payload.
+
+ Returns:
+ new_tail: Updated tail position
+ frames_found: Number of complete frames found
+ has_valid: True if frame_buf contains a complete 52-byte payload
+ """
+ START0, START1, START2 = 0xFF, 0xFF, 0xFF
+ END0, END1 = 0x01, 0x02
+ frames_found = 0
+ has_valid = False
+
+ while True:
+ avail = (head - tail + cap) % cap
+ if avail < 4:
+ break
+
+ # Find start sequence
+ found = False
+ while avail >= 3:
+ if (
+ rb[tail] == START0
+ and rb[(tail + 1) % cap] == START1
+ and rb[(tail + 2) % cap] == START2
+ ):
+ found = True
+ break
+ tail = (tail + 1) % cap
+ avail = (head - tail + cap) % cap
+
+ if not found or avail < 4:
+ break
+
+ length = rb[(tail + 3) % cap]
+ total_needed = 4 + length
+ if avail < total_needed:
+ break
+
+ # Validate end markers
+ if length >= 2:
+ e0 = rb[(tail + 4 + length - 2) % cap]
+ e1 = rb[(tail + 4 + length - 1) % cap]
+ if not (e0 == END0 and e1 == END1):
+ tail = (tail + 1) % cap
+ continue
+
+ # Extract payload (first 52 bytes)
+ payload_len = 52 if length >= 52 else length
+ start = (tail + 4) % cap
+ for i in range(payload_len):
+ frame_buf[i] = rb[(start + i) % cap]
+
+ if payload_len >= 52:
+ frames_found += 1
+ has_valid = True
+
+ tail = (tail + total_needed) % cap
+
+ return tail, frames_found, has_valid
+
+
class SerialTransport:
"""
Manages serial port communication with the robot.
@@ -51,22 +145,21 @@ def __init__(
self.serial: serial.Serial | None = None
self.last_reconnect_attempt = 0.0
self.reconnect_interval = 1.0 # seconds between reconnect attempts
+ self._reconnect_failures = 0 # count consecutive failures to reduce log spam
- # Reduced-copy latest-frame infrastructure (reader thread will publish here)
- self._scratch = bytearray(4096)
- self._scratch_mv = memoryview(self._scratch)
+ # Reduced-copy latest-frame infrastructure (poll_read will publish here)
+ self._scratch = np.zeros(4096, dtype=np.uint8)
+ self._scratch_mv = memoryview(self._scratch.data)
# Fixed-size ring buffer for RX stream (drop-oldest on overflow)
- _cap = int(os.getenv("PAROL6_SERIAL_RX_RING_CAP", "262144"))
- self._ring = bytearray(_cap)
+ _cap = int(os.getenv("PAROL6_SERIAL_RX_RING_CAP", str(SERIAL_RX_RING_DEFAULT)))
+ self._ring = np.zeros(_cap, dtype=np.uint8)
self._r_cap = _cap
self._r_head = 0
self._r_tail = 0
- self._frame_buf = bytearray(64) # 52-byte payload + headroom
- self._frame_mv = memoryview(self._frame_buf)[:52]
+ self._frame_buf = np.zeros(64, dtype=np.uint8)
+ self._frame_mv = memoryview(self._frame_buf)[:52] # type: ignore[invalid-argument-type, ty:invalid-argument-type]
self._frame_version = 0
self._frame_ts = 0.0
- self._reader_thread: threading.Thread | None = None
- self._reader_running = False
# Preallocated TX buffer (3 start + 1 len + 52 payload = 56 bytes)
self._tx_buf = bytearray(56)
@@ -88,38 +181,13 @@ def connect(self, port: str | None = None) -> bool:
Returns:
True if connection successful, False otherwise
"""
- if port:
- self.port = port
-
- if not self.port:
- logger.warning("No serial port specified")
- return False
-
- try:
- # Close existing connection if any
- if self.serial and self.serial.is_open:
- self.serial.close()
-
- # Create new connection
- self.serial = serial.Serial(
- port=self.port, baudrate=self.baudrate, timeout=self.timeout
- )
-
- if self.serial.is_open:
- logger.info(f"Connected to serial port: {self.port}")
- return True
- else:
- logger.error(f"Failed to open serial port: {self.port}")
- return False
-
- except serial.SerialException as e:
- logger.error(f"Serial connection error: {e}")
- self.serial = None
- return False
- except Exception as e:
- logger.error(f"Unexpected error connecting to serial: {e}")
- self.serial = None
- return False
+ # Reset failure counter on explicit connect call
+ self._reconnect_failures = 0
+ success = self._connect_internal(port, quiet=False)
+ if not success:
+ # Mark as failed so auto_reconnect logs at DEBUG level
+ self._reconnect_failures = 1
+ return success
def disconnect(self) -> None:
"""Disconnect from the serial port."""
@@ -146,7 +214,9 @@ def auto_reconnect(self) -> bool:
"""
Attempt to reconnect to the serial port if disconnected.
- This implements a rate-limited reconnection attempt.
+ This implements a rate-limited reconnection attempt. After the first
+ failure, subsequent reconnection attempts are logged at DEBUG level
+ to reduce log spam.
Returns:
True if reconnection successful, False otherwise
@@ -160,11 +230,72 @@ def auto_reconnect(self) -> bool:
self.last_reconnect_attempt = now
if self.port:
- logger.info(f"Attempting to reconnect to {self.port}...")
- return self.connect(self.port)
+ # Log at INFO only on first attempt, DEBUG on subsequent
+ log_level = logging.DEBUG if self._reconnect_failures > 0 else logging.INFO
+ logger.log(log_level, f"Attempting to reconnect to {self.port}...")
+ success = self._connect_internal(
+ self.port, quiet=self._reconnect_failures > 0
+ )
+ if success:
+ if self._reconnect_failures > 0:
+ logger.info(
+ f"Reconnected to {self.port} after {self._reconnect_failures} failed attempts"
+ )
+ self._reconnect_failures = 0
+ else:
+ self._reconnect_failures += 1
+ return success
return False
+ def _connect_internal(self, port: str | None, quiet: bool = False) -> bool:
+ """
+ Internal connection logic with optional quiet mode for reduced logging.
+
+ Args:
+ port: Port to connect to
+ quiet: If True, log errors at DEBUG instead of ERROR
+
+ Returns:
+ True if connection successful, False otherwise
+ """
+ if port:
+ self.port = port
+
+ if not self.port:
+ if not quiet:
+ logger.warning("No serial port specified")
+ return False
+
+ try:
+ # Close existing connection if any
+ if self.serial and self.serial.is_open:
+ self.serial.close()
+
+ # Create new connection
+ self.serial = serial.Serial(
+ port=self.port, baudrate=self.baudrate, timeout=self.timeout
+ )
+
+ if self.serial.is_open:
+ logger.info(f"Connected to serial port: {self.port}")
+ return True
+ else:
+ log_level = logging.DEBUG if quiet else logging.ERROR
+ logger.log(log_level, f"Failed to open serial port: {self.port}")
+ return False
+
+ except serial.SerialException as e:
+ log_level = logging.DEBUG if quiet else logging.ERROR
+ logger.log(log_level, f"Serial connection error: {e}")
+ self.serial = None
+ return False
+ except Exception as e:
+ log_level = logging.DEBUG if quiet else logging.ERROR
+ logger.log(log_level, f"Unexpected error connecting to serial: {e}")
+ self.serial = None
+ return False
+
def write_frame(
self,
position_out: np.ndarray,
@@ -227,110 +358,45 @@ def write_frame(
# ================================
# Latest-frame API (reduced-copy)
# ================================
- def start_reader(self, shutdown_event: threading.Event) -> threading.Thread:
- """
- Start a dedicated reader thread that parses incoming frames from the serial port
- and publishes the latest 52-byte payload into an internal stable buffer.
-
- Returns the started Thread object. If already running, returns the existing one.
- """
+ def poll_read(self) -> bool:
+ """Non-blocking read from serial. Returns True if data was read."""
if not self.is_connected():
- raise RuntimeError(
- "SerialTransport.start_reader: serial port not connected"
- )
+ return False
+ ser = self.serial
+ if not ser or not ser.is_open:
+ return False
- if self._reader_thread and self._reader_thread.is_alive():
- return self._reader_thread
+ try:
+ iw = ser.in_waiting
+ if iw == 0:
+ return False
- # Ensure a short timeout for responsive shutdown
- if self.serial:
- # Small block so as not to busy loop, but can't use a larger timout
- # because serial will read until buffer is full or timeout.
- self.serial.timeout = INTERVAL_S / 2
+ k = min(iw, len(self._scratch))
+ n = ser.readinto(self._scratch_mv[:k])
+ if n is None or n <= 0:
+ return False
- def _run() -> None:
- self._reader_running = True
- try:
- while not shutdown_event.is_set():
- if not self.is_connected():
- # Backoff a bit to avoid busy loop if disconnected
- time.sleep(0.1)
- continue
- # Race-safe read: hold local ref and check is_open
- ser = self.serial
- if not ser or not getattr(ser, "is_open", False):
- # Disconnected between iterations; back off briefly
- time.sleep(0.1)
- continue
- try:
- # Check if data is available to avoid empty read syscalls
- iw = ser.in_waiting
-
- if iw <= 0:
- # No data available, short sleep and continue
- time.sleep(min(INTERVAL_S, 0.0015))
- continue
-
- # Read up to available bytes into preallocated scratch buffer
- k = min(iw, len(self._scratch))
- n = ser.readinto(self._scratch_mv[:k])
- except serial.SerialException as e:
- logger.error(f"Serial reader error: {e}")
- self.disconnect()
- break
- except (OSError, TypeError, ValueError, AttributeError):
- # fd likely closed during disconnect; stop quietly
- logger.info(
- "Serial reader stopping due to disconnect/closed FD",
- exc_info=False,
- )
- try:
- self.disconnect()
- except Exception:
- pass
- break
- except Exception:
- logger.exception("Serial reader unexpected exception")
- break
-
- if not n:
- # Timeout or no data; loop to check shutdown_event
- continue
-
- # Batch append into ring buffer and parse
- cap = self._r_cap
- head = self._r_head
- tail = self._r_tail
- rb = self._ring
- src = self._scratch
-
- # Calculate overflow and adjust tail if needed
- avail = (head - tail + cap) % cap
- free = (
- cap - 1 - avail
- ) # keep one slot empty to disambiguate full/empty
- over = max(0, n - free)
- if over:
- tail = (tail + over) % cap
-
- # Batch copy into ring buffer using slices
- first = min(n, cap - head)
- rb[head : head + first] = src[:first]
- remain = n - first
- if remain:
- rb[0:remain] = src[first:n]
- head = (head + n) % cap
-
- self._r_head = head
- self._r_tail = tail
- self._parse_ring_for_frames()
- finally:
- self._reader_running = False
+ # Append to ring buffer and parse
+ self._append_to_ring(n)
+ self._parse_ring_for_frames()
+ return True
+ except serial.SerialException as e:
+ logger.error(f"Serial poll error: {e}")
+ self.disconnect()
+ return False
+ except (OSError, TypeError, ValueError, AttributeError) as e:
+ logger.debug("Serial disconnect: %s", e)
+ self.disconnect()
+ return False
- t = threading.Thread(target=_run, name="SerialReader", daemon=True)
- self._reader_thread = t
- t.start()
- return t
+ def _append_to_ring(self, n: int) -> tuple[int, int]:
+ """Append n bytes from _scratch to ring buffer. Returns (new_head, new_tail)."""
+ head, tail = _append_to_ring_numba(
+ self._ring, self._scratch, n, self._r_cap, self._r_head, self._r_tail
+ )
+ self._r_head = head
+ self._r_tail = tail
+ return head, tail
def _parse_ring_for_frames(self) -> None:
"""
@@ -339,99 +405,26 @@ def _parse_ring_for_frames(self) -> None:
Frame format:
[0xFF,0xFF,0xFF] [LEN] [LEN bytes data ...]
"""
- START0, START1, START2 = 0xFF, 0xFF, 0xFF
- END0, END1 = self.END_BYTES[0], self.END_BYTES[1]
- cap = self._r_cap
- head = self._r_head
- tail = self._r_tail
- rb = self._ring
-
- def available(h: int, t: int) -> int:
- return (h - t + cap) % cap
-
- while available(head, tail) >= 4:
- # Find start sequence 0xFF 0xFF 0xFF by advancing tail
- found = False
- while available(head, tail) >= 3:
- b0 = rb[tail]
- b1 = rb[(tail + 1) % cap]
- b2 = rb[(tail + 2) % cap]
- if b0 == START0 and b1 == START1 and b2 == START2:
- found = True
- break
- tail = (tail + 1) % cap
- if not found or available(head, tail) < 4:
- break
+ new_tail, frames_found, has_valid = _parse_frames_njit(
+ self._ring, self._r_head, self._r_tail, self._r_cap, self._frame_buf
+ )
- length = rb[(tail + 3) % cap]
- total_needed = 4 + length
- if available(head, tail) < total_needed:
- # Wait for more data
- break
-
- # Validate end markers if possible
- if length >= 2:
- e0 = rb[(tail + 4 + length - 2) % cap]
- e1 = rb[(tail + 4 + length - 1) % cap]
- if not (e0 == END0 and e1 == END1):
- # Bad frame; skip one byte to resync
- tail = (tail + 1) % cap
- continue
-
- # Publish first 52 bytes if available
- payload_len = 52 if length >= 52 else length
- start = (tail + 4) % cap
- if start + payload_len <= cap:
- self._frame_buf[:payload_len] = rb[start : start + payload_len]
- else:
- first = cap - start
- self._frame_buf[:first] = rb[start:cap]
- self._frame_buf[first:payload_len] = rb[0 : payload_len - first]
-
- if payload_len >= 52:
- self._frame_version += 1
- self._frame_ts = time.time()
-
- # Update Hz tracking if enabled
- self._update_hz_tracking()
-
- # Consume this frame
- tail = (tail + total_needed) % cap
-
- # Publish updated tail
- self._r_tail = tail
+ self._r_tail = new_tail
+ if has_valid:
+ self._frame_version += frames_found
+ self._frame_ts = time.time()
+ self._update_hz_tracking()
def get_latest_frame_view(self) -> tuple[memoryview | None, int, float]:
"""
Return a tuple of (memoryview|None, version:int, timestamp:float).
The memoryview points to a stable 52-byte buffer which is updated by the reader.
"""
- mv = self._frame_mv if self._frame_version > 0 else None
+ mv = cast(
+ "memoryview | None", self._frame_mv if self._frame_version > 0 else None
+ )
return (mv, self._frame_version, self._frame_ts)
- def get_info(self) -> dict:
- """
- Get information about the current serial connection.
-
- Returns:
- Dictionary with connection information
- """
- info = {
- "port": self.port,
- "baudrate": self.baudrate,
- "connected": self.is_connected(),
- "timeout": self.timeout,
- }
-
- if self.serial and self.serial.is_open:
- try:
- info["in_waiting"] = self.serial.in_waiting
- info["out_waiting"] = self.serial.out_waiting
- except Exception as e:
- logger.debug("Failed to read serial wait counts: %s", e)
-
- return info
-
def _update_hz_tracking(self) -> None:
"""
Update EMA Hz tracking and print debug info periodically.
@@ -466,30 +459,3 @@ def _update_hz_tracking(self) -> None:
# Reset interval statistics
self._last_print_time = current_time
self._interval_msg_count = 0
-
-
-def create_serial_transport(
- port: str | None = None, baudrate: int = 2000000
-) -> SerialTransport:
- """
- Factory function to create and optionally connect a serial transport.
-
- Args:
- port: Optional serial port to connect to
- baudrate: Baud rate for communication
-
- Returns:
- SerialTransport instance (may or may not be connected)
- """
- transport = SerialTransport(port=port, baudrate=baudrate)
-
- # Try to connect if port provided
- if port:
- transport.connect(port)
- else:
- # Try to load and connect to saved port
- saved_port = get_com_port_with_fallback()
- if saved_port:
- transport.connect(saved_port)
-
- return transport
diff --git a/parol6/server/transports/transport_factory.py b/parol6/server/transports/transport_factory.py
index f4d11b7..938e1f9 100644
--- a/parol6/server/transports/transport_factory.py
+++ b/parol6/server/transports/transport_factory.py
@@ -3,7 +3,7 @@
This module provides a factory pattern for creating transport instances
based on configuration and environment. It automatically selects between
-real serial, mock serial, or other transport types.
+real serial or mock serial (inline simulation) transport types.
"""
import logging
@@ -31,7 +31,8 @@ def is_simulation_mode() -> bool:
def create_transport(
transport_type: str | None = None,
port: str | None = None,
- baudrate: int = 2000000,
+ *,
+ baudrate: int,
**kwargs: Any,
) -> SerialTransport | MockSerialTransport:
"""
@@ -60,13 +61,13 @@ def create_transport(
# Create appropriate transport
if transport_type == "mock":
- logger.info("Creating MockSerialTransport for simulation")
+ logger.debug("Creating MockSerialTransport for simulation")
transport: MockSerialTransport | SerialTransport = MockSerialTransport(
port=port, baudrate=baudrate, **kwargs
)
elif transport_type == "serial":
- logger.info(f"Creating SerialTransport for port: {port}")
+ logger.debug(f"Creating SerialTransport for port: {port}")
transport = SerialTransport(port=port, baudrate=baudrate, **kwargs)
else:
@@ -78,7 +79,8 @@ def create_transport(
def create_and_connect_transport(
transport_type: str | None = None,
port: str | None = None,
- baudrate: int = 2000000,
+ *,
+ baudrate: int,
auto_find_port: bool = True,
**kwargs: Any,
) -> SerialTransport | MockSerialTransport | None:
@@ -111,7 +113,7 @@ def create_and_connect_transport(
# Try to load saved port
port = get_com_port_with_fallback()
if port:
- logger.info(f"Using saved serial port: {port}")
+ logger.debug(f"Using saved serial port: {port}")
# Create transport
transport = create_transport(transport_type, port=port, baudrate=baudrate, **kwargs)
diff --git a/parol6/server/transports/udp_transport.py b/parol6/server/transports/udp_transport.py
index 1baef3f..2d0879f 100644
--- a/parol6/server/transports/udp_transport.py
+++ b/parol6/server/transports/udp_transport.py
@@ -1,8 +1,8 @@
"""
UDP transport implementation for PAROL6 controller.
-This module handles UDP socket communication, message parsing, and
-response handling for the controller's network interface.
+This module handles UDP socket communication using binary msgpack protocol
+for the controller's network interface.
"""
from __future__ import annotations
@@ -43,6 +43,8 @@ def __init__(
self._running = False
self._rx = bytearray(self.buffer_size)
self._rxv = memoryview(self._rx)
+ # Pre-allocated buffer for poll_receive_all (avoids list allocation per call)
+ self._recv_all_buf: list[tuple[bytes, tuple[str, int]]] = []
def create_socket(self) -> bool:
"""
@@ -55,9 +57,8 @@ def create_socket(self) -> bool:
# Create UDP socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- # Use blocking mode with short timeout for responsive shutdown
- self.socket.setblocking(True)
- self.socket.settimeout(0.25)
+ # Non-blocking mode for polling
+ self.socket.setblocking(False)
# Allow address/port reuse for fast restarts
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -81,7 +82,7 @@ def create_socket(self) -> bool:
time.sleep(_delay_s)
self._running = True
- logger.info(f"UDP socket created and bound to {self.ip}:{self.port}")
+ logger.debug(f"UDP socket created and bound to {self.ip}:{self.port}")
return True
except OSError as e:
@@ -100,44 +101,46 @@ def close_socket(self) -> None:
if self.socket:
try:
self.socket.close()
- logger.info("UDP socket closed")
+ logger.debug("UDP socket closed")
except Exception as e:
- logger.error(f"Error closing UDP socket: {e}")
+ logger.debug(f"Error closing UDP socket: {e}")
finally:
self.socket = None
- def receive_one(self) -> tuple[str, tuple[str, int]] | None:
- """
- Blocking receive of a single datagram using recvfrom_into with a short timeout.
- Returns (message_str, address) on success, or None on timeout/error.
- """
+ def poll_receive(self) -> tuple[bytes, tuple[str, int]] | None:
+ """Non-blocking receive. Returns raw bytes and address, or None if no data."""
if not self.socket or not self._running:
return None
try:
nbytes, address = self.socket.recvfrom_into(self._rxv)
if nbytes <= 0:
return None
- try:
- # Decode ASCII payload and trim only CR/LF to avoid extra copies
- message_str = (
- self._rxv[:nbytes]
- .tobytes()
- .decode("ascii", errors="ignore")
- .rstrip("\r\n")
- )
- except Exception:
- logger.warning(f"Failed to decode UDP datagram from {address}")
- return None
- return (message_str, address)
- except TimeoutError:
+ # Return raw bytes - let caller decode via msgspec
+ return (bytes(self._rxv[:nbytes]), address)
+ except BlockingIOError:
return None
except OSError as e:
- logger.error(f"Socket error receiving UDP message: {e}")
- return None
- except Exception as e:
- logger.error(f"Unexpected error in receive_one: {e}")
+ if e.errno in (11, 35): # EAGAIN/EWOULDBLOCK
+ return None
+ if not self._running:
+ # Socket was closed under us during shutdown — expected.
+ return None
+ logger.error(f"Socket error in poll_receive: {e}")
return None
+ def poll_receive_all(
+ self, max_count: int = 10
+ ) -> list[tuple[bytes, tuple[str, int]]]:
+ """Non-blocking batch receive up to max_count. Reuses internal buffer."""
+ # Returns the internal list directly; caller must consume before next call.
+ self._recv_all_buf.clear()
+ for _ in range(max_count):
+ msg = self.poll_receive()
+ if msg is None:
+ break
+ self._recv_all_buf.append(msg)
+ return self._recv_all_buf
+
def drain_buffer(self) -> int:
"""
Drain all pending messages from the UDP receive buffer.
@@ -149,84 +152,50 @@ def drain_buffer(self) -> int:
return 0
drained_count = 0
- original_timeout = None
-
try:
- # Temporarily switch to non-blocking mode
- original_timeout = self.socket.gettimeout()
- self.socket.setblocking(False)
-
- # Read all pending messages until buffer is empty
+ # Socket is already non-blocking; read all pending messages
while True:
try:
nbytes, _ = self.socket.recvfrom_into(self._rxv)
if nbytes > 0:
drained_count += 1
- except OSError:
+ except (BlockingIOError, OSError):
# No more data available (expected)
break
-
- # Restore original timeout
- self.socket.settimeout(original_timeout)
-
except Exception as e:
- logger.error(f"Error draining UDP buffer: {e}")
- # Try to restore timeout even if draining failed
- try:
- if original_timeout is not None:
- self.socket.settimeout(original_timeout)
- except Exception as e2:
- logger.debug("Failed to restore UDP socket timeout: %s", e2)
+ logger.debug(f"Error draining UDP buffer: {e}")
return drained_count
- def send_response(self, message: str, address: tuple[str, int]) -> bool:
+ def send(self, data: bytes, address: tuple[str, int]) -> bool:
"""
- Send a response message to a specific address.
+ Send raw bytes to a specific address.
Args:
- message: The message string to send
+ data: Pre-packed msgpack bytes to send
address: Tuple of (ip, port) to send to
Returns:
True if successful, False otherwise
"""
if not self.socket or not self._running:
- logger.warning("Cannot send response - socket not available")
+ logger.warning("Cannot send - socket not available")
return False
try:
- # Encode and send the message
- data = message.encode("ascii")
self.socket.sendto(data, address)
return True
except OSError as e:
- logger.error(f"Socket error sending UDP response: {e}")
+ if not self._running:
+ # Socket was closed under us during shutdown — expected.
+ return False
+ logger.error(f"Socket error sending: {e}")
return False
except Exception as e:
- logger.error(f"Unexpected error sending UDP response: {e}")
+ logger.error(f"Unexpected error sending: {e}")
return False
- def broadcast_message(self, message: str, addresses: list[tuple[str, int]]) -> int:
- """
- Broadcast a message to multiple addresses.
-
- Args:
- message: The message to broadcast
- addresses: List of (ip, port) tuples to send to
-
- Returns:
- Number of successful sends
- """
- success_count = 0
-
- for address in addresses:
- if self.send_response(message, address):
- success_count += 1
-
- return success_count
-
def is_running(self) -> bool:
"""
Check if the transport is running.
@@ -259,24 +228,3 @@ def get_socket_info(self) -> dict:
logger.debug("Failed to get UDP sockname: %s", e)
return info
-
-
-def create_udp_transport(
- ip: str = "127.0.0.1", port: int = 5001
-) -> UDPTransport | None:
- """
- Factory function to create and initialize a UDP transport.
-
- Args:
- ip: IP address to bind to
- port: Port number to listen on
-
- Returns:
- Initialized UDPTransport instance, or None if initialization failed
- """
- transport = UDPTransport(ip, port)
-
- if transport.create_socket():
- return transport
- else:
- return None
diff --git a/parol6/smooth_motion/__init__.py b/parol6/smooth_motion/__init__.py
deleted file mode 100644
index 8b99f6f..0000000
--- a/parol6/smooth_motion/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .advanced import AdvancedMotionBlender
-from .circle import CircularMotion
-from .helix import HelixMotion
-from .motion_blender import MotionBlender
-from .spline import SplineMotion
-from .waypoints import WaypointTrajectoryPlanner
-
-__all__ = [
- "CircularMotion",
- "SplineMotion",
- "HelixMotion",
- "WaypointTrajectoryPlanner",
- "MotionBlender",
- "AdvancedMotionBlender",
-]
diff --git a/parol6/smooth_motion/advanced.py b/parol6/smooth_motion/advanced.py
deleted file mode 100644
index febc785..0000000
--- a/parol6/smooth_motion/advanced.py
+++ /dev/null
@@ -1,347 +0,0 @@
-"""
-Advanced trajectory blending utilities (C2 continuity, minimum-jerk, etc.).
-"""
-
-import numpy as np
-
-
-class AdvancedMotionBlender:
- """
- Advanced trajectory blender with C2 continuity support.
-
- Provides multiple blending methods including quintic polynomials for true
- smooth motion with continuous position, velocity, and acceleration.
- """
-
- def __init__(self, sample_rate: float = 100.0):
- """
- Initialize advanced motion blender.
-
- Args:
- sample_rate: Trajectory sampling rate in Hz
- """
- self.sample_rate = sample_rate
- self.dt = 1.0 / sample_rate
-
- # Blend method registry
- self.blend_methods = {
- "quintic": self._blend_quintic,
- "s_curve": self._blend_scurve,
- "smoothstep": self._blend_smoothstep, # Legacy compatibility
- "minimum_jerk": self._blend_minimum_jerk,
- "cubic": self._blend_cubic,
- }
-
- def extract_trajectory_state(
- self, trajectory: np.ndarray, index: int
- ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
- """
- Extract position, velocity, and acceleration at a trajectory point.
-
- Args:
- trajectory: Trajectory array
- index: Point index (-1 for last point)
-
- Returns:
- Tuple of (position, velocity, acceleration)
- """
- if len(trajectory) < 3:
- pos = trajectory[index]
- vel = np.zeros_like(pos)
- acc = np.zeros_like(pos)
- return pos, vel, acc
-
- # Position
- pos = trajectory[index].copy()
-
- # Velocity
- if index == 0 or (index == -1 and len(trajectory) == 1):
- vel = (trajectory[1] - trajectory[0]) / self.dt
- elif index == -1 or index == len(trajectory) - 1:
- vel = (trajectory[-1] - trajectory[-2]) / self.dt
- else:
- vel = (trajectory[index + 1] - trajectory[index - 1]) / (2 * self.dt)
-
- # Acceleration
- if len(trajectory) < 3:
- acc = np.zeros_like(pos)
- elif index == 0:
- v1 = (trajectory[1] - trajectory[0]) / self.dt
- v2 = (trajectory[2] - trajectory[1]) / self.dt
- acc = (v2 - v1) / self.dt
- elif index == -1 or index == len(trajectory) - 1:
- v1 = (trajectory[-2] - trajectory[-3]) / self.dt
- v2 = (trajectory[-1] - trajectory[-2]) / self.dt
- acc = (v2 - v1) / self.dt
- else:
- acc = (
- trajectory[index + 1] - 2 * trajectory[index] + trajectory[index - 1]
- ) / (self.dt**2)
-
- return pos, vel, acc
-
- def calculate_blend_region_size(
- self, traj1: np.ndarray, traj2: np.ndarray, max_accel: float = 1000.0
- ) -> int:
- """
- Calculate optimal blend region size based on velocity mismatch.
-
- Args:
- traj1: First trajectory
- traj2: Second trajectory
- max_accel: Maximum allowed acceleration (mm/s² or deg/s²)
-
- Returns:
- Number of samples for blend region
- """
- _, v1, _ = self.extract_trajectory_state(traj1, -1)
- _, v2, _ = self.extract_trajectory_state(traj2, 0)
-
- delta_v = np.linalg.norm(v2[:3] - v1[:3]) # Focus on position components
-
- if delta_v < 1.0:
- return 20 # Minimal blend
-
- t_blend = delta_v / max_accel
- t_blend *= 1.5 # safety
-
- blend_samples = int(t_blend * self.sample_rate)
- blend_samples = max(20, min(blend_samples, 200))
- return blend_samples
-
- def solve_quintic_coefficients(
- self,
- p0: np.ndarray,
- pf: np.ndarray,
- v0: np.ndarray,
- vf: np.ndarray,
- a0: np.ndarray,
- af: np.ndarray,
- T: float,
- ) -> np.ndarray:
- """
- Solve for quintic polynomial coefficients given boundary conditions.
-
- Returns:
- 6xN array of coefficients [a0, a1, a2, a3, a4, a5] for each dimension
- """
- M = np.array(
- [
- [1, 0, 0, 0, 0, 0], # p(0)
- [1, T, T**2, T**3, T**4, T**5], # p(T)
- [0, 1, 0, 0, 0, 0], # v(0)
- [0, 1, 2 * T, 3 * T**2, 4 * T**3, 5 * T**4], # v(T)
- [0, 0, 2, 0, 0, 0], # a(0)
- [0, 0, 2, 6 * T, 12 * T**2, 20 * T**3], # a(T)
- ]
- )
-
- num_dims = len(p0)
- coeffs = np.zeros((6, num_dims))
-
- for i in range(num_dims):
- b = np.array([p0[i], pf[i], v0[i], vf[i], a0[i], af[i]])
- coeffs[:, i] = np.linalg.solve(M, b)
-
- return coeffs
-
- def evaluate_quintic(
- self, coeffs: np.ndarray, t: float
- ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
- """
- Evaluate quintic polynomial at time t.
-
- Returns position, velocity, and acceleration.
- """
- pos = (
- coeffs[0]
- + coeffs[1] * t
- + coeffs[2] * t**2
- + coeffs[3] * t**3
- + coeffs[4] * t**4
- + coeffs[5] * t**5
- )
- vel = (
- coeffs[1]
- + 2 * coeffs[2] * t
- + 3 * coeffs[3] * t**2
- + 4 * coeffs[4] * t**3
- + 5 * coeffs[5] * t**4
- )
- acc = (
- 2 * coeffs[2]
- + 6 * coeffs[3] * t
- + 12 * coeffs[4] * t**2
- + 20 * coeffs[5] * t**3
- )
- return pos, vel, acc
-
- def _blend_quintic(
- self, traj1: np.ndarray, traj2: np.ndarray, blend_samples: int
- ) -> np.ndarray:
- """
- Generate quintic polynomial blend with C2 continuity.
-
- This ensures smooth position, velocity, and acceleration transitions.
- """
- p0, v0, a0 = self.extract_trajectory_state(traj1, -1)
- pf, vf, af = self.extract_trajectory_state(traj2, 0)
-
- T = blend_samples * self.dt
- coeffs = self.solve_quintic_coefficients(p0, pf, v0, vf, a0, af, T)
-
- blend_traj = []
- for i in range(blend_samples):
- t = i * T / (blend_samples - 1) if blend_samples > 1 else 0.0
- pos, _, _ = self.evaluate_quintic(coeffs, t)
- blend_traj.append(pos)
-
- return np.array(blend_traj)
-
- def _blend_scurve(
- self, traj1: np.ndarray, traj2: np.ndarray, blend_samples: int
- ) -> np.ndarray:
- """
- Generate S-curve blend with jerk limiting.
- """
- p0, v0, _ = self.extract_trajectory_state(traj1, -1)
- pf, vf, _ = self.extract_trajectory_state(traj2, 0)
-
- try:
- from .scurve import MultiAxisSCurveTrajectory
-
- scurve = MultiAxisSCurveTrajectory(
- list(map(float, p0[:6].tolist())), # assume first 6 are XYZRPY
- list(map(float, pf[:6].tolist())),
- v0=list(map(float, v0[:6].tolist())),
- vf=list(map(float, vf[:6].tolist())),
- T=float(blend_samples * self.dt),
- jerk_limit=5000.0,
- )
- points = scurve.get_trajectory_points(self.dt)
-
- blend_traj = []
- for i in range(len(points["position"])):
- pose = np.zeros_like(p0)
- pose[:6] = points["position"][i]
- if len(p0) > 6:
- alpha = (
- i / (len(points["position"]) - 1)
- if len(points["position"]) > 1
- else 1.0
- )
- pose[6:] = p0[6:] * (1 - alpha) + pf[6:] * alpha
- blend_traj.append(pose)
-
- return np.array(blend_traj)
- except Exception:
- return self._blend_quintic(traj1, traj2, blend_samples)
-
- def _blend_smoothstep(
- self, traj1: np.ndarray, traj2: np.ndarray, blend_samples: int
- ) -> np.ndarray:
- """
- Legacy smoothstep blend for backward compatibility.
- """
- p0 = traj1[-1] if len(traj1) > 0 else traj2[0]
- pf = traj2[0] if len(traj2) > 0 else traj1[-1]
-
- blend_traj = []
- for i in range(blend_samples):
- t = i / (blend_samples - 1) if blend_samples > 1 else 0.0
- s = t * t * (3 - 2 * t)
- pos = p0 * (1 - s) + pf * s
- blend_traj.append(pos)
-
- return np.array(blend_traj)
-
- def _blend_minimum_jerk(
- self, traj1: np.ndarray, traj2: np.ndarray, blend_samples: int
- ) -> np.ndarray:
- """
- Minimum jerk trajectory blend.
-
- Uses the minimum jerk trajectory equation for smooth motion.
- """
- p0, _, _ = self.extract_trajectory_state(traj1, -1)
- pf, _, _ = self.extract_trajectory_state(traj2, 0)
-
- blend_traj = []
- for i in range(blend_samples):
- tau = i / (blend_samples - 1) if blend_samples > 1 else 0.0
- pos = p0 + (pf - p0) * (10 * tau**3 - 15 * tau**4 + 6 * tau**5)
- blend_traj.append(pos)
-
- return np.array(blend_traj)
-
- def _blend_cubic(
- self, traj1: np.ndarray, traj2: np.ndarray, blend_samples: int
- ) -> np.ndarray:
- """
- Cubic spline blend with C1 continuity.
- """
- p0, v0, _ = self.extract_trajectory_state(traj1, -1)
- pf, vf, _ = self.extract_trajectory_state(traj2, 0)
-
- T = blend_samples * self.dt
- blend_traj = []
-
- for i in range(blend_samples):
- t = i * T / (blend_samples - 1) if blend_samples > 1 else 0.0
- tau = t / T if T > 0 else 0.0
-
- h00 = 2 * tau**3 - 3 * tau**2 + 1
- h10 = tau**3 - 2 * tau**2 + tau
- h01 = -2 * tau**3 + 3 * tau**2
- h11 = tau**3 - tau**2
-
- pos = h00 * p0 + h10 * T * v0 + h01 * pf + h11 * T * vf
- blend_traj.append(pos)
-
- return np.array(blend_traj)
-
- def blend_trajectories(
- self,
- traj1: np.ndarray,
- traj2: np.ndarray,
- method: str = "quintic",
- blend_samples: int | None = None,
- auto_size: bool = True,
- ) -> np.ndarray:
- """
- Blend two trajectory segments with specified method.
-
- Args:
- traj1: First trajectory segment
- traj2: Second trajectory segment
- method: Blend method ('quintic', 's_curve', 'smoothstep', 'minimum_jerk', 'cubic')
- blend_samples: Number of blend samples (auto-calculated if None)
- auto_size: Automatically calculate blend region size
-
- Returns:
- Combined trajectory with smooth blend
- """
- if len(traj1) == 0 and len(traj2) == 0:
- return np.array([])
- if len(traj1) == 0:
- return traj2
- if len(traj2) == 0:
- return traj1
-
- if blend_samples is None or auto_size:
- blend_samples = self.calculate_blend_region_size(traj1, traj2)
-
- blend_samples = max(4, blend_samples)
-
- if method not in self.blend_methods:
- print(f"[WARNING] Unknown blend method '{method}', using quintic")
- method = "quintic"
-
- blend_func = self.blend_methods[method]
- blend_traj = blend_func(traj1, traj2, blend_samples)
-
- overlap_start = max(0, len(traj1) - 1)
- overlap_end = min(1, len(traj2))
-
- result = np.vstack([traj1[:overlap_start], blend_traj, traj2[overlap_end:]])
- return result
diff --git a/parol6/smooth_motion/base.py b/parol6/smooth_motion/base.py
deleted file mode 100644
index 8e6c9d7..0000000
--- a/parol6/smooth_motion/base.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-Base trajectory generator.
-
-Provides common timing utilities and constraints for derived generators.
-"""
-
-from typing import Any
-
-import numpy as np
-
-from .motion_constraints import MotionConstraints
-
-
-class TrajectoryGenerator:
- """Base class for trajectory generation with caching support"""
-
- def __init__(self, control_rate: float = 100.0):
- """
- Initialize trajectory generator
-
- Args:
- control_rate: Control loop frequency in Hz
- """
- self.control_rate = control_rate
- self.dt = 1.0 / control_rate
- self.trajectory_cache: dict[str, Any] = {}
- self.constraints = MotionConstraints() # Add constraints
-
- def generate_timestamps(self, duration: float | np.floating) -> np.ndarray:
- """Generate evenly spaced timestamps for trajectory"""
- num_points = int(duration * self.control_rate)
- return np.linspace(0, duration, num_points)
diff --git a/parol6/smooth_motion/circle.py b/parol6/smooth_motion/circle.py
deleted file mode 100644
index 10c9a7c..0000000
--- a/parol6/smooth_motion/circle.py
+++ /dev/null
@@ -1,618 +0,0 @@
-"""
-Circle trajectory generator.
-"""
-
-from collections.abc import Sequence
-
-import numpy as np
-from numpy.typing import NDArray
-from scipy.spatial.transform import Rotation, Slerp
-
-from .base import TrajectoryGenerator
-
-
-class CircularMotion(TrajectoryGenerator):
- """Generate circular and arc trajectories in 3D space"""
-
- def generate_arc_3d(
- self,
- start_pose: Sequence[float],
- end_pose: Sequence[float],
- center: Sequence[float] | NDArray,
- normal: Sequence[float] | NDArray | None = None,
- clockwise: bool = True,
- duration: float | np.floating = 2.0,
- ) -> np.ndarray:
- """
- Generate a 3D circular arc trajectory
-
- Args:
- start_pose: Starting pose [x, y, z, rx, ry, rz] (mm and degrees)
- end_pose: Ending pose [x, y, z, rx, ry, rz] (mm and degrees)
- center: Center point of arc [x, y, z] (mm)
- normal: Normal vector to arc plane (default: z-axis)
- clockwise: Direction of rotation
- duration: Time to complete arc (seconds)
-
- Returns:
- Array of poses along the arc trajectory
- """
- # Convert to numpy arrays
- start_pos = np.array(start_pose[:3])
- end_pos = np.array(end_pose[:3])
- center_pt = np.array(center)
-
- # Arc geometry vectors
- r1 = start_pos - center_pt
- r2 = end_pos - center_pt
-
- # Arc plane normal computation
- if normal is None:
- normal = np.cross(r1, r2)
- if np.linalg.norm(normal) < 1e-6: # Points are collinear
- normal = np.array([0, 0, 1]) # Default to XY plane
- normal_np: np.ndarray = np.array(normal, dtype=float)
- normal_np = normal_np / np.linalg.norm(normal_np)
-
- # Arc sweep angle calculation
- r1_norm = r1 / np.linalg.norm(r1)
- r2_norm = r2 / np.linalg.norm(r2)
- cos_angle = np.clip(np.dot(r1_norm, r2_norm), -1, 1)
- arc_angle = np.arccos(cos_angle)
-
- # Arc direction validation
- cross = np.cross(r1_norm, r2_norm)
- if np.dot(cross, normal_np) < 0:
- arc_angle = 2 * np.pi - arc_angle
-
- if clockwise:
- arc_angle = -arc_angle
-
- # Generate trajectory points
- timestamps = self.generate_timestamps(duration)
- trajectory = []
-
- for i, t in enumerate(timestamps):
- # Interpolation factor
- s = t / duration
-
- # Exact start position for trajectory begin
- if i == 0:
- current_pos = start_pos
- else:
- # Rotate radius vector
- angle = s * arc_angle
- rot_matrix = self._rotation_matrix_from_axis_angle(normal_np, angle)
- current_pos = center_pt + rot_matrix @ r1
-
- # Interpolate orientation (SLERP)
- current_orient = self._slerp_orientation(
- np.asarray(start_pose[3:], dtype=float),
- np.asarray(end_pose[3:], dtype=float),
- float(s),
- )
-
- # Combine position and orientation
- pose = np.concatenate([current_pos, current_orient])
- trajectory.append(pose)
-
- return np.array(trajectory)
-
- def generate_arc_with_profile(
- self,
- start_pose: Sequence[float],
- end_pose: Sequence[float],
- center: Sequence[float] | NDArray,
- normal: Sequence[float] | NDArray | None = None,
- clockwise: bool = True,
- duration: float | np.floating = 2.0,
- trajectory_type: str = "cubic",
- jerk_limit: float | None = None,
- ) -> np.ndarray:
- """
- Generate arc trajectory with specified velocity profile.
-
- This method generates arcs with direct velocity profiles instead of
- re-interpolating, ensuring smooth motion without jerking.
-
- Args:
- start_pose: Starting pose [x, y, z, rx, ry, rz]
- end_pose: Ending pose [x, y, z, rx, ry, rz]
- center: Arc center point [x, y, z]
- normal: Normal vector to arc plane
- clockwise: Direction of rotation
- duration: Time to complete arc (seconds)
- trajectory_type: 'cubic', 'quintic', or 's_curve'
- jerk_limit: Maximum jerk for s_curve (mm/s³)
-
- Returns:
- Array of poses with velocity profile applied
- """
- if trajectory_type == "cubic":
- # Use existing cubic implementation
- return self.generate_arc_3d(
- start_pose, end_pose, center, normal, clockwise, duration
- )
-
- # Convert to numpy arrays
- start_pos = np.array(start_pose[:3])
- end_pos = np.array(end_pose[:3])
- center_pt = np.array(center)
-
- # Arc geometry
- r1 = start_pos - center_pt
- r2 = end_pos - center_pt
-
- # Arc plane normal
- if normal is None:
- normal = np.cross(r1, r2)
- if np.linalg.norm(normal) < 1e-6:
- normal = np.array([0, 0, 1])
- normal_np: np.ndarray = np.array(normal, dtype=float)
- normal_np = normal_np / np.linalg.norm(normal_np)
-
- # Calculate arc angle
- r1_norm = r1 / np.linalg.norm(r1)
- r2_norm = r2 / np.linalg.norm(r2)
- cos_angle = np.clip(np.dot(r1_norm, r2_norm), -1, 1)
- arc_angle = np.arccos(cos_angle)
-
- # Determine arc direction
- cross = np.cross(r1_norm, r2_norm)
- if np.dot(cross, normal_np) < 0:
- arc_angle = 2 * np.pi - arc_angle
- if clockwise:
- arc_angle = -arc_angle
-
- # Generate trajectory points with profile
- num_points = int(duration * self.control_rate)
- trajectory = []
-
- for i in range(num_points):
- # Normalized time [0, 1]
- t = i / (num_points - 1) if num_points > 1 else 0.0
-
- # Apply velocity profile
- if trajectory_type == "quintic":
- # Quintic polynomial for smooth acceleration
- s = 10 * t**3 - 15 * t**4 + 6 * t**5
- elif trajectory_type == "s_curve":
- # S-curve (smoothstep) for jerk-limited motion
- if t <= 0.0:
- s = 0.0
- elif t >= 1.0:
- s = 1.0
- else:
- s = t * t * (3.0 - 2.0 * t)
- else:
- s = t # Linear/cubic fallback
-
- # Current angle along arc
- angle = s * arc_angle
-
- # Rotation matrix for arc
- rot_matrix = self._rotation_matrix_from_axis_angle(normal_np, angle)
- current_pos = center_pt + rot_matrix @ r1
-
- # Interpolate orientation (SLERP)
- current_orient = self._slerp_orientation(
- np.asarray(start_pose[3:], dtype=float),
- np.asarray(end_pose[3:], dtype=float),
- float(s),
- )
-
- # Combine position and orientation
- pose = np.concatenate([current_pos, current_orient])
- trajectory.append(pose)
-
- return np.array(trajectory)
-
- def generate_circle_3d(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- normal: Sequence[float] | NDArray = [0, 0, 1],
- start_angle: float | None = None,
- duration: float | np.floating = 4.0,
- start_point: Sequence[float] | None = None,
- ) -> np.ndarray:
- """
- Generate a complete circle trajectory that starts at start_point
- """
- timestamps = self.generate_timestamps(duration)
- trajectory = []
-
- # Circle coordinate system
- normal_np: np.ndarray = np.array(normal, dtype=float)
- normal_np = normal_np / np.linalg.norm(normal_np)
- u = self._get_perpendicular_vector(normal_np)
- v = np.cross(normal_np, u)
-
- center_np = np.array(center, dtype=float)
-
- # CRITICAL FIX: Validate and handle geometry
- if start_point is not None:
- start_pos = np.array(start_point[:3])
- # Project start point onto the circle plane
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, normal_np) * normal_np
-
- # Get distance from center in the plane
- dist_in_plane = np.linalg.norm(to_start_plane)
-
- if dist_in_plane < 0.001:
- # Center start point - undefined angle
- print(
- " WARNING: Start point is at circle center, using default position"
- )
- start_angle = 0
- actual_start = center_np + radius * u
- else:
- # Start point angular position
- to_start_normalized = to_start_plane / dist_in_plane
- u_comp = np.dot(to_start_normalized, u)
- v_comp = np.dot(to_start_normalized, v)
- start_angle = np.arctan2(v_comp, u_comp)
-
- # Check if entry trajectory might be needed
- radius_error = abs(dist_in_plane - radius)
- if radius_error > radius * 0.05: # More than 5% off
- print(
- f" INFO: Starting {dist_in_plane:.1f}mm from center (radius: {radius}mm)"
- )
- if radius_error > radius * 0.3: # More than 30% off
- print(
- " WARNING: Large distance from circle - consider using entry trajectory"
- )
-
- actual_start = start_pos
- else:
- start_angle = 0 if start_angle is None else start_angle
- actual_start = None
-
- # Generate the circle
- for i, t in enumerate(timestamps):
- if i == 0 and actual_start is not None:
- # First point MUST be exactly the start point
- pos = actual_start
- else:
- # Generate circle points
- angle = float(start_angle if start_angle is not None else 0.0) + (
- 2 * np.pi * t / duration
- )
- pos = center_np + radius * (np.cos(angle) * u + np.sin(angle) * v)
-
- # Placeholder orientation (will be overridden)
- orient = [0, 0, 0]
- trajectory.append(np.concatenate([pos, orient]))
-
- return np.array(trajectory)
-
- def generate_circle_with_profile(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- normal: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- trajectory_type: str = "cubic",
- jerk_limit: float | None = None,
- start_angle: float | None = None,
- start_point: Sequence[float] | None = None,
- ) -> np.ndarray:
- """
- Generate circle with specified trajectory profile.
-
- Args:
- center: Center of circle [x, y, z]
- radius: Circle radius in mm
- normal: Normal vector to circle plane
- duration: Time to complete circle (seconds)
- trajectory_type: 'cubic', 'quintic', or 's_curve'
- jerk_limit: Maximum jerk for s_curve profile (mm/s^3)
- start_angle: Starting angle in radians
- start_point: Starting position [x, y, z, rx, ry, rz]
-
- Returns:
- Array of waypoints with shape (N, 6)
- """
- # Calculate adaptive control rate for small circles
- circumference = 2 * np.pi * radius
- min_arc_length = 2.0 # Minimum 2mm between points for smoothness
- min_points = int(circumference / min_arc_length)
-
- # Calculate control rate (100-200Hz range)
- base_rate = self.control_rate
- required_rate = min_points / duration
- adaptive_rate = float(min(200, max(base_rate, required_rate)))
-
- # Temporarily override control rate for small circles
- if radius < 30 and adaptive_rate > base_rate:
- original_rate = self.control_rate
- original_dt = self.dt
- self.control_rate = adaptive_rate
- self.dt = 1.0 / adaptive_rate
- # Use print for debug info since logger not imported here
- print(
- f" [ADAPTIVE] Using {adaptive_rate:.0f}Hz control rate for {radius:.0f}mm radius circle"
- )
- else:
- original_rate = None
- original_dt = None
-
- try:
- if trajectory_type == "cubic":
- # Use existing implementation
- result = self.generate_circle_3d(
- center, radius, normal, start_angle, duration, start_point
- )
- elif trajectory_type == "quintic":
- result = self.generate_quintic_circle(
- center, radius, normal, duration, start_angle, start_point
- )
- elif trajectory_type == "s_curve":
- result = self.generate_scurve_circle(
- center,
- radius,
- normal,
- duration,
- jerk_limit,
- start_angle,
- start_point,
- )
- else:
- raise ValueError(f"Unknown trajectory type: {trajectory_type}")
- finally:
- # Restore original control rate if we changed it
- if original_rate is not None and original_dt is not None:
- self.control_rate = original_rate
- self.dt = original_dt
-
- return result
-
- def generate_quintic_circle(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- normal: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- start_angle: float | None = None,
- start_point: Sequence[float] | None = None,
- ) -> np.ndarray:
- """
- Generate circle trajectory using quintic polynomial velocity profile.
- Provides smooth acceleration and deceleration in Cartesian space.
- """
- # First generate uniform angular points
- num_points = int(duration * self.control_rate)
-
- # Setup coordinate system
- normal_np: np.ndarray = np.array(normal, dtype=float)
- normal_np = normal_np / np.linalg.norm(normal_np)
- u = self._get_perpendicular_vector(normal_np)
- v = np.cross(normal_np, u)
- center_np = np.array(center, dtype=float)
-
- # Handle start point if provided
- if start_point is not None:
- start_pos = np.array(start_point[:3])
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, normal_np) * normal_np
- dist_in_plane = np.linalg.norm(to_start_plane)
-
- if dist_in_plane < 0.001:
- start_angle = 0
- else:
- to_start_normalized = to_start_plane / dist_in_plane
- u_comp = np.dot(to_start_normalized, u)
- v_comp = np.dot(to_start_normalized, v)
- start_angle = np.arctan2(v_comp, u_comp)
-
- # Check if entry trajectory might be needed
- radius_error = abs(dist_in_plane - radius)
- if radius_error > radius * 0.05: # More than 5% off
- print(
- f" INFO: Starting {dist_in_plane:.1f}mm from center (radius: {radius}mm)"
- )
- if radius_error > radius * 0.2: # More than 20% off
- print(
- " WARNING: Large distance from circle - consider using entry trajectory"
- )
- else:
- start_angle = 0 if start_angle is None else start_angle
-
- # Step 1: Generate uniformly spaced angular points
- angles = np.linspace(
- float(start_angle if start_angle is not None else 0.0),
- float((start_angle if start_angle is not None else 0.0) + 2 * np.pi),
- num_points,
- )
- uniform_positions = []
-
- for angle in angles:
- pos = center_np + radius * (np.cos(angle) * u + np.sin(angle) * v)
- uniform_positions.append(pos)
-
- # Step 2: Apply quintic velocity profile to Cartesian motion
- trajectory = []
- timestamps = np.linspace(0, duration, num_points)
-
- for i, t in enumerate(timestamps):
- tau = t / duration # Normalized time [0, 1]
-
- # Quintic profile for smooth acceleration/deceleration
- # Applied to arc length parameterization, not angular
- s_normalized = 10 * tau**3 - 15 * tau**4 + 6 * tau**5
-
- # Map to position index
- position_index = s_normalized * (num_points - 1)
-
- # Interpolate between positions
- if position_index <= 0:
- pos = uniform_positions[0]
- elif position_index >= num_points - 1:
- pos = uniform_positions[-1]
- else:
- lower_idx = int(position_index)
- upper_idx = min(lower_idx + 1, num_points - 1)
- alpha = position_index - lower_idx
- pos = uniform_positions[lower_idx] + alpha * (
- uniform_positions[upper_idx] - uniform_positions[lower_idx]
- )
-
- # Placeholder orientation
- orient = [0, 0, 0]
- trajectory.append(np.concatenate([pos, orient]))
-
- return np.array(trajectory)
-
- def generate_scurve_circle(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- normal: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- jerk_limit: float | None = 5000.0,
- start_angle: float | None = None,
- start_point: Sequence[float] | None = None,
- ) -> np.ndarray:
- """
- Generate circle trajectory using S-curve velocity profile.
- Provides jerk-limited motion in Cartesian space for maximum smoothness.
- """
- if jerk_limit is None:
- jerk_limit = 5000.0 # Default jerk limit in mm/s^3
-
- # Generate timestamps at control rate
- num_points = int(duration * self.control_rate)
-
- # Setup coordinate system
- normal_np: np.ndarray = np.array(normal, dtype=float)
- normal_np = normal_np / np.linalg.norm(normal_np)
- u = self._get_perpendicular_vector(normal_np)
- v = np.cross(normal_np, u)
- center_np = np.array(center, dtype=float)
-
- # Handle start point if provided
- if start_point is not None:
- start_pos = np.array(start_point[:3])
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, normal_np) * normal_np
- dist_in_plane = np.linalg.norm(to_start_plane)
-
- if dist_in_plane < 0.001:
- start_angle = 0
- else:
- to_start_normalized = to_start_plane / dist_in_plane
- u_comp = np.dot(to_start_normalized, u)
- v_comp = np.dot(to_start_normalized, v)
- start_angle = np.arctan2(v_comp, u_comp)
-
- # Check if entry trajectory might be needed
- radius_error = abs(dist_in_plane - radius)
- if radius_error > radius * 0.05: # More than 5% off
- print(
- f" INFO: Starting {dist_in_plane:.1f}mm from center (radius: {radius}mm)"
- )
- if radius_error > radius * 0.2: # More than 20% off
- print(
- " WARNING: Large distance from circle - consider using entry trajectory"
- )
- else:
- start_angle = 0 if start_angle is None else start_angle
-
- # Step 1: Generate uniformly spaced angular points
- angles = np.linspace(
- float(start_angle if start_angle is not None else 0.0),
- float((start_angle if start_angle is not None else 0.0) + 2 * np.pi),
- num_points,
- )
- uniform_positions = []
-
- for angle in angles:
- pos = center_np + radius * (np.cos(angle) * u + np.sin(angle) * v)
- uniform_positions.append(pos)
-
- # Step 2: Apply S-curve velocity profile to Cartesian motion
- trajectory = []
- timestamps = np.linspace(0, duration, num_points)
-
- for i, t in enumerate(timestamps):
- tau = t / duration # Normalized time [0, 1]
-
- # S-curve (smoothstep) profile for smooth acceleration
- # Applied to arc length, not angle
- if tau <= 0.0:
- s_normalized = 0.0
- elif tau >= 1.0:
- s_normalized = 1.0
- else:
- # Smoothstep: 3t² - 2t³ for smooth acceleration and deceleration
- s_normalized = tau * tau * (3.0 - 2.0 * tau)
-
- # Map to position index
- position_index = s_normalized * (num_points - 1)
-
- # Interpolate between positions
- if position_index <= 0:
- pos = uniform_positions[0]
- elif position_index >= num_points - 1:
- pos = uniform_positions[-1]
- else:
- lower_idx = int(position_index)
- upper_idx = min(lower_idx + 1, num_points - 1)
- alpha = position_index - lower_idx
- pos = uniform_positions[lower_idx] + alpha * (
- uniform_positions[upper_idx] - uniform_positions[lower_idx]
- )
-
- # Placeholder orientation
- orient = [0, 0, 0]
- trajectory.append(np.concatenate([pos, orient]))
-
- return np.array(trajectory)
-
- def _rotation_matrix_from_axis_angle(
- self, axis: np.ndarray, angle: float
- ) -> np.ndarray:
- """Generate rotation matrix using Rodrigues' formula"""
- axis = axis / np.linalg.norm(axis)
- cos_a = np.cos(angle)
- sin_a = np.sin(angle)
-
- # Cross-product matrix
- K = np.array(
- [[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]], [-axis[1], axis[0], 0]]
- )
-
- # Rodrigues' formula
- R = np.eye(3) + sin_a * K + (1 - cos_a) * K @ K
- return R
-
- def _get_perpendicular_vector(self, v: np.ndarray) -> np.ndarray:
- """Find a vector perpendicular to the given vector"""
- v = np.array(v, dtype=float) # Ensure it's a numpy array
- if abs(v[0]) < 0.9:
- cross = np.cross(v, [1, 0, 0])
- return cross / np.linalg.norm(cross)
- else:
- cross = np.cross(v, [0, 1, 0])
- return cross / np.linalg.norm(cross)
-
- def _slerp_orientation(
- self,
- start_orient: NDArray[np.floating],
- end_orient: NDArray[np.floating],
- t: float,
- ) -> np.ndarray:
- """Spherical linear interpolation for orientation"""
- # Convert to quaternions
- r1 = Rotation.from_euler("xyz", start_orient, degrees=True)
- r2 = Rotation.from_euler("xyz", end_orient, degrees=True)
-
- # Quaternion interpolation setup
- key_rots = Rotation.from_quat(np.stack([r1.as_quat(), r2.as_quat()]))
- slerp = Slerp(np.array([0.0, 1.0], dtype=float), key_rots)
-
- # Interpolate at a single time by passing a 1D array
- interp_rot = slerp(np.array([t], dtype=float))
- return interp_rot.as_euler("xyz", degrees=True)[0]
diff --git a/parol6/smooth_motion/helix.py b/parol6/smooth_motion/helix.py
deleted file mode 100644
index 7003986..0000000
--- a/parol6/smooth_motion/helix.py
+++ /dev/null
@@ -1,295 +0,0 @@
-"""
-Helix trajectory generator.
-"""
-
-from collections.abc import Sequence
-
-import numpy as np
-from numpy.typing import NDArray
-
-from .base import TrajectoryGenerator
-
-
-class HelixMotion(TrajectoryGenerator):
- """Generate helical trajectories with various velocity profiles"""
-
- def _get_perpendicular_vector(self, v: np.ndarray) -> np.ndarray:
- """Find a vector perpendicular to the given vector"""
- v = np.array(v, dtype=float) # Ensure it's a numpy array
- if abs(v[0]) < 0.9:
- return np.cross(v, [1, 0, 0]) / np.linalg.norm(np.cross(v, [1, 0, 0]))
- else:
- return np.cross(v, [0, 1, 0]) / np.linalg.norm(np.cross(v, [0, 1, 0]))
-
- def generate_helix_with_profile(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- pitch: float,
- height: float,
- axis: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- trajectory_type: str = "cubic",
- jerk_limit: float | None = None,
- start_point: Sequence[float] | None = None,
- clockwise: bool = False,
- ) -> np.ndarray:
- """
- Generate helix with specified trajectory profile.
-
- Args:
- center: Helix center point [x, y, z]
- radius: Helix radius in mm
- pitch: Vertical distance per revolution in mm
- height: Total height of helix in mm
- axis: Helix axis vector (normalized internally)
- duration: Time to complete helix in seconds
- trajectory_type: 'cubic', 'quintic', or 's_curve'
- jerk_limit: Maximum jerk for s_curve profile
- start_point: Starting position [x, y, z, rx, ry, rz]
- clockwise: Rotation direction
-
- Returns:
- Array of waypoints with shape (N, 6) for [x, y, z, rx, ry, rz]
- """
- if trajectory_type == "cubic":
- return self.generate_cubic_helix(
- center, radius, pitch, height, axis, duration, start_point, clockwise
- )
- if trajectory_type == "quintic":
- return self.generate_quintic_helix(
- center, radius, pitch, height, axis, duration, start_point, clockwise
- )
- if trajectory_type == "s_curve":
- return self.generate_scurve_helix(
- center,
- radius,
- pitch,
- height,
- axis,
- duration,
- jerk_limit,
- start_point,
- clockwise,
- )
-
- raise ValueError(f"Unknown trajectory type: {trajectory_type}")
-
- def generate_cubic_helix(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- pitch: float,
- height: float,
- axis: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- start_point: Sequence[float] | None = None,
- clockwise: bool = False,
- ) -> np.ndarray:
- """
- Generate helix with cubic (linear) interpolation.
- This is the simplest profile with constant angular velocity.
- """
- # Calculate number of revolutions
- num_revolutions = height / pitch if pitch > 0 else 1
- total_angle = 2 * np.pi * num_revolutions
-
- # Setup coordinate system
- axis_np: np.ndarray = np.array(axis, dtype=float)
- axis_np = axis_np / np.linalg.norm(axis_np)
- u = self._get_perpendicular_vector(axis_np)
- v = np.cross(axis_np, u)
- center_np = np.array(center, dtype=float)
-
- # Determine starting angle if start_point provided
- start_angle = 0.0
- if start_point is not None:
- start_pos = np.array(start_point[:3])
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, axis_np) * axis_np
-
- if np.linalg.norm(to_start_plane) > 0.001:
- to_start_normalized = to_start_plane / np.linalg.norm(to_start_plane)
- start_angle = np.arctan2(
- np.dot(to_start_normalized, v), np.dot(to_start_normalized, u)
- )
-
- # Generate trajectory points
- num_points = int(duration * self.control_rate)
- timestamps = np.linspace(0, duration, num_points)
- trajectory = []
-
- for t in timestamps:
- # Linear interpolation for simple cubic profile
- progress = t / duration
-
- # Angular position (constant velocity)
- if clockwise:
- theta = start_angle - total_angle * progress
- else:
- theta = start_angle + total_angle * progress
-
- # Vertical position (constant velocity)
- z_offset = height * progress
-
- # Calculate 3D position
- pos = (
- center_np
- + radius * (np.cos(theta) * u + np.sin(theta) * v)
- + z_offset * axis_np
- )
-
- # Placeholder orientation (could be enhanced)
- orient = [0, 0, 0] if start_point is None else start_point[3:6]
-
- trajectory.append(np.concatenate([pos, orient]))
-
- return np.array(trajectory)
-
- def generate_quintic_helix(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- pitch: float,
- height: float,
- axis: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- start_point: Sequence[float] | None = None,
- clockwise: bool = False,
- ) -> np.ndarray:
- """
- Generate helix with quintic polynomial profile.
- Provides smooth acceleration and deceleration.
- """
- # Calculate total angle
- num_revolutions = height / pitch if pitch > 0 else 1
- total_angle = 2 * np.pi * num_revolutions
-
- # Setup coordinate system
- axis_np: np.ndarray = np.array(axis, dtype=float)
- axis_np = axis_np / np.linalg.norm(axis_np)
- u = self._get_perpendicular_vector(axis_np)
- v = np.cross(axis_np, u)
- center_np = np.array(center, dtype=float)
-
- # Determine starting angle
- start_angle = 0.0
- if start_point is not None:
- start_pos = np.array(start_point[:3])
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, axis_np) * axis_np
-
- if np.linalg.norm(to_start_plane) > 0.001:
- to_start_normalized = to_start_plane / np.linalg.norm(to_start_plane)
- start_angle = np.arctan2(
- np.dot(to_start_normalized, v), np.dot(to_start_normalized, u)
- )
-
- # Generate trajectory with quintic profile
- num_points = int(duration * self.control_rate)
- timestamps = np.linspace(0, duration, num_points)
- trajectory = []
-
- for t in timestamps:
- # Quintic polynomial: 10τ³ - 15τ⁴ + 6τ⁵
- tau = t / duration
- progress = 10 * tau**3 - 15 * tau**4 + 6 * tau**5
-
- # Apply to both angular and vertical motion
- if clockwise:
- theta = start_angle - total_angle * progress
- else:
- theta = start_angle + total_angle * progress
-
- z_offset = height * progress
-
- # Calculate position
- pos = (
- center_np
- + radius * (np.cos(theta) * u + np.sin(theta) * v)
- + z_offset * axis_np
- )
-
- # Orientation
- orient = [0, 0, 0] if start_point is None else start_point[3:6]
-
- trajectory.append(np.concatenate([pos, orient]))
-
- return np.array(trajectory)
-
- def generate_scurve_helix(
- self,
- center: Sequence[float] | NDArray,
- radius: float,
- pitch: float,
- height: float,
- axis: Sequence[float] | NDArray = [0, 0, 1],
- duration: float | np.floating = 4.0,
- jerk_limit: float | None = None,
- start_point: Sequence[float] | None = None,
- clockwise: bool = False,
- ) -> np.ndarray:
- """
- Generate helix with S-curve (smoothstep) profile.
- Provides jerk-limited motion.
- """
- # Calculate total angle
- num_revolutions = height / pitch if pitch > 0 else 1
- total_angle = 2 * np.pi * num_revolutions
-
- # Setup coordinate system
- axis_np: np.ndarray = np.array(axis, dtype=float)
- axis_np = axis_np / np.linalg.norm(axis_np)
- u = self._get_perpendicular_vector(axis_np)
- v = np.cross(axis_np, u)
- center_np = np.array(center, dtype=float)
-
- # Determine starting angle
- start_angle = 0.0
- if start_point is not None:
- start_pos = np.array(start_point[:3])
- to_start = start_pos - center_np
- to_start_plane = to_start - np.dot(to_start, axis_np) * axis_np
-
- if np.linalg.norm(to_start_plane) > 0.001:
- to_start_normalized = to_start_plane / np.linalg.norm(to_start_plane)
- start_angle = np.arctan2(
- np.dot(to_start_normalized, v), np.dot(to_start_normalized, u)
- )
-
- # Generate trajectory with S-curve profile
- num_points = int(duration * self.control_rate)
- timestamps = np.linspace(0, duration, num_points)
- trajectory = []
-
- for t in timestamps:
- # S-curve (smoothstep): 3τ² - 2τ³
- tau = t / duration
- if tau <= 0.0:
- progress = 0.0
- elif tau >= 1.0:
- progress = 1.0
- else:
- progress = tau * tau * (3.0 - 2.0 * tau)
-
- # Apply to motion
- if clockwise:
- theta = start_angle - total_angle * progress
- else:
- theta = start_angle + total_angle * progress
-
- z_offset = height * progress
-
- # Calculate position
- pos = (
- center_np
- + radius * (np.cos(theta) * u + np.sin(theta) * v)
- + z_offset * axis_np
- )
-
- # Orientation
- orient = [0, 0, 0] if start_point is None else start_point[3:6]
-
- trajectory.append(np.concatenate([pos, orient]))
-
- return np.array(trajectory)
diff --git a/parol6/smooth_motion/motion_blender.py b/parol6/smooth_motion/motion_blender.py
deleted file mode 100644
index 0900877..0000000
--- a/parol6/smooth_motion/motion_blender.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-Motion blending utilities.
-"""
-
-from __future__ import annotations
-
-import numpy as np
-from scipy.spatial.transform import Rotation, Slerp
-
-
-class MotionBlender:
- """Blend between different motion segments for smooth transitions"""
-
- def __init__(self, blend_time: float = 0.5):
- self.blend_time = blend_time
-
- def blend_trajectories(
- self, traj1: np.ndarray, traj2: np.ndarray, blend_samples: int = 50
- ) -> np.ndarray:
- """Blend two trajectory segments with improved velocity continuity"""
-
- if blend_samples < 4:
- return np.vstack([traj1, traj2])
-
- # Use more samples for smoother blending
- blend_samples = max(blend_samples, 20) # Minimum 20 samples for smooth blend
-
- # Trajectory overlap region analysis
- overlap_start = max(0, len(traj1) - blend_samples // 3)
- overlap_end = min(len(traj2), blend_samples // 3)
-
- # Extract blend region
- blend_start_pose = (
- traj1[overlap_start] if overlap_start < len(traj1) else traj1[-1]
- )
- blend_end_pose = traj2[overlap_end] if overlap_end < len(traj2) else traj2[0]
-
- # Generate smooth transition using S-curve
- blended: list[np.ndarray] = []
- for i in range(blend_samples):
- t = i / (blend_samples - 1)
- # Use smoothstep function for smoother acceleration
- s = t * t * (3 - 2 * t) # Smoothstep
-
- # Blend position
- pos_blend = blend_start_pose * (1 - s) + blend_end_pose * s
-
- # Orientation interpolation via SLERP (pass array-like time)
- r1 = Rotation.from_euler("xyz", blend_start_pose[3:], degrees=True)
- r2 = Rotation.from_euler("xyz", blend_end_pose[3:], degrees=True)
- key_rots = Rotation.from_quat(np.stack([r1.as_quat(), r2.as_quat()]))
- slerp = Slerp(np.array([0.0, 1.0], dtype=float), key_rots)
- orient_blend = slerp(np.array([float(s)], dtype=float)).as_euler(
- "xyz", degrees=True
- )[0]
-
- pos_blend[3:] = orient_blend
- blended.append(pos_blend)
-
- # Combine with better overlap handling
- result = np.vstack(
- [traj1[:overlap_start], np.array(blended), traj2[overlap_end:]]
- )
-
- return result
diff --git a/parol6/smooth_motion/motion_constraints.py b/parol6/smooth_motion/motion_constraints.py
deleted file mode 100644
index aa0c661..0000000
--- a/parol6/smooth_motion/motion_constraints.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-Motion constraints for PAROL6 robot.
-
-Defines per-joint limits for velocity, acceleration, and jerk
-based on gear ratios and mechanical properties.
-"""
-
-import numpy as np
-
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-
-
-class MotionConstraints:
- """
- Motion constraints for PAROL6 robot.
-
- Defines per-joint limits for velocity, acceleration, and jerk
- based on gear ratios and mechanical properties.
- """
-
- def __init__(self):
- """Initialize with PAROL6-specific constraints."""
- # Use gear ratios from PAROL6_ROBOT.py
- self.gear_ratios = [float(x) for x in PAROL6_ROBOT.joint.ratio.tolist()]
-
- # Use jerk limits from PAROL6_ROBOT.py
- self.max_jerk = [float(x) for x in PAROL6_ROBOT.joint.jerk.max.tolist()]
-
- # Use maximum velocities from PAROL6_ROBOT.py
- self.max_velocity = [float(x) for x in PAROL6_ROBOT.joint.speed.max.tolist()]
-
- # Use max acceleration from PAROL6_ROBOT.py (convert from RAD/S² to STEP/S²)
- # steps/s² = (rad/s²) * (steps/rad) , and steps/rad = ratio / radian_per_step
- max_acc_rad = float(PAROL6_ROBOT.joint.acc.max_rad)
- steps_per_rad_base = 1.0 / float(PAROL6_ROBOT.conv.radian_per_step)
- self.max_acceleration = [
- max_acc_rad * steps_per_rad_base * float(ratio)
- for ratio in PAROL6_ROBOT.joint.ratio
- ]
-
- def get_joint_constraints(self, joint_idx: int) -> dict[str, float] | None:
- """Get constraints for specific joint."""
- if joint_idx >= len(self.gear_ratios):
- return None
-
- return {
- "gear_ratio": self.gear_ratios[joint_idx],
- "v_max": self.max_velocity[joint_idx],
- "a_max": self.max_acceleration[joint_idx],
- "j_max": self.max_jerk[joint_idx],
- }
-
- def scale_for_gear_ratio(
- self, joint_idx: int, base_limits: dict[str, float]
- ) -> dict[str, float]:
- """Scale motion limits based on gear ratio."""
- ratio = self.gear_ratios[joint_idx]
-
- # Higher gear ratio = lower speed but higher precision
- scaled = {
- "v_max": base_limits["v_max"] / ratio,
- "a_max": base_limits["a_max"] / ratio,
- "j_max": base_limits["j_max"] / ratio,
- }
-
- return scaled
-
- def validate_trajectory(
- self, trajectory: np.ndarray, joint_idx: int, dt: float = 0.01
- ) -> dict[str, float | bool]:
- """
- Validate that trajectory respects constraints.
-
- Returns:
- Dictionary with validation results
- """
- constraints = self.get_joint_constraints(joint_idx)
- if constraints is None or len(trajectory) < 3:
- return {
- "velocity_ok": True,
- "acceleration_ok": True,
- "jerk_ok": True,
- "max_velocity": 0.0,
- "max_acceleration": 0.0,
- "max_jerk": 0.0,
- }
-
- # Calculate derivatives numerically
- velocities = np.diff(trajectory) / dt
- accelerations = np.diff(velocities) / dt
- jerks = np.diff(accelerations) / dt
-
- validation: dict[str, float | bool] = {
- "velocity_ok": bool(np.all(np.abs(velocities) <= constraints["v_max"])),
- "acceleration_ok": bool(
- np.all(np.abs(accelerations) <= constraints["a_max"])
- ),
- "jerk_ok": bool(np.all(np.abs(jerks) <= constraints["j_max"])),
- "max_velocity": float(np.max(np.abs(velocities)))
- if velocities.size
- else 0.0,
- "max_acceleration": float(np.max(np.abs(accelerations)))
- if accelerations.size
- else 0.0,
- "max_jerk": float(np.max(np.abs(jerks))) if jerks.size else 0.0,
- }
-
- return validation
diff --git a/parol6/smooth_motion/quintic.py b/parol6/smooth_motion/quintic.py
deleted file mode 100644
index 6ef76b0..0000000
--- a/parol6/smooth_motion/quintic.py
+++ /dev/null
@@ -1,392 +0,0 @@
-"""
-Quintic polynomial primitives and multi-axis quintic trajectory.
-"""
-
-from typing import Any, Optional
-
-import numpy as np
-
-from .motion_constraints import MotionConstraints
-
-
-class QuinticPolynomial:
- """
- Single-axis quintic polynomial trajectory primitive.
-
- Provides C² continuous trajectories (continuous position, velocity, acceleration)
- with zero jerk at boundaries. This is the building block for S-curve profiles
- and advanced multi-segment trajectories.
- """
-
- def __init__(
- self,
- q0: float,
- qf: float,
- v0: float = 0,
- vf: float = 0,
- a0: float = 0,
- af: float = 0,
- T: float = 1.0,
- ):
- """
- Generate quintic polynomial trajectory.
-
- Args:
- q0: Initial position
- qf: Final position
- v0: Initial velocity (default 0)
- vf: Final velocity (default 0)
- a0: Initial acceleration (default 0)
- af: Final acceleration (default 0)
- T: Duration of trajectory (must be > 0)
- """
- if T <= 0:
- raise ValueError(f"Duration must be positive, got T={T}")
-
- self.T = T
- self.q0 = q0
- self.qf = qf
-
- # Store boundary conditions
- self.boundary_conditions = {
- "q0": q0,
- "qf": qf,
- "v0": v0,
- "vf": vf,
- "a0": a0,
- "af": af,
- }
-
- # Solve for polynomial coefficients using analytical method
- self.coeffs = self._solve_coefficients_analytical(q0, qf, v0, vf, a0, af, T)
-
- # Pre-compute coefficient derivatives for faster evaluation
- self._prepare_derivative_coeffs()
-
- def _solve_coefficients_analytical(self, q0, qf, v0, vf, a0, af, T):
- """
- Analytical solution for quintic polynomial coefficients.
-
- Uses closed-form solution to avoid numerical issues with matrix inversion.
- Works in normalized time [0,1] then scales back to actual time.
-
- The quintic polynomial is: q(t) = a0 + a1*t + a2*t² + a3*t³ + a4*t⁴ + a5*t⁵
-
- Returns:
- numpy array of coefficients [a0, a1, a2, a3, a4, a5]
- """
- # Scale boundary conditions for normalized time τ = t/T
- v0_norm = v0 * T
- vf_norm = vf * T
- a0_norm = a0 * T**2
- af_norm = af * T**2
-
- # Coefficients in normalized time domain
- a0_ = q0
- a1_ = v0_norm
- a2_ = a0_norm / 2.0
-
- a3_ = 10 * (qf - q0) - 6 * v0_norm - 4 * vf_norm - (3 * a0_norm - af_norm) / 2.0
- a4_ = (
- -15 * (qf - q0)
- + 8 * v0_norm
- + 7 * vf_norm
- + (3 * a0_norm - 2 * af_norm) / 2.0
- )
- a5_ = 6 * (qf - q0) - 3 * (v0_norm + vf_norm) - (a0_norm - af_norm) / 2.0
-
- # Convert back to actual time domain
- coeffs = np.array(
- [a0_, a1_ / T, a2_ / T**2, a3_ / T**3, a4_ / T**4, a5_ / T**5]
- )
- return coeffs
-
- def _prepare_derivative_coeffs(self):
- """Pre-compute coefficients for velocity, acceleration, and jerk."""
- self.vel_coeffs = np.array(
- [
- self.coeffs[1],
- 2 * self.coeffs[2],
- 3 * self.coeffs[3],
- 4 * self.coeffs[4],
- 5 * self.coeffs[5],
- ]
- )
- self.acc_coeffs = np.array(
- [
- 2 * self.coeffs[2],
- 6 * self.coeffs[3],
- 12 * self.coeffs[4],
- 20 * self.coeffs[5],
- ]
- )
- self.jerk_coeffs = np.array(
- [6 * self.coeffs[3], 24 * self.coeffs[4], 60 * self.coeffs[5]]
- )
-
- def position(self, t: float) -> float:
- """Evaluate position at time t using Horner's method."""
- if t < 0:
- return self.q0
- if t > self.T:
- return self.qf
- result = self.coeffs[5]
- for i in range(4, -1, -1):
- result = result * t + self.coeffs[i]
- return result
-
- def velocity(self, t: float) -> float:
- """Evaluate velocity at time t using Horner's method."""
- if t < 0:
- return self.boundary_conditions["v0"]
- if t > self.T:
- return self.boundary_conditions["vf"]
- result = self.vel_coeffs[4] if len(self.vel_coeffs) > 4 else 0
- for i in range(min(3, len(self.vel_coeffs) - 1), -1, -1):
- result = result * t + self.vel_coeffs[i]
- return result
-
- def acceleration(self, t: float) -> float:
- """Evaluate acceleration at time t using Horner's method."""
- if t < 0:
- return self.boundary_conditions["a0"]
- if t > self.T:
- return self.boundary_conditions["af"]
- result = self.acc_coeffs[3] if len(self.acc_coeffs) > 3 else 0
- for i in range(min(2, len(self.acc_coeffs) - 1), -1, -1):
- result = result * t + self.acc_coeffs[i]
- return result
-
- def jerk(self, t: float) -> float:
- """Evaluate jerk at time t using Horner's method."""
- if t < 0 or t > self.T:
- return 0
- result = self.jerk_coeffs[2] if len(self.jerk_coeffs) > 2 else 0
- for i in range(min(1, len(self.jerk_coeffs) - 1), -1, -1):
- result = result * t + self.jerk_coeffs[i]
- return result
-
- def evaluate(self, t: float, derivative: int = 0) -> float:
- """
- Unified evaluation function for any derivative order.
-
- Args:
- t: Time point to evaluate
- derivative: 0=position, 1=velocity, 2=acceleration, 3=jerk
- """
- if derivative == 0:
- return self.position(t)
- if derivative == 1:
- return self.velocity(t)
- if derivative == 2:
- return self.acceleration(t)
- if derivative == 3:
- return self.jerk(t)
- raise ValueError(f"Derivative order {derivative} not supported (max is 3)")
-
- def get_trajectory_points(self, dt: float = 0.01) -> dict[str, np.ndarray]:
- """
- Generate trajectory points at specified time interval.
-
- Args:
- dt: Time step
- """
- time_points = np.arange(0, self.T + dt, dt)
- trajectory = {
- "time": time_points,
- "position": np.array([self.position(t) for t in time_points]),
- "velocity": np.array([self.velocity(t) for t in time_points]),
- "acceleration": np.array([self.acceleration(t) for t in time_points]),
- "jerk": np.array([self.jerk(t) for t in time_points]),
- }
- return trajectory
-
- def validate_continuity(self, tolerance: float = 1e-10) -> dict[str, bool]:
- """
- Validate that boundary conditions are satisfied.
- """
- validation = {
- "q0": abs(self.position(0) - self.boundary_conditions["q0"]) < tolerance,
- "qf": abs(self.position(self.T) - self.boundary_conditions["qf"])
- < tolerance,
- "v0": abs(self.velocity(0) - self.boundary_conditions["v0"]) < tolerance,
- "vf": abs(self.velocity(self.T) - self.boundary_conditions["vf"])
- < tolerance,
- "a0": abs(self.acceleration(0) - self.boundary_conditions["a0"])
- < tolerance,
- "af": abs(self.acceleration(self.T) - self.boundary_conditions["af"])
- < tolerance,
- }
- return validation
-
- def validate_numerical_stability(self) -> dict[str, Any]:
- """
- Check for potential numerical stability issues.
- """
- warnings: list[str] = []
- metrics: dict[str, float] = {}
- is_stable = True
-
- # Check condition number (ratio of time to distance)
- distance = abs(self.qf - self.q0)
- if distance > 1e-6:
- time_distance_ratio = self.T / distance
- metrics["time_distance_ratio"] = time_distance_ratio
- if time_distance_ratio > 100:
- is_stable = False
- warnings.append(
- f"Poor conditioning: T/d ratio = {time_distance_ratio:.1f}"
- )
-
- # Check coefficient magnitudes
- coeff_magnitudes = [abs(c) for c in self.coeffs]
- max_coeff = max(coeff_magnitudes)
- nz = [m for m in coeff_magnitudes if m > 1e-10]
- min_nonzero_coeff = min(nz) if nz else 0.0
-
- if min_nonzero_coeff > 0:
- coeff_ratio = max_coeff / min_nonzero_coeff
- metrics["coefficient_ratio"] = coeff_ratio
- if coeff_ratio > 1e6:
- warnings.append(f"Large coefficient ratio: {coeff_ratio:.2e}")
-
- if self.T < 0.001:
- warnings.append(
- f"Very small duration T={self.T} may cause numerical issues"
- )
-
- max_jerk = max(abs(self.jerk(t)) for t in np.linspace(0, self.T, 10))
- if max_jerk > 1e6:
- warnings.append(f"Very large jerk values: {max_jerk:.2e}")
-
- return {"is_stable": is_stable, "warnings": warnings, "metrics": metrics}
-
-
-class MultiAxisQuinticTrajectory:
- """
- Multi-axis synchronized quintic trajectory generator.
-
- Ensures all axes complete their motion simultaneously using a
- time-scaling approach.
- """
-
- def __init__(
- self,
- q0: list[float],
- qf: list[float],
- v0: list[float] | None = None,
- vf: list[float] | None = None,
- a0: list[float] | None = None,
- af: list[float] | None = None,
- T: float | None = None,
- constraints: Optional["MotionConstraints"] = None,
- ):
- """
- Generate synchronized quintic trajectories for multiple axes.
-
- Args:
- q0: Initial positions for each axis
- qf: Final positions for each axis
- v0: Initial velocities (default zeros)
- vf: Final velocities (default zeros)
- a0: Initial accelerations (default zeros)
- af: Final accelerations (default zeros)
- T: Duration (if None, calculated from constraints)
- constraints: Motion constraints for time calculation
- """
- self.num_axes = len(q0)
-
- # Default boundary conditions (use floats to satisfy typing)
- v0 = v0 if v0 is not None else [0.0] * self.num_axes
- vf = vf if vf is not None else [0.0] * self.num_axes
- a0 = a0 if a0 is not None else [0.0] * self.num_axes
- af = af if af is not None else [0.0] * self.num_axes
-
- # Determine duration as a concrete float
- T_val: float = (
- T
- if T is not None
- else self._calculate_minimum_time(q0, qf, v0, vf, constraints)
- )
- self.T: float = T_val
-
- # Generate quintic polynomial for each axis
- self.axis_trajectories: list[QuinticPolynomial] = []
- for i in range(self.num_axes):
- quintic = QuinticPolynomial(q0[i], qf[i], v0[i], vf[i], a0[i], af[i], T_val)
- self.axis_trajectories.append(quintic)
-
- def _calculate_minimum_time(
- self,
- q0: list[float],
- qf: list[float],
- v0: list[float],
- vf: list[float],
- constraints: Optional["MotionConstraints"],
- ) -> float:
- """
- Calculate minimum time based on velocity and acceleration constraints.
- """
- if constraints is None:
- # Default time based on distance
- max_distance = max(abs(qf[i] - q0[i]) for i in range(self.num_axes))
- return max(2.0, max_distance / 50.0) # Assume 50 units/s default
-
- min_times: list[float] = []
- for i in range(self.num_axes):
- distance = abs(qf[i] - q0[i])
-
- # Time based on velocity limit
- if constraints.max_velocity and i < len(constraints.max_velocity):
- t_vel = distance / constraints.max_velocity[i]
- min_times.append(t_vel)
-
- # Time based on acceleration limit (triangular profile)
- if constraints.max_acceleration and i < len(constraints.max_acceleration):
- t_acc = 2 * np.sqrt(distance / constraints.max_acceleration[i])
- min_times.append(t_acc)
-
- # Use maximum time to ensure all constraints are satisfied
- return max(min_times) * 1.2 if min_times else 2.0
-
- def evaluate_all(self, t: float) -> dict[str, list[float]]:
- """
- Evaluate all axes at time t.
- """
- result: dict[str, list[float]] = {
- "position": [],
- "velocity": [],
- "acceleration": [],
- "jerk": [],
- }
- for traj in self.axis_trajectories:
- result["position"].append(traj.position(t))
- result["velocity"].append(traj.velocity(t))
- result["acceleration"].append(traj.acceleration(t))
- result["jerk"].append(traj.jerk(t))
- return result
-
- def get_trajectory_points(self, dt: float = 0.01) -> dict[str, np.ndarray]:
- """Generate trajectory points for all axes."""
- time_points = np.arange(0, self.T + dt, dt)
- num_points = len(time_points)
-
- positions = np.zeros((num_points, self.num_axes))
- velocities = np.zeros((num_points, self.num_axes))
- accelerations = np.zeros((num_points, self.num_axes))
- jerks = np.zeros((num_points, self.num_axes))
-
- for i, t in enumerate(time_points):
- values = self.evaluate_all(t)
- positions[i] = values["position"]
- velocities[i] = values["velocity"]
- accelerations[i] = values["acceleration"]
- jerks[i] = values["jerk"]
-
- return {
- "time": time_points,
- "position": positions,
- "velocity": velocities,
- "acceleration": accelerations,
- "jerk": jerks,
- }
diff --git a/parol6/smooth_motion/scurve.py b/parol6/smooth_motion/scurve.py
deleted file mode 100644
index cbc6b04..0000000
--- a/parol6/smooth_motion/scurve.py
+++ /dev/null
@@ -1,729 +0,0 @@
-"""
-S-curve profile generator and multi-axis S-curve trajectory.
-"""
-
-from typing import TYPE_CHECKING, TypedDict
-
-import numpy as np
-
-if TYPE_CHECKING:
- # For type checkers and to satisfy linters referencing QuinticPolynomial in annotations
- from .quintic import QuinticPolynomial
-
-from .motion_constraints import MotionConstraints
-
-
-class FeasibilityInfo(TypedDict, total=False):
- status: str
- warnings: list[str]
- achievable_a: float
- achievable_v: float
-
-
-class SCurveProfile:
- """
- Seven-segment S-curve velocity profile generator.
-
- Creates jerk-limited trajectories with smooth acceleration transitions.
- The seven segments are:
- 1. Acceleration buildup (constant positive jerk)
- 2. Constant acceleration
- 3. Acceleration ramp-down (constant negative jerk)
- 4. Constant velocity (cruise)
- 5. Deceleration buildup (constant negative jerk)
- 6. Constant deceleration
- 7. Deceleration ramp-down (constant positive jerk)
- """
-
- def __init__(
- self,
- distance: float,
- v_max: float,
- a_max: float,
- j_max: float,
- v_start: float = 0,
- v_end: float = 0,
- ):
- """
- Calculate S-curve profile for given motion parameters.
-
- Args:
- distance: Total distance to travel
- v_max: Maximum velocity constraint
- a_max: Maximum acceleration constraint
- j_max: Maximum jerk constraint
- v_start: Initial velocity (default 0)
- v_end: Final velocity (default 0)
- """
- self.distance = distance
- self.v_max = float(v_max)
- self.a_max = float(a_max)
- self.j_max = float(j_max)
- self.v_start = float(v_start)
- self.v_end = float(v_end)
-
- # Initialize typed containers for type checkers
- self.profile_type: str = ""
- self.segments: dict[str, float] = {}
- self.segment_boundaries: list[dict[str, float]] = []
-
- # Check feasibility and adjust parameters if needed
- self.feasibility_status = self._check_feasibility()
-
- # Calculate profile type and segment durations
- self.profile_type, self.segments = self._calculate_profile()
-
- # Pre-calculate segment boundaries for proper evaluation
- self._calculate_segment_boundaries()
-
- def _calculate_profile(self) -> tuple[str, dict[str, float]]:
- """
- Calculate the S-curve profile type and segment durations.
-
- Returns:
- profile_type: 'full', 'triangular', 'reduced', or 'asymmetric'
- segments: Dictionary with segment durations
- """
- # For symmetric profiles with v_start = v_end = 0
- if self.v_start == 0 and self.v_end == 0:
- return self._calculate_symmetric_profile()
- # Asymmetric profiles for non-zero boundary velocities
- return self._calculate_asymmetric_profile()
-
- def _calculate_symmetric_profile(self) -> tuple[str, dict[str, float]]:
- """Calculate symmetric S-curve profile (v_start = v_end = 0)."""
- # Time to reach maximum acceleration from zero (jerk phase)
- T_j = self.a_max / self.j_max if self.j_max > 0 else 0.0
-
- # Distance covered during jerk phases
- d_jerk = (self.a_max**3) / (self.j_max**2) if self.j_max > 0 else 0.0
-
- # Check if we can reach maximum velocity
- d_to_vmax = (
- (self.v_max**2 / self.a_max + self.v_max * self.a_max / self.j_max)
- if self.a_max > 0 and self.j_max > 0
- else float("inf")
- )
-
- if self.distance < 2 * d_jerk:
- # Case 1: Reduced acceleration profile (never reach a_max)
- profile_type = "reduced"
- a_reached = (
- (abs(self.distance) * self.j_max**2 / 2) ** (1 / 3)
- if self.j_max > 0
- else 0.0
- )
- T_j_actual = a_reached / self.j_max if self.j_max > 0 else 0.0
-
- segments = {
- "T_j1": T_j_actual, # Jerk up
- "T_a": 0.0, # No constant acceleration
- "T_j2": T_j_actual, # Jerk down
- "T_v": 0.0, # No cruise
- "T_j3": T_j_actual, # Jerk down (decel)
- "T_d": 0.0, # No constant deceleration
- "T_j4": T_j_actual, # Jerk up (decel end)
- "a_reached": a_reached,
- "v_reached": a_reached * T_j_actual,
- }
- elif self.distance < 2 * d_to_vmax:
- # Case 2: Triangular velocity profile (never reach v_max)
- profile_type = "triangular"
-
- v_reached = np.sqrt(
- max(self.distance * self.a_max - 2 * self.a_max**3 / self.j_max**2, 0.0)
- )
- T_a = (
- (v_reached - self.a_max**2 / self.j_max) / self.a_max
- if self.a_max > 0 and self.j_max > 0
- else 0.0
- )
-
- segments = {
- "T_j1": T_j,
- "T_a": T_a,
- "T_j2": T_j,
- "T_v": 0.0, # No cruise phase
- "T_j3": T_j,
- "T_d": T_a,
- "T_j4": T_j,
- "v_reached": v_reached,
- }
- else:
- # Case 3: Full S-curve with cruise phase
- profile_type = "full"
-
- # Time at constant acceleration (after jerk phases)
- T_a = (
- (self.v_max - self.a_max**2 / self.j_max) / self.a_max
- if self.a_max > 0 and self.j_max > 0
- else 0.0
- )
-
- # Distance covered during acceleration/deceleration
- d_accel = (
- self.v_max**2 / self.a_max + self.v_max * self.a_max / self.j_max
- if self.a_max > 0 and self.j_max > 0
- else 0.0
- )
-
- # Distance at cruise velocity
- d_cruise = self.distance - 2 * d_accel
-
- # Time at cruise velocity
- T_v = d_cruise / self.v_max if self.v_max > 0 else 0.0
-
- segments = {
- "T_j1": T_j,
- "T_a": max(T_a, 0.0),
- "T_j2": T_j,
- "T_v": max(T_v, 0.0),
- "T_j3": T_j,
- "T_d": max(T_a, 0.0),
- "T_j4": T_j,
- "v_reached": self.v_max,
- }
-
- # Calculate total time
- total_time = (
- segments.get("T_j1", 0.0)
- + segments.get("T_a", 0.0)
- + segments.get("T_j2", 0.0)
- + segments.get("T_v", 0.0)
- + segments.get("T_j3", 0.0)
- + segments.get("T_d", 0.0)
- + segments.get("T_j4", 0.0)
- )
- segments["T_total"] = total_time
-
- return profile_type, segments
-
- def _calculate_asymmetric_profile(self) -> tuple[str, dict[str, float]]:
- """Calculate asymmetric S-curve for non-zero boundary velocities."""
- v_diff = abs(self.v_end - self.v_start)
- v_avg = (self.v_start + self.v_end) / 2
-
- # Time to change between velocities at max acceleration
- T_vel_change = v_diff / self.a_max if self.a_max > 0 else 0.0
-
- # Jerk time (time to reach max acceleration)
- T_j = self.a_max / self.j_max if self.j_max > 0 else 0.0
-
- if self.v_start < self.v_max and self.v_end < self.v_max:
- v_peak = min(
- self.v_max, v_avg + np.sqrt(max(self.distance * self.a_max, 0.0))
- )
-
- T_accel = (v_peak - self.v_start) / self.a_max if self.a_max > 0 else 0.0
- T_a = max(0.0, T_accel - 2 * T_j)
-
- T_decel = (v_peak - self.v_end) / self.a_max if self.a_max > 0 else 0.0
- T_d = max(0.0, T_decel - 2 * T_j)
-
- d_accel = (
- self.v_start * (T_a + 2 * T_j) + 0.5 * self.a_max * (T_a + T_j) ** 2
- )
- d_decel = self.v_end * (T_d + 2 * T_j) + 0.5 * self.a_max * (T_d + T_j) ** 2
- d_cruise = self.distance - d_accel - d_decel
-
- if d_cruise > 0 and v_peak > 0:
- T_v = d_cruise / v_peak
- else:
- T_v = 0.0
- v_peak = np.sqrt(
- max(
- (
- 2 * self.distance * self.a_max
- + self.v_start**2
- + self.v_end**2
- )
- / 2,
- 0.0,
- )
- )
- else:
- # Simple case - just decelerate
- T_a = 0.0
- T_v = 0.0
- T_d = T_vel_change
- v_peak = max(self.v_start, self.v_end)
-
- segments = {
- "T_j1": T_j,
- "T_a": T_a,
- "T_j2": T_j,
- "T_v": T_v,
- "T_j3": T_j,
- "T_d": T_d,
- "T_j4": T_j,
- "v_reached": v_peak,
- "T_total": 2 * T_j + T_a + T_v + 2 * T_j + T_d,
- }
- return "asymmetric", segments
-
- def _check_feasibility(self) -> FeasibilityInfo:
- """
- Check if S-curve profile is achievable with given constraints.
-
- Returns:
- Dictionary with feasibility status and adjusted parameters
- """
- # Minimum distance to reach maximum acceleration
- d_min_jerk = (self.a_max**3) / (self.j_max**2) if self.j_max > 0 else 0.0
-
- # Minimum distance to reach maximum velocity
- d_min_vel = (
- (self.v_max**2 / self.a_max + self.v_max * self.a_max / self.j_max)
- if self.a_max > 0 and self.j_max > 0
- else float("inf")
- )
-
- warnings: list[str] = []
- feasibility: FeasibilityInfo = {"status": "feasible", "warnings": warnings}
-
- if self.distance < 2 * d_min_jerk:
- achievable_a = (
- (abs(self.distance) * self.j_max**2 / 2) ** (1 / 3)
- if self.j_max > 0
- else 0.0
- )
- feasibility["status"] = "reduced_acceleration"
- feasibility["achievable_a"] = achievable_a
- feasibility["warnings"].append(
- f"Distance too short to reach full acceleration. Max achievable: {achievable_a:.2f}"
- )
- elif self.distance < 2 * d_min_vel:
- achievable_v = (
- np.sqrt(self.distance * self.a_max) if self.a_max > 0 else 0.0
- )
- feasibility["status"] = "triangular_velocity"
- feasibility["achievable_v"] = achievable_v
- feasibility["warnings"].append(
- f"Distance too short to reach full velocity. Max achievable: {achievable_v:.2f}"
- )
-
- if self.distance > 0 and self.a_max > 0:
- time_estimate = 2 * np.sqrt(self.distance / self.a_max)
- if time_estimate > 100:
- feasibility["warnings"].append(
- f"Very long trajectory time ({time_estimate:.1f}s) may cause numerical issues"
- )
-
- if self.v_max <= 0 or self.a_max <= 0 or self.j_max <= 0:
- feasibility["status"] = "invalid_constraints"
- feasibility["warnings"].append(
- "Invalid constraints: v_max, a_max, and j_max must all be positive"
- )
-
- return feasibility
-
- def _calculate_segment_boundaries(self) -> None:
- """
- Pre-calculate position, velocity, and acceleration at segment boundaries.
- This ensures proper continuity across segments.
- """
- boundary_list: list[dict[str, float]] = []
-
- # Initial state
- pos = 0.0
- vel = self.v_start
- acc = 0.0
-
- # Segment 1: Jerk up (acceleration buildup)
- T_j1 = self.segments["T_j1"]
- if T_j1 > 0:
- j = self.j_max
- acc_end = acc + j * T_j1
- vel_end = vel + acc * T_j1 + 0.5 * j * T_j1**2
- pos_end = pos + vel * T_j1 + 0.5 * acc * T_j1**2 + (1 / 6) * j * T_j1**3
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_j1,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
-
- # Segment 2: Constant acceleration
- T_a = self.segments["T_a"]
- if T_a > 0:
- j = 0.0
- acc_end = acc
- vel_end = vel + acc * T_a
- pos_end = pos + vel * T_a + 0.5 * acc * T_a**2
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_a,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
-
- # Segment 3: Jerk down (acceleration ramp-down)
- T_j2 = self.segments["T_j2"]
- if T_j2 > 0:
- j = -self.j_max
- acc_end = acc + j * T_j2
- vel_end = vel + acc * T_j2 + 0.5 * j * T_j2**2
- pos_end = pos + vel * T_j2 + 0.5 * acc * T_j2**2 + (1 / 6) * j * T_j2**3
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_j2,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
-
- # Segment 4: Constant velocity (cruise)
- T_v = self.segments["T_v"]
- if T_v > 0:
- j = 0.0
- acc_end = 0.0
- vel_end = vel
- pos_end = pos + vel * T_v
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_v,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
-
- # Segment 5: Jerk down (deceleration buildup)
- T_j3 = self.segments["T_j3"]
- if T_j3 > 0:
- j = -self.j_max
- acc_end = j * T_j3
- vel_end = vel + 0.5 * j * T_j3**2
- pos_end = pos + vel * T_j3 + (1 / 6) * j * T_j3**3
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_j3,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
-
- # Segment 6: Constant deceleration
- T_d = self.segments["T_d"]
- if T_d > 0:
- j = 0.0
- acc_end = acc
- vel_end = vel + acc * T_d
- pos_end = pos + vel * T_d + 0.5 * acc * T_d**2
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_d,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
-
- # Segment 7: Jerk up (deceleration ramp-down)
- T_j4 = self.segments["T_j4"]
- if T_j4 > 0:
- j = self.j_max
- acc_end = acc + j * T_j4
- vel_end = vel + acc * T_j4 + 0.5 * j * T_j4**2
- pos_end = pos + vel * T_j4 + 0.5 * acc * T_j4**2 + (1 / 6) * j * T_j4**3
- boundary_list.append(
- {
- "pos_start": pos,
- "vel_start": vel,
- "acc_start": acc,
- "pos_end": pos_end,
- "vel_end": vel_end,
- "acc_end": acc_end,
- "jerk": j,
- "duration": T_j4,
- }
- )
- pos, vel, acc = pos_end, vel_end, acc_end
- # finalize
- self.segment_boundaries = boundary_list
-
- def generate_trajectory_quintics(self) -> list["QuinticPolynomial"]:
- """
- Generate quintic polynomials for each segment of the S-curve.
-
- Returns:
- List of QuinticPolynomial objects representing each segment
- """
- from .quintic import QuinticPolynomial # Local import to avoid cycle
-
- quintics: list[QuinticPolynomial] = []
- for seg in self.segment_boundaries:
- if seg["duration"] > 0:
- q = QuinticPolynomial(
- seg["pos_start"],
- seg["pos_end"],
- seg["vel_start"],
- seg["vel_end"],
- seg["acc_start"],
- seg["acc_end"],
- seg["duration"],
- )
- quintics.append(q)
-
- return quintics
-
- def get_total_time(self) -> float:
- """Get total time for the S-curve trajectory."""
- return float(self.segments["T_total"])
-
- def evaluate_at_time(self, t: float) -> dict[str, float]:
- """
- Evaluate S-curve at specific time.
-
- Returns:
- Dictionary with position, velocity, acceleration, jerk
- """
- if t <= 0:
- return {
- "position": 0.0,
- "velocity": self.v_start,
- "acceleration": 0.0,
- "jerk": 0.0,
- }
-
- if t >= self.segments["T_total"]:
- if self.segment_boundaries:
- last = self.segment_boundaries[-1]
- return {
- "position": last["pos_end"],
- "velocity": last["vel_end"],
- "acceleration": 0.0,
- "jerk": 0.0,
- }
- return {
- "position": self.distance,
- "velocity": self.v_end,
- "acceleration": 0.0,
- "jerk": 0.0,
- }
-
- # Find which segment we're in
- cumulative_time = 0.0
- for seg in self.segment_boundaries:
- if t <= cumulative_time + seg["duration"]:
- t_in_segment = t - cumulative_time
- return self._evaluate_in_segment(seg, t_in_segment)
- cumulative_time += seg["duration"]
-
- # Fallback
- return {
- "position": self.distance,
- "velocity": self.v_end,
- "acceleration": 0.0,
- "jerk": 0.0,
- }
-
- def _evaluate_in_segment(
- self, segment: dict[str, float], t: float
- ) -> dict[str, float]:
- """
- Evaluate motion within a specific segment using proper kinematics.
- """
- p0 = segment["pos_start"]
- v0 = segment["vel_start"]
- a0 = segment["acc_start"]
- j = segment["jerk"]
-
- # Kinematics within segment
- acc = a0 + j * t
- vel = v0 + a0 * t + 0.5 * j * t**2
- pos = p0 + v0 * t + 0.5 * a0 * t**2 + (1 / 6) * j * t**3
-
- return {"position": pos, "velocity": vel, "acceleration": acc, "jerk": j}
-
-
-class MultiAxisSCurveTrajectory:
- """
- Multi-axis synchronized S-curve trajectory generator.
-
- Coordinates multiple S-curve profiles (one per axis) and synchronizes them
- to ensure all axes complete their motion simultaneously while respecting
- individual axis constraints.
- """
-
- def __init__(
- self,
- start_pose: list[float],
- end_pose: list[float],
- v0: list[float] | None = None,
- vf: list[float] | None = None,
- T: float | None = None,
- jerk_limit: float | None = None,
- ):
- """
- Create synchronized S-curve trajectories for multiple axes.
-
- Args:
- start_pose: Starting position for each axis
- end_pose: Target position for each axis
- v0: Initial velocities (defaults to zeros)
- vf: Final velocities (defaults to zeros)
- T: Desired duration (if None, calculates minimum time)
- jerk_limit: Maximum jerk limit to apply to all axes
- """
- self.start_pose = np.array(start_pose, dtype=float)
- self.end_pose = np.array(end_pose, dtype=float)
- self.num_axes = len(start_pose)
-
- self.v0 = (
- np.array(v0, dtype=float)
- if v0 is not None
- else np.zeros(self.num_axes, dtype=float)
- )
- self.vf = (
- np.array(vf, dtype=float)
- if vf is not None
- else np.zeros(self.num_axes, dtype=float)
- )
-
- self.constraints = MotionConstraints()
-
- self.axis_profiles: list[SCurveProfile | None] = []
- self.max_time = 0.0
-
- for i in range(self.num_axes):
- distance = self.end_pose[i] - self.start_pose[i]
-
- if abs(distance) < 1e-6:
- self.axis_profiles.append(None)
- continue
-
- joint_constraints = self.constraints.get_joint_constraints(i)
- if joint_constraints is None:
- joint_constraints = {
- "v_max": 10000.0,
- "a_max": 20000.0,
- "j_max": jerk_limit if jerk_limit else 50000.0,
- }
-
- j_max = jerk_limit if jerk_limit is not None else joint_constraints["j_max"]
-
- axis_profile = SCurveProfile(
- float(distance),
- float(joint_constraints["v_max"]),
- float(joint_constraints["a_max"]),
- float(j_max),
- v_start=float(self.v0[i]),
- v_end=float(self.vf[i]),
- )
- self.axis_profiles.append(axis_profile)
- self.max_time = max(self.max_time, axis_profile.get_total_time())
-
- self.T = float(T) if T is not None else float(self.max_time)
-
- # Calculate time scaling factors for synchronization
- self.time_scales: list[float] = []
- for maybe_profile in self.axis_profiles:
- if maybe_profile is not None and self.T > 0:
- scale = maybe_profile.get_total_time() / self.T
- self.time_scales.append(scale)
- else:
- self.time_scales.append(1.0)
-
- def get_trajectory_points(self, dt: float = 0.01) -> dict[str, np.ndarray]:
- """
- Generate synchronized trajectory points for all axes.
-
- Args:
- dt: Time step for sampling
- """
- num_points = int(self.T / dt) + 1 if self.T > 0 else 1
- timestamps = np.linspace(0, self.T, num_points)
-
- positions = np.zeros((num_points, self.num_axes))
- velocities = np.zeros((num_points, self.num_axes))
- accelerations = np.zeros((num_points, self.num_axes))
-
- for idx, t in enumerate(timestamps):
- for axis in range(self.num_axes):
- axis_profile = self.axis_profiles[axis]
- if axis_profile is None:
- positions[idx, axis] = self.start_pose[axis]
- velocities[idx, axis] = 0.0
- accelerations[idx, axis] = 0.0
- else:
- t_scaled = t * self.time_scales[axis]
- values = axis_profile.evaluate_at_time(t_scaled)
- positions[idx, axis] = self.start_pose[axis] + values["position"]
- velocities[idx, axis] = values["velocity"]
- accelerations[idx, axis] = values["acceleration"]
-
- return {
- "position": positions,
- "velocity": velocities,
- "acceleration": accelerations,
- "timestamps": timestamps,
- }
-
- def evaluate_at_time(self, t: float) -> dict[str, np.ndarray]:
- """
- Evaluate trajectory at specific time.
-
- Args:
- t: Time point to evaluate
- """
- position = np.zeros(self.num_axes)
- velocity = np.zeros(self.num_axes)
- acceleration = np.zeros(self.num_axes)
-
- for axis in range(self.num_axes):
- axis_profile = self.axis_profiles[axis]
- if axis_profile is None:
- position[axis] = self.start_pose[axis]
- velocity[axis] = 0.0
- acceleration[axis] = 0.0
- else:
- t_scaled = min(
- t * self.time_scales[axis], axis_profile.get_total_time()
- )
- values = axis_profile.evaluate_at_time(t_scaled)
- position[axis] = self.start_pose[axis] + values["position"]
- velocity[axis] = values["velocity"]
- acceleration[axis] = values["acceleration"]
-
- return {
- "position": position,
- "velocity": velocity,
- "acceleration": acceleration,
- }
diff --git a/parol6/smooth_motion/spline.py b/parol6/smooth_motion/spline.py
deleted file mode 100644
index b26979d..0000000
--- a/parol6/smooth_motion/spline.py
+++ /dev/null
@@ -1,356 +0,0 @@
-"""
-Spline trajectory generator.
-"""
-
-from __future__ import annotations
-
-from typing import Any
-
-import numpy as np
-from scipy.interpolate import CubicSpline
-from scipy.spatial.transform import Rotation, Slerp
-
-from .base import TrajectoryGenerator
-from .quintic import MultiAxisQuinticTrajectory
-from .scurve import SCurveProfile
-
-
-class SplineMotion(TrajectoryGenerator):
- """Generate smooth spline trajectories through waypoints"""
-
- def generate_quintic_spline_true(
- self,
- waypoints: list[list[float]],
- waypoint_behavior: str = "stop",
- profile_type: str = "s_curve",
- optimization: str = "jerk",
- time_optimal: bool = False,
- jerk_limit: float | None = None,
- ) -> np.ndarray:
- """
- Generate true quintic spline trajectory with optional S-curve profiles.
-
- This is the enhanced version using our quintic polynomial implementation
- instead of the cubic-based version.
-
- Args:
- waypoints: List of poses [x, y, z, rx, ry, rz]
- waypoint_behavior: 'stop' or 'continuous' at waypoints
- profile_type: 's_curve', 'quintic', or 'cubic'
- optimization: 'time', 'jerk', or 'energy'
- time_optimal: Calculate minimum time if True
- """
- if profile_type == "s_curve":
- return self._generate_scurve_waypoints(
- waypoints, waypoint_behavior, optimization, jerk_limit
- )
- if profile_type == "quintic":
- return self._generate_pure_quintic_waypoints(waypoints, waypoint_behavior)
- # Fall back to cubic implementation
- return self.generate_cubic_spline(waypoints)
-
- def _generate_pure_quintic_waypoints(self, waypoints, behavior):
- """Generate quintic trajectories between waypoints."""
- waypoints = np.array(waypoints)
- num_waypoints = len(waypoints)
-
- if num_waypoints < 2:
- return waypoints
-
- # Calculate timing for each segment
- segment_times = []
- for i in range(num_waypoints - 1):
- distance = np.linalg.norm(waypoints[i + 1, :3] - waypoints[i, :3])
- time = float(max(1.0, float(distance) / 50.0)) # 50 mm/s average
- segment_times.append(time)
-
- # Generate trajectory segments
- full_trajectory: list[list[float]] = []
- prev_vf: list[float] | None = None
-
- for i in range(num_waypoints - 1):
- start_pose = waypoints[i]
- end_pose = waypoints[i + 1]
- T = segment_times[i]
-
- # Determine boundary velocities based on behavior
- if behavior == "stop":
- v0 = [0.0] * 6
- vf = [0.0] * 6
- else: # continuous
- if i == 0:
- v0 = [0.0] * 6
- else:
- v0 = prev_vf if prev_vf is not None else [0.0] * 6
-
- if i == num_waypoints - 2:
- vf = [0.0] * 6
- else:
- next_direction = waypoints[i + 2] - waypoints[i + 1]
- next_segment_time = (
- segment_times[i + 1]
- if (i + 1) < len(segment_times)
- else segment_times[i]
- )
- current_direction = waypoints[i + 1] - waypoints[i]
- avg_direction = (
- current_direction / segment_times[i]
- + next_direction / next_segment_time
- ) * 0.5
- vf = list(avg_direction[:6] * 0.7) # Scale factor for stability
-
- prev_vf = vf
-
- # Create multi-axis quintic trajectory
- segment_traj = MultiAxisQuinticTrajectory(
- list(start_pose), list(end_pose), v0, vf, T=T
- )
-
- # Sample the segment
- segment_points = segment_traj.get_trajectory_points(self.dt)
-
- # Add to full trajectory (avoid duplicating waypoints)
- if i == 0:
- full_trajectory.extend(segment_points["position"].tolist())
- else:
- full_trajectory.extend(segment_points["position"][1:].tolist())
-
- return np.array(full_trajectory)
-
- def _generate_scurve_waypoints(
- self, waypoints, behavior, optimization, jerk_limit=None
- ):
- """Generate S-curve trajectories between waypoints."""
- waypoints = np.array(waypoints)
- num_waypoints = len(waypoints)
-
- if num_waypoints < 2:
- return waypoints
-
- full_trajectory: list[list[float]] = []
-
- for i in range(num_waypoints - 1):
- start_pose = waypoints[i]
- end_pose = waypoints[i + 1]
-
- # For each joint, calculate S-curve profile
- segment_trajectories = []
- max_time = 0.0
-
- for j in range(6): # 6 joints
- distance = end_pose[j] - start_pose[j]
-
- # Get joint constraints
- constraints = self.constraints.get_joint_constraints(j)
- if constraints is None:
- constraints = {
- "v_max": 10000.0,
- "a_max": 20000.0,
- "j_max": (jerk_limit if jerk_limit is not None else 50000.0),
- }
-
- # Use provided jerk limit if available, otherwise use constraints
- j_max = jerk_limit if jerk_limit is not None else constraints["j_max"]
-
- # Create S-curve profile
- scurve = SCurveProfile(
- distance, constraints["v_max"], constraints["a_max"], j_max
- )
-
- max_time = max(max_time, scurve.get_total_time())
- segment_trajectories.append(scurve)
-
- # Synchronize all joints to slowest
- if optimization == "time":
- sync_time = max_time
- else:
- sync_time = max_time * 1.2 # margin
-
- # Generate synchronized points
- timestamps = self.generate_timestamps(sync_time)
-
- for t in timestamps:
- pose = []
- for j in range(6):
- joint_scurve = segment_trajectories[j]
- t_normalized = t / sync_time # Normalize to [0, 1]
- t_joint = t_normalized * joint_scurve.get_total_time()
-
- values = joint_scurve.evaluate_at_time(float(t_joint))
- pose.append(float(start_pose[j] + values["position"]))
- full_trajectory.append(pose)
-
- return np.array(full_trajectory)
-
- def generate_cubic_spline(
- self,
- waypoints: list[list[float]],
- timestamps: list[float] | None = None,
- velocity_start: list[float] | None = None,
- velocity_end: list[float] | None = None,
- ) -> np.ndarray:
- """
- Generate cubic spline trajectory through waypoints
-
- Args:
- waypoints: List of poses [x, y, z, rx, ry, rz]
- timestamps: Time for each waypoint (auto-generated if None)
- velocity_start: Initial velocity (zero if None)
- velocity_end: Final velocity (zero if None)
-
- Returns:
- Array of interpolated poses
- """
- waypoints_arr = np.asarray(waypoints, dtype=float)
- num_waypoints = len(waypoints_arr)
-
- # Auto-generate timestamps if not provided
- if timestamps is None:
- total_dist = 0.0
- for i in range(1, num_waypoints):
- dist = np.linalg.norm(waypoints_arr[i, :3] - waypoints_arr[i - 1, :3])
- total_dist += float(dist)
-
- # Assume average speed of 50 mm/s
- total_time = total_dist / 50.0 if total_dist > 0 else 0.0
- timestamps_arr = np.linspace(0, total_time, num_waypoints)
- else:
- timestamps_arr = np.asarray(timestamps, dtype=float)
-
- # Validate array dimensions before creating splines
- if len(timestamps_arr) != len(waypoints_arr):
- raise ValueError(
- f"Timestamps length ({len(timestamps_arr)}) must match waypoints length ({len(waypoints_arr)})"
- )
-
- # Position trajectory splines
- pos_splines = []
- for i in range(3):
- # Provide boundary conditions per component
- bc: Any
- if velocity_start is not None and velocity_end is not None:
- bc = ((1, float(velocity_start[i])), (1, float(velocity_end[i])))
- else:
- bc = "not-a-knot"
- spline = CubicSpline(timestamps_arr, waypoints_arr[:, i], bc_type=bc)
- pos_splines.append(spline)
-
- # Orientation trajectory splines
- rotations = [
- Rotation.from_euler("xyz", wp[3:], degrees=True) for wp in waypoints
- ]
- quats = np.array([r.as_quat() for r in rotations])
- key_rots = Rotation.from_quat(quats)
- slerp = Slerp(timestamps_arr, key_rots)
-
- # Generate dense trajectory
- t_eval = self.generate_timestamps(
- float(timestamps_arr[-1] if len(timestamps_arr) else 0.0)
- )
- trajectory: list[list[float]] = []
-
- for t in t_eval:
- pos = [float(spline(float(t))) for spline in pos_splines]
- rot_single = slerp(np.array([float(t)]))
- orient = rot_single.as_euler("xyz", degrees=True)[0]
- trajectory.append(np.concatenate([pos, orient]).tolist())
-
- return np.array(trajectory)
-
- def generate_quintic_spline(
- self, waypoints: list[list[float]], timestamps: list[float] | None = None
- ) -> np.ndarray:
- """
- Generate quintic (5th order) spline with zero velocity and acceleration at endpoints
-
- Args:
- waypoints: List of poses [x, y, z, rx, ry, rz]
- timestamps: Time for each waypoint
- """
- # Quintic spline boundary conditions at the endpoints
- return self.generate_cubic_spline(
- waypoints, timestamps, velocity_start=[0, 0, 0], velocity_end=[0, 0, 0]
- )
-
- def generate_scurve_spline(
- self,
- waypoints: list[list[float]],
- duration: float | None = None,
- jerk_limit: float = 1000.0,
- timestamps: list[float] | None = None,
- ) -> np.ndarray:
- """
- Generate S-curve spline with jerk-limited motion profile
-
- Args:
- waypoints: List of poses [x, y, z, rx, ry, rz]
- duration: Total duration for the trajectory (optional)
- jerk_limit: Maximum jerk value in mm/s^3
- timestamps: Time for each waypoint (optional)
-
- Returns:
- Array of interpolated poses with S-curve velocity profile
- """
- basic_trajectory = self.generate_cubic_spline(
- waypoints, timestamps, velocity_start=[0, 0, 0], velocity_end=[0, 0, 0]
- )
-
- if len(basic_trajectory) < 2:
- return basic_trajectory
-
- # Calculate total path length
- path_length = 0.0
- for i in range(1, len(basic_trajectory)):
- segment_length = np.linalg.norm(
- np.array(basic_trajectory[i][:3])
- - np.array(basic_trajectory[i - 1][:3])
- )
- path_length += float(segment_length)
-
- if path_length < 0.001:
- return basic_trajectory
-
- # Determine duration if not provided
- if duration is None:
- avg_speed = 50.0 # mm/s conservative speed
- duration = path_length / avg_speed
-
- # Generate S-curve profile for the motion
- num_points = int(duration * self.control_rate)
- if num_points < 2:
- num_points = 2
-
- # Create S-curve time parameterization
- time_points = np.linspace(0, duration, num_points)
- s_curve_params: list[float] = []
-
- for t in time_points:
- tau = t / duration
- if tau <= 0.0:
- s = 0.0
- elif tau >= 1.0:
- s = 1.0
- else:
- s = tau * tau * (3.0 - 2.0 * tau) # smoothstep
- s_curve_params.append(float(s))
-
- # Re-sample the trajectory according to S-curve profile
- new_indices = np.array(s_curve_params) * (len(basic_trajectory) - 1)
-
- resampled_trajectory: list[list[float]] = []
- for new_idx in new_indices:
- if new_idx <= 0:
- resampled_trajectory.append(basic_trajectory[0].tolist())
- elif new_idx >= len(basic_trajectory) - 1:
- resampled_trajectory.append(basic_trajectory[-1].tolist())
- else:
- lower_idx = int(new_idx)
- upper_idx = min(lower_idx + 1, len(basic_trajectory) - 1)
- alpha = float(new_idx - lower_idx)
-
- lower_point = np.array(basic_trajectory[lower_idx])
- upper_point = np.array(basic_trajectory[upper_idx])
- interpolated = lower_point + alpha * (upper_point - lower_point)
- resampled_trajectory.append(interpolated.tolist())
-
- return np.array(resampled_trajectory)
diff --git a/parol6/smooth_motion/waypoints.py b/parol6/smooth_motion/waypoints.py
deleted file mode 100644
index 0062003..0000000
--- a/parol6/smooth_motion/waypoints.py
+++ /dev/null
@@ -1,709 +0,0 @@
-"""
-Waypoint trajectory planner.
-"""
-
-from typing import TypedDict
-
-import numpy as np
-
-
-class BlendRegion(TypedDict):
- waypoint_idx: int
- entry: np.ndarray
- exit: np.ndarray
- radius: float
- v_entry: np.ndarray
- v_exit: np.ndarray
-
-
-class WaypointTrajectoryPlanner:
- """
- Trajectory planner for smooth motion through waypoints with corner cutting.
-
- Implements mstraj-style parabolic blending at waypoints to avoid acceleration
- spikes and ensure smooth motion through complex paths.
- """
-
- def __init__(
- self,
- waypoints: list[list[float]],
- constraints: dict | None = None,
- sample_rate: float = 100.0,
- ):
- """
- Initialize waypoint trajectory planner.
-
- Args:
- waypoints: List of waypoint poses [x, y, z, rx, ry, rz]
- constraints: Motion constraints (max_velocity, max_acceleration, max_jerk)
- sample_rate: Trajectory sampling rate in Hz
- """
- self.waypoints = np.array(waypoints, dtype=np.float64)
- self.num_waypoints = len(waypoints)
- self.sample_rate = sample_rate
- self.dt = 1.0 / sample_rate
-
- # Default constraints
- default_constraints = {
- "max_velocity": 100.0, # mm/s
- "max_acceleration": 500.0, # mm/s²
- "max_jerk": 5000.0, # mm/s³
- }
- self.constraints = constraints if constraints else default_constraints
-
- # Blend planning data
- self.blend_radii: list[float] = []
- self.blend_regions: list[BlendRegion | None] = []
- self.segment_velocities: list[float] = []
- self.via_modes = ["via"] * self.num_waypoints # Default: pass through all
-
- def calculate_corner_angle(self, idx: int) -> float:
- """
- Calculate the angle between incoming and outgoing path segments.
-
- Args:
- idx: Waypoint index
-
- Returns:
- Angle in radians (0 to π)
- """
- if idx <= 0 or idx >= self.num_waypoints - 1:
- return 0.0 # No corner at start/end
-
- # Vectors for path segments
- v_in = self.waypoints[idx] - self.waypoints[idx - 1]
- v_out = self.waypoints[idx + 1] - self.waypoints[idx]
-
- # Normalize (use only position components)
- v_in_norm = v_in[:3] / (np.linalg.norm(v_in[:3]) + 1e-9)
- v_out_norm = v_out[:3] / (np.linalg.norm(v_out[:3]) + 1e-9)
-
- # Calculate angle
- cos_angle = np.clip(np.dot(v_in_norm, v_out_norm), -1, 1)
- angle = np.arccos(cos_angle)
-
- return float(angle)
-
- def calculate_safe_blend_radius(self, idx: int, approach_velocity: float) -> float:
- """
- Calculate maximum safe blend radius for a waypoint.
-
- Args:
- idx: Waypoint index
- approach_velocity: Velocity approaching the waypoint
-
- Returns:
- Safe blend radius in mm
- """
- angle = self.calculate_corner_angle(idx)
-
- if angle < 0.01: # Nearly straight (< 0.5 degrees)
- return 0.0
-
- # Dynamic blend radius based on velocity and angle
- a_max = float(self.constraints["max_acceleration"])
-
- # Centripetal acceleration constraint
- # r = v² / (a_max * sin(θ/2))
- sin_half_angle = float(np.sin(angle / 2))
- if sin_half_angle > 0:
- r_dynamic = float((approach_velocity**2) / (a_max * sin_half_angle))
- else:
- r_dynamic = 0.0
-
- # Geometric constraint - don't exceed segment lengths
- r_geometric = float(self.get_max_geometric_radius(idx))
-
- # Apply safety factor and limits
- r_safe = min(r_dynamic, r_geometric) * 0.7 # 70% safety factor
- r_safe = max(0.0, min(r_safe, 100.0)) # Cap at 100mm
-
- return float(r_safe)
-
- def get_max_geometric_radius(self, idx: int) -> float:
- """
- Get maximum blend radius based on segment geometry.
-
- Args:
- idx: Waypoint index
-
- Returns:
- Maximum geometric radius in mm
- """
- if idx <= 0 or idx >= self.num_waypoints - 1:
- return 0.0
-
- # Distance to previous waypoint
- dist_prev = np.linalg.norm(
- self.waypoints[idx][:3] - self.waypoints[idx - 1][:3]
- )
-
- # Distance to next waypoint
- dist_next = np.linalg.norm(
- self.waypoints[idx + 1][:3] - self.waypoints[idx][:3]
- )
-
- # Maximum radius is 40% of shortest segment
- max_radius = 0.4 * min(dist_prev, dist_next)
-
- return float(max_radius)
-
- def calculate_auto_blend_radii(self):
- """
- Automatically calculate blend radius for each waypoint.
- """
- self.blend_radii = []
-
- for i in range(self.num_waypoints):
- if i == 0 or i == self.num_waypoints - 1:
- # No blending at start/end
- self.blend_radii.append(0.0)
- else:
- # Estimate approach velocity
- segment_length = np.linalg.norm(
- self.waypoints[i][:3] - self.waypoints[i - 1][:3]
- )
-
- # Simple velocity estimation
- if segment_length > 0:
- approach_velocity = min(
- self.constraints["max_velocity"],
- np.sqrt(self.constraints["max_acceleration"] * segment_length),
- )
- else:
- approach_velocity = 0
-
- # Calculate safe radius
- radius = self.calculate_safe_blend_radius(i, approach_velocity)
- self.blend_radii.append(radius)
-
- def compute_blend_points(
- self, idx: int, blend_radius: float
- ) -> tuple[np.ndarray, np.ndarray]:
- """
- Calculate blend entry and exit points for a waypoint.
-
- Args:
- idx: Waypoint index
- blend_radius: Blend radius in mm
-
- Returns:
- Tuple of (entry_point, exit_point)
- """
- if blend_radius <= 0 or idx <= 0 or idx >= self.num_waypoints - 1:
- return self.waypoints[idx], self.waypoints[idx]
-
- # Get path vectors
- v_in = self.waypoints[idx] - self.waypoints[idx - 1]
- v_out = self.waypoints[idx + 1] - self.waypoints[idx]
-
- # Normalize position components
- v_in_norm = np.zeros_like(v_in)
- v_out_norm = np.zeros_like(v_out)
-
- v_in_norm[:3] = v_in[:3] / (np.linalg.norm(v_in[:3]) + 1e-9)
- v_out_norm[:3] = v_out[:3] / (np.linalg.norm(v_out[:3]) + 1e-9)
-
- # Calculate blend entry point (along incoming path)
- blend_entry = self.waypoints[idx].copy()
- blend_entry[:3] -= v_in_norm[:3] * blend_radius
-
- # Calculate blend exit point (along outgoing path)
- blend_exit = self.waypoints[idx].copy()
- blend_exit[:3] += v_out_norm[:3] * blend_radius
-
- # Interpolate orientations
- angle = self.calculate_corner_angle(idx)
- if angle > 0:
- # Weighted average for orientations at blend points
- alpha_entry = 1.0 - (blend_radius / (np.linalg.norm(v_in[:3]) + 1e-9))
- alpha_exit = blend_radius / (np.linalg.norm(v_out[:3]) + 1e-9)
-
- blend_entry[3:] = (
- self.waypoints[idx - 1][3:] * (1 - alpha_entry)
- + self.waypoints[idx][3:] * alpha_entry
- )
- blend_exit[3:] = (
- self.waypoints[idx][3:] * (1 - alpha_exit)
- + self.waypoints[idx + 1][3:] * alpha_exit
- )
-
- return blend_entry, blend_exit
-
- def generate_parabolic_blend(
- self,
- entry_point: np.ndarray,
- exit_point: np.ndarray,
- v_entry: np.ndarray,
- v_exit: np.ndarray,
- blend_time: float | None = None,
- ) -> list[np.ndarray]:
- """
- Generate parabolic trajectory for corner blend with acceleration limits.
-
- Parabolic blends have constant acceleration, providing smooth motion.
-
- Args:
- entry_point: Blend entry position
- exit_point: Blend exit position
- v_entry: Velocity at blend entry
- v_exit: Velocity at blend exit
- blend_time: Blend duration (auto-calculated if None)
-
- Returns:
- List of trajectory points through the blend
- """
- # Calculate blend parameters
- delta_p = exit_point - entry_point
- distance = np.linalg.norm(delta_p[:3])
-
- if distance < 1e-6:
- return [entry_point] # No blend needed
-
- # Calculate velocity change needed
- delta_v = v_exit - v_entry
- delta_v_mag = np.linalg.norm(delta_v[:3])
-
- # Minimum blend time from acceleration constraint
- min_blend_time = float(
- delta_v_mag / (self.constraints["max_acceleration"] + 1e-9)
- )
-
- # Calculate blend time if not specified
- if blend_time is None:
- v_avg = (
- float(np.linalg.norm(v_entry[:3])) + float(np.linalg.norm(v_exit[:3]))
- ) / 2.0
- if v_avg > 0.0:
- time_from_velocity = float(distance) / v_avg
- else:
- time_from_velocity = float(
- np.sqrt(
- 2.0
- * float(distance)
- / (self.constraints["max_acceleration"] + 1e-9)
- )
- )
- blend_time = max(min_blend_time, time_from_velocity)
- else:
- blend_time = max(blend_time, min_blend_time)
-
- bt = float(max(blend_time, 0.01)) # Numerical stability
-
- # Acceleration (bounded)
- a_blend = delta_v / bt
- a_mag = float(np.linalg.norm(a_blend[:3]))
- amax = float(self.constraints["max_acceleration"])
- if a_mag > amax * 1.1: # 10% tolerance
- scale = amax / (a_mag + 1e-9)
- a_blend = a_blend * scale
- bt = float(bt / (scale + 1e-9))
-
- # Generate trajectory using cubic Hermite interpolation (C1 continuity)
- num_points = max(5, int(bt * self.sample_rate)) # At least 5 points
- blend_traj = []
-
- for i in range(num_points):
- # Normalized time from 0 to 1
- s = i / (num_points - 1) if num_points > 1 else 0.0
-
- # Cubic Hermite basis functions
- h00 = 2 * s**3 - 3 * s**2 + 1
- h10 = s**3 - 2 * s**2 + s
- h01 = -2 * s**3 + 3 * s**2
- h11 = s**3 - s**2
-
- # Interpolate position using hermite spline
- # Scale velocities by blend_time to get tangents
- pos = (
- h00 * entry_point
- + h10 * (v_entry * bt)
- + h01 * exit_point
- + h11 * (v_exit * bt)
- )
-
- blend_traj.append(pos)
-
- return blend_traj
-
- def generate_linear_segment(
- self, start: np.ndarray, end: np.ndarray, velocity: float | None = None
- ) -> list[np.ndarray]:
- """
- Generate linear trajectory segment between two points.
-
- Args:
- start: Start position
- end: End position
- velocity: Desired velocity (uses max if None)
-
- Returns:
- List of trajectory points
- """
- distance = np.linalg.norm(end[:3] - start[:3])
-
- if distance < 1e-6:
- return [start]
-
- # Determine velocity
- if velocity is None:
- velocity = float(self.constraints["max_velocity"])
-
- # Calculate duration and number of points
- duration = float(distance) / float(velocity)
- num_points = max(2, int(duration * self.sample_rate))
-
- # Generate trajectory
- segment = []
- for i in range(num_points):
- alpha = i / (num_points - 1) if num_points > 1 else 0.0
- pos = start * (1 - alpha) + end * alpha
- segment.append(pos)
-
- return segment
-
- def compute_blend_regions(self):
- """
- Compute all blend regions for the trajectory.
- """
- self.blend_regions = []
-
- for i in range(1, self.num_waypoints - 1):
- if self.blend_radii[i] > 0 and self.via_modes[i] == "via":
- entry, exit = self.compute_blend_points(i, self.blend_radii[i])
-
- # Calculate velocities at blend points
- v_entry_dir = self.waypoints[i] - self.waypoints[i - 1]
- v_entry = (
- v_entry_dir[:3]
- / (np.linalg.norm(v_entry_dir[:3]) + 1e-9)
- * self.segment_velocities[i - 1]
- )
- v_entry_full = np.zeros(len(entry))
- v_entry_full[:3] = v_entry
-
- v_exit_dir = self.waypoints[i + 1] - self.waypoints[i]
- v_exit = (
- v_exit_dir[:3]
- / (np.linalg.norm(v_exit_dir[:3]) + 1e-9)
- * self.segment_velocities[i]
- )
- v_exit_full = np.zeros(len(exit))
- v_exit_full[:3] = v_exit
-
- region: BlendRegion = {
- "waypoint_idx": i,
- "entry": entry,
- "exit": exit,
- "radius": float(self.blend_radii[i]),
- "v_entry": v_entry_full,
- "v_exit": v_exit_full,
- }
- self.blend_regions.append(region)
- else:
- # No blend or stop point
- self.blend_regions.append(None)
-
- def plan_trajectory(
- self,
- blend_mode: str = "auto",
- blend_radii: list[float] | None = None,
- via_modes: list[str] | None = None,
- trajectory_type: str = "cubic",
- jerk_limit: float | None = None,
- ) -> np.ndarray:
- """
- Plan complete trajectory through waypoints with blending.
-
- Args:
- blend_mode: 'auto', 'manual', or 'none'
- blend_radii: Manual blend radii (if blend_mode='manual')
- via_modes: 'via' or 'stop' for each waypoint
- trajectory_type: 'cubic', 'quintic', or 's_curve' velocity profile
- jerk_limit: Maximum jerk for s_curve profile (mm/s^3)
-
- Returns:
- Complete trajectory as numpy array
- """
- if self.num_waypoints < 2:
- return self.waypoints
-
- # Set blend radii
- if blend_mode in ["auto", "parabolic", "circular"]:
- self.calculate_auto_blend_radii()
- elif blend_mode == "manual" and blend_radii is not None:
- self.blend_radii = blend_radii
- elif blend_mode == "none":
- self.blend_radii = [0.0] * self.num_waypoints
- else:
- # Default to auto for unrecognized modes
- self.calculate_auto_blend_radii()
-
- # Set via modes
- if via_modes is not None:
- self.via_modes = via_modes
-
- # Calculate segment velocities
- self.segment_velocities = []
- for i in range(self.num_waypoints - 1):
- segment_length = np.linalg.norm(
- self.waypoints[i + 1][:3] - self.waypoints[i][:3]
- )
- # Simple velocity planning
- if self.via_modes[i] == "stop" or self.via_modes[i + 1] == "stop":
- # Trapezoid profile with acceleration
- v_max = min(
- float(self.constraints["max_velocity"]),
- float(
- np.sqrt(
- float(self.constraints["max_acceleration"])
- * float(segment_length)
- )
- ),
- )
- else:
- v_max = float(self.constraints["max_velocity"])
-
- self.segment_velocities.append(v_max)
-
- # Compute blend regions
- self.compute_blend_regions()
-
- # Generate full trajectory
- full_trajectory: list[np.ndarray] = []
-
- for i in range(self.num_waypoints - 1):
- # Determine segment start and end
- if i == 0:
- segment_start = self.waypoints[0]
- else:
- # Check for blend at current waypoint
- blend_region_prev = (
- self.blend_regions[i - 1]
- if i - 1 < len(self.blend_regions)
- else None
- )
- if blend_region_prev is not None:
- segment_start = blend_region_prev["exit"]
- else:
- segment_start = self.waypoints[i]
-
- if i < self.num_waypoints - 2:
- # Check for blend at next waypoint
- blend_region_next = (
- self.blend_regions[i] if i < len(self.blend_regions) else None
- )
- if blend_region_next is not None:
- segment_end = blend_region_next["entry"]
- else:
- segment_end = self.waypoints[i + 1]
- else:
- segment_end = self.waypoints[i + 1]
-
- # Generate linear segment
- segment = self.generate_linear_segment(
- segment_start, segment_end, self.segment_velocities[i]
- )
-
- # Add segment to trajectory
- if i == 0:
- full_trajectory.extend(segment)
- else:
- # Skip first point to avoid duplication
- full_trajectory.extend(segment[1:])
-
- # Add blend if needed
- if i < len(self.blend_regions) and self.blend_regions[i] is not None:
- br = self.blend_regions[i]
- assert br is not None
- blend_traj = self.generate_parabolic_blend(
- br["entry"],
- br["exit"],
- br["v_entry"],
- br["v_exit"],
- )
- # Skip first point to avoid duplication
- full_trajectory.extend(blend_traj[1:])
-
- trajectory_array = np.array(full_trajectory)
-
- # Apply profile to the generated trajectory if not cubic
- if trajectory_type != "cubic":
- trajectory_array = self.apply_velocity_profile(
- trajectory_array, trajectory_type, jerk_limit
- )
-
- return trajectory_array
-
- def apply_velocity_profile(
- self,
- trajectory: np.ndarray,
- profile_type: str = "quintic",
- jerk_limit: float | None = None,
- ) -> np.ndarray:
- """
- Apply velocity profile to existing trajectory points.
-
- Instead of re-interpolating with sparse waypoints, this method
- applies the velocity profile to ALL trajectory points, preserving
- the geometric path while adjusting the timing.
-
- Args:
- trajectory: Input trajectory points
- profile_type: 'quintic' or 's_curve'
- jerk_limit: Maximum jerk for s_curve (mm/s^3)
-
- Returns:
- Trajectory with velocity profile applied
- """
- if len(trajectory) < 2:
- return trajectory
-
- # Calculate cumulative arc length
- arc_lengths = [0.0]
- for i in range(1, len(trajectory)):
- dist = float(np.linalg.norm(trajectory[i][:3] - trajectory[i - 1][:3]))
- arc_lengths.append(arc_lengths[-1] + dist)
-
- total_length = arc_lengths[-1]
- if total_length < 1e-6:
- return trajectory
-
- # Generate new time mapping based on profile
- num_points = len(trajectory)
- new_trajectory = np.zeros_like(trajectory)
-
- for i in range(num_points):
- # Get normalized position along path
- s = i / (num_points - 1) if num_points > 1 else 0.0
-
- # Apply velocity profile to get new arc length position
- if profile_type == "quintic":
- # Quintic polynomial: smooth acceleration/deceleration
- s_new = 10 * s**3 - 15 * s**4 + 6 * s**5
- elif profile_type == "s_curve":
- # S-curve (smoothstep): smooth jerk-limited motion
- if s <= 0.0:
- s_new = 0.0
- elif s >= 1.0:
- s_new = 1.0
- else:
- # Smoothstep function for smooth acceleration
- s_new = s * s * (3.0 - 2.0 * s)
- else:
- # Default to linear (no change)
- s_new = s
-
- # Find the corresponding point on original trajectory
- # Use linear interpolation between trajectory points
- target_arc_length = s_new * total_length
-
- # Find the segment containing this arc length
- for j in range(len(arc_lengths) - 1):
- if arc_lengths[j] <= target_arc_length <= arc_lengths[j + 1]:
- # Interpolate within this segment
- segment_length = arc_lengths[j + 1] - arc_lengths[j]
- if segment_length > 1e-6:
- alpha = (target_arc_length - arc_lengths[j]) / segment_length
- else:
- alpha = 0.0
-
- # Linear interpolation between points
- new_trajectory[i] = (1 - alpha) * trajectory[
- j
- ] + alpha * trajectory[j + 1]
- break
- else:
- # If we didn't find it (shouldn't happen), use the last point
- new_trajectory[i] = trajectory[-1]
-
- return new_trajectory
-
- def validate_trajectory(self, trajectory: np.ndarray) -> dict[str, float | bool]:
- """
- Validate that trajectory respects all constraints.
-
- Args:
- trajectory: Trajectory to validate
-
- Returns:
- Dictionary of validation results with detailed information
- """
- results: dict[str, float | bool] = {
- "velocity_ok": True,
- "acceleration_ok": True,
- "jerk_ok": True,
- "continuity_ok": True,
- "max_velocity": 0.0,
- "max_acceleration": 0.0,
- "max_jerk": 0.0,
- "max_step": 0.0,
- }
-
- if len(trajectory) < 2:
- return results
-
- dt = self.dt
- n_dims = min(trajectory.shape[1], 6)
-
- # Check continuity - maximum step size between points
- position_diffs = np.diff(trajectory[:, :3], axis=0)
- step_sizes = np.linalg.norm(position_diffs, axis=1)
- max_step = float(np.max(step_sizes)) if step_sizes.size else 0.0
- results["max_step"] = max_step
-
- expected_max_step = self.constraints["max_velocity"] * dt * 1.5 # 50% tolerance
- results["continuity_ok"] = max_step <= expected_max_step
-
- # Velocity - use all relevant dimensions
- velocities = np.diff(trajectory[:, :n_dims], axis=0) / dt
- velocity_magnitudes = (
- np.linalg.norm(velocities[:, :3], axis=1)
- if velocities.shape[0]
- else np.array([0.0])
- )
- max_vel = (
- float(np.max(velocity_magnitudes)) if velocity_magnitudes.size else 0.0
- )
- results["max_velocity"] = max_vel
- results["velocity_ok"] = max_vel <= self.constraints["max_velocity"] * 1.1
-
- # Acceleration
- if len(trajectory) > 2:
- accelerations = (
- np.diff(velocities[:, :3], axis=0) / dt
- if velocities.shape[0] > 1
- else np.zeros((0, 3))
- )
- acceleration_magnitudes = (
- np.linalg.norm(accelerations, axis=1)
- if accelerations.shape[0]
- else np.array([0.0])
- )
- max_acc = (
- float(np.max(acceleration_magnitudes))
- if acceleration_magnitudes.size
- else 0.0
- )
- results["max_acceleration"] = max_acc
- results["acceleration_ok"] = (
- max_acc <= self.constraints["max_acceleration"] * 1.2
- )
-
- # Jerk
- if len(trajectory) > 3:
- jerks = (
- np.diff(accelerations, axis=0) / dt
- if accelerations.shape[0] > 1
- else np.zeros((0, 3))
- )
- jerk_magnitudes = (
- np.linalg.norm(jerks, axis=1) if jerks.shape[0] else np.array([0.0])
- )
- max_jerk = (
- float(np.max(jerk_magnitudes)) if jerk_magnitudes.size else 0.0
- )
- results["max_jerk"] = max_jerk
- results["jerk_ok"] = max_jerk <= self.constraints["max_jerk"] * 1.5
-
- return results
diff --git a/parol6/tools.py b/parol6/tools.py
index 20032b2..a9f81de 100644
--- a/parol6/tools.py
+++ b/parol6/tools.py
@@ -1,94 +1,659 @@
"""
-Tool Configuration Module
+Typed tool configuration and registry for the PAROL6 robot.
-Defines available end-effector tools for the PAROL6 robot with their transforms and visualization data.
-Tools are swappable at runtime and affect both kinematics and visualization.
+Each tool type has a frozen config dataclass that holds physical description,
+valid actions, and a ``populate_status()`` method the controller uses to fill
+the 50 Hz ``ToolStatus`` broadcast from hardware state.
"""
-from typing import Any
+from __future__ import annotations
+
+import logging
+import math
+from dataclasses import dataclass
+
+from typing import TYPE_CHECKING, Protocol, runtime_checkable
import numpy as np
-from spatialmath import SE3
-
-TOOL_CONFIGS: dict[str, dict[str, Any]] = {
- "NONE": {
- "name": "No Tool",
- "description": "Bare flange - no tool attached",
- "transform": np.eye(4),
- "stl_files": [],
- },
- "PNEUMATIC": {
- "name": "Pneumatic Gripper",
- "description": "Pneumatic gripper assembly",
- "transform": (SE3(-0.04525, 0, 0) @ SE3.Rx(np.pi)).A,
- "stl_files": [
- {
- "file": "pneumatic_gripper_assembly.STL",
- "origin": [0, 0, 0],
- "rpy": [0, 0, 0],
- }
- ],
- },
-}
-
-
-def get_tool_transform(tool_name: str) -> np.ndarray:
- """
- Get the 4x4 transformation matrix for a tool.
-
- Parameters
- ----------
- tool_name : str
- Name of the tool (must be in TOOL_CONFIGS)
-
- Returns
- -------
- np.ndarray
- 4x4 homogeneous transformation matrix from flange to tool TCP
-
- Raises
- ------
- ValueError
- If tool_name is not recognized
+from pinokin import se3_from_trans
+from waldoctl import (
+ LinearMotion,
+ MeshRole,
+ MeshSpec,
+ PartMotion,
+ ToolState,
+ ToolVariant,
+)
+
+if TYPE_CHECKING:
+ from waldoctl import ToolStatus
+
+ from parol6.commands.base import MotionCommand
+ from parol6.commands.gripper_commands import (
+ ElectricGripperCommand,
+ PneumaticGripperCommand,
+ )
+ from parol6.server.state import ControllerState
+ from parol6.server.transports.mock_serial_transport import MockRobotState
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Tool simulator protocol
+# ---------------------------------------------------------------------------
+
+
+@runtime_checkable
+class ToolSimulator(Protocol):
+ """Protocol for tool-type-specific simulation logic.
+
+ Each tool config that needs simulation creates a simulator instance via
+ ``create_simulator()``. The simulator's ``resolve_params()`` is called
+ once on tool change, and ``tick()`` is called every simulation step.
"""
- if tool_name not in TOOL_CONFIGS:
- raise ValueError(f"Unknown tool '{tool_name}'. Available tools: {list_tools()}")
- return TOOL_CONFIGS[tool_name]["transform"]
+ def resolve_params(self, cfg: ToolConfig) -> None:
+ """Compute simulation parameters from the tool config."""
+ ...
+ def tick(self, state: MockRobotState, dt: float) -> None:
+ """Advance the tool simulation by *dt* seconds."""
+ ...
-def list_tools() -> list[str]:
+
+# ---------------------------------------------------------------------------
+# Base config
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class ToolConfig:
+ """Immutable configuration for one tool type."""
+
+ name: str
+ description: str
+ transform: np.ndarray # 4x4 homogeneous transform (flange → TCP)
+ meshes: tuple[MeshSpec, ...] = ()
+ motions: tuple[PartMotion, ...] = ()
+ variants: tuple[ToolVariant, ...] = ()
+
+ def populate_status(self, hw: ControllerState, out: ToolStatus) -> None:
+ """Fill *out* from hardware state. Override in subclasses."""
+
+ def create_command(self, action: str, params: list) -> MotionCommand | None:
+ """Create a command engine for this tool action. Returns None if not supported."""
+ return None
+
+ def create_simulator(self) -> ToolSimulator | None:
+ """Create a simulator for this tool type. Returns None if no simulation needed."""
+ return None
+
+ def estimate_duration(self, action: str, params: list) -> float:
+ """Estimate how long a tool action takes, in seconds.
+
+ Override in subclasses with physical models. Returns 0.0 by default.
+ """
+ return 0.0
+
+
+# ---------------------------------------------------------------------------
+# Gripper configs
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class PneumaticGripperConfig(ToolConfig):
+ """Configuration for pneumatic grippers controlled via digital I/O."""
+
+ io_port: int = 1
+ valid_actions: tuple[str, ...] = ("open", "close", "move", "set_position")
+
+ def populate_status(self, hw: ControllerState, out: ToolStatus) -> None:
+ port_idx = 2 if self.io_port == 1 else 3
+ # Simulator writes ramped position into Gripper_data_in[1] (0-255).
+ # Real hardware has no position feedback — use valve output directly.
+ # Convention: 0.0 = open, 1.0 = closed. Pneumatic simulator ramps
+ # toward 255 for open, so we invert.
+ pos_byte = hw.Gripper_data_in[1]
+ if pos_byte > 0 or hw.InOut_out[port_idx] == 0:
+ out.positions = (1.0 - float(pos_byte) / 255.0,)
+ else:
+ out.positions = (1.0 - float(hw.InOut_out[port_idx]),)
+ out.part_detected = bool(hw.InOut_in[port_idx])
+ out.engaged = bool(hw.InOut_out[port_idx])
+ out.state = ToolState.IDLE
+
+ def create_command(self, action: str, params: list) -> PneumaticGripperCommand:
+ from parol6.commands.gripper_commands import PneumaticGripperCommand
+
+ if action not in self.valid_actions:
+ raise ValueError(f"Invalid action '{action}' for pneumatic gripper")
+ if action in ("move", "set_position"):
+ position = float(params[0]) if params and len(params) > 0 else 0.0
+ action = "open" if position < 0.5 else "close"
+ return PneumaticGripperCommand.from_tool_action(
+ action=action, port=self.io_port
+ )
+
+ def estimate_duration(self, action: str, params: list) -> float:
+ for m in self.motions:
+ if isinstance(m, LinearMotion) and m.estimated_speed_m_s:
+ return m.travel_m / m.estimated_speed_m_s
+ return 0.0
+
+ def create_simulator(self) -> PneumaticToolSimulator:
+ return PneumaticToolSimulator()
+
+
+@dataclass(frozen=True)
+class ElectricGripperConfig(ToolConfig):
+ """Configuration for electric grippers controlled via the serial gripper bus."""
+
+ current_range: tuple[int, int] = (0, 0)
+ position_range: tuple[float, float] = (0.0, 1.0)
+ speed_range: tuple[float, float] = (0.0, 1.0)
+ valid_actions: tuple[str, ...] = (
+ "move",
+ "open",
+ "close",
+ "set_position",
+ "calibrate",
+ )
+
+ # Motor controller / mechanical properties
+ encoder_cpr: int = 16_384 # encoder counts per revolution
+ gear_pd_mm: float = 12.0 # rack-and-pinion gear pitch diameter (mm)
+ firmware_speed_range_tps: tuple[int, int] = (
+ 40,
+ 80_000,
+ ) # CAN byte 0..255 → ticks/s
+ motor_kt: float = 0.0 # motor torque constant (Nm/A); 0 = force estimation disabled
+
+ def populate_status(self, hw: ControllerState, out: ToolStatus) -> None:
+ current_ma = float(hw.Gripper_data_in[3])
+ out.positions = (float(hw.Gripper_data_in[1]) / 255.0,)
+ out.channels = (current_ma,)
+ out.part_detected = bool(hw.Gripper_data_in[5])
+ out.engaged = bool(hw.Gripper_data_in[2]) # speed > 0
+ out.state = ToolState.IDLE
+
+ def create_command(self, action: str, params: list) -> ElectricGripperCommand:
+ from parol6.commands.gripper_commands import ElectricGripperCommand
+
+ if action not in self.valid_actions:
+ raise ValueError(f"Invalid action '{action}' for electric gripper")
+ # Translate Python-level method names to wire-level "move" action
+ if action == "open":
+ params = [0.0] + params[1:]
+ action = "move"
+ elif action == "close":
+ params = [1.0] + params[1:]
+ action = "move"
+ elif action == "set_position":
+ action = "move"
+ position = float(params[0]) if len(params) > 0 else 0.0
+ speed = float(params[1]) if len(params) > 1 else 0.5
+ current = int(params[2]) if len(params) > 2 else 500
+ return ElectricGripperCommand.from_tool_action(
+ action=action, position=position, speed=speed, current=current
+ )
+
+ def estimate_duration(self, action: str, params: list) -> float:
+        # Resolve position delta from action + params. NOTE(review): for "open"/"close" this reads speed from params[0], but create_command (after prepending position) reads speed from params[1] — the two conventions disagree; confirm the caller contract.
+ if action in ("open", "close"):
+ target = 0.0 if action == "open" else 1.0
+ speed = float(params[0]) if len(params) > 0 else 0.5
+ elif action in ("move", "set_position"):
+ target = float(params[0]) if len(params) > 0 else 0.0
+ speed = float(params[1]) if len(params) > 1 else 0.5
+ else:
+ return 0.0
+
+ # Assume worst-case full travel (0→target or 1→target)
+ pos_delta = max(target, 1.0 - target)
+ if pos_delta < 1e-6:
+ return 0.0
+
+ # Mirror the simulator's speed model
+ speed_byte = max(1, min(255, int(round(speed * 255))))
+ min_tps, max_tps = self.firmware_speed_range_tps
+ velocity_tps = min_tps + (speed_byte / 255.0) * (max_tps - min_tps)
+
+ # Compute tick_range from mechanical params
+ travel_mm = 0.0
+ for m in self.motions:
+ if isinstance(m, LinearMotion):
+ travel_mm = m.travel_m * 1000.0
+ break
+ if travel_mm == 0.0:
+ return 0.0
+ tick_range = (travel_mm / (math.pi * self.gear_pd_mm)) * self.encoder_cpr
+
+ # Normalized velocity in position-byte units per second
+ norm_vel = (velocity_tps / tick_range) * 255.0
+ if norm_vel < 1e-9:
+ return 0.0
+
+ return (pos_delta * 255.0) / norm_vel
+
+ def create_simulator(self) -> ElectricGripperSimulator:
+ return ElectricGripperSimulator()
+
+
+# ---------------------------------------------------------------------------
+# Tool simulators
+# ---------------------------------------------------------------------------
+
+
+class PneumaticToolSimulator:
+ """Simulates binary-activation tool ramp (pneumatic grippers, vacuum, etc.).
+
+ Reads the commanded I/O output to determine whether the tool is
+ engaged, then ramps the tool position toward the target at the
+ physical speed derived from the tool's LinearMotion descriptor.
+ Writes the ramped position byte into ``gripper_data_in[1]`` for
+ ``populate_status()`` to read.
"""
- Get list of available tool names.
- Returns
- -------
- List[str]
- List of available tool configuration names
+ __slots__ = ("_io_port", "_ramp_speed")
+
+ def __init__(self) -> None:
+ self._io_port: int = -1
+ self._ramp_speed: float = 0.0
+
+ def resolve_params(self, cfg: ToolConfig) -> None:
+ self._io_port = -1
+ self._ramp_speed = 0.0
+
+ if not isinstance(cfg, PneumaticGripperConfig):
+ return
+
+ # Find first LinearMotion with estimated speed
+ for m in cfg.motions:
+ if isinstance(m, LinearMotion) and m.estimated_speed_m_s:
+ # Normalized speed: fraction of full travel per second
+ self._ramp_speed = m.estimated_speed_m_s / m.travel_m
+ break
+
+ if self._ramp_speed > 0:
+ # Map io_port to InOut_out index (port 1 -> index 2, port 2 -> index 3)
+ self._io_port = cfg.io_port + 1
+
+ def tick(self, state: MockRobotState, dt: float) -> None:
+ if self._io_port < 0:
+ return
+
+ # Read commanded I/O output to determine target (0=closed, 1=open)
+ io_val = float(state.io_out[self._io_port])
+ target = 1.0 if io_val > 0 else 0.0
+ if target != state.tool_ramp_target:
+ state.tool_ramp_target = target
+
+ # Ramp toward target
+ error = state.tool_ramp_target - state.tool_ramp_current
+ if abs(error) < 1e-6:
+ return
+ step = self._ramp_speed * dt
+ if abs(error) <= step:
+ state.tool_ramp_current = state.tool_ramp_target
+ elif error > 0:
+ state.tool_ramp_current += step
+ else:
+ state.tool_ramp_current -= step
+
+ # Write ramped position as byte into gripper_data_in[1] (same slot electric uses)
+ state.gripper_data_in[1] = int(state.tool_ramp_current * 255.0 + 0.5)
+
+ # Update part detection input when ramp completes
+ if abs(state.tool_ramp_current - state.tool_ramp_target) < 1e-6:
+ det_idx = self._io_port
+ state.io_in[det_idx] = 1 if state.tool_ramp_target < 0.5 else 0
+
+
+class ElectricGripperSimulator:
+ """Simulates electric gripper position ramp via the @njit ramp function.
+
+ Resolves tick_range, min/max speed from the tool config's mechanical
+ parameters and LinearMotion descriptor, then delegates per-tick
+ simulation to the ``_simulate_gripper_ramp_jit`` numba function.
"""
- return list(TOOL_CONFIGS.keys())
+ __slots__ = ("_tick_range", "_min_speed", "_max_speed")
+
+ def __init__(self) -> None:
+ self._tick_range: float = 0.0
+ self._min_speed: float = 0.0
+ self._max_speed: float = 0.0
+
+ def resolve_params(self, cfg: ToolConfig) -> None:
+ if not isinstance(cfg, ElectricGripperConfig):
+ return
+ # Find jaw travel from default motion
+ for m in cfg.motions:
+ if isinstance(m, LinearMotion):
+ travel_mm = m.travel_m * 1000.0
+ self._tick_range = (
+ travel_mm / (math.pi * cfg.gear_pd_mm)
+ ) * cfg.encoder_cpr
+ break
+ min_tps, max_tps = cfg.firmware_speed_range_tps
+ self._min_speed = float(min_tps)
+ self._max_speed = float(max_tps)
+
+ def tick(self, state: MockRobotState, dt: float) -> None:
+ from parol6.server.transports.mock_serial_transport import (
+ _simulate_gripper_ramp_jit,
+ )
+
+ state.gripper_pos_f = _simulate_gripper_ramp_jit(
+ state.gripper_ramp,
+ state.gripper_data_in,
+ state.gripper_pos_f,
+ dt,
+ self._tick_range,
+ self._min_speed,
+ self._max_speed,
+ )
+
+
+# ---------------------------------------------------------------------------
+# Registry
+# ---------------------------------------------------------------------------
-def get_tool_info(tool_name: str) -> dict[str, Any]:
+_TOOL_REGISTRY: dict[str, ToolConfig] = {}
+
+
+def register_tool(key: str, config: ToolConfig) -> None:
+ """Register a tool configuration by key (e.g. ``"PNEUMATIC"``)."""
+ _TOOL_REGISTRY[key] = config
+
+
+def get_registry() -> dict[str, ToolConfig]:
+ """Return the tool registry (read-only view not enforced — callers cooperate)."""
+ return _TOOL_REGISTRY
+
+
+def list_tools() -> list[str]:
+ """Get list of available tool keys."""
+ return list(_TOOL_REGISTRY.keys())
+
+
+def get_tool_transform(
+ tool_name: str,
+ variant_key: str | None = None,
+) -> np.ndarray:
+ """Get the 4x4 transformation matrix for a tool or variant.
+
+ When *variant_key* is given and the matching variant has a
+ ``tcp_origin``, returns a transform built from the variant's TCP
+ instead of the tool-level transform.
+
+ Raises ValueError if *tool_name* is not registered.
"""
- Get complete configuration for a tool.
-
- Parameters
- ----------
- tool_name : str
- Name of the tool
-
- Returns
- -------
- Dict[str, Any]
- Complete tool configuration dictionary
-
- Raises
- ------
- ValueError
- If tool_name is not recognized
+ cfg = _TOOL_REGISTRY.get(tool_name)
+ if cfg is None:
+ raise ValueError(f"Unknown tool '{tool_name}'. Available: {list_tools()}")
+ if variant_key:
+ for v in cfg.variants:
+ if v.key == variant_key and v.tcp_origin is not None:
+ return _make_tcp_transform(*v.tcp_origin)
+ logger.warning("Variant '%s' not found for tool '%s'", variant_key, tool_name)
+ return cfg.transform
+
+
+# ---------------------------------------------------------------------------
+# Built-in PAROL6 tools — registered at import time
+# ---------------------------------------------------------------------------
+
+
+def _make_tcp_transform(
+ x: float = 0.0,
+ y: float = 0.0,
+ z: float = 0.0,
+) -> np.ndarray:
+ """TCP transform for a tool mounted on the flange.
+
+ Pure translation — tool frame orientation matches the flange.
"""
- if tool_name not in TOOL_CONFIGS:
- raise ValueError(f"Unknown tool '{tool_name}'. Available tools: {list_tools()}")
+ out = np.zeros((4, 4), dtype=np.float64)
+ se3_from_trans(x, y, z, out)
+ return out
+
+
+_TCP_RPY = (0.0, 0.0, 0.0)
+
+# All PAROL6 tool meshes were designed with Rx(π) in the kinematic chain.
+# Now that the kinematic transform is pure translation (for correct IK),
+# the rotation is applied to the mesh definitions instead.
+_MESH_RPY = (math.pi, 0.0, 0.0)
+
+
+register_tool(
+ "NONE",
+ ToolConfig(
+ name="No Tool",
+ description="Bare flange - no tool attached",
+ transform=np.eye(4, dtype=np.float64),
+ ),
+)
+
+
+# ---------------------------------------------------------------------------
+# Pneumatic gripper — vertical & horizontal mounting variants
+# ---------------------------------------------------------------------------
+
+_PNEUMATIC_VERTICAL_MESHES = (
+ MeshSpec(file="pneumatic_gripper_vertical_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="pneumatic_gripper_vertical_right_jaw.stl", role=MeshRole.JAW),
+ MeshSpec(file="pneumatic_gripper_vertical_left_jaw.stl", role=MeshRole.JAW),
+)
+_PNEUMATIC_VERTICAL_MOTION = (
+ LinearMotion(
+ role=MeshRole.JAW,
+ axis=(0.0, 1.0, 0.0),
+ travel_m=0.0035,
+ symmetric=True,
+ estimated_speed_m_s=0.023,
+ estimated_accel_m_s2=2.0,
+ ),
+)
+
+_PNEUMATIC_HORIZONTAL_MESHES = (
+ MeshSpec(file="pneumatic_gripper_horizontal_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="pneumatic_gripper_horizontal_right_jaw.stl", role=MeshRole.JAW),
+ MeshSpec(file="pneumatic_gripper_horizontal_left_jaw.stl", role=MeshRole.JAW),
+)
+_PNEUMATIC_HORIZONTAL_MOTION = (
+ LinearMotion(
+ role=MeshRole.JAW,
+ axis=(1.0, 0.0, 0.0),
+ travel_m=0.01045,
+ symmetric=True,
+ estimated_speed_m_s=0.07,
+ estimated_accel_m_s2=2.0,
+ ),
+)
+
+register_tool(
+ "PNEUMATIC",
+ PneumaticGripperConfig(
+ name="Pneumatic Gripper",
+ description="Pneumatic gripper assembly (vertical/horizontal mounting)",
+ transform=_make_tcp_transform(x=-0.055, z=-0.027),
+ meshes=_PNEUMATIC_VERTICAL_MESHES,
+ motions=_PNEUMATIC_VERTICAL_MOTION,
+ variants=(
+ ToolVariant(
+ key="vertical",
+ display_name="Vertical",
+ meshes=_PNEUMATIC_VERTICAL_MESHES,
+ motions=_PNEUMATIC_VERTICAL_MOTION,
+ tcp_origin=(-0.055, 0.0, -0.027),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ToolVariant(
+ key="horizontal",
+ display_name="Horizontal",
+ meshes=_PNEUMATIC_HORIZONTAL_MESHES,
+ motions=_PNEUMATIC_HORIZONTAL_MOTION,
+ tcp_origin=(0.0, 0.0, -0.082),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ),
+ io_port=1,
+ ),
+)
+
+
+# ---------------------------------------------------------------------------
+# SSG-48 electric gripper — finger & pinch jaw variants
+# ---------------------------------------------------------------------------
+
+_SSG48_JAW_MOTION = (
+ LinearMotion(
+ role=MeshRole.JAW, axis=(0.0, 1.0, 0.0), travel_m=0.024, symmetric=True
+ ),
+)
+
+_SSG48_FINGER_MESHES = (
+ MeshSpec(file="ssg48_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="ssg48_finger_right.stl", role=MeshRole.JAW),
+ MeshSpec(file="ssg48_finger_left.stl", role=MeshRole.JAW),
+)
+
+_SSG48_PINCH_MESHES = (
+ MeshSpec(file="ssg48_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="ssg48_pinch_right.stl", role=MeshRole.JAW),
+ MeshSpec(file="ssg48_pinch_left.stl", role=MeshRole.JAW),
+)
+
+register_tool(
+ "SSG-48",
+ ElectricGripperConfig(
+ name="SSG-48 Electric Gripper",
+ description="SSG-48 adaptive electric gripper (Spectral micro BLDC)",
+ transform=_make_tcp_transform(z=-0.105),
+ meshes=_SSG48_FINGER_MESHES,
+ motions=_SSG48_JAW_MOTION,
+ variants=(
+ ToolVariant(
+ key="finger",
+ display_name="Finger",
+ meshes=_SSG48_FINGER_MESHES,
+ motions=_SSG48_JAW_MOTION,
+ tcp_origin=(0.0, 0.0, -0.105),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ToolVariant(
+ key="pinch",
+ display_name="Pinch",
+ meshes=_SSG48_PINCH_MESHES,
+ motions=_SSG48_JAW_MOTION,
+ tcp_origin=(0.0, 0.0, -0.105),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ),
+ position_range=(0.0, 1.0),
+ speed_range=(0.0, 1.0),
+ current_range=(100, 1300),
+ ),
+)
+
+
+# ---------------------------------------------------------------------------
+# MSG AI stepper gripper — 100mm, 150mm, 200mm rail variants
+# ---------------------------------------------------------------------------
+
+_MSG_100_JAW_MOTION = (
+ LinearMotion(
+ role=MeshRole.JAW, axis=(0.0, 1.0, 0.0), travel_m=0.0267, symmetric=True
+ ),
+)
+_MSG_150_JAW_MOTION = (
+ LinearMotion(
+ role=MeshRole.JAW, axis=(0.0, 1.0, 0.0), travel_m=0.0514, symmetric=True
+ ),
+)
+_MSG_200_JAW_MOTION = (
+ LinearMotion(
+ role=MeshRole.JAW, axis=(0.0, 1.0, 0.0), travel_m=0.0767, symmetric=True
+ ),
+)
+
+_MSG_100_MESHES = (
+ MeshSpec(file="msg_ai_100_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="msg_ai_100_right_jaw.stl", role=MeshRole.JAW),
+ MeshSpec(file="msg_ai_100_left_jaw.stl", role=MeshRole.JAW),
+)
+
+_MSG_150_MESHES = (
+ MeshSpec(file="msg_ai_150_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="msg_ai_150_right_jaw.stl", role=MeshRole.JAW),
+ MeshSpec(file="msg_ai_150_left_jaw.stl", role=MeshRole.JAW),
+)
+
+_MSG_200_MESHES = (
+ MeshSpec(file="msg_ai_200_body.stl", role=MeshRole.BODY),
+ MeshSpec(file="msg_ai_200_right_jaw.stl", role=MeshRole.JAW),
+ MeshSpec(file="msg_ai_200_left_jaw.stl", role=MeshRole.JAW),
+)
+
+register_tool(
+ "MSG",
+ ElectricGripperConfig(
+ name="MSG AI Stepper Gripper",
+ description="MSG compliant AI stepper gripper (StepFOC)",
+ transform=_make_tcp_transform(x=-0.029, z=-0.103),
+ meshes=_MSG_100_MESHES,
+ motions=_MSG_100_JAW_MOTION,
+ variants=(
+ ToolVariant(
+ key="100mm",
+ display_name="100mm Rail",
+ meshes=_MSG_100_MESHES,
+ motions=_MSG_100_JAW_MOTION,
+ tcp_origin=(-0.029, 0.0, -0.103),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ToolVariant(
+ key="150mm",
+ display_name="150mm Rail",
+ meshes=_MSG_150_MESHES,
+ motions=_MSG_150_JAW_MOTION,
+ tcp_origin=(-0.029, 0.0, -0.103),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ToolVariant(
+ key="200mm",
+ display_name="200mm Rail",
+ meshes=_MSG_200_MESHES,
+ motions=_MSG_200_JAW_MOTION,
+ tcp_origin=(-0.029, 0.0, -0.103),
+ tcp_rpy=_TCP_RPY,
+ ),
+ ),
+ position_range=(0.0, 1.0),
+ speed_range=(0.0, 1.0),
+ current_range=(100, 2800),
+ gear_pd_mm=16.67, # 32P 21T gear: PD = 21/32" = 16.67mm
+ firmware_speed_range_tps=(500, 60_000), # StepFOC velocity range
+ ),
+)
+
+
+# ---------------------------------------------------------------------------
+# Vacuum gripper — pneumatic valve control, no jaws
+# ---------------------------------------------------------------------------
- return TOOL_CONFIGS[tool_name]
+register_tool(
+ "VACUUM",
+ PneumaticGripperConfig(
+ name="Vacuum Gripper",
+ description="Vacuum gripper (pneumatic valve I/O)",
+ transform=_make_tcp_transform(z=-0.037),
+ meshes=(MeshSpec(file="vacuum_gripper_body.stl", role=MeshRole.BODY),),
+ motions=(),
+ io_port=1,
+ ),
+)
diff --git a/parol6/urdf_model/meshes/L1_simplified.stl b/parol6/urdf_model/meshes/L1_simplified.stl
new file mode 100644
index 0000000..648d752
Binary files /dev/null and b/parol6/urdf_model/meshes/L1_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/L2_simplified.stl b/parol6/urdf_model/meshes/L2_simplified.stl
new file mode 100644
index 0000000..e6198ab
Binary files /dev/null and b/parol6/urdf_model/meshes/L2_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/L3_simplified.stl b/parol6/urdf_model/meshes/L3_simplified.stl
new file mode 100644
index 0000000..62b0ba2
Binary files /dev/null and b/parol6/urdf_model/meshes/L3_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/L4_simplified.stl b/parol6/urdf_model/meshes/L4_simplified.stl
new file mode 100644
index 0000000..92c3c36
Binary files /dev/null and b/parol6/urdf_model/meshes/L4_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/L5_simplified.stl b/parol6/urdf_model/meshes/L5_simplified.stl
new file mode 100644
index 0000000..82b3530
Binary files /dev/null and b/parol6/urdf_model/meshes/L5_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/L6.STL b/parol6/urdf_model/meshes/L6.STL
index 8b89dff..8adafbb 100644
Binary files a/parol6/urdf_model/meshes/L6.STL and b/parol6/urdf_model/meshes/L6.STL differ
diff --git a/parol6/urdf_model/meshes/L6_simplified.stl b/parol6/urdf_model/meshes/L6_simplified.stl
new file mode 100644
index 0000000..6d32763
Binary files /dev/null and b/parol6/urdf_model/meshes/L6_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/base_link_simplified.stl b/parol6/urdf_model/meshes/base_link_simplified.stl
new file mode 100644
index 0000000..be39569
Binary files /dev/null and b/parol6/urdf_model/meshes/base_link_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_100_body.stl b/parol6/urdf_model/meshes/msg_ai_100_body.stl
new file mode 100644
index 0000000..1800230
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_100_body.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_100_body_simplified.stl b/parol6/urdf_model/meshes/msg_ai_100_body_simplified.stl
new file mode 100644
index 0000000..18ac9ae
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_100_body_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_100_left_jaw.stl b/parol6/urdf_model/meshes/msg_ai_100_left_jaw.stl
new file mode 100644
index 0000000..98947cf
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_100_left_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_100_left_jaw_simplified.stl b/parol6/urdf_model/meshes/msg_ai_100_left_jaw_simplified.stl
new file mode 100644
index 0000000..8540517
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_100_left_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_100_right_jaw.stl b/parol6/urdf_model/meshes/msg_ai_100_right_jaw.stl
new file mode 100644
index 0000000..fdfde21
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_100_right_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_100_right_jaw_simplified.stl b/parol6/urdf_model/meshes/msg_ai_100_right_jaw_simplified.stl
new file mode 100644
index 0000000..86cae35
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_100_right_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_150_body.stl b/parol6/urdf_model/meshes/msg_ai_150_body.stl
new file mode 100644
index 0000000..ac5f713
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_150_body.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_150_body_simplified.stl b/parol6/urdf_model/meshes/msg_ai_150_body_simplified.stl
new file mode 100644
index 0000000..57d04db
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_150_body_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_150_left_jaw.stl b/parol6/urdf_model/meshes/msg_ai_150_left_jaw.stl
new file mode 100644
index 0000000..bcebb51
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_150_left_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_150_left_jaw_simplified.stl b/parol6/urdf_model/meshes/msg_ai_150_left_jaw_simplified.stl
new file mode 100644
index 0000000..25664ac
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_150_left_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_150_right_jaw.stl b/parol6/urdf_model/meshes/msg_ai_150_right_jaw.stl
new file mode 100644
index 0000000..077ea17
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_150_right_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_150_right_jaw_simplified.stl b/parol6/urdf_model/meshes/msg_ai_150_right_jaw_simplified.stl
new file mode 100644
index 0000000..3f9d6f7
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_150_right_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_200_body.stl b/parol6/urdf_model/meshes/msg_ai_200_body.stl
new file mode 100644
index 0000000..ea92498
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_200_body.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_200_body_simplified.stl b/parol6/urdf_model/meshes/msg_ai_200_body_simplified.stl
new file mode 100644
index 0000000..26c6c7a
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_200_body_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_200_left_jaw.stl b/parol6/urdf_model/meshes/msg_ai_200_left_jaw.stl
new file mode 100644
index 0000000..8d4e0e4
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_200_left_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_200_left_jaw_simplified.stl b/parol6/urdf_model/meshes/msg_ai_200_left_jaw_simplified.stl
new file mode 100644
index 0000000..196bf3e
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_200_left_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_200_right_jaw.stl b/parol6/urdf_model/meshes/msg_ai_200_right_jaw.stl
new file mode 100644
index 0000000..235a51a
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_200_right_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/msg_ai_200_right_jaw_simplified.stl b/parol6/urdf_model/meshes/msg_ai_200_right_jaw_simplified.stl
new file mode 100644
index 0000000..680af7a
Binary files /dev/null and b/parol6/urdf_model/meshes/msg_ai_200_right_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_assembly_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_assembly_simplified.stl
new file mode 100644
index 0000000..a5d9363
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_assembly_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_body.stl b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_body.stl
new file mode 100644
index 0000000..1dcd585
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_body.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_body_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_body_simplified.stl
new file mode 100644
index 0000000..d727f34
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_body_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_left_jaw.stl b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_left_jaw.stl
new file mode 100644
index 0000000..ba235b7
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_left_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_left_jaw_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_left_jaw_simplified.stl
new file mode 100644
index 0000000..1501e37
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_left_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_right_jaw.stl b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_right_jaw.stl
new file mode 100644
index 0000000..baf809a
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_right_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_right_jaw_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_right_jaw_simplified.stl
new file mode 100644
index 0000000..9497e6e
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_horizontal_right_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_vertical_body.stl b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_body.stl
new file mode 100644
index 0000000..407b2cf
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_body.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_vertical_body_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_body_simplified.stl
new file mode 100644
index 0000000..b96f216
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_body_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_vertical_left_jaw.stl b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_left_jaw.stl
new file mode 100644
index 0000000..cf74379
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_left_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_vertical_left_jaw_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_left_jaw_simplified.stl
new file mode 100644
index 0000000..51de531
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_left_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_vertical_right_jaw.stl b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_right_jaw.stl
new file mode 100644
index 0000000..33aef11
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_right_jaw.stl differ
diff --git a/parol6/urdf_model/meshes/pneumatic_gripper_vertical_right_jaw_simplified.stl b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_right_jaw_simplified.stl
new file mode 100644
index 0000000..fffb9e1
Binary files /dev/null and b/parol6/urdf_model/meshes/pneumatic_gripper_vertical_right_jaw_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_body.stl b/parol6/urdf_model/meshes/ssg48_body.stl
new file mode 100644
index 0000000..18313bc
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_body.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_body_simplified.stl b/parol6/urdf_model/meshes/ssg48_body_simplified.stl
new file mode 100644
index 0000000..5134025
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_body_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_finger_left.stl b/parol6/urdf_model/meshes/ssg48_finger_left.stl
new file mode 100644
index 0000000..c95a4ec
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_finger_left.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_finger_left_simplified.stl b/parol6/urdf_model/meshes/ssg48_finger_left_simplified.stl
new file mode 100644
index 0000000..153ff51
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_finger_left_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_finger_right.stl b/parol6/urdf_model/meshes/ssg48_finger_right.stl
new file mode 100644
index 0000000..62ebc46
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_finger_right.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_finger_right_simplified.stl b/parol6/urdf_model/meshes/ssg48_finger_right_simplified.stl
new file mode 100644
index 0000000..622057d
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_finger_right_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_pinch_left.stl b/parol6/urdf_model/meshes/ssg48_pinch_left.stl
new file mode 100644
index 0000000..b59d8fd
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_pinch_left.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_pinch_left_simplified.stl b/parol6/urdf_model/meshes/ssg48_pinch_left_simplified.stl
new file mode 100644
index 0000000..40bfe12
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_pinch_left_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_pinch_right.stl b/parol6/urdf_model/meshes/ssg48_pinch_right.stl
new file mode 100644
index 0000000..4d0268e
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_pinch_right.stl differ
diff --git a/parol6/urdf_model/meshes/ssg48_pinch_right_simplified.stl b/parol6/urdf_model/meshes/ssg48_pinch_right_simplified.stl
new file mode 100644
index 0000000..a05477d
Binary files /dev/null and b/parol6/urdf_model/meshes/ssg48_pinch_right_simplified.stl differ
diff --git a/parol6/urdf_model/meshes/vacuum_gripper_body.stl b/parol6/urdf_model/meshes/vacuum_gripper_body.stl
new file mode 100644
index 0000000..5080a02
Binary files /dev/null and b/parol6/urdf_model/meshes/vacuum_gripper_body.stl differ
diff --git a/parol6/urdf_model/meshes/vacuum_gripper_body_simplified.stl b/parol6/urdf_model/meshes/vacuum_gripper_body_simplified.stl
new file mode 100644
index 0000000..a63b7f0
Binary files /dev/null and b/parol6/urdf_model/meshes/vacuum_gripper_body_simplified.stl differ
diff --git a/parol6/urdf_model/urdf/PAROL6.urdf b/parol6/urdf_model/urdf/PAROL6.urdf
index 8dbab00..ac81a5f 100644
--- a/parol6/urdf_model/urdf/PAROL6.urdf
+++ b/parol6/urdf_model/urdf/PAROL6.urdf
@@ -347,7 +347,7 @@
name="L6">
@@ -361,7 +361,7 @@
diff --git a/parol6/utils/error_catalog.py b/parol6/utils/error_catalog.py
new file mode 100644
index 0000000..94b61dc
--- /dev/null
+++ b/parol6/utils/error_catalog.py
@@ -0,0 +1,198 @@
+"""Structured robot error types and error catalog.
+
+RobotError carries KUKA-style structured fields: code, title, cause, effect, remedy.
+The catalog maps ErrorCode → template; make_error() instantiates with runtime params.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+from .error_codes import ErrorCode
+
+
+@dataclass(frozen=True)
+class RobotError:
+ """Structured error with code, title, cause, effect, and remedy."""
+
+ command_index: int
+ code: int
+ title: str
+ cause: str
+ effect: str
+ remedy: str
+
+ def to_wire(self) -> list:
+ """Serialize to a list for ormsgpack packing."""
+ return [
+ self.command_index,
+ self.code,
+ self.title,
+ self.cause,
+ self.effect,
+ self.remedy,
+ ]
+
+ @staticmethod
+ def from_wire(data: list) -> RobotError:
+ """Reconstruct from a wire-format list."""
+ return RobotError(
+ command_index=data[0],
+ code=data[1],
+ title=data[2],
+ cause=data[3],
+ effect=data[4],
+ remedy=data[5],
+ )
+
+ def __str__(self) -> str:
+ return f"[{self.code}] {self.title}: {self.cause}"
+
+
+@dataclass(frozen=True)
+class _ErrorTemplate:
+ title: str
+ cause: str # may contain {placeholders}
+ effect: str
+ remedy: str
+
+
+_CATALOG: dict[int, _ErrorTemplate] = {
+ # -- IK --
+ ErrorCode.IK_TARGET_UNREACHABLE: _ErrorTemplate(
+ title="IK: target unreachable",
+ cause="Target pose has no valid IK solution. {detail}",
+ effect="Motion command rejected. Pipeline halted.",
+ remedy="Verify target is within workspace. Try different orientation.",
+ ),
+ ErrorCode.IK_PARTIAL_PATH: _ErrorTemplate(
+ title="IK: partial path failure",
+ cause="Only {valid}/{total} poses along the path are reachable.",
+ effect="Motion command rejected. Pipeline halted.",
+ remedy="Shorten the move, add intermediate waypoints, or adjust orientation.",
+ ),
+ # -- Trajectory --
+ ErrorCode.TRAJ_EMPTY_RESULT: _ErrorTemplate(
+ title="Trajectory: empty result",
+ cause="Trajectory generation returned no waypoints. {detail}",
+ effect="Motion command rejected.",
+ remedy="Check motion parameters. Start and end may be too close.",
+ ),
+ ErrorCode.TRAJ_NO_STEPS: _ErrorTemplate(
+ title="Trajectory: no steps",
+ cause="Trajectory calculation produced zero steps. {detail}",
+ effect="Motion command rejected.",
+ remedy="Increase duration or reduce speed fraction.",
+ ),
+ # -- Motion execution --
+ ErrorCode.MOTN_HOME_TIMEOUT: _ErrorTemplate(
+ title="Homing timeout",
+ cause="Robot did not start homing sequence within timeout.",
+ effect="Home command aborted.",
+ remedy="Check serial connection and robot power. Ensure E-stop is released.",
+ ),
+ ErrorCode.MOTN_GRIPPER_TIMEOUT: _ErrorTemplate(
+ title="Gripper timeout",
+ cause="Gripper command timed out in state {state}.",
+ effect="Gripper command aborted.",
+ remedy="Check gripper connection and calibration.",
+ ),
+ ErrorCode.MOTN_GRIPPER_UNKNOWN: _ErrorTemplate(
+ title="Gripper: unknown state",
+ cause="Gripper entered an unknown internal state.",
+ effect="Gripper command aborted.",
+ remedy="Reset the controller and recalibrate the gripper.",
+ ),
+ ErrorCode.MOTN_SETUP_FAILED: _ErrorTemplate(
+ title="Command setup failed",
+ cause="Command could not be initialized. {detail}",
+ effect="Command rejected. Pipeline halted.",
+ remedy="Check command parameters and robot state.",
+ ),
+ ErrorCode.MOTN_TICK_FAILED: _ErrorTemplate(
+ title="Command execution error",
+ cause="Unexpected error during execution. {detail}",
+ effect="Command aborted. Robot stopped.",
+ remedy="Check robot state. May need to re-home.",
+ ),
+ # -- Communication --
+ ErrorCode.COMM_QUEUE_FULL: _ErrorTemplate(
+ title="Command queue full",
+ cause="Motion queue at maximum capacity.",
+ effect="Command rejected.",
+ remedy="Wait for current motions to complete.",
+ ),
+ ErrorCode.COMM_UNKNOWN_COMMAND: _ErrorTemplate(
+ title="Unknown command",
+ cause="No handler for received command type.",
+ effect="Command ignored.",
+ remedy="Check client library version matches server.",
+ ),
+ ErrorCode.COMM_DECODE_ERROR: _ErrorTemplate(
+ title="Command decode error",
+ cause="Failed to decode command. {detail}",
+ effect="Command ignored.",
+ remedy="Check command encoding. Possible version mismatch.",
+ ),
+ ErrorCode.COMM_VALIDATION_ERROR: _ErrorTemplate(
+ title="Command validation error",
+ cause="Invalid parameters. {detail}",
+ effect="Command rejected.",
+ remedy="Check parameter ranges and types.",
+ ),
+ # -- System / safety --
+ ErrorCode.SYS_CONTROLLER_DISABLED: _ErrorTemplate(
+ title="Controller disabled",
+ cause="Motion command sent while controller is disabled. {detail}",
+ effect="Command rejected.",
+ remedy="Call resume() to re-enable the controller.",
+ ),
+ ErrorCode.SYS_ESTOP_ACTIVE: _ErrorTemplate(
+ title="E-stop active",
+ cause="Emergency stop is currently engaged.",
+ effect="All motion halted. Queue cleared.",
+ remedy="Release the E-stop button and call resume().",
+ ),
+ ErrorCode.SYS_PORT_SAVE_FAILED: _ErrorTemplate(
+ title="Serial port save failed",
+ cause="Could not save serial port configuration.",
+ effect="Port may not persist across restarts.",
+ remedy="Check file permissions and disk space.",
+ ),
+ ErrorCode.SYS_PROFILE_INVALID: _ErrorTemplate(
+ title="Invalid motion profile",
+ cause="Unrecognized motion profile: {detail}",
+ effect="Profile not changed.",
+ remedy="Use one of: TOPPRA, RUCKIG, QUINTIC, TRAPEZOID, LINEAR.",
+ ),
+}
+
+
+def make_error(
+ code: ErrorCode, command_index: int = -1, **params: object
+) -> RobotError:
+ """Create a RobotError from the catalog, formatting placeholders in title/cause."""
+ tmpl = _CATALOG[code]
+ return RobotError(
+ command_index=command_index,
+ code=int(code),
+ title=tmpl.title.format_map(params) if params else tmpl.title,
+ cause=tmpl.cause.format_map(params) if params else tmpl.cause,
+ effect=tmpl.effect,
+ remedy=tmpl.remedy,
+ )
+
+
+def extract_robot_error(
+ exc: Exception, fallback_code: ErrorCode, command_index: int = -1, **params: object
+) -> RobotError:
+ """Extract a RobotError from an exception, falling back to a catalog error.
+
+ If the exception carries a ``robot_error`` attribute (e.g. IKError,
+ TrajectoryPlanningError), return it directly. Otherwise, construct a
+ new RobotError from the catalog using *fallback_code* and *params*.
+ """
+ robot_error: RobotError | None = getattr(exc, "robot_error", None)
+ if robot_error is not None:
+ return robot_error
+ return make_error(fallback_code, command_index, **params)
diff --git a/parol6/utils/error_codes.py b/parol6/utils/error_codes.py
new file mode 100644
index 0000000..5e32b39
--- /dev/null
+++ b/parol6/utils/error_codes.py
@@ -0,0 +1,40 @@
+"""Error codes for structured robot error reporting.
+
+Subsystem ranges (10 codes each):
+ 10-19 IK (inverse kinematics)
+ 20-29 Trajectory planning
+ 30-39 Motion execution
+ 40-49 Communication / protocol
+ 50-59 System / safety
+"""
+
+from enum import IntEnum
+
+
+class ErrorCode(IntEnum):
+ # IK subsystem
+ IK_TARGET_UNREACHABLE = 10
+ IK_PARTIAL_PATH = 11
+
+ # Trajectory planning
+ TRAJ_EMPTY_RESULT = 20
+ TRAJ_NO_STEPS = 21
+
+ # Motion execution
+ MOTN_HOME_TIMEOUT = 30
+ MOTN_GRIPPER_TIMEOUT = 31
+ MOTN_GRIPPER_UNKNOWN = 32
+ MOTN_SETUP_FAILED = 33
+ MOTN_TICK_FAILED = 34
+
+ # Communication / protocol
+ COMM_QUEUE_FULL = 40
+ COMM_UNKNOWN_COMMAND = 41
+ COMM_DECODE_ERROR = 42
+ COMM_VALIDATION_ERROR = 43
+
+ # System / safety
+ SYS_CONTROLLER_DISABLED = 50
+ SYS_ESTOP_ACTIVE = 51
+ SYS_PORT_SAVE_FAILED = 52
+ SYS_PROFILE_INVALID = 53
diff --git a/parol6/utils/errors.py b/parol6/utils/errors.py
index d5b297d..506670b 100644
--- a/parol6/utils/errors.py
+++ b/parol6/utils/errors.py
@@ -3,24 +3,37 @@
Keep this focused and non-redundant; prefer built-ins where appropriate.
"""
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from .error_catalog import RobotError
+
class IKError(RuntimeError):
"""Inverse kinematics failure (no solution, constraints violated, etc.)."""
- def __init__(self, message: str):
- self.original_message = message
- super().__init__(f"IK ERROR: {message}")
-
- def __str__(self):
- return f"IK ERROR: {self.original_message}"
+ def __init__(self, robot_error: RobotError):
+ self.robot_error = robot_error
+ super().__init__(str(robot_error))
class TrajectoryPlanningError(RuntimeError):
"""Trajectory generation/planning failure."""
- def __init__(self, message: str):
- self.original_message = message
- super().__init__(f"Trajectory Planning Error: {message}")
+ def __init__(self, robot_error: RobotError):
+ self.robot_error = robot_error
+ super().__init__(str(robot_error))
+
+
+class MotionError(RuntimeError):
+ """Pipeline planning/execution error detected via status broadcast."""
+
+ def __init__(self, robot_error: RobotError):
+ self.robot_error = robot_error
+ super().__init__(str(robot_error))
- def __str__(self):
- return f"Trajectory Planning Error: {self.original_message}"
+ @property
+ def command_index(self) -> int:
+ return self.robot_error.command_index
diff --git a/parol6/utils/frames.py b/parol6/utils/frames.py
deleted file mode 100644
index 217f4f7..0000000
--- a/parol6/utils/frames.py
+++ /dev/null
@@ -1,223 +0,0 @@
-"""
-Shared TRF/WRF transformation utilities used by commands.
-
-These helpers convert tool/frame-relative inputs (TRF) into world reference frame (WRF)
-using the current tool pose derived from the robot's forward kinematics.
-"""
-
-from __future__ import annotations
-
-import logging
-from collections.abc import Sequence
-from typing import Any, cast
-
-import numpy as np
-from numpy.typing import NDArray
-from spatialmath import SE3
-
-from parol6.server.state import get_fkine_se3
-
-logger = logging.getLogger(__name__)
-
-# Constants for TRF plane normal vectors
-PLANE_NORMALS_TRF: dict[str, NDArray] = {
- "XY": np.array([0.0, 0.0, 1.0]), # Tool's Z-axis
- "XZ": np.array([0.0, 1.0, 0.0]), # Tool's Y-axis
- "YZ": np.array([1.0, 0.0, 0.0]), # Tool's X-axis
-}
-
-
-def point_trf_to_wrf_mm(point_mm: Sequence[float], tool_pose: SE3) -> list[float]:
- """Convert 3D point from TRF to WRF coordinates (both in mm)."""
- point_trf = SE3(point_mm[0] / 1000.0, point_mm[1] / 1000.0, point_mm[2] / 1000.0)
- point_wrf = cast(SE3, tool_pose * point_trf)
- return (point_wrf.t * 1000.0).tolist()
-
-
-def pose6_trf_to_wrf(pose6_mm_deg: Sequence[float], tool_pose: SE3) -> list[float]:
- """Convert 6D pose [x,y,z,rx,ry,rz] from TRF to WRF (mm, degrees)."""
- pose_trf = SE3(
- pose6_mm_deg[0] / 1000.0, pose6_mm_deg[1] / 1000.0, pose6_mm_deg[2] / 1000.0
- ) * SE3.RPY(pose6_mm_deg[3:], unit="deg", order="xyz")
- pose_wrf = cast(SE3, tool_pose * pose_trf)
- return np.concatenate(
- [pose_wrf.t * 1000.0, pose_wrf.rpy(unit="deg", order="xyz")]
- ).tolist()
-
-
-def se3_to_pose6_mm_deg(T: SE3) -> list[float]:
- """Convert SE3 transform to 6D pose [x,y,z,rx,ry,rz] (mm, degrees)."""
- return np.concatenate([T.t * 1000.0, T.rpy(unit="deg", order="xyz")]).tolist()
-
-
-def transform_center_trf_to_wrf(
- params: dict[str, Any], tool_pose: SE3, transformed: dict[str, Any]
-) -> None:
- """Transform 'center' parameter from TRF (mm) to WRF (mm) using tool_pose."""
- center_trf = SE3(
- params["center"][0] / 1000.0,
- params["center"][1] / 1000.0,
- params["center"][2] / 1000.0,
- )
- center_wrf = cast(SE3, tool_pose * center_trf)
- transformed["center"] = (center_wrf.t * 1000.0).tolist()
-
-
-def transform_start_pose_if_needed(
- start_pose: Sequence[float] | None, frame: str, current_position_in: np.ndarray
-) -> list[float] | None:
- """Transform start_pose from TRF to WRF if needed."""
- if frame == "TRF" and start_pose:
- tool_pose = get_fkine_se3()
- return pose6_trf_to_wrf(start_pose, tool_pose)
- return (
- np.asarray(start_pose, dtype=float).tolist() if start_pose is not None else None
- )
-
-
-def transform_command_params_to_wrf(
- command_type: str,
- params: dict[str, Any],
- frame: str,
- current_position_in: np.ndarray,
-) -> dict[str, Any]:
- """
- Transform command parameters from TRF to WRF.
- Handles position, orientation, and directional vectors correctly.
- """
- if frame == "WRF":
- return params
-
- # Get current tool pose
- tool_pose = get_fkine_se3()
-
- transformed = params.copy()
-
- # SMOOTH_CIRCLE - Transform center and plane normal
- if command_type == "SMOOTH_CIRCLE":
- if "center" in params:
- transform_center_trf_to_wrf(params, tool_pose, transformed)
-
- if "plane" in params:
- normal_trf = PLANE_NORMALS_TRF[params["plane"]]
- normal_wrf = tool_pose.R @ normal_trf
- transformed["normal_vector"] = normal_wrf.tolist()
- logger.info(f" -> TRF circle plane {params['plane']} transformed to WRF")
-
- # SMOOTH_ARC_CENTER - Transform center, end_pose, and implied plane
- elif command_type == "SMOOTH_ARC_CENTER":
- if "center" in params:
- transform_center_trf_to_wrf(params, tool_pose, transformed)
-
- if "end_pose" in params:
- transformed["end_pose"] = pose6_trf_to_wrf(params["end_pose"], tool_pose)
-
- if "plane" in params:
- normal_trf = PLANE_NORMALS_TRF[params["plane"]]
- normal_wrf = tool_pose.R @ normal_trf
- transformed["normal_vector"] = normal_wrf.tolist()
-
- # SMOOTH_ARC_PARAM - Transform end_pose and arc plane
- elif command_type == "SMOOTH_ARC_PARAM":
- if "end_pose" in params:
- transformed["end_pose"] = pose6_trf_to_wrf(params["end_pose"], tool_pose)
-
- # For parametric arc, the plane is usually XY of the tool
- if "plane" not in params:
- params["plane"] = "XY" # Default to XY plane
-
- normal_trf = PLANE_NORMALS_TRF[params.get("plane", "XY")]
- normal_wrf = tool_pose.R @ normal_trf
- transformed["normal_vector"] = normal_wrf.tolist()
-
- # SMOOTH_HELIX - Transform center and helix axis
- elif command_type == "SMOOTH_HELIX":
- if "center" in params:
- transform_center_trf_to_wrf(params, tool_pose, transformed)
-
- # Transform helix axis (default is Z-axis of tool)
- axis_trf = np.array([0.0, 0.0, 1.0]) # Tool's Z-axis
- axis_wrf = tool_pose.R @ axis_trf
- transformed["helix_axis"] = axis_wrf.tolist()
-
- # Transform up vector (default is Y-axis of tool)
- up_trf = np.array([0.0, 1.0, 0.0]) # Tool's Y-axis
- up_wrf = tool_pose.R @ up_trf
- transformed["up_vector"] = up_wrf.tolist()
-
- # SMOOTH_SPLINE - Transform waypoints
- elif command_type == "SMOOTH_SPLINE":
- if "waypoints" in params:
- transformed_waypoints = []
- for wp in params["waypoints"]:
- transformed_waypoints.append(pose6_trf_to_wrf(wp, tool_pose))
- transformed["waypoints"] = transformed_waypoints
-
- # SMOOTH_BLEND - Transform all segment definitions
- elif command_type == "SMOOTH_BLEND":
- if "segments" in params:
- transformed_segments = []
- for seg in params["segments"]:
- seg_transformed = seg.copy()
-
- # Transform based on segment type
- if seg["type"] == "LINE":
- if "end" in seg:
- seg_transformed["end"] = pose6_trf_to_wrf(seg["end"], tool_pose)
-
- elif seg["type"] == "ARC":
- if "end" in seg:
- seg_transformed["end"] = pose6_trf_to_wrf(seg["end"], tool_pose)
-
- if "center" in seg:
- # Create a temporary params dict to use the helper
- seg_params = {"center": seg["center"]}
- transform_center_trf_to_wrf(
- seg_params, tool_pose, seg_transformed
- )
-
- # Transform plane normal if specified
- if "plane" in seg:
- normal_trf = PLANE_NORMALS_TRF[seg["plane"]]
- normal_wrf = tool_pose.R @ normal_trf
- seg_transformed["normal_vector"] = normal_wrf.tolist()
-
- elif seg["type"] == "CIRCLE":
- if "center" in seg:
- # Create a temporary params dict to use the helper
- seg_params = {"center": seg["center"]}
- transform_center_trf_to_wrf(
- seg_params, tool_pose, seg_transformed
- )
-
- if "plane" in seg:
- normal_trf = PLANE_NORMALS_TRF[seg["plane"]]
- normal_wrf = tool_pose.R @ normal_trf
- seg_transformed["normal_vector"] = normal_wrf.tolist()
-
- elif seg["type"] == "SPLINE":
- if "waypoints" in seg:
- transformed_wps = []
- for wp in seg["waypoints"]:
- transformed_wps.append(pose6_trf_to_wrf(wp, tool_pose))
- seg_transformed["waypoints"] = transformed_wps
-
- transformed_segments.append(seg_transformed)
- transformed["segments"] = transformed_segments
-
- # Generic transformations for any command with these parameters
- if "start_pose" in params:
- transformed["start_pose"] = pose6_trf_to_wrf(params["start_pose"], tool_pose)
-
- return transformed
-
-
-__all__ = [
- "PLANE_NORMALS_TRF",
- "point_trf_to_wrf_mm",
- "pose6_trf_to_wrf",
- "se3_to_pose6_mm_deg",
- "transform_center_trf_to_wrf",
- "transform_start_pose_if_needed",
- "transform_command_params_to_wrf",
-]
diff --git a/parol6/utils/ik.py b/parol6/utils/ik.py
index ca45e28..cbe5808 100644
--- a/parol6/utils/ik.py
+++ b/parol6/utils/ik.py
@@ -3,174 +3,353 @@
Shared functions used by multiple command classes for inverse kinematics calculations.
"""
+import atexit
import logging
-from collections.abc import Sequence
-from typing import NamedTuple
+import time
+from dataclasses import dataclass
import numpy as np
-from numpy.typing import NDArray
-from roboticstoolbox import DHRobot
-from spatialmath import SE3
+from numba import njit
+from numpy.typing import ArrayLike, NDArray
+from pinokin import Damping as _Damping, IKSolver as _IKSolver, Robot
import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from parol6.config import IK_SAFETY_MARGINS_RAD
logger = logging.getLogger(__name__)
-# This dictionary maps descriptive axis names to movement vectors
-# Format: ([x, y, z], [rx, ry, rz])
-AXIS_MAP = {
- "X+": ([1, 0, 0], [0, 0, 0]),
- "X-": ([-1, 0, 0], [0, 0, 0]),
- "Y+": ([0, 1, 0], [0, 0, 0]),
- "Y-": ([0, -1, 0], [0, 0, 0]),
- "Z+": ([0, 0, 1], [0, 0, 0]),
- "Z-": ([0, 0, -1], [0, 0, 0]),
- "RX+": ([0, 0, 0], [1, 0, 0]),
- "RX-": ([0, 0, 0], [-1, 0, 0]),
- "RY+": ([0, 0, 0], [0, 1, 0]),
- "RY-": ([0, 0, 0], [0, -1, 0]),
- "RZ+": ([0, 0, 0], [0, 0, 1]),
- "RZ-": ([0, 0, 0], [0, 0, -1]),
-}
-
-
-def unwrap_angles(
- q_solution: Sequence[float] | NDArray[np.float64],
- q_current: Sequence[float] | NDArray[np.float64],
-) -> NDArray[np.float64]:
- """
- Vectorized unwrap: bring solution angles near current by adding/subtracting 2*pi.
- This minimizes joint motion between consecutive configurations.
- """
- qs = np.asarray(q_solution, dtype=float)
- qc = np.asarray(q_current, dtype=float)
- diff = qs - qc
- q_unwrapped = qs.copy()
- q_unwrapped[diff > np.pi] -= 2 * np.pi
- q_unwrapped[diff < -np.pi] += 2 * np.pi
- return q_unwrapped
+class RateLimitedWarning:
+ """Rate-limited warning logger to avoid spam on hot paths."""
+
+ __slots__ = ("_interval", "_last")
+
+ def __init__(self, interval: float = 1.0):
+ self._interval = interval
+ self._last: float = 0.0
+
+ def __call__(self, log: logging.Logger, msg: str, *args: object) -> None:
+ now = time.monotonic()
+ if now - self._last > self._interval:
+ log.warning(msg, *args)
+ self._last = now
+
+
+_ik_warn = RateLimitedWarning()
-class IKResult(NamedTuple):
+
+@dataclass
+class SolveIKResult:
+ """IK result with safety violation tracking."""
+
+ q: NDArray[np.float64]
success: bool
- q: NDArray[np.float64] | None
iterations: int
residual: float
- violations: str | None
+ violations: str | None = None
+
+
+# Cached solver state — invalidated when robot instance changes (e.g., tool change)
+_cached_robot_id: int = -1
+_cached_qlim: NDArray[np.float64] | None = None
+_cached_solver: _IKSolver | None = None
+_cached_buffered_min: NDArray[np.float64] | None = None
+_cached_buffered_max: NDArray[np.float64] | None = None
+_cached_qlim_min: NDArray[np.float64] | None = None
+_cached_qlim_max: NDArray[np.float64] | None = None
+_cached_result: SolveIKResult | None = None
+
+
+@atexit.register
+def _cleanup_ik_cache() -> None:
+ global _cached_solver
+ _cached_solver = None
+
+
+def _ensure_cache(robot: Robot) -> None:
+ """Rebuild cache if robot instance changed."""
+ global _cached_robot_id, _cached_qlim, _cached_solver
+ global _cached_buffered_min, _cached_buffered_max, _cached_result
+ global _cached_qlim_min, _cached_qlim_max
+
+ robot_id = id(robot)
+ if _cached_robot_id == robot_id:
+ return
+
+ qlim = robot.qlim
+ _cached_robot_id = robot_id
+ _cached_qlim = qlim
+ _cached_qlim_min = np.ascontiguousarray(qlim[0, :])
+ _cached_qlim_max = np.ascontiguousarray(qlim[1, :])
+ _cached_solver = _IKSolver(
+ robot,
+ damping=_Damping.Sugihara,
+ tol=1e-12,
+ lm_lambda=0.0,
+ max_iter=10,
+ max_restarts=10,
+ )
+ _cached_buffered_min = qlim[0, :] + IK_SAFETY_MARGINS_RAD[:, 0]
+ _cached_buffered_max = qlim[1, :] - IK_SAFETY_MARGINS_RAD[:, 1]
+ _cached_result = SolveIKResult(
+ q=np.zeros(qlim.shape[1], dtype=np.float64),
+ success=False,
+ iterations=0,
+ residual=0.0,
+ )
+
+
+@njit(cache=True)
+def _ik_safety_check(
+ q: NDArray[np.float64],
+ cq: NDArray[np.float64],
+ buffered_min: NDArray[np.float64],
+ buffered_max: NDArray[np.float64],
+ qlim_min: NDArray[np.float64],
+ qlim_max: NDArray[np.float64],
+) -> tuple[bool, bool, int]:
+ """
+ JIT-compiled IK safety validation (zero-allocation scalar loop).
+ Returns (ok, is_recovery_violation, violation_idx).
+ """
+ recovery_idx = -1
+ safety_idx = -1
+ for i in range(q.shape[0]):
+ in_danger = cq[i] < buffered_min[i] or cq[i] > buffered_max[i]
+ if in_danger:
+ d_old = min(abs(cq[i] - qlim_min[i]), abs(cq[i] - qlim_max[i]))
+ d_new = min(abs(q[i] - qlim_min[i]), abs(q[i] - qlim_max[i]))
+ if d_new < d_old and recovery_idx < 0:
+ recovery_idx = i
+ else:
+ if (q[i] < buffered_min[i] or q[i] > buffered_max[i]) and safety_idx < 0:
+ safety_idx = i
+ if recovery_idx >= 0:
+ return False, True, recovery_idx
+ if safety_idx >= 0:
+ return False, False, safety_idx
+ return True, False, -1
def solve_ik(
- robot: DHRobot,
- target_pose: SE3,
- current_q: Sequence[float] | NDArray[np.float64],
- jogging: bool = False,
- safety_margin_rad: float = 0.03,
+ robot: Robot,
+ target_pose: NDArray[np.float64],
+ current_q: NDArray[np.float64],
quiet_logging: bool = False,
-) -> IKResult:
+) -> SolveIKResult:
"""
- IK solver
+ IK solver with per-joint safety margins.
+
+ Per-joint safety margins (IK_SAFETY_MARGINS_RAD) are always enforced.
+ Joints like J3 (elbow) have larger margins because backwards bend creates
+ trap configurations that are hard to recover from.
+
+ Returns shared result -- copy result.q if storing across calls.
Parameters
----------
- robot : DHRobot
- Robot model
- target_pose : SE3
- Target pose to reach
- current_q : array_like
+ robot : Robot
+ pinokin Robot model
+ target_pose : NDArray[np.float64]
+ Target pose as 4x4 SE3 transformation matrix
+ current_q : NDArray[np.float64]
Current joint configuration in radians
- jogging : bool, optional
- If True, use very strict tolerance for jogging (default: False)
- safety_margin_rad : float, optional
- Buffer distance (radians) from joint limits (default: 0.03)
+ quiet_logging : bool, optional
+ If True, suppress warning logs (default: False)
Returns
-------
- IKResult
+ SolveIKResult
success - True if solution found
- q - Joint configuration in radians (or None if failed)
+ q - Joint configuration in radians
iterations - Number of iterations used
residual - Final error value
- tolerance_used - Tolerance used for convergence
violations - Error message if failed, None if successful
"""
- cq: NDArray[np.float64] = np.asarray(current_q, dtype=np.float64)
- result = robot.ets().ik_LM(
- target_pose, q0=cq, tol=1e-10, joint_limits=True, k=0.0, method="sugihara"
- )
- q = result[0]
- success = result[1] > 0
- iterations = result[2]
- remaining = result[3]
-
- violations = None
-
- if success and jogging:
- # Vectorized safety validation with recovery support
- qlim = robot.qlim
- buffered_min = qlim[0, :] + safety_margin_rad
- buffered_max = qlim[1, :] - safety_margin_rad
-
- # Check which joints were in danger zone (beyond buffer)
- in_danger_old = (current_q < buffered_min) | (current_q > buffered_max)
-
- # Compute distance from nearest limit for all joints
- dist_old_lower = np.abs(current_q - qlim[0, :])
- dist_old_upper = np.abs(current_q - qlim[1, :])
- dist_old = np.minimum(dist_old_lower, dist_old_upper)
-
- dist_new_lower = np.abs(q - qlim[0, :])
- dist_new_upper = np.abs(q - qlim[1, :])
- dist_new = np.minimum(dist_new_lower, dist_new_upper)
-
- # Check for recovery violations (was in danger, moved closer to limit)
- recovery_violations = in_danger_old & (dist_new < dist_old)
-
- # Check for safety violations (was safe, left buffer zone)
- in_danger_new = (q < buffered_min) | (q > buffered_max)
- safety_violations = (~in_danger_old) & in_danger_new
-
- # Report first violation found
- if np.any(recovery_violations):
- idx = np.argmax(recovery_violations)
- success = False
- violations = (
- f"J{idx + 1} moving further into danger zone (recovery blocked)"
- )
- if not quiet_logging:
- logger.warning(violations)
- elif np.any(safety_violations):
- idx = np.argmax(safety_violations)
- success = False
- violations = f"J{idx + 1} would leave safe zone (buffer violated)"
- if not quiet_logging:
- logger.warning(violations)
+ _ensure_cache(robot)
+ assert _cached_solver is not None
+ assert _cached_result is not None
+ solver = _cached_solver
+ result = _cached_result
+
+ solver.solve(target_pose, q0=current_q)
- if success:
- # Valid solution - apply unwrapping to minimize joint motion
- q_unwrapped = unwrap_angles(q, current_q)
+ # Write into pre-allocated result buffer (zero-allocation)
+ result.q[:] = solver.q
+ result.success = solver.success
+ result.iterations = solver.iterations
+ result.residual = solver.residual
+ result.violations = None
- # Verify unwrapped solution still within actual limits
- within_limits = PAROL6_ROBOT.check_limits(
- current_q, q_unwrapped, allow_recovery=True, log=not quiet_logging
+ if result.success:
+ # JIT-compiled safety validation
+ ok, is_recovery, idx = _ik_safety_check(
+ result.q,
+ current_q,
+ _cached_buffered_min,
+ _cached_buffered_max,
+ _cached_qlim_min,
+ _cached_qlim_max,
)
+ if not ok:
+ result.success = False
+ if is_recovery:
+ result.violations = (
+ f"J{idx + 1} moving further into danger zone (recovery blocked)"
+ )
+ else:
+ result.violations = (
+ f"J{idx + 1} would leave safe zone (buffer violated)"
+ )
+ if not quiet_logging:
+ _ik_warn(logger, result.violations)
- if within_limits:
- q = q_unwrapped
- # else: use original result.q which already passed library's limit check
+ if result.success:
+ if not check_limits(
+ current_q, result.q, allow_recovery=True, log=not quiet_logging
+ ):
+ result.success = False
else:
- violations = "IK failed to solve."
- return IKResult(
- success=success,
- q=q if success else None,
- iterations=iterations,
- residual=remaining,
- violations=violations,
- )
+ if result.violations is None:
+ result.violations = "IK failed to solve."
+
+ return result
-def quintic_scaling(s: float) -> float:
+# -----------------------------
+# Fast, vectorized limit checking with edge-triggered logging
+# -----------------------------
+# Pre-allocated buffers for check_limits (avoid per-call allocation)
+_cl_viol = np.zeros(6, dtype=np.bool_)
+_cl_below = np.zeros(6, dtype=np.bool_)
+_cl_above = np.zeros(6, dtype=np.bool_)
+_cl_cur_viol = np.zeros(6, dtype=np.bool_)
+_cl_t_below = np.zeros(6, dtype=np.bool_)
+_cl_t_above = np.zeros(6, dtype=np.bool_)
+_cl_dummy_target = np.zeros(6, dtype=np.float64)
+_cl_mn = np.ascontiguousarray(PAROL6_ROBOT._joint_limits_radian[:, 0])
+_cl_mx = np.ascontiguousarray(PAROL6_ROBOT._joint_limits_radian[:, 1])
+_last_violation_mask = np.zeros(6, dtype=np.bool_)
+_last_any_violation = False
+
+
+@njit(cache=True)
+def _check_limits_core(
+ q_arr: NDArray[np.float64],
+ t_arr: NDArray[np.float64],
+ mn: NDArray[np.float64],
+ mx: NDArray[np.float64],
+ allow_recovery: bool,
+ has_target: bool,
+ viol_out: NDArray[np.bool_],
+ below_out: NDArray[np.bool_],
+ above_out: NDArray[np.bool_],
+ cur_viol_out: NDArray[np.bool_],
+ t_below_out: NDArray[np.bool_],
+ t_above_out: NDArray[np.bool_],
+) -> bool:
+ """JIT-compiled core of check_limits. Writes masks to output buffers."""
+ n = q_arr.shape[0]
+ for i in range(n):
+ below_out[i] = q_arr[i] < mn[i]
+ above_out[i] = q_arr[i] > mx[i]
+ cur_viol_out[i] = below_out[i] or above_out[i]
+
+ if not has_target:
+ all_ok = True
+ for i in range(n):
+ viol_out[i] = cur_viol_out[i]
+ if viol_out[i]:
+ all_ok = False
+ return all_ok
+
+ for i in range(n):
+ t_below_out[i] = t_arr[i] < mn[i]
+ t_above_out[i] = t_arr[i] > mx[i]
+
+ all_ok = True
+ for i in range(n):
+ t_viol = t_below_out[i] or t_above_out[i]
+ if allow_recovery:
+ rec_ok = (above_out[i] and t_arr[i] <= q_arr[i]) or (
+ below_out[i] and t_arr[i] >= q_arr[i]
+ )
+ else:
+ rec_ok = False
+ ok = (not cur_viol_out[i] and not t_viol) or (cur_viol_out[i] and rec_ok)
+ viol_out[i] = not ok
+ if not ok:
+ all_ok = False
+
+ return all_ok
+
+
+def check_limits(
+ q: ArrayLike,
+ target_q: ArrayLike | None = None,
+ allow_recovery: bool = True,
+ *,
+ log: bool = True,
+) -> bool:
"""
- Calculates a smooth 0-to-1 scaling factor for progress 's'
- using a quintic polynomial, ensuring smooth start/end accelerations.
+ Vectorized limits check in radians.
+ - q: current joint angles in radians (array-like)
+ - target_q: optional target joint angles in radians (array-like)
+ - allow_recovery: allow movement that heads back toward valid range if currently violating
+ - log: emit edge-triggered warning/info logs on violation state changes
+
+ Returns True if move is allowed (within limits or valid recovery), False otherwise.
"""
- return 6 * (s**5) - 15 * (s**4) + 10 * (s**3)
+ global _last_any_violation
+
+ q_arr = np.asarray(q, dtype=np.float64).reshape(-1)
+ has_target = target_q is not None
+ t_arr = (
+ np.asarray(target_q, dtype=np.float64).reshape(-1)
+ if has_target
+ else _cl_dummy_target
+ )
+
+ all_ok = _check_limits_core(
+ q_arr,
+ t_arr,
+ _cl_mn,
+ _cl_mx,
+ allow_recovery,
+ has_target,
+ _cl_viol,
+ _cl_below,
+ _cl_above,
+ _cl_cur_viol,
+ _cl_t_below,
+ _cl_t_above,
+ )
+
+ if log:
+ any_viol = not all_ok
+
+ # Edge-triggered violation logs
+ if any_viol and (
+ np.any(_cl_viol != _last_violation_mask) or not _last_any_violation
+ ):
+ idxs = np.where(_cl_viol)[0]
+ tokens = []
+ for i in idxs:
+ if _cl_cur_viol[i]:
+                    tokens.append(
+                        f"J{i + 1}:" + ("curmin" if _cl_below[i] else "curmax")
+                    )
+                elif has_target and (_cl_t_below[i] or _cl_t_above[i]):
+                    tokens.append(f"J{i + 1}:" + ("targetmin" if _cl_t_below[i] else "targetmax"))
+                else:
+                    tokens.append(f"J{i + 1}:violation")
+ logger.warning("LIMIT VIOLATION: %s", " ".join(tokens))
+ elif (not any_viol) and _last_any_violation:
+ logger.info("Limits back in range")
+
+ _last_violation_mask[:] = _cl_viol
+ _last_any_violation = any_viol
+
+ return all_ok
diff --git a/parol6/utils/trajectory.py b/parol6/utils/trajectory.py
deleted file mode 100644
index ef75193..0000000
--- a/parol6/utils/trajectory.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""
-Shared trajectory planning utilities.
-"""
-
-from collections.abc import Sequence
-
-import numpy as np
-
-from parol6.config import CONTROL_RATE_HZ
-
-
-def _samples_for_duration(duration: float, sample_rate: float) -> int:
- if duration <= 0:
- return 2
- n = int(round(duration * sample_rate)) + 1
- return max(2, n)
-
-
-def plan_linear_quintic(
- start: Sequence[float],
- end: Sequence[float],
- duration: float,
- sample_rate: float | None = None,
-) -> np.ndarray:
- """
- Quintic time-scaling with zero velocity and acceleration at endpoints.
- Applies the scalar blend S(τ) = 10τ^3 - 15τ^4 + 6τ^5 to each dimension.
-
- Returns: array of shape (N, D)
- """
- sr = CONTROL_RATE_HZ if sample_rate is None else float(sample_rate)
- start_arr = np.asarray(start, dtype=float)
- end_arr = np.asarray(end, dtype=float)
- if start_arr.shape != end_arr.shape:
- raise ValueError("start and end must have the same shape")
-
- if duration <= 0:
- return np.vstack([start_arr, end_arr])
-
- n = _samples_for_duration(duration, sr)
- tau = np.linspace(0.0, 1.0, n)
- s = 10 * tau**3 - 15 * tau**4 + 6 * tau**5 # [0..1]
- s = s.reshape(-1, 1)
- delta = (end_arr - start_arr).reshape(1, -1)
- traj = start_arr.reshape(1, -1) + s * delta
- return traj
-
-
-def plan_linear_cubic(
- start: Sequence[float],
- end: Sequence[float],
- duration: float,
- sample_rate: float | None = None,
-) -> np.ndarray:
- """
- Cubic time-scaling with zero velocity at endpoints.
- Applies S(τ) = 3τ^2 - 2τ^3 to each dimension.
-
- Returns: array of shape (N, D)
- """
- sr = CONTROL_RATE_HZ if sample_rate is None else float(sample_rate)
- start_arr = np.asarray(start, dtype=float)
- end_arr = np.asarray(end, dtype=float)
- if start_arr.shape != end_arr.shape:
- raise ValueError("start and end must have the same shape")
-
- if duration <= 0:
- return np.vstack([start_arr, end_arr])
-
- n = _samples_for_duration(duration, sr)
- tau = np.linspace(0.0, 1.0, n)
- s = 3 * tau**2 - 2 * tau**3
- s = s.reshape(-1, 1)
- delta = (end_arr - start_arr).reshape(1, -1)
- traj = start_arr.reshape(1, -1) + s * delta
- return traj
-
-
-def _trapezoid_timings(
- distance: float, v_max: float, a_max: float
-) -> tuple[float, float, float, float, bool]:
- """
- Compute trapezoid or triangular profile timing.
-
- Returns: (T, t_a, t_c, v_peak, triangular)
- - T: total time
- - t_a: accel time
- - t_c: constant velocity time (0 for triangular)
- - v_peak: peak velocity reached
- - triangular: True if triangular profile (no cruise), else False
- """
- if distance <= 0 or v_max <= 0 or a_max <= 0:
- return 0.0, 0.0, 0.0, 0.0, True
-
- t_a = v_max / a_max
- s_a = 0.5 * a_max * t_a**2 # distance covered during accel
-
- if 2 * s_a < distance:
- # Trapezoidal: accel, cruise, decel
- s_c = distance - 2 * s_a
- t_c = s_c / v_max
- T = 2 * t_a + t_c
- return T, t_a, t_c, v_max, False
- else:
- # Triangular: peak velocity determined by distance
- v_peak = np.sqrt(a_max * distance)
- t_a = v_peak / a_max
- T = 2 * t_a
- return T, t_a, 0.0, v_peak, True
-
-
-def plan_trapezoid_position_1d(
- start: float,
- end: float,
- v_max: float,
- a_max: float,
- sample_rate: float | None = None,
-) -> np.ndarray:
- """
- Generate 1D position samples following a trapezoidal (or triangular) velocity profile.
- Returns positions of shape (N,), including start and end.
-
- Notes:
- - start and end can be any floats; profile is computed along the line with correct sign.
- """
- sr = CONTROL_RATE_HZ if sample_rate is None else float(sample_rate)
- d = float(end) - float(start)
- sign = 1.0 if d >= 0 else -1.0
- L = abs(d)
-
- if L == 0 or v_max <= 0 or a_max <= 0:
- return np.array([start, end], dtype=float)
-
- T, t_a, t_c, v_peak, triangular = _trapezoid_timings(L, v_max, a_max)
- if T <= 0:
- return np.array([start, end], dtype=float)
-
- n = _samples_for_duration(T, sr)
- t = np.linspace(0.0, T, n)
-
- # Piecewise position function along positive axis (0..L)
- pos = np.zeros_like(t)
- if triangular:
- # accel then decel with peak v_peak
- for i, ti in enumerate(t):
- if ti <= t_a:
- pos[i] = 0.5 * a_max * ti**2
- else:
- # decel phase mirrored
- td = ti - t_a
- pos[i] = (0.5 * a_max * t_a**2) + v_peak * td - 0.5 * a_max * td**2
- else:
- # trapezoid: accel (0..t_a), cruise (t_a..t_a+t_c), decel (t_a+t_c..T)
- for i, ti in enumerate(t):
- if ti <= t_a:
- pos[i] = 0.5 * a_max * ti**2
- elif ti <= (t_a + t_c):
- pos[i] = (0.5 * a_max * t_a**2) + v_max * (ti - t_a)
- else:
- td = ti - (t_a + t_c)
- pos[i] = (
- (0.5 * a_max * t_a**2)
- + v_max * t_c
- + v_peak * td
- - 0.5 * a_max * td**2
- )
-
- # Clamp last sample to exact L to avoid drift
- pos[-1] = L
- # Apply direction and offset
- world_pos = float(start) + sign * pos
- return world_pos.astype(float)
diff --git a/parol6/utils/warmup.py b/parol6/utils/warmup.py
new file mode 100644
index 0000000..7a968a5
--- /dev/null
+++ b/parol6/utils/warmup.py
@@ -0,0 +1,300 @@
+"""
+JIT warmup utilities.
+
+Call warmup_jit() on startup to pre-compile all numba functions before the control loop.
+With cache=True, this is fast (~100ms) if cache exists, slower (~3-10s) on first run.
+"""
+
+import logging
+import time
+
+import numpy as np
+
+from parol6.commands.servo_commands import _max_vel_ratio_jit
+from parol6.config import (
+ deg_to_steps,
+ deg_to_steps_scalar,
+ rad_to_steps,
+ rad_to_steps_scalar,
+ speed_deg_to_steps,
+ speed_deg_to_steps_scalar,
+ speed_rad_to_steps,
+ speed_rad_to_steps_scalar,
+ speed_steps_to_deg,
+ speed_steps_to_deg_scalar,
+ speed_steps_to_rad,
+ speed_steps_to_rad_scalar,
+ steps_to_deg,
+ steps_to_deg_scalar,
+ steps_to_rad,
+ steps_to_rad_scalar,
+)
+from parol6.motion.streaming_executors import (
+ _pose_to_tangent_jit,
+ _tangent_to_pose_jit,
+)
+from parol6.protocol.wire import (
+ _pack_bitfield,
+ _pack_positions,
+ _unpack_bitfield,
+ _unpack_positions,
+ fuse_2_bytes,
+ fuse_3_bytes,
+ pack_tx_frame_into,
+ split_to_3_bytes,
+ unpack_rx_frame_into,
+)
+from parol6.server.ik_worker import (
+ _AXIS_DIRS,
+ _compute_joint_enable,
+ _compute_target_poses,
+)
+from parol6.server.status_cache import unpack_ik_response_into
+from parol6.server.loop_timer import (
+ _compute_event_rate,
+ _compute_loop_stats,
+ _compute_phase_stats,
+ _quickselect,
+ _quickselect_partition,
+)
+from parol6.server.status_cache import _update_arrays
+from parol6.server.transports.mock_serial_transport import (
+ _encode_payload_jit,
+ _simulate_gripper_ramp_jit,
+ _simulate_motion_jit,
+ _write_frame_jit,
+)
+from parol6.utils.ik import _check_limits_core, _ik_safety_check
+from pinokin import warmup_numba_se3
+
+logger = logging.getLogger(__name__)
+
+
+def warmup_jit() -> float:
+ """
+ Pre-compile all numba JIT functions by calling them with dummy data.
+
+ Returns the time taken in seconds.
+ """
+    logger.info("Warming JIT...")
+ start = time.perf_counter()
+
+ # Warm up pinokin's zero-allocation SE3/SO3 functions
+ warmup_numba_se3()
+
+ dummy_6f = np.zeros(6, dtype=np.float64)
+ dummy_6i = np.zeros(6, dtype=np.int32)
+ out_6f = np.zeros(6, dtype=np.float64)
+ out_6i = np.zeros(6, dtype=np.int32)
+
+ # parol6/config.py
+ deg_to_steps(dummy_6f, out_6i)
+ deg_to_steps_scalar(0.0, 0)
+ steps_to_deg(dummy_6i, out_6f)
+ steps_to_deg_scalar(0, 0)
+ rad_to_steps(dummy_6f, out_6i)
+ rad_to_steps_scalar(0.0, 0)
+ steps_to_rad(dummy_6i, out_6f)
+ steps_to_rad_scalar(0, 0)
+ speed_steps_to_deg(dummy_6i, out_6f)
+ speed_steps_to_deg_scalar(0.0, 0)
+ speed_deg_to_steps(dummy_6f, out_6i)
+ speed_deg_to_steps_scalar(0.0, 0)
+ speed_steps_to_rad(dummy_6i, out_6f)
+ speed_steps_to_rad_scalar(0.0, 0)
+ speed_rad_to_steps(dummy_6f, out_6i)
+ speed_rad_to_steps_scalar(0.0, 0)
+
+ # parol6/utils/ik.py
+ _ik_safety_check(dummy_6f, dummy_6f, dummy_6f, dummy_6f, dummy_6f, dummy_6f)
+ viol = np.zeros(6, dtype=np.bool_)
+ _check_limits_core(
+ dummy_6f,
+ dummy_6f,
+ dummy_6f,
+ dummy_6f,
+ True,
+ False,
+ viol,
+ viol,
+ viol,
+ viol,
+ viol,
+ viol,
+ )
+
+ # parol6/protocol/wire.py
+ dummy_pos = np.zeros(6, dtype=np.int32)
+ dummy_bits = np.zeros(8, dtype=np.uint8)
+ dummy_out = np.zeros(20, dtype=np.uint8)
+ _pack_positions(dummy_out, dummy_pos, 0)
+ _unpack_positions(dummy_out, dummy_pos)
+ _pack_bitfield(dummy_bits)
+ _unpack_bitfield(0, dummy_bits)
+ split_to_3_bytes(0)
+ fuse_3_bytes(0, 0, 0)
+ fuse_2_bytes(0, 0)
+
+ # parol6/server/status_cache.py
+ dummy_5u8 = np.zeros(5, dtype=np.uint8)
+ _update_arrays(
+ dummy_6i, # pos_in
+ dummy_5u8, # io_in
+ dummy_6i, # spd_in
+ dummy_6i, # pos_last
+ dummy_6f, # angles_deg
+ dummy_6f, # q_rad_buf
+ dummy_5u8, # io_cached
+ dummy_6i, # spd_cached
+ )
+
+ # Dummy SE3 matrices for jit warmups below
+ dummy_4x4 = np.zeros((4, 4), dtype=np.float64)
+ dummy_4x4_b = np.zeros((4, 4), dtype=np.float64)
+ dummy_4x4_out = np.zeros((4, 4), dtype=np.float64)
+
+ # parol6/server/ik_worker.py
+ dummy_qlim = np.zeros((2, 6), dtype=np.float64)
+ dummy_12u8 = np.zeros(12, dtype=np.uint8)
+ _compute_joint_enable(dummy_6f, dummy_qlim, dummy_12u8)
+ dummy_targets = np.zeros((12, 4, 4), dtype=np.float64)
+ _compute_target_poses(dummy_4x4, 0.001, 0.01, True, _AXIS_DIRS, dummy_targets)
+
+ # parol6/server/ipc - IK response unpacking
+ dummy_ik_buf = np.zeros(44, dtype=np.uint8)
+ dummy_joint_en = np.zeros(12, dtype=np.uint8)
+ dummy_cart_wrf = np.zeros(12, dtype=np.uint8)
+ dummy_cart_trf = np.zeros(12, dtype=np.uint8)
+ unpack_ik_response_into(
+ dummy_ik_buf, 0, dummy_joint_en, dummy_cart_wrf, dummy_cart_trf
+ )
+
+ # parol6/protocol/wire.py - frame packing/unpacking
+ dummy_tx_frame = memoryview(bytearray(64))
+ dummy_gripper_data = np.zeros(6, dtype=np.int32)
+ dummy_8u8_bitfield = np.zeros(8, dtype=np.uint8)
+ pack_tx_frame_into(
+ dummy_tx_frame, # out
+ dummy_6i, # position_out
+ dummy_6i, # speed_out
+ 0, # command_code
+ dummy_8u8_bitfield, # affected_joint_out (8 elements for _pack_bitfield)
+ dummy_8u8_bitfield, # inout_out (8 elements for _pack_bitfield)
+ 0, # timeout_out
+ dummy_gripper_data, # gripper_data_out
+ )
+ dummy_rx_frame = memoryview(bytearray(64))
+ dummy_8u8_homed = np.zeros(8, dtype=np.uint8)
+ dummy_8u8_io = np.zeros(8, dtype=np.uint8)
+ dummy_8u8_temp = np.zeros(8, dtype=np.uint8)
+ dummy_8u8_poserr = np.zeros(8, dtype=np.uint8)
+ dummy_timing_out = np.zeros(1, dtype=np.int32)
+ dummy_grip_out = np.zeros(6, dtype=np.int32)
+ unpack_rx_frame_into(
+ dummy_rx_frame, # data
+ dummy_6i, # pos_out
+ dummy_6i, # spd_out
+ dummy_8u8_homed, # homed_out
+ dummy_8u8_io, # io_out
+ dummy_8u8_temp, # temp_out
+ dummy_8u8_poserr, # poserr_out
+ dummy_timing_out, # timing_out
+ dummy_grip_out, # grip_out
+ )
+
+ # parol6/server/loop_timer.py - stats computation
+ dummy_1000f = np.zeros(1000, dtype=np.float64)
+ dummy_1000f_scratch = np.zeros(1000, dtype=np.float64)
+ # Fill with some data for realistic warmup
+ dummy_1000f[:100] = np.linspace(0.004, 0.006, 100)
+ _quickselect_partition(dummy_1000f_scratch[:10].copy(), 0, 9)
+ _quickselect(dummy_1000f_scratch[:100].copy(), 50)
+ _compute_phase_stats(dummy_1000f, dummy_1000f_scratch, 100)
+ _compute_loop_stats(dummy_1000f, dummy_1000f_scratch, 100)
+ _compute_event_rate(dummy_1000f, 100, 1.0, 1.0)
+
+ # parol6/server/transports/mock_serial_transport.py
+ dummy_pos_f = np.zeros(6, dtype=np.float64)
+ dummy_8u8 = np.zeros(8, dtype=np.uint8)
+ dummy_gripper_6i = np.zeros(6, dtype=np.int32)
+ _simulate_motion_jit(
+ dummy_pos_f, # position_f
+ dummy_6i, # position_in
+ dummy_6i, # speed_in
+ dummy_6i, # speed_out
+ dummy_6i, # position_out
+ dummy_8u8, # homed_in
+ dummy_8u8, # io_in
+ dummy_6f.copy(), # prev_pos_f
+ dummy_6f.copy(), # vmax_f
+ dummy_6f.copy(), # jmin_f
+ dummy_6f.copy(), # jmax_f
+ dummy_6f.copy(), # home_angles_deg
+ 0, # command_out
+ 0.004, # dt
+ 0, # homing_countdown
+ )
+ dummy_gripper_ramp = np.zeros(3, dtype=np.float64)
+ _write_frame_jit(
+ dummy_6i, # state_position_out
+ dummy_6i, # state_speed_out
+ dummy_gripper_6i, # state_gripper_data_in
+ dummy_6i, # position_out
+ dummy_6i, # speed_out
+ dummy_gripper_6i, # gripper_data_out
+ dummy_gripper_ramp, # gripper_ramp
+ )
+ _simulate_gripper_ramp_jit(
+ dummy_gripper_ramp, # gripper_ramp
+ dummy_gripper_6i, # gripper_data_in
+ 0.0, # gripper_pos_f
+ 0.004, # dt
+ 10432.0, # tick_range
+ 40.0, # min_speed
+ 80000.0, # max_speed
+ )
+ dummy_payload = memoryview(bytearray(64))
+ dummy_timing = np.zeros(1, dtype=np.int32)
+ dummy_gripper_in = np.zeros(6, dtype=np.int32)
+ _encode_payload_jit(
+ dummy_payload, # out
+ dummy_6i, # position_in
+ dummy_6i, # speed_in
+ dummy_8u8, # homed_in
+ dummy_8u8, # io_in (8 elements for _pack_bitfield)
+ dummy_8u8, # temp_err_in
+ dummy_8u8, # pos_err_in
+ dummy_timing, # timing_in
+ dummy_gripper_in, # gripper_in
+ )
+
+ # Workspace arrays for jit functions below (SE3 funcs already warmed by pinokin)
+ dummy_twist = np.zeros(6, dtype=np.float64)
+ omega_ws = np.zeros(3, dtype=np.float64)
+ R_ws = np.zeros((3, 3), dtype=np.float64)
+ V_ws = np.zeros((3, 3), dtype=np.float64)
+ V_inv_ws = np.zeros((3, 3), dtype=np.float64)
+
+ # parol6/motion/streaming_executors.py
+ ref_inv = np.zeros((4, 4), dtype=np.float64)
+ delta_4x4 = np.zeros((4, 4), dtype=np.float64)
+ _pose_to_tangent_jit(
+ dummy_4x4,
+ dummy_4x4_b,
+ ref_inv,
+ delta_4x4,
+ dummy_twist,
+ omega_ws,
+ R_ws,
+ V_inv_ws,
+ )
+ _tangent_to_pose_jit(
+ dummy_4x4, dummy_twist, delta_4x4, dummy_4x4_out, omega_ws, R_ws, V_ws
+ )
+
+ # parol6/commands/servo_commands.py
+ _max_vel_ratio_jit(dummy_6f, dummy_6f)
+
+ elapsed = time.perf_counter() - start
+    logger.info("\tJIT warmup completed in %.1fms", elapsed * 1000)
+ return elapsed
diff --git a/pyproject.toml b/pyproject.toml
index 372af52..dd20435 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,38 +4,49 @@ build-backend = "setuptools.build_meta"
[project]
name = "parol6"
-version = "0.1.0"
+version = "0.2.2"
description = "Python library for controlling PAROL6 robot arms"
-requires-python = ">=3.10,<3.12"
+requires-python = ">=3.11"
dependencies = [
- # TEMPORARY SOLUTION: Using custom-built robotics-toolbox-python wheels from forked repository
- # The proper solution requires the upstream maintainers to complete the merge of their "future" branch.
- # Until then, we use platform-specific wheels from: https://github.com/Jepson2k/robotics-toolbox-python/releases/tag/v1.2.2
+ # pinokin: Pinocchio-based FK/IK bindings
+ # https://github.com/Jepson2k/pinokin/releases/tag/v0.1.5
# macOS ARM64 (Apple Silicon)
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp310-cp310-macosx_11_0_arm64.whl ; python_version == '3.10' and platform_system == 'Darwin' and platform_machine == 'arm64'",
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp311-cp311-macosx_11_0_arm64.whl ; python_version == '3.11' and platform_system == 'Darwin' and platform_machine == 'arm64'",
-
- # macOS x86_64 (Intel)
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp310-cp310-macosx_11_0_x86_64.whl ; python_version == '3.10' and platform_system == 'Darwin' and platform_machine == 'x86_64'",
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp311-cp311-macosx_11_0_x86_64.whl ; python_version == '3.11' and platform_system == 'Darwin' and platform_machine == 'x86_64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp311-cp311-macosx_15_0_arm64.whl ; python_version == '3.11' and platform_system == 'Darwin' and platform_machine == 'arm64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp312-cp312-macosx_15_0_arm64.whl ; python_version == '3.12' and platform_system == 'Darwin' and platform_machine == 'arm64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp313-cp313-macosx_15_0_arm64.whl ; python_version == '3.13' and platform_system == 'Darwin' and platform_machine == 'arm64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp314-cp314-macosx_15_0_arm64.whl ; python_version == '3.14' and platform_system == 'Darwin' and platform_machine == 'arm64'",
# Windows AMD64
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp310-cp310-win_amd64.whl ; python_version == '3.10' and platform_system == 'Windows' and platform_machine == 'AMD64'",
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp311-cp311-win_amd64.whl ; python_version == '3.11' and platform_system == 'Windows' and platform_machine == 'AMD64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp311-cp311-win_amd64.whl ; python_version == '3.11' and platform_system == 'Windows' and platform_machine == 'AMD64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp312-cp312-win_amd64.whl ; python_version == '3.12' and platform_system == 'Windows' and platform_machine == 'AMD64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp313-cp313-win_amd64.whl ; python_version == '3.13' and platform_system == 'Windows' and platform_machine == 'AMD64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp314-cp314-win_amd64.whl ; python_version == '3.14' and platform_system == 'Windows' and platform_machine == 'AMD64'",
# Linux x86_64
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl ; python_version == '3.10' and platform_system == 'Linux' and platform_machine == 'x86_64'",
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl ; python_version == '3.11' and platform_system == 'Linux' and platform_machine == 'x86_64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp311-cp311-manylinux_2_39_x86_64.whl ; python_version == '3.11' and platform_system == 'Linux' and platform_machine == 'x86_64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp312-cp312-manylinux_2_39_x86_64.whl ; python_version == '3.12' and platform_system == 'Linux' and platform_machine == 'x86_64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp313-cp313-manylinux_2_39_x86_64.whl ; python_version == '3.13' and platform_system == 'Linux' and platform_machine == 'x86_64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp314-cp314-manylinux_2_39_x86_64.whl ; python_version == '3.14' and platform_system == 'Linux' and platform_machine == 'x86_64'",
# Linux aarch64 (ARM64)
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl ; python_version == '3.10' and platform_system == 'Linux' and platform_machine == 'aarch64'",
- "roboticstoolbox-python @ https://github.com/Jepson2k/robotics-toolbox-python/releases/download/v1.2.2/roboticstoolbox_python-1.2.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl ; python_version == '3.11' and platform_system == 'Linux' and platform_machine == 'aarch64'",
-
- "pyserial",
- "spatialmath-python",
- "scipy==1.11.4"
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp311-cp311-manylinux_2_39_aarch64.whl ; python_version == '3.11' and platform_system == 'Linux' and platform_machine == 'aarch64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp312-cp312-manylinux_2_39_aarch64.whl ; python_version == '3.12' and platform_system == 'Linux' and platform_machine == 'aarch64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp313-cp313-manylinux_2_39_aarch64.whl ; python_version == '3.13' and platform_system == 'Linux' and platform_machine == 'aarch64'",
+ "pinokin @ https://github.com/Jepson2k/pinokin/releases/download/v0.1.5/pinokin-0.1.5-cp314-cp314-manylinux_2_39_aarch64.whl ; python_version == '3.14' and platform_system == 'Linux' and platform_machine == 'aarch64'",
+
+ "pyserial>=3.4",
+ "scipy>=1.11.4",
+ "ruckig>=0.12.2",
+ "toppra>=0.6.3",
+ "interpolatepy>=2.0.0",
+ "numpy>=2.0",
+ "numba>=0.59",
+ "psutil>=5.9",
+ "msgspec>=0.18",
+ "ormsgpack>=1.4.0",
+ "waldoctl @ git+https://github.com/Jepson2k/waldoctl.git@v0.2.0",
]
[tool.setuptools.packages.find]
@@ -44,68 +55,62 @@ include = ["parol6*"]
[project.optional-dependencies]
dev = [
"ruff",
- "mypy",
+ "ty",
"pytest",
"pytest-asyncio",
- "pre-commit"
+ "pytest-timeout",
+ "pre-commit",
+ "trimesh",
+ "fast-simplification",
+ "rtree",
+ "scipy-stubs",
+ "types-pyserial",
]
[project.scripts]
parol6-server = "parol6.cli.server:main_entry"
+[project.entry-points."waldoctl.robots"]
+parol6 = "parol6.robot:Robot"
+
[tool.pytest.ini_options]
-# Limit discovery to the tests/ directory only
testpaths = ["tests"]
-
-# Test discovery patterns
-python_files = ["test_*.py", "*_test.py"]
+python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
-
-# Output configuration
addopts = [
- "-v",
- "--tb=short",
"--strict-markers",
- "--disable-warnings",
- "-p", "no:cacheprovider"
+ "--strict-config",
+ "-ra",
+ "--junitxml=test-results.xml",
]
-
-# Timeout configuration (requires pytest-timeout)
-timeout = 300
+timeout = 45
timeout_method = "thread"
-
-# Logging configuration for tests
-log_cli = true
-log_cli_level = "INFO"
-log_cli_format = "%(asctime)s [%(levelname)8s] %(name)s: %(message)s"
-log_cli_date_format = "%Y-%m-%d %H:%M:%S"
-
-# Pytest markers for different test types
markers = [
"unit: Unit tests that test individual components in isolation",
"integration: Integration tests that test component interactions with FAKE_SERIAL",
- "hardware: Hardware tests that require actual robot hardware and human confirmation",
- "slow: Slow-running tests (typically hardware or complex integration tests)",
"e2e: End-to-end tests that exercise complete workflows",
- "gcode: Tests specifically for GCODE parsing and interpretation functionality"
+ "examples: Standalone example scripts (binds port 5001, run with `pytest -m examples`)",
]
-
-# Filter warnings
+log_cli = false
+log_level = "DEBUG"
filterwarnings = [
"ignore::DeprecationWarning",
"ignore::PendingDeprecationWarning",
- "ignore::UserWarning:roboticstoolbox.*",
- "ignore::UserWarning:spatialmath.*"
]
-[[tool.mypy.overrides]]
-module = ["roboticstoolbox", "roboticstoolbox.*", "spatialmath", "spatialmath.*"]
-ignore_missing_imports = true
-
-[[tool.mypy.overrides]]
-module = ["scipy", "scipy.*", "serial"]
-ignore_missing_imports = true
+[tool.ty]
+# Dynamic Command union (built at import time from tagged structs)
+# generates invalid-type-form in all files that reference it.
+[[tool.ty.overrides]]
+include = [
+ "parol6/protocol/wire.py",
+ "parol6/commands/base.py",
+ "parol6/server/command_registry.py",
+ "parol6/server/command_executor.py",
+]
+[tool.ty.overrides.rules]
+invalid-type-form = "ignore"
[tool.setuptools]
include-package-data = true
@@ -113,8 +118,19 @@ include-package-data = true
[tool.setuptools.package-data]
parol6 = ["urdf_model/**"]
-# ---------------- Mypy configuration ----------------
-[tool.mypy]
-python_version = "3.11"
-files = ["parol6"]
-exclude = "(?x)(^build/|^dist/|^\\.venv/)"
+# Release tooling: `tbump <new-version>` edits pyproject.toml, commits, tags
+# and pushes (main + tag) in one shot. Install with `pip install tbump`.
+[tool.tbump]
+github_url = "https://github.com/Jepson2k/PAROL6-python-API"
+
+[tool.tbump.version]
+current = "0.2.2"
+regex = '''(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'''
+
+[tool.tbump.git]
+message_template = "bump version to {new_version}"
+tag_template = "v{new_version}"
+
+[[tool.tbump.file]]
+src = "pyproject.toml"
+search = 'version = "{current_version}"'
diff --git a/scripts/simplify_meshes.py b/scripts/simplify_meshes.py
new file mode 100644
index 0000000..b7059bc
--- /dev/null
+++ b/scripts/simplify_meshes.py
@@ -0,0 +1,454 @@
+#!/usr/bin/env python3
+"""
+Mesh Simplification Script for PAROL6 Robot STL Files
+
+Uses Open3D's quadric decimation with boundary preservation to maintain
+sharp mechanical edges while aggressively reducing flat surface triangles.
+
+Key insight: CAD meshes have many triangles on flat surfaces (wasteful) but
+sharp edges that define the mechanical appearance (must preserve). Open3D's
+boundary_weight parameter helps preserve these features.
+
+The auto-optimization uses binary search to find the most aggressive reduction
+that keeps Hausdorff distance (surface deviation) below a threshold. This
+correlates well with visual quality - meshes that stay within 1% of the
+bounding box diagonal look nearly identical to the original.
+
+Usage:
+ python scripts/simplify_meshes.py # Auto-optimize all meshes
+ python scripts/simplify_meshes.py --preview # Preview without saving
+ python scripts/simplify_meshes.py --target 0.2 # Keep 20% of triangles (80% reduction)
+ python scripts/simplify_meshes.py --max-hausdorff 0.02 # Allow 2% surface deviation
+"""
+
+from __future__ import annotations
+
+import argparse
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+
+import numpy as np
+
+try:
+ import open3d as o3d
+except ImportError:
+ print("Error: open3d is required. Install with: pip install open3d")
+ raise SystemExit(1)
+
+try:
+ import trimesh
+except ImportError:
+ print("Error: trimesh is required. Install with: pip install trimesh")
+ raise SystemExit(1)
+
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s [%(levelname)s] %(message)s",
+)
+logger = logging.getLogger(__name__)
+
+DEFAULT_MESH_DIR = Path(__file__).parent.parent / "parol6" / "urdf_model" / "meshes"
+
+
+@dataclass
+class QualityMetrics:
+ """Quality metrics comparing original and simplified meshes."""
+
+ original_triangles: int
+ simplified_triangles: int
+ triangle_reduction: float
+ sharp_edges_original: int
+ sharp_edges_simplified: int
+ sharp_edge_preservation: float
+ hausdorff_normalized: float
+ file_size_original: int
+ file_size_simplified: int
+
+ def looks_good(self) -> tuple[bool, str]:
+ """Check if the simplification maintains visual quality.
+
+ Primary metric is Hausdorff distance (surface deviation), which correlates
+ well with visual quality. Sharp edge preservation is secondary - aggressive
+ reduction typically preserves 25-40% of sharp edges while still looking good.
+ """
+ issues = []
+
+ # Primary quality metric - surface deviation
+ if self.hausdorff_normalized > 0.01:
+ issues.append(f"surface deviation: {self.hausdorff_normalized:.1%}")
+
+ # Secondary - sharp edges (lower threshold for aggressive mode)
+ if self.sharp_edge_preservation < 0.20:
+ issues.append(f"sharp edges: {self.sharp_edge_preservation:.0%} preserved")
+
+ if issues:
+ return False, "; ".join(issues)
+ return True, "good quality"
+
+ def __str__(self) -> str:
+ looks_good, reason = self.looks_good()
+ status = "✓ GOOD" if looks_good else "✗ CHECK"
+
+ size_reduction = (1 - self.file_size_simplified / self.file_size_original) * 100
+
+ return (
+ f" Triangles: {self.original_triangles:,} → {self.simplified_triangles:,} "
+ f"({self.triangle_reduction:.0%} reduction)\n"
+ f" Sharp edges: {self.sharp_edges_original:,} → {self.sharp_edges_simplified:,} "
+ f"({self.sharp_edge_preservation:.0%} preserved)\n"
+ f" Hausdorff: {self.hausdorff_normalized:.2%} of bounding box\n"
+ f" File size: {self.file_size_original / 1024:.0f} KB → {self.file_size_simplified / 1024:.0f} KB "
+ f"({size_reduction:.0f}% smaller)\n"
+ f" Quality: {status} - {reason}"
+ )
+
+
+def count_sharp_edges(mesh: trimesh.Trimesh, threshold_deg: float = 60) -> int:
+ """Count edges with dihedral angle above threshold (default 60° for mechanical edges)."""
+ if len(mesh.face_adjacency_angles) == 0:
+ return 0
+ angles_deg = np.degrees(mesh.face_adjacency_angles)
+ return int(np.sum(angles_deg > threshold_deg))
+
+
+def compute_hausdorff(
+ original: trimesh.Trimesh, simplified: trimesh.Trimesh, samples: int = 5000
+) -> float:
+ """Compute normalized Hausdorff distance."""
+ bbox_diag = np.linalg.norm(original.bounding_box.extents)
+ if bbox_diag == 0:
+ return 0.0
+
+ try:
+ pts, _ = trimesh.sample.sample_surface(original, samples)
+ _, distances, _ = trimesh.proximity.closest_point(simplified, pts)
+ return float(distances.max() / bbox_diag)
+ except Exception:
+ return 0.0
+
+
+def simplify_mesh_o3d(
+ mesh_o3d: o3d.geometry.TriangleMesh,
+ target_ratio: float = 0.5,
+ boundary_weight: float = 100.0,
+) -> o3d.geometry.TriangleMesh:
+ """
+ Simplify mesh using Open3D's quadric decimation with boundary preservation.
+
+ Args:
+ mesh_o3d: Open3D triangle mesh
+ target_ratio: Target ratio of triangles to keep (0.5 = keep 50%)
+ boundary_weight: Weight for boundary edge preservation (higher = more preservation)
+
+ Returns:
+ Simplified mesh
+ """
+ target_triangles = max(int(len(mesh_o3d.triangles) * target_ratio), 4)
+
+ simplified = mesh_o3d.simplify_quadric_decimation(
+ target_number_of_triangles=target_triangles,
+ boundary_weight=boundary_weight,
+ )
+ simplified.compute_vertex_normals()
+
+ return simplified
+
+
+def find_optimal_reduction(
+ mesh_o3d: o3d.geometry.TriangleMesh,
+ tm_original: trimesh.Trimesh,
+ max_hausdorff: float = 0.003,
+ min_target: float = 0.20,
+ max_target: float = 0.60,
+) -> tuple[float, o3d.geometry.TriangleMesh]:
+ """
+ Find the most aggressive reduction while maintaining geometric accuracy.
+
+ Uses binary search to find optimal target_ratio. Primary metric is Hausdorff
+ distance (surface deviation) which correlates better with visual quality than
+ sharp edge counts.
+ """
+ best_ratio = max_target
+ best_mesh = None
+
+ low, high = min_target, max_target
+ for _ in range(10): # Binary search iterations
+ mid = (low + high) / 2
+
+ simplified = simplify_mesh_o3d(mesh_o3d, mid)
+ tm_simplified = trimesh.Trimesh(
+ vertices=np.asarray(simplified.vertices),
+ faces=np.asarray(simplified.triangles),
+ )
+
+ hausdorff = compute_hausdorff(tm_original, tm_simplified)
+
+ if hausdorff <= max_hausdorff:
+ # Good enough quality, try more aggressive
+ best_ratio = mid
+ best_mesh = simplified
+ high = mid
+ else:
+ # Too aggressive, back off
+ low = mid
+
+ # If we couldn't meet the threshold, use the max_target (least aggressive)
+ if best_mesh is None:
+ best_mesh = simplify_mesh_o3d(mesh_o3d, max_target)
+ best_ratio = max_target
+
+ return best_ratio, best_mesh
+
+
+def process_stl_file(
+ input_path: Path,
+ output_path: Path | None = None,
+ target_ratio: float | None = None,
+ max_hausdorff: float = 0.01,
+ preview_only: bool = False,
+) -> tuple[Path | None, QualityMetrics | None]:
+ """Process a single STL file."""
+ if output_path is None:
+ output_path = input_path.parent / f"{input_path.stem}_simplified.stl"
+
+ logger.info(f"Processing: {input_path.name}")
+
+ # Load with Open3D
+ mesh_o3d = o3d.io.read_triangle_mesh(str(input_path))
+ mesh_o3d.remove_duplicated_vertices()
+ mesh_o3d.remove_duplicated_triangles()
+ mesh_o3d.compute_vertex_normals()
+
+ original_triangles = len(mesh_o3d.triangles)
+
+ # Also load with trimesh for quality metrics
+ tm_original = trimesh.Trimesh(
+ vertices=np.asarray(mesh_o3d.vertices),
+ faces=np.asarray(mesh_o3d.triangles),
+ )
+ original_sharp = count_sharp_edges(tm_original)
+
+ # Simplify
+ if target_ratio is not None:
+ simplified = simplify_mesh_o3d(mesh_o3d, target_ratio)
+ used_ratio = target_ratio
+ else:
+ used_ratio, simplified = find_optimal_reduction(
+ mesh_o3d, tm_original, max_hausdorff
+ )
+
+ # Convert to trimesh for metrics
+ tm_simplified = trimesh.Trimesh(
+ vertices=np.asarray(simplified.vertices),
+ faces=np.asarray(simplified.triangles),
+ )
+
+ # Compute metrics
+ simplified_sharp = count_sharp_edges(tm_simplified)
+ hausdorff = compute_hausdorff(tm_original, tm_simplified)
+
+ # Ensure normals are computed for STL export
+ simplified.compute_vertex_normals()
+ simplified.compute_triangle_normals()
+
+ # Save to temp file to get size (or actual output)
+ if preview_only:
+ import tempfile
+
+ with tempfile.NamedTemporaryFile(suffix=".stl", delete=False) as f:
+ temp_path = Path(f.name)
+ o3d.io.write_triangle_mesh(str(temp_path), simplified)
+ file_size_simplified = temp_path.stat().st_size
+ temp_path.unlink()
+ else:
+ o3d.io.write_triangle_mesh(str(output_path), simplified)
+ file_size_simplified = output_path.stat().st_size
+
+ metrics = QualityMetrics(
+ original_triangles=original_triangles,
+ simplified_triangles=len(simplified.triangles),
+ triangle_reduction=1 - len(simplified.triangles) / original_triangles,
+ sharp_edges_original=original_sharp,
+ sharp_edges_simplified=simplified_sharp,
+ sharp_edge_preservation=simplified_sharp / original_sharp
+ if original_sharp > 0
+ else 1.0,
+ hausdorff_normalized=hausdorff,
+ file_size_original=input_path.stat().st_size,
+ file_size_simplified=file_size_simplified,
+ )
+
+ logger.info(f"\n{metrics}")
+
+ if preview_only:
+ logger.info(" (preview mode - not saved)")
+ return None, metrics
+
+ logger.info(f" Saved: {output_path.name}")
+ return output_path, metrics
+
+
+def process_directory(
+ directory: Path,
+ target_ratio: float | None = None,
+ max_hausdorff: float = 0.01,
+ skip_existing: bool = False,
+ preview_only: bool = False,
+) -> list[tuple[Path | None, QualityMetrics]]:
+ """Process all STL files in a directory."""
+ stl_files = list(directory.glob("*.STL")) + list(directory.glob("*.stl"))
+ stl_files = [f for f in stl_files if "_simplified" not in f.stem]
+
+ if not stl_files:
+ logger.warning(f"No STL files found in {directory}")
+ return []
+
+ logger.info(f"Found {len(stl_files)} STL files to process\n")
+
+ results = []
+ for stl_file in sorted(stl_files):
+ output_path = stl_file.parent / f"{stl_file.stem}_simplified.stl"
+
+ if skip_existing and output_path.exists():
+ logger.info(f"Skipping {stl_file.name} (exists)\n")
+ continue
+
+ try:
+ result_path, metrics = process_stl_file(
+ stl_file,
+ output_path=output_path,
+ target_ratio=target_ratio,
+ max_hausdorff=max_hausdorff,
+ preview_only=preview_only,
+ )
+ if metrics:
+ results.append((result_path, metrics))
+ logger.info("")
+ except Exception as e:
+ logger.error(f"Failed to process {stl_file.name}: {e}")
+ import traceback
+
+ traceback.print_exc()
+
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Simplify STL meshes while preserving sharp mechanical edges",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog="""
+Examples:
+ %(prog)s # Auto-optimize all meshes (1%% Hausdorff threshold)
+ %(prog)s --preview # Preview without saving
+ %(prog)s --target 0.2 # Keep 20%% of triangles (80%% reduction)
+ %(prog)s --max-hausdorff 0.02 # Allow 2%% surface deviation (more aggressive)
+ %(prog)s -i L5.STL --preview # Preview single file
+
+The script uses Open3D's quadric decimation with boundary preservation,
+which is better suited for CAD meshes than generic decimation algorithms.
+ """,
+ )
+
+ parser.add_argument(
+ "--input",
+ "-i",
+ type=Path,
+ help="Single STL file to process",
+ )
+ parser.add_argument(
+ "--output",
+ "-o",
+ type=Path,
+ help="Output file path (default: <input-stem>_simplified.stl)",
+ )
+ parser.add_argument(
+ "--mesh-dir",
+ "-d",
+ type=Path,
+ default=DEFAULT_MESH_DIR,
+ help=f"Directory containing STL files (default: {DEFAULT_MESH_DIR})",
+ )
+ parser.add_argument(
+ "--target",
+ "-t",
+ type=float,
+ help="Target ratio of triangles to keep (0.0-1.0). Disables auto-optimization.",
+ )
+ parser.add_argument(
+ "--max-hausdorff",
+ "-m",
+ type=float,
+ default=0.003,
+ help="Maximum Hausdorff distance (fraction of bounding box, default: 0.003 = 0.3%%)",
+ )
+ parser.add_argument(
+ "--preview",
+ "-p",
+ action="store_true",
+ help="Preview metrics without saving",
+ )
+ parser.add_argument(
+ "--skip-existing",
+ "-s",
+ action="store_true",
+ help="Skip files that already have simplified versions",
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="store_true",
+ help="Enable verbose output",
+ )
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.getLogger().setLevel(logging.DEBUG)
+
+ if args.input:
+ input_path = args.input
+ if not input_path.is_absolute():
+ if (args.mesh_dir / input_path).exists():
+ input_path = args.mesh_dir / input_path
+ elif not input_path.exists():
+ logger.error(f"File not found: {input_path}")
+ raise SystemExit(1)
+
+ process_stl_file(
+ input_path,
+ output_path=args.output,
+ target_ratio=args.target,
+ max_hausdorff=args.max_hausdorff,
+ preview_only=args.preview,
+ )
+ else:
+ if not args.mesh_dir.exists():
+ logger.error(f"Mesh directory not found: {args.mesh_dir}")
+ raise SystemExit(1)
+
+ results = process_directory(
+ args.mesh_dir,
+ target_ratio=args.target,
+ max_hausdorff=args.max_hausdorff,
+ skip_existing=args.skip_existing,
+ preview_only=args.preview,
+ )
+
+ if results:
+ # Summary
+ good = sum(1 for _, m in results if m.looks_good()[0])
+ logger.info(f"Summary: {good}/{len(results)} meshes passed quality check")
+
+ total_orig = sum(m.file_size_original for _, m in results)
+ total_simp = sum(m.file_size_simplified for _, m in results)
+ reduction = (1 - total_simp / total_orig) * 100
+ logger.info(
+ f"Total: {total_orig / 1024 / 1024:.2f} MB → {total_simp / 1024 / 1024:.2f} MB "
+ f"({reduction:.0f}% reduction)"
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/conftest.py b/tests/conftest.py
index 64a3fc0..d57fa99 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -7,19 +7,11 @@
import logging
import os
-import signal
-import sys
-import time
from collections.abc import Generator
from dataclasses import dataclass
import pytest
-
-# Add the parent directory to Python path so we can import the API modules
-sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
-
-# Import parol6 for server management
-from parol6.client.manager import ServerManager
+from parol6 import Robot
# Import utilities for port detection
@@ -67,12 +59,6 @@ class TestPorts:
def pytest_addoption(parser):
"""Add custom command line options for the test suite."""
- parser.addoption(
- "--run-hardware",
- action="store_true",
- default=False,
- help="Enable hardware tests that require actual robot hardware and human confirmation",
- )
parser.addoption(
"--server-ip",
action="store",
@@ -99,6 +85,25 @@ def pytest_addoption(parser):
default=False,
help="Keep the test server running between test sessions for debugging",
)
+ parser.addoption(
+ "--examples",
+ action="store_true",
+ default=False,
+ help="Run example script tests (binds port 5001, can't run alongside server tests)",
+ )
+
+
+def pytest_collection_modifyitems(config, items):
+ """With --examples: run only example tests. Without: skip them."""
+ if config.getoption("--examples"):
+ deselected = [item for item in items if "examples" not in item.keywords]
+ items[:] = [item for item in items if "examples" in item.keywords]
+ config.hook.pytest_deselected(items=deselected)
+ return
+ skip_examples = pytest.mark.skip(reason="needs --examples to run")
+ for item in items:
+ if "examples" in item.keywords:
+ item.add_marker(skip_examples)
# ============================================================================
@@ -181,134 +186,35 @@ def robot_api_env(ports: TestPorts) -> Generator[dict[str, str], None, None]:
@pytest.fixture(scope="session")
def server_proc(request, ports: TestPorts, robot_api_env):
- """
- Launch parol6 server for integration tests using ServerManager.
+ """Launch parol6 server for integration tests.
Starts the server with FAKE_SERIAL mode and waits for readiness.
Automatically cleans up the server when tests complete.
"""
- import asyncio
- import socket
-
keep_running = request.config.getoption("--keep-server-running")
- # Create server manager
- manager = ServerManager()
-
- async def start_and_wait():
- # Start the controller process
- manager.start_controller(
- no_autohome=True,
- extra_env={
- "PAROL6_FAKE_SERIAL": "1",
- "PAROL6_NOAUTOHOME": "1",
- "PAROL6_CONTROLLER_IP": ports.server_ip,
- "PAROL6_CONTROLLER_PORT": str(ports.server_port),
- },
- )
-
- # Wait for server to be ready with custom ping logic
- timeout = 10.0
- start_time = time.time()
-
- while time.time() - start_time < timeout:
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.settimeout(1.0)
- sock.sendto(b"PING", (ports.server_ip, ports.server_port))
- data, _ = sock.recvfrom(256)
- if data.decode("utf-8").strip().startswith("PONG"):
- return True
- except (TimeoutError, Exception):
- pass
- await asyncio.sleep(0.5)
- return False
-
- # Start server using parol6's ServerManager
- logger.info(f"Starting test server on {ports.server_ip}:{ports.server_port}")
+ robot = Robot(host=ports.server_ip, port=ports.server_port, timeout=60.0)
- ready = asyncio.run(start_and_wait())
- if not ready:
- pytest.fail("Failed to start headless commander server for testing")
+ logger.info(f"Starting test server on {ports.server_ip}:{ports.server_port}")
+ robot.start(
+ extra_env={
+ "PAROL6_FAKE_SERIAL": "1",
+ "PAROL6_NOAUTOHOME": "1",
+ "PAROL6_CONTROLLER_IP": ports.server_ip,
+ "PAROL6_CONTROLLER_PORT": str(ports.server_port),
+ },
+ )
try:
- yield manager
-
+ yield robot
finally:
if not keep_running:
logger.info("Stopping test server")
- manager.stop_controller()
+ robot.stop()
else:
logger.info("Leaving test server running (--keep-server-running)")
-# ============================================================================
-# HARDWARE TEST SUPPORT
-# ============================================================================
-
-
-@pytest.fixture
-def human_prompt(request):
- """
- Provide human confirmation prompts for hardware tests.
-
- Automatically skips tests marked with @pytest.mark.hardware unless
- --run-hardware is specified. For enabled hardware tests, provides
- a utility function to prompt for human confirmation.
- """
- # Check if hardware tests are enabled
- run_hardware = request.config.getoption("--run-hardware")
-
- # Skip hardware tests if not enabled
- if request.node.get_closest_marker("hardware") and not run_hardware:
- pytest.skip("Hardware tests disabled. Use --run-hardware to enable.")
-
- def prompt_user(message: str, timeout: float | None = None) -> bool:
- """
- Prompt user for confirmation during hardware tests.
-
- Args:
- message: Message to display to user
- timeout: Optional timeout in seconds
-
- Returns:
- True if user confirms, False otherwise
- """
- if not run_hardware:
- return False
-
- print(f"\n{'=' * 60}")
- print("HARDWARE TEST CONFIRMATION REQUIRED")
- print(f"{'=' * 60}")
- print(f"{message}")
- print(f"{'=' * 60}")
-
- try:
- if timeout:
-
- def timeout_handler(signum, frame):
- raise TimeoutError("User confirmation timeout")
-
- signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(int(timeout))
-
- response = input("Continue? [y/N]: ").strip().lower()
-
- if timeout:
- signal.alarm(0) # Cancel timeout
-
- return response in ["y", "yes"]
-
- except (KeyboardInterrupt, TimeoutError):
- print("\nUser confirmation cancelled or timed out")
- return False
- except Exception as e:
- print(f"\nError getting user confirmation: {e}")
- return False
-
- return prompt_user
-
-
# ============================================================================
# COMMON TEST UTILITIES
# ============================================================================
@@ -392,33 +298,9 @@ def pytest_configure(config):
"markers",
"integration: Integration tests that test component interactions with FAKE_SERIAL",
)
- config.addinivalue_line(
- "markers",
- "hardware: Hardware tests that require actual robot hardware and human confirmation",
- )
- config.addinivalue_line(
- "markers",
- "slow: Slow-running tests (typically hardware or complex integration tests)",
- )
config.addinivalue_line(
"markers", "e2e: End-to-end tests that exercise complete workflows"
)
- config.addinivalue_line(
- "markers",
- "gcode: Tests specifically for GCODE parsing and interpretation functionality",
- )
-
-
-def pytest_collection_modifyitems(config, items):
- """Modify test collection to add markers and skip conditions."""
- # Skip hardware tests by default unless --run-hardware is specified
- if not config.getoption("--run-hardware"):
- skip_hardware = pytest.mark.skip(
- reason="Hardware tests disabled (use --run-hardware to enable)"
- )
- for item in items:
- if item.get_closest_marker("hardware"):
- item.add_marker(skip_hardware)
def pytest_sessionstart(session):
@@ -427,9 +309,6 @@ def pytest_sessionstart(session):
# Print test configuration info
config = session.config
- logger.info(
- f"Hardware tests: {'enabled' if config.getoption('--run-hardware') else 'disabled'}"
- )
logger.info(f"Server IP: {config.getoption('--server-ip')}")
server_port = config.getoption("--server-port")
diff --git a/tests/hardware/__init__.py b/tests/hardware/__init__.py
deleted file mode 100644
index 81d7dc6..0000000
--- a/tests/hardware/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""
-Hardware tests package.
-
-Contains tests that require actual robot hardware and human confirmation.
-These tests are marked with @pytest.mark.hardware and are only executed
-when the --run-hardware flag is provided.
-"""
diff --git a/tests/hardware/test_manual_moves.py b/tests/hardware/test_manual_moves.py
deleted file mode 100644
index d4ff6fe..0000000
--- a/tests/hardware/test_manual_moves.py
+++ /dev/null
@@ -1,727 +0,0 @@
-"""
-Hardware tests for manual robot movements.
-
-These tests require actual robot hardware and human confirmation.
-They are marked with @pytest.mark.hardware and require the --run-hardware flag.
-
-SAFETY NOTICE: These tests will move the physical robot. Ensure the robot
-workspace is clear and E-stop is within reach before running.
-"""
-
-import os
-import sys
-import time
-
-import pytest
-
-# Add the parent directory to Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
-
-# Removed direct robot_api import - using client fixture from conftest.py
-
-# Define the safe starting joint configuration for all hardware tests
-# This ensures consistency and repeatability for each test
-# Angles: [J1, J2, J3, J4, J5, J6] in degrees
-SAFE_SMOOTH_START_JOINTS = [42.697, -89.381, 144.831, -0.436, 31.528, 180.0]
-
-
-def initialize_hardware_position(client, human_prompt) -> list[float] | None:
- """
- Moves the robot to the predefined safe starting joint angles.
-
- Args:
- client: RobotClient fixture from conftest.py
- human_prompt: Fixture for human confirmation
-
- Returns:
- Robot's Cartesian pose after moving, or None if failed
- """
-
- print(f"Moving to safe starting position: {SAFE_SMOOTH_START_JOINTS}")
-
- # Move to the joint position
- result = client.move_joints(
- SAFE_SMOOTH_START_JOINTS, duration=4, wait_for_ack=True, timeout=15
- )
- print(f"Move command result: {result}")
-
- # Wait until robot stops
- if client.wait_until_stopped(timeout=20):
- print("Robot has reached the starting position.")
- time.sleep(1)
- start_pose = client.get_pose_rpy() # Get [x,y,z,rx,ry,rz] format
- if start_pose:
- print(f"Starting pose: {[round(p, 2) for p in start_pose]}")
- return start_pose
- else:
- print("ERROR: Could not retrieve robot pose after moving.")
- return None
- else:
- print("ERROR: Robot did not stop in time.")
- return None
-
-
-@pytest.mark.hardware
-@pytest.mark.slow
-class TestHardwareBasicMoves:
- """Test basic robot movements with hardware."""
-
- def test_hardware_homing(self, client, human_prompt):
- """Test robot homing sequence."""
- if not human_prompt(
- "Ready to test robot homing?\n"
- "This will home all joints to their reference positions.\n"
- "Ensure robot workspace is completely clear."
- ):
- pytest.skip("User declined homing test")
-
- # Check E-stop status first
- if client.is_estop_pressed():
- pytest.fail("E-stop is pressed. Release E-stop before testing.")
-
- print("Starting homing sequence...")
- result = client.home(wait_for_ack=True, timeout=60)
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- # Wait for homing to complete - use client's built-in wait
- if client.wait_until_stopped(timeout=95, show_progress=True):
- print("Homing completed successfully")
- else:
- pytest.fail("Robot homing did not complete within timeout")
-
- def test_small_joint_movements(self, client, human_prompt):
- """Test small joint movements for safety verification."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test small joint movements?\n"
- "Robot will move each joint individually by small amounts.\n"
- "Watch for smooth, controlled motion."
- ):
- pytest.skip("User declined joint movement test")
-
- # Test small movements on each joint
- for joint_idx in range(6):
- print(f"Testing joint {joint_idx + 1} movement...")
-
- # Small positive movement
- result = client.jog_joint(
- joint_idx, speed_percentage=20, duration=1.0, wait_for_ack=True
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- client.wait_until_stopped(timeout=10)
-
- # Small negative movement (return) - use joint_idx+6 for reverse direction
- result = client.jog_joint(
- joint_idx + 6, # +6 indicates reverse direction
- speed_percentage=20,
- duration=1.0,
- wait_for_ack=True,
- )
-
- client.wait_until_stopped(timeout=10)
-
- print("All joint movements completed successfully")
-
- def test_small_cartesian_movements(self, client, human_prompt):
- """Test small Cartesian movements in different axes."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test small Cartesian movements?\n"
- "Robot will move end-effector in X, Y, Z directions.\n"
- "Movements will be small (10mm) and slow (20% speed)."
- ):
- pytest.skip("User declined Cartesian movement test")
-
- # Test movements in each axis
- axes = ["X+", "X-", "Y+", "Y-", "Z+", "Z-"]
-
- for axis in axes:
- print(f"Testing Cartesian jog in {axis} direction...")
-
- result = client.jog_cartesian(
- frame="WRF",
- axis=axis,
- speed_percentage=20,
- duration=1.0,
- wait_for_ack=True,
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- time.sleep(2.0)
- client.wait_until_stopped(timeout=10)
-
- print("All Cartesian movements completed successfully")
-
-
-@pytest.mark.hardware
-@pytest.mark.slow
-class TestHardwareSmoothMotion:
- """Test smooth motion commands with actual hardware."""
-
- def test_hardware_smooth_circle(self, client, human_prompt):
- """Test smooth circular motion on hardware."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test smooth circular motion?\n"
- "Robot will execute a 30mm radius circle in XY plane.\n"
- "Motion will be slow and controlled (5 second duration).\n"
- "Watch for smooth, continuous motion without stops."
- ):
- pytest.skip("User declined circle test")
-
- # Execute smooth circle
- center_point = [start_pose[0], start_pose[1] + 30, start_pose[2]]
-
- print(f"Executing circle: center={center_point}, radius=30mm")
- result = client.smooth_circle(
- center=center_point,
- radius=30.0,
- plane="XY",
- duration=5.0,
- clockwise=False,
- wait_for_ack=True,
- timeout=15,
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- # Wait for completion
- if client.wait_until_stopped(timeout=20):
- print("Circle motion completed successfully")
-
- if not human_prompt(
- "Did the robot execute a smooth circular motion?\n"
- "Motion should have been continuous without stops or jerks."
- ):
- pytest.fail("User reported motion was not smooth")
- else:
- pytest.fail("Robot did not stop after circle motion timeout")
-
- def test_hardware_smooth_arc(self, client, human_prompt):
- """Test smooth arc motion on hardware."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test smooth arc motion?\n"
- "Robot will execute an arc from current position to a new pose.\n"
- "Motion will be controlled and smooth."
- ):
- pytest.skip("User declined arc test")
-
- # Define arc motion
- end_pose = [
- start_pose[0] + 40,
- start_pose[1] + 20,
- start_pose[2],
- start_pose[3],
- start_pose[4],
- start_pose[5] + 45,
- ]
- center = [start_pose[0] + 20, start_pose[1], start_pose[2]]
-
- print(f"Executing arc: end={end_pose[:3]}, center={center}")
- result = client.smooth_arc_center(
- end_pose=end_pose,
- center=center,
- duration=4.0,
- clockwise=True,
- wait_for_ack=True,
- timeout=12,
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- if client.wait_until_stopped(timeout=17):
- print("Arc motion completed successfully")
-
- if not human_prompt(
- "Did the robot execute a smooth arc motion?\n"
- "Path should have been curved, not straight lines."
- ):
- pytest.fail("User reported arc motion was not smooth")
- else:
- pytest.fail("Robot did not stop after arc motion timeout")
-
- def test_hardware_smooth_spline(self, client, human_prompt):
- """Test smooth spline motion through multiple waypoints."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test smooth spline motion?\n"
- "Robot will move through multiple waypoints with smooth transitions.\n"
- "Motion should be continuous without stops at waypoints."
- ):
- pytest.skip("User declined spline test")
-
- # Define spline waypoints
- waypoints = [
- [
- start_pose[0] + 20,
- start_pose[1] + 10,
- start_pose[2],
- start_pose[3],
- start_pose[4],
- start_pose[5],
- ],
- [
- start_pose[0] + 35,
- start_pose[1] + 25,
- start_pose[2] + 10,
- start_pose[3],
- start_pose[4],
- start_pose[5] + 20,
- ],
- [
- start_pose[0] + 20,
- start_pose[1] + 35,
- start_pose[2],
- start_pose[3],
- start_pose[4],
- start_pose[5] + 40,
- ],
- [
- start_pose[0] + 5,
- start_pose[1] + 20,
- start_pose[2],
- start_pose[3],
- start_pose[4],
- start_pose[5],
- ],
- ]
-
- print(f"Executing spline through {len(waypoints)} waypoints")
- result = client.smooth_spline(
- waypoints=waypoints,
- duration=6.0,
- frame="WRF",
- wait_for_ack=True,
- timeout=20,
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- if client.wait_until_stopped(timeout=25):
- print("Spline motion completed successfully")
-
- if not human_prompt(
- "Did the robot move smoothly through all waypoints?\n"
- "Motion should have been continuous without stops at intermediate points."
- ):
- pytest.fail("User reported spline motion was not smooth")
- else:
- pytest.fail("Robot did not stop after spline motion timeout")
-
-
-@pytest.mark.hardware
-@pytest.mark.slow
-class TestHardwareAdvancedSmooth:
- """Test advanced smooth motion features with hardware."""
-
- def test_hardware_helix_motion(self, client, human_prompt):
- """Test helical motion on hardware."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test helical motion?\n"
- "Robot will execute a helical (screw-like) motion.\n"
- "Motion combines circular movement with vertical progression.\n"
- "Radius: 25mm, Height: 40mm, 3 revolutions."
- ):
- pytest.skip("User declined helix test")
-
- # Execute helix motion
- center = [start_pose[0], start_pose[1] + 25, start_pose[2] - 20]
-
- print(f"Executing helix: center={center}, radius=25mm, height=40mm")
- result = client.smooth_helix(
- center=center,
- radius=25.0,
- pitch=13.0, # 40mm / ~3 revolutions
- height=40.0,
- duration=8.0,
- clockwise=False,
- wait_for_ack=True,
- timeout=20,
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- if client.wait_until_stopped(timeout=25):
- print("Helix motion completed successfully")
-
- if not human_prompt("Did the robot execute a smooth helical motion?\n"):
- pytest.fail("User reported helix motion was incorrect")
- else:
- pytest.fail("Robot did not stop after helix motion timeout")
-
- def test_hardware_reference_frame_comparison(self, client, human_prompt):
- """Test motion in different reference frames (WRF vs TRF)."""
- start_pose = initialize_hardware_position(client, human_prompt)
- if not start_pose:
- pytest.skip("Failed to reach starting position")
-
- if not human_prompt(
- "Ready to test reference frame comparison?\n"
- "Robot will execute similar motions in World frame (WRF) and Tool frame (TRF).\n"
- "You should observe different motion patterns."
- ):
- pytest.skip("User declined reference frame test")
-
- # Test 1: Circle in World Reference Frame
- print("Executing circle in World Reference Frame (WRF)...")
- result_wrf = client.smooth_circle(
- center=[start_pose[0], start_pose[1] + 30, start_pose[2]],
- radius=20,
- duration=4.0,
- frame="WRF",
- plane="XY",
- wait_for_ack=True,
- timeout=12,
- )
-
- assert isinstance(result_wrf, dict)
- assert result_wrf.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- client.wait_until_stopped(timeout=17)
- time.sleep(2)
-
- # Test 2: Circle in Tool Reference Frame
- print("Executing circle in Tool Reference Frame (TRF)...")
- result_trf = client.smooth_circle(
- center=[0, 30, 0], # Relative to tool position
- radius=20,
- duration=4.0,
- frame="TRF",
- plane="XY", # Tool's XY plane
- wait_for_ack=True,
- timeout=12,
- )
-
- assert isinstance(result_trf, dict)
- assert result_trf.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- client.wait_until_stopped(timeout=17)
-
- if not human_prompt(
- "Did you observe different motion patterns?\n"
- "WRF motion should follow world coordinates.\n"
- "TRF motion should follow tool orientation."
- ):
- pytest.fail("User reported motion patterns were not as expected")
-
-
-@pytest.mark.hardware
-@pytest.mark.slow
-class TestHardwareSafety:
- """Test hardware safety features and error conditions."""
-
- def test_joint_limit_safety(self, client, human_prompt):
- """Test joint limit safety (if supported by controller)."""
- if not human_prompt(
- "Ready to test joint limit safety?\n"
- "This will attempt to move a joint near its limit to test safety systems.\n"
- "Controller should prevent unsafe movements.\n"
- "This test is informational only."
- ):
- pytest.skip("User declined joint limit test")
-
- # Try to move to a potentially extreme position (should be rejected or limited)
- extreme_joints = [
- 180.0,
- -180.0,
- 180.0,
- -180.0,
- 180.0,
- -180.0,
- ] # Extreme angles as floats
-
- print("Testing extreme joint angles (should be rejected or limited)...")
- result = client.move_joints(
- extreme_joints,
- speed_percentage=5, # Very slow for safety
- wait_for_ack=True,
- timeout=10,
- )
-
- print(f"Result of extreme joint command: {result}")
-
- # This test is informational - we just want to see how the system responds
- time.sleep(5.0)
-
- # Return to safe position
- initialize_hardware_position(client, human_prompt)
-
-
-@pytest.mark.hardware
-@pytest.mark.slow
-class TestHardwareLegacySequence:
- """Test the exact sequence from the legacy test_script.py for verified safe operation."""
-
- def test_legacy_script_safe_sequence(self, client, human_prompt):
- """
- Reproduce the exact sequence from test_script.py with verified safe waypoints.
-
- This test uses the exact same joint angles, poses, and parameters that were
- manually verified to be safe in the original test script.
- """
- if not human_prompt(
- "Ready to execute the legacy safe test sequence?\n"
- "This will reproduce the exact movements from test_script.py:\n"
- "- Electric gripper calibration and moves (pos 100, then 200)\n"
- "- Pneumatic gripper open/close sequence\n"
- "- Joint moves: [90,-90,160,12,12,180] -> [50,-60,180,-12,32,0] -> back\n"
- "- Pose move: [7,250,200,-100,0,-90]\n"
- "- Cartesian move: [7,250,150,-100,0,-90]\n"
- "These waypoints were verified safe in the original script."
- ):
- pytest.skip("User declined legacy sequence test")
-
- # Check E-stop status first
- if client.is_estop_pressed():
- pytest.fail("E-stop is pressed. Release E-stop before testing.")
-
- print("Starting legacy test sequence with verified safe waypoints...")
-
- # Electric gripper calibration and moves
- print("Calibrating electric gripper...")
- result = client.control_electric_gripper(
- action="calibrate", wait_for_ack=True, timeout=10
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(2)
-
- print("Moving electric gripper to position 100...")
- result = client.control_electric_gripper(
- action="move",
- position=100,
- speed=150,
- current=200,
- wait_for_ack=True,
- timeout=10,
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(2)
-
- print("Moving electric gripper to position 200...")
- result = client.control_electric_gripper(
- action="move",
- position=200,
- speed=150,
- current=200,
- wait_for_ack=True,
- timeout=10,
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(2)
-
- # Get and verify initial status
- print("Getting robot joint angles and pose...")
- angles = client.get_angles()
- pose = client.get_pose_rpy()
- assert angles is not None
- assert pose is not None
- print(f"Initial angles: {angles}")
- print(f"Initial pose: {pose}")
-
- # Pneumatic gripper sequence (exact timing from test_script.py)
- print("Testing pneumatic gripper sequence...")
- client.control_pneumatic_gripper("open", 1)
- time.sleep(0.3)
- client.control_pneumatic_gripper("close", 1)
- time.sleep(0.3)
- client.control_pneumatic_gripper("open", 1)
- time.sleep(0.3)
- client.control_pneumatic_gripper("close", 1)
- time.sleep(0.3)
-
- # Joint movement sequence (exact waypoints and timing from test_script.py)
- print("Moving to first joint position: [90, -90, 160, 12, 12, 180]...")
- result = client.move_joints(
- [90, -90, 160, 12, 12, 180], duration=5.5, wait_for_ack=True, timeout=15
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(6)
-
- print("Moving to second joint position: [50, -60, 180, -12, 32, 0]...")
- result = client.move_joints(
- [50, -60, 180, -12, 32, 0], duration=5.5, wait_for_ack=True, timeout=15
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(6)
-
- print("Moving back to first joint position: [90, -90, 160, 12, 12, 180]...")
- result = client.move_joints(
- [90, -90, 160, 12, 12, 180], duration=5.5, wait_for_ack=True, timeout=15
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(6)
-
- # Pose movement (exact waypoint from test_script.py)
- print("Moving to pose: [7, 250, 200, -100, 0, -90]...")
- result = client.move_pose(
- [7, 250, 200, -100, 0, -90], duration=5.5, wait_for_ack=True, timeout=15
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
- time.sleep(6)
-
- # Cartesian movement (exact waypoint from test_script.py)
- print("Moving cartesian to: [7, 250, 150, -100, 0, -90]...")
- result = client.move_cartesian(
- [7, 250, 150, -100, 0, -90],
- speed_percentage=50,
- wait_for_ack=True,
- timeout=15,
- )
- if isinstance(result, dict):
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- # Final status checks (exact from test_script.py)
- print("Getting final gripper and IO status...")
- gripper_status = client.get_gripper_status()
- io_status = client.get_io()
-
- assert gripper_status is not None
- assert io_status is not None
-
- print(f"Final gripper status: {gripper_status}")
- print(f"Final IO status: {io_status}")
-
- if not human_prompt(
- "Legacy test sequence completed successfully.\n"
- "Did all movements execute safely and as expected?\n"
- "This sequence replicates the verified safe waypoints from the original test_script.py."
- ):
- pytest.fail("User reported legacy sequence did not execute correctly")
-
- print("Legacy safe sequence test completed successfully")
-
-
-@pytest.mark.hardware
-@pytest.mark.slow
-class TestHardwareGripper:
- """Test gripper functionality with hardware."""
-
- def test_pneumatic_gripper(self, client, human_prompt):
- """Test pneumatic gripper operation."""
- if not human_prompt(
- "Ready to test pneumatic gripper?\n"
- "Ensure gripper is connected and air pressure is available.\n"
- "Gripper will open and close on port 1."
- ):
- pytest.skip("User declined gripper test")
-
- # Test gripper open
- print("Opening pneumatic gripper...")
- result = client.control_pneumatic_gripper(
- "open", 1, wait_for_ack=True, timeout=5
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- time.sleep(2.0)
-
- if not human_prompt("Did the gripper open? Confirm before continuing."):
- pytest.fail("User reported gripper did not open")
-
- # Test gripper close
- print("Closing pneumatic gripper...")
- result = client.control_pneumatic_gripper(
- "close", 1, wait_for_ack=True, timeout=5
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- time.sleep(2.0)
-
- if not human_prompt("Did the gripper close? Confirm operation."):
- pytest.fail("User reported gripper did not close")
-
- print("Pneumatic gripper test completed successfully")
-
- def test_electric_gripper(self, client, human_prompt):
- """Test electric gripper operation including calibration."""
- if not human_prompt(
- "Ready to test electric gripper?\n"
- "Ensure electric gripper is connected and powered.\n"
- "Gripper will calibrate, then move to different positions."
- ):
- pytest.skip("User declined electric gripper test")
-
- # Get current gripper status
- gripper_status = client.get_gripper_status()
- if gripper_status:
- print(f"Initial gripper status: {gripper_status}")
-
- # Test gripper calibration (from legacy test_script.py)
- print("Calibrating electric gripper...")
- result = client.control_electric_gripper(
- action="calibrate", wait_for_ack=True, timeout=10
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- time.sleep(2.0)
-
- # Test gripper movement
- print("Moving electric gripper to position 100...")
- result = client.control_electric_gripper(
- "move", position=100, speed=100, current=400, wait_for_ack=True, timeout=10
- )
-
- assert isinstance(result, dict)
- assert result.get("status") in ["COMPLETED", "QUEUED", "EXECUTING"]
-
- time.sleep(3.0)
-
- # Check new position
- new_status = client.get_gripper_status()
- if new_status:
- print(f"Gripper status after move: {new_status}")
-
- if not human_prompt(
- "Did the electric gripper move to the commanded position?\n"
- "Check gripper position and movement quality."
- ):
- pytest.fail("User reported electric gripper did not move correctly")
-
- print("Electric gripper test completed successfully")
-
-
-if __name__ == "__main__":
- pytest.main([__file__])
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
new file mode 100644
index 0000000..ef9003c
--- /dev/null
+++ b/tests/integration/conftest.py
@@ -0,0 +1,20 @@
+"""Integration test fixtures."""
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def clean_state(server_proc, client):
+ """
+ Reset controller state before each integration test for isolation.
+
+ Uses RESET command to instantly reset positions, queues, tool, errors.
+ Sets LINEAR motion profile for faster test execution.
+ Depends on server_proc to ensure server is ready before resetting.
+ """
+ client.reset()
+ client.select_profile("LINEAR")
+ idx = client.home()
+ assert idx >= 0, "Home command failed to send"
+ assert client.wait_command(idx, timeout=5.0), "Home did not complete"
+ return client
diff --git a/tests/integration/test_ack_and_nonblocking.py b/tests/integration/test_ack_and_nonblocking.py
deleted file mode 100644
index 711477d..0000000
--- a/tests/integration/test_ack_and_nonblocking.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Integration tests for acknowledgment and non-blocking behavior with parol6.
-
-Tests wait_for_ack functionality, non-blocking command flows, status polling,
-timeout handling, and command tracking with live server communication.
-"""
-
-import os
-import sys
-
-import pytest
-
-# Add the parent directory to Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
-
-
-@pytest.mark.integration
-class TestAcknowledmentTracking:
- """Test command acknowledgment tracking functionality."""
-
- def test_tracked_command_basic_flow(self, server_proc, client):
- """Test basic acknowledgment flow with tracked commands."""
- # Send a home command (fire-and-forget)
- result = client.home()
- # Should return True on successful send (or OK if FORCE_ACK/system)
- assert result is True
-
- def test_non_blocking_command_returns_id(self, server_proc, client):
- """Test that non-blocking commands return command ID immediately."""
- # Send command
- result = client.move_joints(
- [0, 0, 0, 0, 0, 0],
- duration=1.0,
- )
- # Should return True on successful send
- assert result is True
-
- def test_multiple_tracked_commands(self, server_proc, client):
- """Test tracking multiple commands simultaneously."""
- # Send several commands
- results = []
- for i in range(3):
- result = client.move_joints(
- [i, i, i, i, i, i],
- duration=0.2,
- )
- results.append(result)
- # Each should be True
- assert all(r is True for r in results)
- # Wait for motion to complete instead of sleeping
- assert client.wait_until_stopped(timeout=8.0)
-
- def test_command_status_polling(self, server_proc, client):
- """Test polling command status during execution."""
- # Send a longer running command with valid joint targets (fire-and-forget)
- result = client.move_joints(
- [0, -45, 180, 15, 20, 25], # Valid within limits for sim
- duration=1.0,
- )
- assert result is True
- # Verify server remains responsive without fixed sleep
- assert client.ping() is not None
-
-
-@pytest.mark.integration
-class TestErrorConditions:
- """Test error conditions with acknowledgment tracking."""
-
- def test_invalid_command_with_tracking(self, server_proc, client):
- """Test that invalid commands return proper error acknowledgments."""
- # Try to send invalid command; client enforces timing requirement
- with pytest.raises(RuntimeError):
- _ = client.move_joints([0, 0, 0, 0, 0, 0]) # Missing timing parameters
-
- def test_malformed_parameters_with_tracking(self, server_proc, client):
- """Test acknowledgment for commands with malformed parameters."""
- # Test with out-of-range speed percentage; client sends fire-and-forget
- result = client.move_cartesian(
- pose=[100, 100, 100, 0, 0, 0],
- speed_percentage=200,
- )
- assert result is True
-
-
-@pytest.mark.integration
-class TestBasicCommands:
- """Test basic commands work with the server."""
-
- def test_ping(self, server_proc, client):
- """Test ping functionality."""
- assert client.ping() is not None
-
- def test_get_angles(self, server_proc, client):
- """Test angle retrieval."""
- angles = client.get_angles()
- assert isinstance(angles, list)
- assert len(angles) == 6
-
- def test_get_io(self, server_proc, client):
- """Test IO status retrieval."""
- io_status = client.get_io()
- assert isinstance(io_status, list)
- assert len(io_status) >= 5
-
- def test_stop_command(self, server_proc, client):
- """Test stop command."""
- result = client.stop()
- assert result is True
- # Re-enable controller to avoid leaking disabled state to subsequent tests
- assert client.enable() is True
-
-
-if __name__ == "__main__":
- pytest.main([__file__])
diff --git a/tests/integration/test_blend_lookahead.py b/tests/integration/test_blend_lookahead.py
new file mode 100644
index 0000000..b9c0d9d
--- /dev/null
+++ b/tests/integration/test_blend_lookahead.py
@@ -0,0 +1,212 @@
+"""Integration tests for N-command blend lookahead.
+
+Tests the full pipeline: client → UDP → controller → planner subprocess →
+composite trajectory → segment player → mock serial → final position.
+"""
+
+import pytest
+
+
+@pytest.mark.integration
+class TestJointBlendLookahead:
+ """Joint-space blending with N-command lookahead."""
+
+ def test_three_move_j_blended_reaches_final_target(self, client, server_proc):
+ """Three move_j with blend zones should reach the last target."""
+ targets = [
+ [80, -80, 170, 5, 5, 170],
+ [70, -70, 160, 10, 10, 160],
+ [60, -60, 150, 15, 15, 150],
+ ]
+
+ # r>0 on intermediate commands creates blend zones; r=0 on the last
+ # command terminates the chain and triggers immediate planner flush.
+ assert client.move_j(targets[0], speed=0.5, r=30.0, wait=False) >= 0
+ assert client.move_j(targets[1], speed=0.5, r=30.0, wait=False) >= 0
+ assert client.move_j(targets[2], speed=0.5, r=0.0, wait=True, timeout=15.0) >= 0
+
+ # Verify final position matches last target
+ angles = client.angles()
+ assert angles is not None
+ for i, (actual, expected) in enumerate(zip(angles, targets[-1])):
+ assert abs(actual - expected) < 1.0, (
+ f"J{i}: expected {expected}, got {actual}"
+ )
+
+ def test_move_j_r0_stops_blend_chain(self, client, server_proc):
+ """A move_j with r=0 in the middle should stop the blend chain."""
+ targets = [
+ ([80, -80, 170, 5, 5, 170], 30.0), # blendable
+ ([70, -70, 160, 10, 10, 160], 0.0), # r=0 → hard stop
+ ([60, -60, 150, 15, 15, 150], 30.0), # separate motion
+ ]
+
+ for t, r in targets:
+ assert client.move_j(t, speed=0.5, r=r, wait=False) >= 0
+
+ assert client.wait_motion(timeout=15.0)
+
+ angles = client.angles()
+ assert angles is not None
+ for i, (actual, expected) in enumerate(zip(angles, targets[-1][0])):
+ assert abs(actual - expected) < 1.0, (
+ f"J{i}: expected {expected}, got {actual}"
+ )
+
+ def test_two_move_j_blended(self, client, server_proc):
+ """Two move_j with r>0 should blend (minimum blend chain)."""
+ t1 = [80, -80, 170, 5, 5, 170]
+ t2 = [70, -70, 160, 10, 10, 160]
+
+ assert client.move_j(t1, speed=0.5, r=20.0, wait=False) >= 0
+ assert client.move_j(t2, speed=0.5, r=0.0, wait=False) >= 0
+
+ assert client.wait_motion(timeout=15.0)
+
+ angles = client.angles()
+ assert angles is not None
+ for i, (actual, expected) in enumerate(zip(angles, t2)):
+ assert abs(actual - expected) < 1.0, (
+ f"J{i}: expected {expected}, got {actual}"
+ )
+
+
+@pytest.mark.integration
+class TestCartesianBlendLookahead:
+ """Cartesian (move_l) blending with N-command lookahead."""
+
+ def test_three_move_l_blended_reaches_final_target(self, client, server_proc):
+ """Three move_l with blend zones should reach the last target."""
+ start = client.pose()
+ assert start is not None
+
+ # Small offsets from current pose (guaranteed reachable)
+ targets = [
+ [start[0], start[1] + 15, start[2], start[3], start[4], start[5]],
+ [start[0], start[1] + 30, start[2], start[3], start[4], start[5]],
+ [start[0], start[1] + 45, start[2], start[3], start[4], start[5]],
+ ]
+
+ # r>0 on intermediate commands creates blend zones; r=0 on the last
+ # command terminates the blend chain and triggers immediate flush.
+ assert client.move_l(targets[0], speed=0.5, r=20.0, wait=False) >= 0
+ assert client.move_l(targets[1], speed=0.5, r=20.0, wait=False) >= 0
+ assert client.move_l(targets[2], speed=0.5, r=0.0, wait=True, timeout=15.0) >= 0
+
+ final = client.pose()
+ assert final is not None
+ for i in range(3):
+ assert abs(final[i] - targets[-1][i]) < 2.0, (
+ f"Axis {i}: expected {targets[-1][i]:.1f}, got {final[i]:.1f}"
+ )
+
+ def test_square_with_rounded_corners(self, client, server_proc):
+ """Trace a 20mm square in YZ plane with r=5 rounded corners.
+
+ Path: home → right → down → left → up → right (closed loop + return).
+ Verifies position accuracy and orientation preservation through
+ 4 blended 90-degree direction changes.
+ """
+ start = client.pose()
+ assert start is not None
+
+ side = 20.0
+ r = 5.0
+
+ # Build absolute waypoints for the square (Y=right, Z=up)
+ def offset(dy: float, dz: float) -> list[float]:
+ return [
+ start[0],
+ start[1] + dy,
+ start[2] + dz,
+ start[3],
+ start[4],
+ start[5],
+ ]
+
+ right = offset(side, 0)
+ down_right = offset(side, -side)
+ down_left = offset(0, -side)
+ back_home = offset(0, 0)
+ back_right = offset(side, 0) # same as right — closes the loop
+
+ # 5 move_l commands: 4 corners blended (r=5), last terminates chain (r=0)
+ assert client.move_l(right, speed=0.3, r=r, wait=False) >= 0
+ assert client.move_l(down_right, speed=0.3, r=r, wait=False) >= 0
+ assert client.move_l(down_left, speed=0.3, r=r, wait=False) >= 0
+ assert client.move_l(back_home, speed=0.3, r=r, wait=False) >= 0
+ assert client.move_l(back_right, speed=0.3, r=0.0, wait=False) >= 0
+
+ assert client.wait_motion(timeout=20.0)
+
+ final = client.pose()
+ assert final is not None
+
+ # Position: should match back_right within 2mm
+ for i in range(3):
+ assert abs(final[i] - back_right[i]) < 2.0, (
+ f"Axis {i}: expected {back_right[i]:.1f}, got {final[i]:.1f}"
+ )
+
+ # Orientation: should be unchanged (pure translation moves)
+ for i in range(3, 6):
+ diff = abs((final[i] - start[i] + 180) % 360 - 180)
+ assert diff < 1.0, (
+ f"Orientation axis {i - 3}: drifted {diff:.2f}° (start={start[i]:.1f}, end={final[i]:.1f})"
+ )
+
+ def test_move_l_r0_stops_blend_chain(self, client, server_proc):
+ """A move_l with r=0 in the middle should stop the blend chain."""
+ start = client.pose()
+ assert start is not None
+
+ targets = [
+ ([start[0], start[1] + 15, start[2], start[3], start[4], start[5]], 20.0),
+ ([start[0], start[1] + 30, start[2], start[3], start[4], start[5]], 0.0),
+ ([start[0], start[1] + 45, start[2], start[3], start[4], start[5]], 20.0),
+ ]
+
+ for t, r in targets:
+ assert client.move_l(t, speed=0.5, r=r, wait=False) >= 0
+
+ assert client.wait_motion(timeout=15.0)
+
+ final = client.pose()
+ assert final is not None
+ for i in range(3):
+ assert abs(final[i] - targets[-1][0][i]) < 2.0
+
+
+@pytest.mark.integration
+class TestMixedTypeBlendTermination:
+ """Blend chain should stop at type boundaries."""
+
+ def test_move_j_then_move_l_executes_separately(self, client, server_proc):
+ """move_j(r>0) followed by move_l should not blend across types."""
+ # Small joint move with blend radius
+ assert (
+ client.move_j(
+ [85, -85, 175, 2, 2, 175],
+ speed=0.5,
+ r=20.0,
+ wait=False,
+ )
+ >= 0
+ )
+
+ # Wait for joint move, then get the pose for a reachable Cartesian target
+ assert client.wait_motion(timeout=10.0)
+ mid_pose = client.pose()
+ assert mid_pose is not None
+
+ # Small Cartesian offset from current position
+ final_target = list(mid_pose)
+ final_target[1] += 5 # 5mm Y offset — very small, always reachable
+ assert client.move_l(final_target, speed=0.5, r=0.0) >= 0
+
+ final = client.pose()
+ assert final is not None
+ for i in range(3):
+ assert abs(final[i] - final_target[i]) < 2.0, (
+ f"Axis {i}: expected {final_target[i]:.1f}, got {final[i]:.1f}"
+ )
diff --git a/tests/integration/test_curved_commands_e2e.py b/tests/integration/test_curved_commands_e2e.py
new file mode 100644
index 0000000..0604906
--- /dev/null
+++ b/tests/integration/test_curved_commands_e2e.py
@@ -0,0 +1,177 @@
+"""
+Integration tests for curved motion commands (move_c, move_s, move_p).
+
+Tests command acceptance, completion, and frame handling in FAKE_SERIAL mode.
+"""
+
+import numpy as np
+import pytest
+
+from parol6.motion.geometry import compute_circle_from_3_points
+
+
+@pytest.mark.integration
+class TestCurvedMotionCommands:
+ """Integration tests for move_c, move_s, move_p."""
+
+ @pytest.fixture
+ def homed_robot(self, client, server_proc, robot_api_env):
+ """Ensure robot is homed before tests."""
+ client.select_profile("TOPPRA")
+ assert client.home() >= 0
+ assert client.wait_motion(timeout=15.0)
+
+ @pytest.fixture
+ def home_pose(self, client, homed_robot) -> list[float]:
+ """Return the TCP pose at home [x, y, z, rx, ry, rz] (mm, degrees)."""
+ pose = client.pose()
+ assert pose is not None and len(pose) == 6
+ return pose
+
+ def _offset(self, pose: list[float], dx=0, dy=0, dz=0, drz=0) -> list[float]:
+ """Small position offset from a base pose, preserving orientation."""
+ return [
+ pose[0] + dx,
+ pose[1] + dy,
+ pose[2] + dz,
+ pose[3],
+ pose[4],
+ pose[5] + drz,
+ ]
+
+ def test_move_c_basic(self, client, server_proc, robot_api_env, home_pose):
+ """Test circular arc through current -> via -> end."""
+ result = client.move_c(
+ via=self._offset(home_pose, dx=10, dy=10, dz=-5),
+ end=self._offset(home_pose, dx=20),
+ duration=2.0,
+ frame="WRF",
+ )
+ assert result >= 0
+ assert client.wait_motion(timeout=9.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+ def test_move_c_with_orientation(
+ self, client, server_proc, robot_api_env, home_pose
+ ):
+ """Test arc with small orientation change."""
+ result = client.move_c(
+ via=self._offset(home_pose, dx=10, dy=10, dz=-5, drz=10),
+ end=self._offset(home_pose, dx=20, drz=20),
+ duration=2.0,
+ frame="WRF",
+ )
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+ def test_move_c_trf_accepted(self, client, server_proc, robot_api_env, homed_robot):
+ """Test that move_c with frame=TRF is accepted and completes."""
+ result = client.move_c(
+ via=[10, 5, 0, 0, 0, 0],
+ end=[20, 0, 0, 0, 0, 0],
+ duration=2.0,
+ frame="TRF",
+ )
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+ def test_move_s_basic(self, client, server_proc, robot_api_env, home_pose):
+ """Test spline motion through waypoints."""
+ waypoints = [
+ self._offset(home_pose),
+ self._offset(home_pose, dx=20, dy=8, dz=5),
+ self._offset(home_pose),
+ ]
+ result = client.move_s(waypoints=waypoints, duration=3.0, frame="WRF")
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+ def test_move_s_trf_accepted(self, client, server_proc, robot_api_env, homed_robot):
+ """Test that move_s with frame=TRF is accepted and completes."""
+ waypoints = [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [10.0, 5.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ]
+ result = client.move_s(waypoints=waypoints, duration=3.0, frame="TRF")
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+ def test_move_p_basic(self, client, server_proc, robot_api_env, home_pose):
+ """Test process move through waypoints with constant TCP speed."""
+ waypoints = [
+ self._offset(home_pose, dz=-5),
+ self._offset(home_pose, dx=10, dy=5, dz=-5),
+ self._offset(home_pose, dz=-5),
+ ]
+ result = client.move_p(waypoints=waypoints, speed=0.3, frame="WRF")
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+ def test_move_p_trf_accepted(self, client, server_proc, robot_api_env, homed_robot):
+ """Test that move_p with frame=TRF is accepted and completes."""
+ waypoints = [
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ [10.0, 5.0, 0.0, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ ]
+ result = client.move_p(waypoints=waypoints, speed=0.3, frame="TRF")
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+ assert client.is_robot_stopped(threshold_speed=5.0)
+
+
+class TestComputeCircleFrom3Points:
+ """Unit tests for compute_circle_from_3_points geometry."""
+
+ def test_known_circle(self):
+ """Three points on a known circle produce correct center and radius."""
+ # Points on circle centered at (0,0,0) with radius 10 in XY plane
+ p1 = np.array([10.0, 0.0, 0.0])
+ p2 = np.array([0.0, 10.0, 0.0])
+ p3 = np.array([-10.0, 0.0, 0.0])
+
+ center, radius, normal = compute_circle_from_3_points(p1, p2, p3)
+
+ np.testing.assert_allclose(center, [0, 0, 0], atol=1e-10)
+ assert abs(radius - 10.0) < 1e-10
+ np.testing.assert_allclose(abs(normal), [0, 0, 1], atol=1e-10)
+
+ def test_collinear_raises(self):
+ """Collinear points raise ValueError."""
+ p1 = np.array([0.0, 0.0, 0.0])
+ p2 = np.array([1.0, 0.0, 0.0])
+ p3 = np.array([2.0, 0.0, 0.0])
+
+ with pytest.raises(ValueError, match="collinear"):
+ compute_circle_from_3_points(p1, p2, p3)
+
+ def test_coincident_raises(self):
+ """Coincident points raise ValueError."""
+ p = np.array([5.0, 5.0, 5.0])
+
+ with pytest.raises(ValueError):
+ compute_circle_from_3_points(p, p, p)
+
+ def test_3d_plane(self):
+ """Points in a tilted 3D plane produce correct radius."""
+ # Circle of radius 5 in a tilted plane
+ p1 = np.array([5.0, 0.0, 0.0])
+ p2 = np.array([0.0, 5.0, 0.0])
+ p3 = np.array([0.0, 0.0, 5.0])
+
+ center, radius, normal = compute_circle_from_3_points(p1, p2, p3)
+
+ # All points should be equidistant from center
+ assert abs(np.linalg.norm(p1 - center) - radius) < 1e-10
+ assert abs(np.linalg.norm(p2 - center) - radius) < 1e-10
+ assert abs(np.linalg.norm(p3 - center) - radius) < 1e-10
+
+
+if __name__ == "__main__":
+ pytest.main([__file__])
diff --git a/tests/integration/test_jog_speed_extremes.py b/tests/integration/test_jog_speed_extremes.py
new file mode 100644
index 0000000..332b857
--- /dev/null
+++ b/tests/integration/test_jog_speed_extremes.py
@@ -0,0 +1,212 @@
+"""
+Integration tests for jog commands at speed extremes.
+
+These tests verify that jog commands actually move the robot at both
+the slowest (1%) and fastest (100%) speed settings. This catches issues
+where tolerance settings (e.g., IK tolerance) might prevent movement
+at certain speeds.
+"""
+
+import pytest
+
+from parol6 import RobotClient
+
+
+@pytest.mark.integration
+class TestJogSpeedExtremes:
+ """Test jog commands at minimum and maximum speeds."""
+
+ def test_jog_joint_slowest_speed_moves_robot(
+ self, client: RobotClient, server_proc
+ ):
+ """
+ Jog at slowest speed (1%) should still move the robot.
+
+ This test ensures that even at minimum speed percentage, the robot
+ actually moves. This catches issues where tolerance settings might
+ prevent movement at low speeds.
+ """
+ # Get initial joint angles
+ initial_angles = client.angles()
+ assert initial_angles is not None, "Failed to get initial angles"
+ assert len(initial_angles) == 6, "Expected 6 joint angles"
+
+ # Jog J1 at slowest speed (1%) for a short duration
+ result = client.jog_j(
+ joint=0,
+ speed=0.01, # Slowest speed
+ duration=0.5,
+ )
+ assert result > 0, "Jog command failed to send"
+
+ # Wait for motion to complete plus some settling time
+ client.wait_motion(timeout=10, settle_window=1)
+
+ # Get final joint angles
+ final_angles = client.angles()
+ assert final_angles is not None, "Failed to get final angles"
+
+ # Verify J1 actually moved
+ angle_change = abs(final_angles[0] - initial_angles[0])
+ assert angle_change > 0.001, (
+ f"Expected J1 to move at slowest speed (1%), but angle changed by only "
+ f"{angle_change:.4f} degrees (initial={initial_angles[0]:.4f}, "
+ f"final={final_angles[0]:.4f})"
+ )
+
+ def test_jog_joint_fastest_speed_moves_robot(
+ self, client: RobotClient, server_proc
+ ):
+ """
+ Jog at fastest speed (100%) should move the robot.
+
+ This test ensures that at maximum speed percentage, the robot
+ moves as expected and produces more movement than the slow test.
+ """
+ # Get initial joint angles
+ initial_angles = client.angles()
+ assert initial_angles is not None, "Failed to get initial angles"
+ assert len(initial_angles) == 6, "Expected 6 joint angles"
+
+ # Jog J1 at fastest speed (100%) for a short duration
+ result = client.jog_j(
+ joint=0,
+ speed=1.0, # Fastest speed
+ duration=0.5,
+ )
+ assert result > 0, "Jog command failed to send"
+
+ # Wait for motion to complete plus some settling time
+ client.wait_motion(timeout=10)
+
+ # Get final joint angles
+ final_angles = client.angles()
+ assert final_angles is not None, "Failed to get final angles"
+
+ # Verify J1 actually moved
+ angle_change = abs(final_angles[0] - initial_angles[0])
+ assert angle_change > 0.1, (
+ f"Expected J1 to move significantly at fastest speed (100%), but angle "
+ f"changed by only {angle_change:.4f} degrees (initial={initial_angles[0]:.4f}, "
+ f"final={final_angles[0]:.4f})"
+ )
+
+ def test_jog_faster_speed_moves_more(self, client: RobotClient, server_proc):
+ """
+ Verify that faster speed produces more movement in the same duration.
+
+ This test compares movement at slow (10%) vs fast (90%) speeds to
+ confirm the speed parameter actually affects motion rate.
+ """
+ # First, jog at slow speed and measure movement
+ initial_angles_slow = client.angles()
+ assert initial_angles_slow is not None
+
+ result = client.jog_j(joint=1, speed=0.1, duration=1.0)
+ assert result > 0
+ client.wait_motion(timeout=10)
+
+ final_angles_slow = client.angles()
+ assert final_angles_slow is not None
+ slow_movement = abs(final_angles_slow[1] - initial_angles_slow[1])
+
+ # Now jog at fast speed
+ initial_angles_fast = client.angles()
+ assert initial_angles_fast is not None
+
+ result = client.jog_j(joint=1, speed=0.9, duration=1.0)
+ assert result > 0
+ client.wait_motion(timeout=10)
+
+ final_angles_fast = client.angles()
+ assert final_angles_fast is not None
+ fast_movement = abs(final_angles_fast[1] - initial_angles_fast[1])
+
+ # Fast movement should be significantly more than slow movement
+ # Using a ratio check rather than absolute values for robustness
+ assert fast_movement > slow_movement * 2, (
+ f"Expected fast speed (90%) to move at least 2x more than slow speed (10%), "
+ f"but slow={slow_movement:.4f}° and fast={fast_movement:.4f}°"
+ )
+
+
+@pytest.mark.integration
+class TestCartesianJogSpeedExtremes:
+ """Test Cartesian jog commands at minimum and maximum speeds."""
+
+ def test_cart_jog_slowest_speed_moves_robot(self, client: RobotClient, server_proc):
+ """
+        Cartesian jog at slowest speed (2%) should still move the robot.
+
+ This test ensures that even at minimum speed percentage, Cartesian
+ jogging actually moves the robot. This catches issues where IK
+ tolerance settings might prevent movement at low speeds.
+ """
+ # Get initial pose
+ initial_pose = client.pose()
+ assert initial_pose is not None, "Failed to get initial pose"
+ assert len(initial_pose) == 6, "Expected 6-element pose [x,y,z,rx,ry,rz]"
+
+ # Cartesian jog in +Y direction at slowest speed
+ result = client.jog_l(
+ frame="WRF",
+ axis="Y",
+ speed=0.02,
+ duration=1,
+ )
+ assert result > 0, "Cartesian jog command failed to send"
+
+ # Wait for motion to complete plus some settling time
+ client.wait_motion(timeout=10)
+
+ # Get final pose
+ final_pose = client.pose()
+ assert final_pose is not None, "Failed to get final pose"
+
+ # Verify position actually changed (check Y coordinate)
+ position_change = abs(final_pose[1] - initial_pose[1])
+ assert position_change > 0.001, (
+ f"Expected Y position to change at slowest cart jog speed (1%), but "
+ f"changed by only {position_change:.4f} mm (initial={initial_pose[1]:.4f}, "
+ f"final={final_pose[1]:.4f})"
+ )
+
+ def test_cart_jog_fastest_speed_moves_robot(self, client: RobotClient, server_proc):
+ """
+ Cartesian jog at fastest speed (100%) should move the robot significantly.
+
+ This test ensures that at maximum speed percentage, Cartesian jogging
+ produces significant movement.
+ """
+ # Get initial pose
+ initial_pose = client.pose()
+ assert initial_pose is not None, "Failed to get initial pose"
+ assert len(initial_pose) == 6, "Expected 6-element pose [x,y,z,rx,ry,rz]"
+
+ # Cartesian jog in +X direction at fastest speed
+ result = client.jog_l(
+ frame="WRF",
+ axis="X",
+ speed=1.0,
+ duration=2.0,
+ )
+ assert result > 0, "Cartesian jog command failed to send"
+
+ # Wait for motion to complete plus some settling time
+ client.wait_motion(timeout=10)
+
+ # Get final pose
+ final_pose = client.pose()
+ assert final_pose is not None, "Failed to get final pose"
+
+ # Verify position actually changed significantly
+ position_change = abs(final_pose[0] - initial_pose[0])
+ assert position_change > 0.5, (
+ f"Expected significant X position change at fastest cart jog speed (100%), "
+ f"but changed by only {position_change:.4f} mm (initial={initial_pose[0]:.4f}, "
+ f"final={final_pose[0]:.4f})"
+ )
+
+
+if __name__ == "__main__":
+ pytest.main([__file__, "-v"])
diff --git a/tests/integration/test_movecart_accuracy.py b/tests/integration/test_movecart_accuracy.py
index 301a6ad..4d2736a 100644
--- a/tests/integration/test_movecart_accuracy.py
+++ b/tests/integration/test_movecart_accuracy.py
@@ -1,61 +1,49 @@
"""
-Integration test for MoveCart pose accuracy.
-Verifies that movecart commands reach the correct final pose.
+Integration test for MoveL pose accuracy.
+Verifies that move_l commands reach the correct final pose.
"""
-import os
-import sys
-
import numpy as np
import pytest
-# Skip on macOS CI runners due to flakiness
-pytestmark = pytest.mark.skipif(
- sys.platform == "darwin" and os.getenv("CI") == "true",
- reason="Flaky on the slow macOS GitHub Actions runners.; skip on CI",
-)
-
-# Add the parent directory to Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
-
@pytest.mark.integration
-class TestMoveCartAccuracy:
- """Test that MoveCart commands reach correct final poses."""
+class TestMoveLAccuracy:
+ """Test that MoveL commands reach correct final poses."""
- def test_movecart_from_home(self, client, server_proc):
- """Test MoveCart accuracy starting from home position."""
+ def test_move_l_from_home(self, client, server_proc):
+ """Test move_l accuracy starting from home position."""
# Ensure controller is enabled before motion
- assert client.enable() is True
+ assert client.resume() > 0
# Home the robot first
- assert client.home() is True
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.home() >= 0
+ assert client.wait_motion(timeout=15.0)
# Get home pose for reference
- home_pose = client.get_pose_rpy()
+ home_pose = client.pose()
print(f"\nHome pose (mm, deg): {home_pose}")
# This is in mm for position, degrees for orientation
target = [0.000, 263, 242, 90, 0, 90]
# Execute movecart
- result = client.move_cartesian(target, speed_percentage=50)
- assert result is True
+ result = client.move_l(target, speed=0.5)
+ assert result >= 0
# Wait for completion
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.wait_motion(timeout=15.0)
# Get final pose
- final_pose = client.get_pose_rpy()
+ final_pose = client.pose()
print(f"Target pose (mm, deg): {target}")
print(f"Final pose (mm, deg): {final_pose}")
# Verify pose accuracy
- # Position tolerance: 1mm
+ # Position tolerance: 1.5mm (allows for minor FP drift across platforms)
pos_error = np.linalg.norm(np.array(final_pose[:3]) - np.array(target[:3]))
print(f"Position error: {pos_error:.3f} mm")
- assert pos_error < 1.0, (
- f"Position error {pos_error:.3f}mm exceeds 1mm tolerance"
+ assert pos_error < 1.5, (
+ f"Position error {pos_error:.3f}mm exceeds 1.5mm tolerance"
)
# Orientation tolerance: 1 degree per axis
@@ -74,13 +62,13 @@ def angle_diff(a, b):
print("✓ MoveCart pose accuracy test passed!")
- def test_movecart_multiple_targets(self, client, server_proc):
- """Test MoveCart accuracy with multiple sequential targets."""
+ def test_move_l_multiple_targets(self, client, server_proc):
+ """Test move_l accuracy with multiple sequential targets."""
# Ensure controller is enabled before motion
- assert client.enable() is True
+ assert client.resume() > 0
# Home first
- assert client.home() is True
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.home() >= 0
+ assert client.wait_motion(timeout=15.0)
# Define multiple targets to test
targets = [
@@ -94,21 +82,21 @@ def test_movecart_multiple_targets(self, client, server_proc):
print(f"Moving to: {target}")
# Execute movecart
- result = client.move_cartesian(target, speed_percentage=30)
- assert result is True
+ result = client.move_l(target, speed=0.3)
+ assert result >= 0
# Wait for completion
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.wait_motion(timeout=15.0)
# Get final pose
- final_pose = client.get_pose_rpy()
+ final_pose = client.pose()
print(f"Achieved: {final_pose}")
- # Verify position accuracy (1mm tolerance)
+ # Verify position accuracy (1.5mm tolerance)
pos_error = np.linalg.norm(np.array(final_pose[:3]) - np.array(target[:3]))
print(f"Position error: {pos_error:.3f} mm")
- assert pos_error < 1.0, (
- f"Target {idx + 1}: Position error {pos_error:.3f}mm exceeds 1mm"
+ assert pos_error < 1.5, (
+ f"Target {idx + 1}: Position error {pos_error:.3f}mm exceeds 1.5mm"
)
# Verify orientation accuracy (1° tolerance per axis)
diff --git a/tests/integration/test_movecart_idempotence.py b/tests/integration/test_movecart_idempotence.py
index dd608c3..b204409 100644
--- a/tests/integration/test_movecart_idempotence.py
+++ b/tests/integration/test_movecart_idempotence.py
@@ -1,42 +1,37 @@
"""
-Integration test for MoveCart idempotence.
+Integration test for MoveL idempotence.
Verifies that moving to the current pose results in no motion (angular distance ≈ 0).
"""
-import os
-import sys
-
import numpy as np
import pytest
-# Add the parent directory to Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
-
@pytest.mark.integration
-class TestMoveCartIdempotence:
- """Test that MoveCart to current pose results in zero movement."""
+class TestMoveLIdempotence:
+ """Test that MoveL to current pose results in zero movement."""
- def test_movecart_to_current_pose_no_rotation(self, client, server_proc):
+ def test_move_l_to_current_pose_no_rotation(self, client, server_proc):
"""Test that moving to the current pose results in no rotation."""
# Home the robot first
- assert client.home() is True
- assert client.wait_until_stopped(timeout=15.0)
+ idx = client.home()
+ assert idx >= 0
+ assert client.wait_command(idx, timeout=15.0)
# Get current pose (should be home position)
- current_pose = client.get_pose_rpy()
+ current_pose = client.pose()
print(f"\nCurrent pose at home (mm, deg): {current_pose}")
# Move to the exact same pose - should result in zero angular distance
# and effectively be a no-op
- result = client.move_cartesian(current_pose, speed_percentage=50)
- assert result is True
+ result = client.move_l(current_pose, speed=0.5)
+ assert result >= 0
# Wait for completion (should be instant if duration is ~0)
- assert client.wait_until_stopped(timeout=10.0)
+ assert client.wait_motion(timeout=10.0)
# Get final pose
- final_pose = client.get_pose_rpy()
+ final_pose = client.pose()
print(f"Final pose after 'move to same' (mm, deg): {final_pose}")
# Verify pose hasn't changed significantly
diff --git a/tests/integration/test_multicast_reception.py b/tests/integration/test_multicast_reception.py
new file mode 100644
index 0000000..0522ab6
--- /dev/null
+++ b/tests/integration/test_multicast_reception.py
@@ -0,0 +1,33 @@
+"""Test multicast status reception from live server."""
+
+import asyncio
+
+import pytest
+
+from parol6 import AsyncRobotClient
+
+
+@pytest.mark.asyncio
+@pytest.mark.integration
+async def test_multicast_status_reception(server_proc, ports):
+ """Verify that multicast status is actually received from a running server."""
+ received_count = 0
+
+ async with AsyncRobotClient(port=ports.server_port) as client:
+
+ async def receive_status():
+ nonlocal received_count
+ async for _ in client.stream_status_shared():
+ received_count += 1
+ if received_count >= 3:
+ return
+
+ # Should receive at least 3 status messages within 5 seconds
+ try:
+ await asyncio.wait_for(receive_status(), timeout=5.0)
+ except asyncio.TimeoutError:
+ pass
+
+ assert received_count >= 3, (
+ f"Expected at least 3 status messages, got {received_count}"
+ )
diff --git a/tests/integration/test_profile_commands.py b/tests/integration/test_profile_commands.py
new file mode 100644
index 0000000..b4b51fa
--- /dev/null
+++ b/tests/integration/test_profile_commands.py
@@ -0,0 +1,332 @@
+"""
+Integration tests for motion profile commands.
+
+Tests SETPROFILE/GETPROFILE through the client API with a running server.
+"""
+
+import threading
+import time
+
+import numpy as np
+import pytest
+
+
+@pytest.mark.integration
+class TestProfileCommands:
+ """Test motion profile get/set commands."""
+
+ def test_profile_returns_default(self, client, server_proc):
+ """Test GETPROFILE returns default profile (TOPPRA) after reset."""
+ client.reset()
+ result = client.profile()
+ assert result is not None
+ assert result == "TOPPRA"
+
+ def test_select_and_get_profile_roundtrip(self, client, server_proc):
+ """Test setting a profile and getting it back."""
+ for p in ["LINEAR", "QUINTIC", "TRAPEZOID", "RUCKIG", "TOPPRA"]:
+ assert client.select_profile(p) > 0
+ assert client.profile() == p
+
+ def test_select_profile_case_insensitive(self, client, server_proc):
+ """Test that profile names are case-insensitive."""
+ assert client.select_profile("linear") > 0
+ assert client.profile() == "LINEAR"
+
+ assert client.select_profile("Quintic") > 0
+ assert client.profile() == "QUINTIC"
+
+
+@pytest.mark.integration
+class TestProfileMotionBehavior:
+ """Test that different profiles produce correct motion behavior."""
+
+ def test_joint_move_reaches_target_all_profiles(self, client, server_proc):
+ """Test that joint moves reach target position with all profiles."""
+ target_angles = [10, -50, 190, 5, 10, 15]
+
+ for p in ["LINEAR", "QUINTIC", "TRAPEZOID", "RUCKIG", "TOPPRA"]:
+ # Reset to home first
+ client.home(wait=True)
+
+ # Set profile and execute move
+ assert client.select_profile(p) > 0
+ result = client.move_j(target_angles, duration=2.0)
+ assert result >= 0
+ assert client.wait_motion(timeout=10.0)
+
+ # Verify we reached target (within tolerance)
+ angles = client.angles()
+ assert angles is not None
+ for i, (actual, target) in enumerate(zip(angles, target_angles)):
+ assert abs(actual - target) < 1.0, (
+ f"Profile {p}: Joint {i} off target "
+ f"(expected {target}, got {actual})"
+ )
+
+ def test_cartesian_move_reaches_target_all_profiles(self, client, server_proc):
+ """Test that Cartesian moves reach target position with all profiles.
+
+ Note: RUCKIG automatically falls back to TOPPRA for Cartesian moves.
+ """
+ # Start from home
+ client.home(wait=True)
+ start_pose = client.pose()
+ assert start_pose is not None
+
+ # Target pose (small offset from start)
+ target_pose = [
+ start_pose[0],
+ start_pose[1] + 20, # Y + 20mm
+ start_pose[2],
+ start_pose[3],
+ start_pose[4],
+ start_pose[5],
+ ]
+
+ # All profiles should work for Cartesian moves
+ # RUCKIG falls back to TOPPRA automatically
+ for p in ["LINEAR", "QUINTIC", "TRAPEZOID", "RUCKIG", "TOPPRA"]:
+ # Reset to home first
+ client.home(wait=True)
+
+ # Set profile and execute move
+ assert client.select_profile(p) > 0
+ result = client.move_l(target_pose, duration=2.0)
+ assert result >= 0
+ assert client.wait_motion(timeout=10.0)
+
+ # Verify position reached (within tolerance)
+ pose = client.pose()
+ assert pose is not None
+ assert abs(pose[0] - target_pose[0]) < 1.0, (
+ f"Profile {p}: X position off target "
+ f"(expected {target_pose[0]:.1f}, got {pose[0]:.1f})"
+ )
+
+
+@pytest.mark.integration
+class TestServoCartesian:
+ """Test servo Cartesian motion (replaces old streaming mode tests)."""
+
+ def test_servo_l_sequential(self, client, server_proc):
+ """Test sequential servo_l moves."""
+ client.home(wait=True)
+ start_pose = client.pose()
+ assert start_pose is not None
+
+ # Send a sequence of servo Cartesian commands (fire-and-forget)
+ for i in range(5):
+ target = [
+ start_pose[0] + (i * 5),
+ start_pose[1],
+ start_pose[2],
+ start_pose[3],
+ start_pose[4],
+ start_pose[5],
+ ]
+ result = client.servo_l(target, speed=0.5)
+ assert result > 0
+ time.sleep(0.1)
+
+ assert client.wait_motion(timeout=10.0)
+
+ # Verify robot completed motion
+ assert client.is_robot_stopped()
+
+
+@pytest.mark.integration
+class TestCartesianPrecision:
+ """Test Cartesian move precision with different profiles."""
+
+ @pytest.mark.parametrize("profile", ["TOPPRA", "LINEAR", "QUINTIC", "TRAPEZOID"])
+ def test_cartesian_simple_sequence(self, client, server_proc, profile):
+ """
+ Test precision of simple Cartesian moves with all profiles.
+
+ All profiles should handle Cartesian paths correctly.
+ LINEAR uses uniform time distribution, which may require longer durations.
+ """
+ client.home(wait=True)
+ assert client.select_profile(profile) > 0
+
+ # Get current pose after homing to build moves relative to it
+ start_pose = client.pose()
+ assert start_pose is not None
+
+ # Use smaller offset for LINEAR to keep duration reasonable
+ if profile == "LINEAR":
+ offset = 20.0
+ else:
+ offset = 50.0
+
+ # Move relative to home pose (just Y offset, keep orientation)
+ moves = [
+ [
+ start_pose[0],
+ start_pose[1] + offset,
+ start_pose[2],
+ start_pose[3],
+ start_pose[4],
+ start_pose[5],
+ ],
+ [
+ start_pose[0],
+ start_pose[1] - offset,
+ start_pose[2],
+ start_pose[3],
+ start_pose[4],
+ start_pose[5],
+ ],
+ ]
+
+ for target in moves:
+ result = client.move_l(target, duration=2.0)
+ assert result >= 0
+ assert client.wait_motion(timeout=15.0)
+
+ # Verify final pose
+ pose = client.pose()
+ assert pose is not None
+ final_target = moves[-1]
+
+ # Print diagnostic info
+ print(f"\nProfile {profile}:")
+ print(
+ f" Target: X={final_target[0]:.2f}, Y={final_target[1]:.2f}, Z={final_target[2]:.2f}"
+ )
+ print(
+ f" RX={final_target[3]:.2f}, RY={final_target[4]:.2f}, RZ={final_target[5]:.2f}"
+ )
+ print(f" Actual: X={pose[0]:.2f}, Y={pose[1]:.2f}, Z={pose[2]:.2f}")
+ print(f" RX={pose[3]:.2f}, RY={pose[4]:.2f}, RZ={pose[5]:.2f}")
+
+ # Check position (X, Y, Z) within 1mm tolerance
+ for i, (actual, expected) in enumerate(zip(pose[:3], final_target[:3])):
+ assert abs(actual - expected) < 1.0, (
+ f"Profile {profile}: Position[{i}] off target "
+ f"(expected {expected:.2f}, got {actual:.2f})"
+ )
+
+ # Check orientation (RX, RY, RZ) within 1 degree tolerance
+ for i, (actual, expected) in enumerate(zip(pose[3:], final_target[3:])):
+ assert abs(actual - expected) < 1.0, (
+ f"Profile {profile}: Orientation[{i}] off target "
+ f"(expected {expected:.2f}, got {actual:.2f})"
+ )
+
+
+def _point_to_line_distance(
+ point: np.ndarray, line_start: np.ndarray, line_end: np.ndarray
+) -> float:
+ """Calculate perpendicular distance from a point to a line segment."""
+ line_vec = line_end - line_start
+ line_len = np.linalg.norm(line_vec)
+ if line_len < 1e-9:
+ return float(np.linalg.norm(point - line_start))
+
+ line_unit = line_vec / line_len
+ point_vec = point - line_start
+
+ # Project point onto line
+ projection_len = np.dot(point_vec, line_unit)
+
+ # Clamp to line segment
+ projection_len = max(0, min(line_len, projection_len))
+
+ # Find closest point on line
+ closest_point = line_start + projection_len * line_unit
+
+ return float(np.linalg.norm(point - closest_point))
+
+
+def _extract_position_from_pose_matrix(pose_flat: list[float]) -> np.ndarray:
+ """Extract XYZ position from flattened 4x4 transformation matrix."""
+ # Row-major 4x4 matrix: translation is at indices 3, 7, 11
+ return np.array([pose_flat[3], pose_flat[7], pose_flat[11]])
+
+
+@pytest.mark.integration
+class TestTCPPathAccuracy:
+ """Test that Cartesian moves follow straight-line TCP paths."""
+
+ @pytest.mark.parametrize("profile", ["TOPPRA", "LINEAR", "QUINTIC", "TRAPEZOID"])
+ def test_cartesian_follows_straight_line(self, client, server_proc, profile):
+ """
+ Verify TCP follows a straight line during Cartesian moves.
+
+ Samples TCP position during motion and checks that all points
+ lie within tolerance of the expected straight-line path.
+ """
+ client.home(wait=True)
+ assert client.select_profile(profile) > 0
+
+ start_pose = client.pose()
+ assert start_pose is not None
+ start_xyz = np.array(start_pose[:3])
+
+ # Move along Y axis - smaller offset for LINEAR to keep duration reasonable
+ if profile == "LINEAR":
+ offset = 20.0
+ else:
+ offset = 50.0
+ target_pose = [
+ start_pose[0],
+ start_pose[1] + offset,
+ start_pose[2],
+ start_pose[3],
+ start_pose[4],
+ start_pose[5],
+ ]
+ target_xyz = np.array(target_pose[:3])
+
+ # Collect TCP positions during motion
+ sampled_positions: list[np.ndarray] = []
+ sampling_done = threading.Event()
+
+ def sample_positions():
+ """Background thread to sample TCP positions."""
+ while not sampling_done.is_set():
+ status = client.status()
+ if status:
+ pos = _extract_position_from_pose_matrix(status.pose)
+ sampled_positions.append(pos)
+ time.sleep(0.02) # 50 Hz sampling
+
+ # Start sampling thread
+ sampler = threading.Thread(target=sample_positions, daemon=True)
+ sampler.start()
+
+ # Execute move
+ result = client.move_l(target_pose, duration=2.0)
+ assert result >= 0
+ assert client.wait_motion(timeout=10.0)
+
+ # Stop sampling
+ sampling_done.set()
+ sampler.join(timeout=1.0)
+
+ # Need at least a few samples to validate path
+ assert len(sampled_positions) >= 5, (
+ f"Only got {len(sampled_positions)} samples, need at least 5"
+ )
+
+ # Calculate max deviation from straight line
+ max_deviation = 0.0
+ deviations = []
+ for pos in sampled_positions:
+ dist = _point_to_line_distance(pos, start_xyz, target_xyz)
+ deviations.append(dist)
+ max_deviation = max(max_deviation, dist)
+
+ # Print diagnostic info
+ print(f"\nProfile {profile}:")
+ print(f" Samples collected: {len(sampled_positions)}")
+ print(f" Max path deviation: {max_deviation:.3f} mm")
+ print(f" Mean path deviation: {np.mean(deviations):.3f} mm")
+
+ tolerance_mm = 0.15
+ assert max_deviation < tolerance_mm, (
+ f"Profile {profile}: TCP deviated {max_deviation:.3f}mm from straight line "
+ f"(tolerance: {tolerance_mm}mm)"
+ )
diff --git a/tests/integration/test_smooth_commands_e2e.py b/tests/integration/test_smooth_commands_e2e.py
deleted file mode 100644
index 1516cd8..0000000
--- a/tests/integration/test_smooth_commands_e2e.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""
-Integration tests for smooth motion commands (minimal set).
-
-Tests one representative command per smooth motion family in FAKE_SERIAL mode.
-Verifies command acceptance, completion status transitions, and basic functionality.
-"""
-
-import os
-import sys
-
-import pytest
-
-# Add the parent directory to Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
-
-
-def _check_if_fake_serial_xfail(result):
- """Helper to mark test as xfail if smooth motion fails due to IK in FAKE_SERIAL."""
- if (
- isinstance(result, dict)
- and result.get("status") == "FAILED"
- and "IK failed" in result.get("details", "")
- ):
- pytest.xfail("Smooth motion commands fail IK in FAKE_SERIAL mode (expected)")
-
-
-@pytest.mark.integration
-class TestSmoothMotionMinimal:
- """Minimal set of smooth motion tests - one per command family."""
-
- @pytest.fixture
- def homed_robot(self, client, server_proc, robot_api_env):
- """Ensure robot is homed before smooth motion tests."""
- print("Homing robot for smooth motion tests...")
-
- # Home the robot first
- result = client.home()
- assert result is True
-
- # Wait for robot to be stopped
- assert client.wait_until_stopped(timeout=15.0)
- print("Robot homed and ready for smooth motion tests")
-
- return True
-
- def test_smooth_circle_basic(self, client, server_proc, robot_api_env, homed_robot):
- """Test basic circular motion in FAKE_SERIAL mode."""
- result = client.smooth_circle(
- center=[0, 0, 100],
- radius=30,
- duration=2.0,
- plane="XY",
- frame="WRF",
- )
-
- # Check if we should xfail due to FAKE_SERIAL limitations
- _check_if_fake_serial_xfail(result)
-
- # Should complete successfully in FAKE_SERIAL mode
- assert result is True
-
- # Wait for completion and verify robot stops
- assert client.wait_until_stopped(timeout=9.0)
- assert client.is_robot_stopped(threshold_speed=5.0)
-
- def test_smooth_arc_center_basic(
- self, client, server_proc, robot_api_env, homed_robot
- ):
- """Test basic arc motion defined by center point."""
- result = client.smooth_arc_center(
- end_pose=[100, 100, 150, 0, 0, 90],
- center=[50, 50, 150],
- duration=2.0,
- frame="WRF",
- )
-
- _check_if_fake_serial_xfail(result)
-
- assert result is True
-
- assert client.wait_until_stopped(timeout=9.0)
- assert client.is_robot_stopped(threshold_speed=5.0)
-
- def test_smooth_spline_basic(self, client, server_proc, robot_api_env, homed_robot):
- """Test basic spline motion through waypoints."""
- waypoints = [
- [100.0, 100.0, 120.0, 0.0, 0.0, 0.0],
- [150.0, 150.0, 130.0, 0.0, 0.0, 30.0],
- [200.0, 100.0, 120.0, 0.0, 0.0, 60.0],
- ]
-
- result = client.smooth_spline(
- waypoints=waypoints,
- duration=3.0,
- frame="WRF",
- )
-
- _check_if_fake_serial_xfail(result)
-
- assert result is True
-
- assert client.wait_until_stopped(timeout=10.0)
- assert client.is_robot_stopped(threshold_speed=5.0)
-
- def test_smooth_helix_basic(self, client, server_proc, robot_api_env, homed_robot):
- """Test basic helical motion."""
- result = client.smooth_helix(
- center=[100, 100, 80],
- radius=25,
- pitch=20,
- height=60,
- duration=3.0,
- frame="WRF",
- )
-
- _check_if_fake_serial_xfail(result)
-
- assert result is True
-
- assert client.wait_until_stopped(timeout=10.0)
- assert client.is_robot_stopped(threshold_speed=5.0)
-
- def test_smooth_blend_basic(self, client, server_proc, robot_api_env, homed_robot):
- """Test basic blended motion through segments."""
- segments = [
- {
- "type": "LINE",
- "end": [120.0, 80.0, 140.0, 0.0, 0.0, 30.0],
- "duration": 1.0,
- },
- {
- "type": "CIRCLE",
- "center": [120, 120, 140],
- "radius": 25,
- "plane": "XY",
- "duration": 2.0,
- "clockwise": False,
- },
- ]
-
- result = client.smooth_blend(
- segments=segments,
- blend_time=0.3,
- frame="WRF",
- )
-
- _check_if_fake_serial_xfail(result)
-
- assert result is True
-
- assert client.wait_until_stopped(timeout=10.0)
- assert client.is_robot_stopped(threshold_speed=5.0)
-
-
-if __name__ == "__main__":
- pytest.main([__file__])
diff --git a/tests/integration/test_status_broadcast_autofailover.py b/tests/integration/test_status_broadcast_autofailover.py
index 62fd05e..4e333af 100644
--- a/tests/integration/test_status_broadcast_autofailover.py
+++ b/tests/integration/test_status_broadcast_autofailover.py
@@ -1,7 +1,6 @@
import asyncio
import contextlib
import socket
-import time
import pytest
@@ -9,7 +8,6 @@
from parol6.server.state import StateManager
from parol6.server.status_broadcast import StatusBroadcaster
from parol6.server.status_cache import get_cache
-from parol6.client.status_subscriber import subscribe_status
def _free_udp_port() -> int:
@@ -22,20 +20,15 @@ def _free_udp_port() -> int:
@pytest.mark.asyncio
-async def test_status_broadcast_auto_failover_receives_frame(monkeypatch):
+async def test_status_broadcast_auto_failover_to_unicast(monkeypatch):
"""
Deterministically force multicast setup to fail so the broadcaster falls back
- to UNICAST and verify that a multicast-configured subscriber still receives
- frames on the same port.
+ to UNICAST mode.
"""
port = _free_udp_port()
group = cfg.MCAST_GROUP
iface = "127.0.0.1"
- # Ensure subscriber uses multicast socket (which also accepts unicast to port)
- monkeypatch.setattr(cfg, "STATUS_TRANSPORT", "MULTICAST", raising=False)
- monkeypatch.setattr(cfg, "STATUS_UNICAST_HOST", "127.0.0.1", raising=False)
-
# Prepare state/cache so broadcaster is allowed to send (cache not stale)
cache = get_cache()
cache.mark_serial_observed()
@@ -43,7 +36,7 @@ async def test_status_broadcast_auto_failover_receives_frame(monkeypatch):
# Start broadcaster with our chosen port and force unicast fallback
state_mgr = StateManager()
- def _force_unicast_setup(self: StatusBroadcaster) -> None: # type: ignore[no-redef]
+ def _force_unicast_setup(self: StatusBroadcaster) -> None:
self._use_unicast = True
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1 << 20)
@@ -60,34 +53,27 @@ def _force_unicast_setup(self: StatusBroadcaster) -> None: # type: ignore[no-re
stale_s=1.0,
)
- broadcaster.start()
+ # StatusBroadcaster is now a polling class - call tick() manually
+ stop_flag = False
+
+ async def _tick_loop():
+ while not stop_flag:
+ broadcaster.tick()
+ await asyncio.sleep(0.05)
+
+ tick_task = asyncio.create_task(_tick_loop())
try:
- # Give broadcaster a tiny moment to initialize
- await asyncio.sleep(0.05)
+ await asyncio.sleep(0.1)
assert broadcaster._use_unicast is True, (
"Broadcaster did not fall back to unicast"
)
-
- async def _consume_one(timeout: float = 3.0) -> bool:
- deadline = time.time() + timeout
- async for status in subscribe_status(
- group=group, port=port, iface_ip=iface
- ):
- assert isinstance(status, dict)
- assert "angles" in status
- return True
- if time.time() > deadline:
- break
- return False
-
- ok = await asyncio.wait_for(_consume_one(), timeout=4.0)
- assert ok, "Did not receive a status frame within timeout"
-
finally:
- broadcaster.stop()
- with contextlib.suppress(Exception):
- broadcaster.join(timeout=1.0)
+ stop_flag = True
+ tick_task.cancel()
+ with contextlib.suppress(asyncio.CancelledError):
+ await tick_task
+ broadcaster.close()
@pytest.mark.asyncio
@@ -96,6 +82,9 @@ async def test_subscriber_multicast_socket_receives_unicast(monkeypatch):
Verify that when the subscriber is configured for multicast, it still receives
a unicast datagram sent to the same port (since it binds to ("", port)).
"""
+ from parol6.client.async_client import _create_multicast_socket
+ from parol6.protocol.wire import StatusBuffer, decode_status_bin_into
+
port = _free_udp_port()
group = cfg.MCAST_GROUP
iface = "127.0.0.1"
@@ -103,39 +92,47 @@ async def test_subscriber_multicast_socket_receives_unicast(monkeypatch):
# Ensure subscriber chooses multicast socket
monkeypatch.setattr(cfg, "STATUS_TRANSPORT", "MULTICAST", raising=False)
- # Craft a valid STATUS payload (defaults are acceptable for parsing)
- payload = get_cache().to_ascii().encode("ascii", errors="ignore")
+ # Craft a valid binary msgpack STATUS payload
+ payload = get_cache().to_binary()
+
+ # Create a multicast socket (same as client would)
+ sock = _create_multicast_socket(group, port, iface)
async def _send_once():
await asyncio.sleep(0.05)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
+ # Send unicast to the multicast socket's port
s.sendto(payload, ("127.0.0.1", port))
finally:
s.close()
async def _consume_one(timeout: float = 3.0) -> bool:
+ loop = asyncio.get_running_loop()
+ buf = StatusBuffer()
+
# Start sender in background
sender = asyncio.create_task(_send_once())
try:
- async for status in subscribe_status(
- group=group, port=port, iface_ip=iface
- ):
- assert isinstance(status, dict)
- assert "io" in status
+ # Wait for data on the multicast socket
+ data = await asyncio.wait_for(loop.sock_recv(sock, 4096), timeout=timeout)
+ if decode_status_bin_into(data, buf):
+ assert buf.io is not None
return True
+ except asyncio.TimeoutError:
+ pass
finally:
with contextlib.suppress(Exception):
sender.cancel()
await sender
- # If we exit the loop without receiving, signal failure
+ sock.close()
return False
ok = await asyncio.wait_for(_consume_one(), timeout=4.0)
assert ok, "Subscriber did not receive unicast datagram on multicast socket"
-def _raise_sendto(*args, **kwargs): # helper for monkeypatching socket.sendto
+def _raise_sendto(*args, **kwargs):
raise OSError("simulated send failure")
@@ -160,7 +157,17 @@ async def test_multicast_send_errors_should_trigger_fallback_but_currently_do_no
broadcaster = StatusBroadcaster(
state_mgr=state_mgr, port=port, iface_ip="127.0.0.1", rate_hz=20.0, stale_s=2.0
)
- broadcaster.start()
+
+ # StatusBroadcaster is now a polling class - call tick() manually
+ stop_flag = False
+
+ async def _tick_loop():
+ while not stop_flag:
+ broadcaster.tick()
+ await asyncio.sleep(0.05)
+
+ tick_task = asyncio.create_task(_tick_loop())
+
try:
# Allow setup to complete and at least one send to work
await asyncio.sleep(0.1)
@@ -177,6 +184,8 @@ async def test_multicast_send_errors_should_trigger_fallback_but_currently_do_no
"Broadcaster did not fall back to unicast on repeated send errors"
)
finally:
- broadcaster.stop()
- with contextlib.suppress(Exception):
- broadcaster.join(timeout=1.0)
+ stop_flag = True
+ tick_task.cancel()
+ with contextlib.suppress(asyncio.CancelledError):
+ await tick_task
+ broadcaster.close()
diff --git a/tests/integration/test_streaming_cartesian_accuracy.py b/tests/integration/test_streaming_cartesian_accuracy.py
new file mode 100644
index 0000000..0d379c6
--- /dev/null
+++ b/tests/integration/test_streaming_cartesian_accuracy.py
@@ -0,0 +1,127 @@
+"""
+Integration test for servo Cartesian move accuracy.
+
+Tests the servo_l path used for TCP dragging.
+Catches bugs where reference pose gets corrupted (e.g., aliasing with FK cache).
+"""
+
+import numpy as np
+import pytest
+
+
+def angle_diff(a: float, b: float) -> float:
+ """Compute smallest angle difference considering wrapping."""
+ diff = (a - b + 180) % 360 - 180
+ return abs(diff)
+
+
+def assert_pose_accuracy(
+ final_pose: list[float],
+ target: list[float],
+ pos_tol_mm: float = 1.0,
+ ori_tol_deg: float = 1.0,
+ context: str = "",
+) -> None:
+ """Assert that final pose matches target within tolerances."""
+ # Position check
+ pos_error = np.linalg.norm(np.array(final_pose[:3]) - np.array(target[:3]))
+ assert pos_error < pos_tol_mm, (
+ f"{context}Position error {pos_error:.3f}mm exceeds {pos_tol_mm}mm tolerance. "
+ f"Target: {target[:3]}, Final: {final_pose[:3]}"
+ )
+
+ # Orientation check
+ for i, axis in enumerate(["RX", "RY", "RZ"]):
+ ori_error = angle_diff(final_pose[3 + i], target[3 + i])
+ assert ori_error < ori_tol_deg, (
+ f"{context}{axis} error {ori_error:.3f}° exceeds {ori_tol_deg}° tolerance. "
+ f"Target: {target[3 + i]:.1f}°, Final: {final_pose[3 + i]:.1f}°"
+ )
+
+
+@pytest.mark.integration
+class TestServoCartesianAccuracy:
+ """Test that servo cartesian moves reach correct targets."""
+
+ def test_servo_l_reaches_target(self, client, server_proc):
+ """servo_l move should arrive at the requested target.
+
+ Tests the servo Cartesian path (replaces old stream_on + move_cartesian).
+ """
+ assert client.resume() > 0
+ assert client.home() >= 0
+ assert client.wait_motion(timeout=15.0)
+
+ # Get starting pose
+ start_pose = client.pose()
+ print(f"\nStart pose: {start_pose}")
+
+ # Target: offset from start (like beginning of a TCP drag)
+ target = list(start_pose)
+ target[0] += 30.0 # +30mm in X
+
+ print(f"Target pose: {target}")
+
+ # Send servo cartesian move (fire-and-forget, no stream mode toggle needed)
+ result = client.servo_l(target, speed=1.0)
+ assert result > 0
+
+ # Wait for motion to settle
+ assert client.wait_motion(timeout=10.0)
+
+ # Verify final pose
+ final_pose = client.pose()
+ print(f"Final pose: {final_pose}")
+
+ assert_pose_accuracy(final_pose, target)
+
+ def test_servo_l_sequential_targets(self, client, server_proc):
+ """Sequential servo moves should each reach their target.
+
+ Simulates TCP dragging behavior where multiple servo_l commands
+ are sent in sequence.
+ """
+ assert client.resume() > 0
+ assert client.home() >= 0
+ assert client.wait_motion(timeout=15.0)
+
+ start_pose = client.pose()
+ print(f"\nStart pose: {start_pose}")
+
+ # Simulate a drag path: series of small incremental moves
+ # This pattern catches bugs where reference pose gets corrupted
+ # between moves (like the FK cache aliasing bug)
+ offsets = [
+ (30.0, 0.0, 0.0), # +30mm X
+ (30.0, 30.0, 0.0), # +30mm X, +30mm Y
+ (30.0, 30.0, -30.0), # +30mm X, +30mm Y, -30mm Z
+ (0.0, 0.0, 0.0), # hold position
+ ]
+
+ for i, (dx, dy, dz) in enumerate(offsets):
+ target = list(start_pose)
+ target[0] += dx
+ target[1] += dy
+ target[2] += dz
+
+ print(f"\n--- Move {i + 1}/{len(offsets)} ---")
+ print(f"Target: {target[:3]}")
+
+ result = client.servo_l(target, speed=1.0)
+ assert result > 0
+
+ # Wait for this move to complete before next
+ assert client.wait_motion(timeout=10.0, settle_window=2.0)
+
+ final_pose = client.pose()
+ start_pose = final_pose
+ print(f"Final: {final_pose[:3]}")
+
+ assert_pose_accuracy(final_pose, target, context=f"Move {i + 1}: ")
+ print(f"Move {i + 1} accurate")
+
+ print("\nAll sequential servo moves reached targets accurately")
+
+
+if __name__ == "__main__":
+ pytest.main([__file__, "-v", "-s"])
diff --git a/tests/integration/test_tool_operations.py b/tests/integration/test_tool_operations.py
new file mode 100644
index 0000000..f9b9f9e
--- /dev/null
+++ b/tests/integration/test_tool_operations.py
@@ -0,0 +1,261 @@
+"""
+Integration tests for tool operations.
+
+Tests tool switching, gripper methods, and tool registry through the client API
+with a running controller (FAKE_SERIAL mode).
+"""
+
+import pytest
+import pytest_asyncio
+
+from waldoctl import (
+ ElectricGripperTool,
+ GripperType,
+ PneumaticGripperTool,
+ ToolType,
+)
+
+
+# ---------------------------------------------------------------------------
+# Async client fixture
+# ---------------------------------------------------------------------------
+
+
+@pytest_asyncio.fixture
+async def async_client(server_proc, ports):
+ """Async client for tool action testing."""
+ client = server_proc.create_async_client(
+ host=ports.server_ip, port=ports.server_port
+ )
+ await client.wait_ready(timeout=5.0)
+ yield server_proc, client
+ await client.close()
+
+
+# ===========================================================================
+# Tool Switching (sync client)
+# ===========================================================================
+
+
+@pytest.mark.integration
+class TestToolSwitching:
+ """Test tool switching via the client API."""
+
+ def test_set_get_tool_cycle(self, client, server_proc):
+ """Cycle through all registered tools and verify tools() reflects each switch."""
+ # Default after reset should be NONE
+ result = client.tools()
+ assert result is not None
+ assert result.tool == "NONE"
+
+ # Available should include all registered tools
+ for expected in ("PNEUMATIC", "SSG-48", "MSG"):
+ assert expected in result.available, (
+ f"{expected} not in available tools: {result.available}"
+ )
+
+ # Switch to each tool and verify
+ for tool_name in ("PNEUMATIC", "SSG-48", "MSG", "NONE"):
+ assert client.select_tool(tool_name) >= 0
+ assert client.wait_motion(timeout=5.0)
+ result = client.tools()
+ assert result is not None
+ assert result.tool == tool_name
+
+ def test_tool_persists_across_motion(self, client, server_proc):
+ """Tool setting should survive a joint move."""
+ assert client.select_tool("PNEUMATIC") >= 0
+ assert client.wait_motion(timeout=5.0)
+
+ idx = client.move_j([0, -45, 180, 0, 0, 180], speed=0.5)
+ assert idx >= 0
+ assert client.wait_motion(timeout=10.0)
+
+ result = client.tools()
+ assert result is not None
+ assert result.tool == "PNEUMATIC"
+
+
+# ===========================================================================
+# Pneumatic Gripper Methods (async, via client.tool)
+# ===========================================================================
+
+
+@pytest.mark.integration
+class TestPneumaticGripperMethods:
+ """Test pneumatic gripper via client.tool()."""
+
+ @pytest.mark.asyncio
+ async def test_pneumatic_open_close(self, async_client):
+ """Open and close pneumatic gripper via tool methods."""
+ robot, client = async_client
+ spec = robot.tools["PNEUMATIC"]
+ assert isinstance(spec, PneumaticGripperTool)
+ assert spec.gripper_type == GripperType.PNEUMATIC
+ assert spec.io_port == 1
+
+ await client.select_tool("PNEUMATIC")
+ await client.wait_motion(timeout=5.0)
+
+ tool = client.tool
+
+ # Open — command accepted and completes
+ idx = await tool.open()
+ assert idx >= 0
+ assert await client.wait_motion(timeout=5.0)
+
+ # Close — command accepted and completes
+ idx = await tool.close()
+ assert idx >= 0
+ assert await client.wait_motion(timeout=5.0)
+
+ @pytest.mark.asyncio
+ async def test_pneumatic_set_position_threshold(self, async_client):
+ """set_position uses binary threshold: < 0.5 opens, >= 0.5 closes."""
+ robot, client = async_client
+
+ await client.select_tool("PNEUMATIC")
+ await client.wait_motion(timeout=5.0)
+
+ tool = client.tool
+
+ # Position 0.8 → dispatches to close()
+ idx = await tool.set_position(0.8)
+ assert idx >= 0
+ assert await client.wait_motion(timeout=5.0)
+
+ # Position 0.2 → dispatches to open()
+ idx = await tool.set_position(0.2)
+ assert idx >= 0
+ assert await client.wait_motion(timeout=5.0)
+
+
+# ===========================================================================
+# SSG-48 Electric Gripper Methods (async, via client.tool)
+# ===========================================================================
+
+
+@pytest.mark.integration
+class TestSSG48GripperMethods:
+ """Test SSG-48 electric gripper via client.tool()."""
+
+ @pytest.mark.asyncio
+ async def test_ssg48_calibrate_and_move(self, async_client):
+ """Calibrate and move SSG-48 gripper through tool methods."""
+ robot, client = async_client
+ spec = robot.tools["SSG-48"]
+ assert isinstance(spec, ElectricGripperTool)
+ assert spec.gripper_type == GripperType.ELECTRIC
+
+ # Verify parameter ranges
+ assert spec.position_range == (0.0, 1.0)
+ assert spec.speed_range == (0.0, 1.0)
+ assert spec.current_range == (100, 1300)
+
+ await client.select_tool("SSG-48")
+ await client.wait_motion(timeout=5.0)
+
+ tool = client.tool
+
+ # Calibrate
+ idx = await tool.calibrate()
+ assert idx >= 0
+ await client.wait_motion(timeout=10.0)
+
+ # Move to half position
+ idx = await tool.set_position(0.5, speed=0.7, current=600)
+ assert idx >= 0
+ await client.wait_motion(timeout=10.0)
+
+
+# ===========================================================================
+# MSG AI Stepper Gripper Methods (async, via client.tool)
+# ===========================================================================
+
+
+@pytest.mark.integration
+class TestMSGGripperMethods:
+ """Test MSG compliant AI stepper gripper via client.tool()."""
+
+ @pytest.mark.asyncio
+ async def test_msg_calibrate_and_move(self, async_client):
+ """Calibrate and move MSG gripper through tool methods."""
+ robot, client = async_client
+ spec = robot.tools["MSG"]
+ assert isinstance(spec, ElectricGripperTool)
+ assert spec.gripper_type == GripperType.ELECTRIC
+
+ await client.select_tool("MSG")
+ await client.wait_motion(timeout=5.0)
+
+ tool = client.tool
+
+ # Calibrate
+ idx = await tool.calibrate()
+ assert idx >= 0
+ await client.wait_motion(timeout=10.0)
+
+ # Move to position
+ idx = await tool.set_position(0.3, speed=0.5, current=500)
+ assert idx >= 0
+ await client.wait_motion(timeout=10.0)
+
+
+# ===========================================================================
+# Tool Registry (no server needed)
+# ===========================================================================
+
+
+@pytest.mark.unit
+class TestToolRegistry:
+ """Test tool registry completeness — no server required."""
+
+ def test_registry_completeness(self):
+ """All expected tools are registered with correct types and properties."""
+ from parol6 import Robot
+
+ robot = Robot()
+ tools = robot.tools
+
+ # 5 tools registered
+ keys = [t.key for t in tools.available]
+ assert len(keys) == 5
+ for expected in ("NONE", "PNEUMATIC", "SSG-48", "MSG", "VACUUM"):
+ assert expected in keys, f"{expected} not in {keys}"
+
+ # Default is NONE
+ assert tools.default.key == "NONE"
+ assert tools.default.tool_type == ToolType.NONE
+
+ # 4 grippers
+ grippers = tools.by_type(ToolType.GRIPPER)
+ assert len(grippers) == 4
+ gripper_keys = {t.key for t in grippers}
+ assert gripper_keys == {"PNEUMATIC", "SSG-48", "MSG", "VACUUM"}
+
+ # Type checks
+ assert ToolType.GRIPPER in tools
+ assert isinstance(tools["PNEUMATIC"], PneumaticGripperTool)
+ assert tools["PNEUMATIC"].gripper_type == GripperType.PNEUMATIC
+ assert isinstance(tools["SSG-48"], ElectricGripperTool)
+ assert tools["SSG-48"].gripper_type == GripperType.ELECTRIC
+ assert isinstance(tools["MSG"], ElectricGripperTool)
+ assert tools["MSG"].gripper_type == GripperType.ELECTRIC
+
+ # TCP origins differ
+ origins = {t.key: t.tcp_origin for t in tools.available}
+ assert origins["NONE"] != origins["PNEUMATIC"]
+ assert origins["PNEUMATIC"] != origins["SSG-48"]
+ assert origins["SSG-48"] != origins["MSG"]
+
+ # Invalid key raises
+ with pytest.raises(KeyError):
+ tools["BOGUS"]
+
+ # SSG-48 has meshes
+ ssg48 = tools["SSG-48"]
+ assert len(ssg48.meshes) == 3 # body + 2 jaws
+
+ # PNEUMATIC has meshes
+ pneumatic = tools["PNEUMATIC"]
+ assert len(pneumatic.meshes) == 3 # body + 2 jaws
diff --git a/tests/integration/test_udp_smoke.py b/tests/integration/test_udp_smoke.py
index b0a1881..dfe1ab1 100644
--- a/tests/integration/test_udp_smoke.py
+++ b/tests/integration/test_udp_smoke.py
@@ -3,14 +3,10 @@
Covers PING/PONG, GET_* endpoints, STOP semantics, and basic functionality.
"""
-import os
-import sys
+import socket
import pytest
-# Add the parent directory to Python path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
-
from parol6 import RobotClient
@@ -27,34 +23,26 @@ def test_ping_pong(self, client, server_proc):
class TestGetEndpoints:
"""Test GET_* command endpoints that return immediate data."""
- def test_get_pose(self, client, server_proc):
- """Test GET_POSE command."""
- pose = client.get_pose()
+ def test_pose(self, client, server_proc):
+ """Test POSE command — returns [x, y, z, rx, ry, rz]."""
+ pose = client.pose()
assert pose is not None
assert isinstance(pose, list)
- assert len(pose) == 16 # 4x4 transformation matrix flattened
-
- # Test helper methods too
- pose_rpy = client.get_pose_rpy()
- assert pose_rpy is not None
- assert isinstance(pose_rpy, list)
- assert len(pose_rpy) == 6 # [x, y, z, rx, ry, rz]
-
- pose_xyz = client.get_pose_xyz()
- assert pose_xyz is not None
- assert isinstance(pose_xyz, list)
- assert len(pose_xyz) == 3 # [x, y, z]
-
- def test_get_angles(self, client, server_proc):
- """Test GET_ANGLES command."""
- angles = client.get_angles()
+ assert len(pose) == 6 # [x, y, z, rx, ry, rz]
+
+ pose_xyz = pose[:3]
+ assert len(pose_xyz) == 3
+
+ def test_angles(self, client, server_proc):
+ """Test ANGLES command."""
+ angles = client.angles()
assert angles is not None
assert isinstance(angles, list)
assert len(angles) == 6 # 6 joint angles
- def test_get_io(self, client, server_proc):
- """Test GET_IO command."""
- io_status = client.get_io()
+ def test_io(self, client, server_proc):
+ """Test IO command."""
+ io_status = client.io()
assert io_status is not None
assert isinstance(io_status, list)
assert len(io_status) == 5 # IN1, IN2, OUT1, OUT2, ESTOP
@@ -65,16 +53,9 @@ def test_get_io(self, client, server_proc):
# Test helper method too
assert not client.is_estop_pressed() # Should be False in FAKE_SERIAL
- def test_get_gripper(self, client, server_proc):
- """Test GET_GRIPPER command."""
- gripper = client.get_gripper_status()
- assert gripper is not None
- assert isinstance(gripper, list)
- assert len(gripper) == 6 # ID, Position, Speed, Current, Status, ObjDetection
-
- def test_get_speeds(self, client, server_proc):
- """Test GET_SPEEDS command."""
- speeds = client.get_speeds()
+ def test_joint_speeds(self, client, server_proc):
+ """Test JOINT_SPEEDS command."""
+ speeds = client.joint_speeds()
assert speeds is not None
assert isinstance(speeds, list)
assert len(speeds) == 6 # 6 joint speeds
@@ -83,33 +64,34 @@ def test_get_speeds(self, client, server_proc):
stopped = client.is_robot_stopped()
assert isinstance(stopped, bool)
- def test_get_status_aggregate(self, client, server_proc):
- """Test GET_STATUS aggregate command."""
- status = client.get_status()
+ def test_status_aggregate(self, client, server_proc):
+ """Test STATUS aggregate command."""
+ from parol6.protocol.wire import StatusResultStruct
+
+ status = client.status()
assert status is not None
- assert isinstance(status, dict)
+ assert isinstance(status, StatusResultStruct)
- # Should contain all status components
- assert "pose" in status
- assert "angles" in status
- assert "io" in status
- assert "gripper" in status
+ # Should contain all status components (as struct attributes)
+ assert hasattr(status, "pose")
+ assert hasattr(status, "angles")
+ assert hasattr(status, "io")
+ assert hasattr(status, "tool_status")
@pytest.mark.integration
-class TestStreamMode:
- """Test streaming mode functionality."""
-
- def test_stream_mode_toggle(self, server_proc, ports):
- """Test STREAM ON/OFF commands."""
- client = RobotClient(ports.server_ip, ports.server_port)
-
- # Enable stream mode and verify responsiveness
- assert client.stream_on() is True
- assert client.ping() is not None
-
- # Disable stream mode and verify responsiveness
- assert client.stream_off() is True
+class TestServoMode:
+ """Test servo (real-time) mode functionality.
+
+ stream_on/stream_off were removed in the API redesign.
+ Servo commands (servo_j/servo_l) replaced streaming mode.
+ """
+
+ def test_servo_joint_basic(self, client, server_proc):
+ """Test that servo_j command is accepted."""
+ # servo_j sends a single real-time joint target
+ result = client.servo_j([0, -45, 180, 0, 0, 180], speed=0.5, accel=0.5)
+ assert result > 0
assert client.ping() is not None
@@ -120,16 +102,16 @@ class TestBasicMotionCommands:
def test_home_command(self, client, server_proc):
"""Test HOME command (fire-and-forget)."""
result = client.home()
- assert result is True
+ assert result >= 0
# Wait for completion and verify robot stops
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.wait_motion(timeout=15.0)
# Check that robot is responsive after homing
assert client.ping() is not None
# Check that angles are available after homing
- angles = client.get_angles()
+ angles = client.angles()
assert angles is not None
assert len(angles) == 6
@@ -138,48 +120,50 @@ def test_basic_joint_move(self, client, server_proc):
# Use joint angles that are within the robot's limits
# Joint 2 range: [-145.0088, -3.375]
# Joint 3 range: [107.866, 287.8675]
- result = client.move_joints(
+ result = client.move_j(
[0, -45, 180, 15, 20, 25], # Valid angles within joint limits
duration=2.0,
)
- assert result is True
+ assert result >= 0
# Wait for completion and verify robot stops
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.wait_motion(timeout=15.0)
# Verify robot state after move attempt
- angles = client.get_angles()
+ angles = client.angles()
assert angles is not None
assert client.ping() is not None
- def test_basic_pose_move(self, client, server_proc):
- """Test basic pose movement command with validation."""
- result = client.move_pose(
- [100, 100, 100, 0, 0, 0],
- speed_percentage=50,
+ def test_joint_move_with_speed(self, client, server_proc):
+ """Test basic joint movement command with validation."""
+ result = client.move_j(
+ [80, -80, 170, 5, 5, 190],
+ speed=0.5,
)
- assert result is True
+ assert result >= 0
# Wait for completion and verify robot stops
- assert client.wait_until_stopped(timeout=15.0)
+ assert client.wait_motion(timeout=15.0)
# Verify robot state
- pose = client.get_pose_rpy()
+ pose = client.pose()
assert pose is not None
assert len(pose) == 6
def test_cartesian_move_validation(self, client, server_proc):
"""Test cartesian movement with proper validation."""
- # Test that move requires either duration or speed (client raises)
- with pytest.raises(RuntimeError):
- client.move_cartesian([50, 50, 50, 0, 0, 0]) # No duration or speed
+ from parol6.utils.errors import MotionError
- # Valid cartesian move (may still fail IK in FAKE_SERIAL)
- result = client.move_cartesian(
- [50, 50, 50, 0, 0, 0],
- duration=2.0,
- )
- assert result is True
+ # Test that move requires either duration or speed (struct validates)
+ with pytest.raises(ValueError):
+ client.move_l([50, 50, 50, 0, 0, 0]) # No duration or speed
+
+ # Unreachable pose — planner surfaces IK failure via MotionError
+ with pytest.raises(MotionError):
+ client.move_l(
+ [50, 50, 50, 0, 0, 0],
+ duration=2.0,
+ )
@pytest.mark.integration
@@ -187,28 +171,42 @@ class TestErrorHandling:
"""Test error handling and edge cases."""
def test_invalid_command_format(self, server_proc, ports):
- """Test server response to invalid commands."""
- client = RobotClient(ports.server_ip, ports.server_port)
-
- # Send malformed command and consume server error response
- reply = client.send_raw(
- "INVALID_COMMAND|BAD|PARAMS", await_reply=True, timeout=1.0
- )
- assert isinstance(reply, str) and reply.startswith("ERROR|")
+ """Test server response to invalid binary msgpack commands."""
+ from parol6.protocol.wire import MsgType, encode, decode
+
+ # Send invalid command via raw socket with binary msgpack
+ with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
+ sock.settimeout(2.0)
+ # Send an array with invalid command type (9999 is not a valid CmdType)
+ msg = encode([9999, "invalid_param"])
+ sock.sendto(msg, (ports.server_ip, ports.server_port))
+
+ # Expect error response in array format: [MsgType.ERROR, message]
+ data, _ = sock.recvfrom(1024)
+ resp = decode(data)
+ assert isinstance(resp, (list, tuple))
+ assert resp[0] == MsgType.ERROR
+ # resp[1] is a RobotError wire list: [cmd_idx, code, title, cause, effect, remedy]
+ error_wire = resp[1]
+ assert isinstance(error_wire, list)
+ assert any("9999" in str(f) or "Invalid" in str(f) for f in error_wire)
# Server should remain responsive after handling the error
- assert client.ping() is not None
-
- def test_empty_command(self, server_proc, ports):
- """Test server response to empty commands."""
client = RobotClient(ports.server_ip, ports.server_port)
+ assert client.ping() is not None
- # Send empty command (fire-and-forget)
- sent = client.send_raw("")
- assert sent is True
+ def test_halted_motion_raises_motion_error(self, client, server_proc):
+ """Motion commands on a halted controller raise MotionError, not -1."""
+ from parol6.utils.errors import MotionError
- # Server should remain responsive
- assert client.ping() is not None
+ client.halt()
+ try:
+ with pytest.raises(MotionError) as exc_info:
+ client.home()
+ assert exc_info.value.robot_error.code > 0
+ assert exc_info.value.robot_error.title
+ finally:
+ client.resume()
def test_rapid_command_sequence(self, server_proc, ports):
"""Test server stability under rapid command sequence."""
@@ -233,13 +231,13 @@ def test_command_sequence_execution(self, server_proc, ports):
start_time = __import__("time").time()
# Execute sequence using public API
- assert client.home() is True
- assert client.delay(0.2) is True
- assert client.delay(0.2) is True
- assert client.delay(0.2) is True
+ assert client.home() >= 0
+ assert client.delay(0.2) >= 0
+ assert client.delay(0.2) >= 0
+ assert client.delay(0.2) >= 0
# Wait for all commands to complete via speeds
- assert client.wait_until_stopped(timeout=10.0)
+ assert client.wait_motion(timeout=10.0)
# Server should be responsive after sequence
assert client.ping() is not None
diff --git a/tests/test_examples.py b/tests/test_examples.py
new file mode 100644
index 0000000..b4a5d77
--- /dev/null
+++ b/tests/test_examples.py
@@ -0,0 +1,43 @@
+"""Verify that all example scripts run successfully in the simulator.
+
+These tests run each example as an isolated subprocess so they don't
+conflict with the shared integration test server.
+"""
+
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+import pytest
+
+EXAMPLES_DIR = Path(__file__).resolve().parents[1] / "examples"
+
+EXAMPLES = sorted(
+ p.name for p in EXAMPLES_DIR.glob("*.py") if not p.name.startswith("_")
+)
+
+ENV = {
+ **os.environ,
+ "PAROL6_FAKE_SERIAL": "1",
+ "PAROL6_STATUS_RATE_HZ": "20",
+}
+
+
+@pytest.mark.examples
+@pytest.mark.timeout(300)
+@pytest.mark.parametrize("script", EXAMPLES)
+def test_example_runs(script):
+ """Run each example as a subprocess and check it exits cleanly."""
+ result = subprocess.run(
+ [sys.executable, str(EXAMPLES_DIR / script)],
+ env=ENV,
+ capture_output=True,
+ text=True,
+ timeout=240,
+ )
+ assert result.returncode == 0, (
+ f"{script} failed (exit {result.returncode}):\n"
+ f"--- stdout ---\n{result.stdout[-2000:]}\n"
+ f"--- stderr ---\n{result.stderr[-2000:]}"
+ )
diff --git a/tests/unit/test_async_client_lifecycle.py b/tests/unit/test_async_client_lifecycle.py
index 52a2f80..27a82a4 100644
--- a/tests/unit/test_async_client_lifecycle.py
+++ b/tests/unit/test_async_client_lifecycle.py
@@ -7,13 +7,12 @@
@pytest.mark.asyncio
@pytest.mark.integration
-async def test_multicast_listener_shuts_down_on_close(ports, server_proc):
- """AsyncRobotClient.close() should cancel and clean up the real multicast listener.
+async def test_status_listener_shuts_down_on_close(ports, server_proc):
+ """AsyncRobotClient.close() should close the status UDP transport.
This test uses the real server process (server_proc), the real AsyncRobotClient,
- and the real multicast subscriber stack. It only relies on the existing
- test fixtures to start the FAKE_SERIAL controller on the auto-detected
- test ports.
+ and the real status listener. It only relies on the existing test fixtures to
+ start the FAKE_SERIAL controller on the auto-detected test ports.
"""
client = AsyncRobotClient(
@@ -21,28 +20,22 @@ async def test_multicast_listener_shuts_down_on_close(ports, server_proc):
)
try:
- # Ensure the controller is responsive before starting the multicast listener
- await client.wait_for_server_ready(timeout=5.0)
+ # Ensure the controller is responsive before starting the status listener
+ await client.wait_ready(timeout=5.0)
- # Force endpoint and multicast listener creation; this will invoke the
- # real _start_multicast_listener, which uses subscribe_status and
- # the underlying multicast socket implementation.
+ # Force endpoint and status listener creation
await client._ensure_endpoint()
- task = client._multicast_task
+ transport = client._status_transport
- assert task is not None, "Multicast listener task should be created"
- assert not task.done(), (
- "Multicast listener task should be running before close()"
- )
+ assert transport is not None, "Status transport should be created"
# Invoke graceful shutdown
await client.close()
- # After close(), the task should be finished and cleared
- assert client._multicast_task is None, (
- "Multicast listener reference should be cleared after close()"
+ # After close(), the transport should be cleared
+ assert client._status_transport is None, (
+ "Status transport should be cleared after close()"
)
- assert task.done(), "Multicast listener task should be completed after close()"
finally:
# close() is idempotent; ensure cleanup even if assertions fail earlier
await client.close()
@@ -50,11 +43,11 @@ async def test_multicast_listener_shuts_down_on_close(ports, server_proc):
@pytest.mark.asyncio
@pytest.mark.integration
-async def test_status_stream_terminates_on_close(ports, server_proc):
- """status_stream consumers should terminate when AsyncRobotClient.close() is called.
+async def test_stream_status_terminates_on_close(ports, server_proc):
+ """stream_status consumers should terminate when AsyncRobotClient.close() is called.
- This test exercises the real server process and the real multicast subscriber
- stack to ensure that any background tasks consuming status_stream() are
+ This test exercises the real server process and the real status listener
+ to ensure that any background tasks consuming stream_status() are
unblocked and finished by the time close() completes.
"""
@@ -65,18 +58,18 @@ async def test_status_stream_terminates_on_close(ports, server_proc):
async def consumer() -> None:
# Consume a few status messages until the client is closed.
# The loop should terminate automatically when close() is invoked.
- async for _ in client.status_stream():
+ async for _ in client.stream_status():
# Yield control so we don't spin too fast in tests
await asyncio.sleep(0)
try:
- # Ensure the controller is responsive before starting the multicast listener
- await client.wait_for_server_ready(timeout=5.0)
+ # Ensure the controller is responsive before starting the status listener
+ await client.wait_ready(timeout=5.0)
# Start the consumer task; this will internally trigger _ensure_endpoint()
consumer_task = asyncio.create_task(consumer())
- # Wait briefly to allow the multicast listener and status stream to start
+ # Wait briefly to allow the status listener and status stream to start
await asyncio.sleep(0.5)
# Closing the client should cause the consumer to exit its async-for loop
@@ -86,7 +79,7 @@ async def consumer() -> None:
await asyncio.wait_for(consumer_task, timeout=5.0)
assert consumer_task.done(), (
- "status_stream consumer should be finished after close()"
+ "stream_status consumer should be finished after close()"
)
finally:
# Ensure cleanup even if assertions fail earlier
diff --git a/tests/unit/test_blend.py b/tests/unit/test_blend.py
new file mode 100644
index 0000000..184e0b3
--- /dev/null
+++ b/tests/unit/test_blend.py
@@ -0,0 +1,253 @@
+"""Unit tests for joint-space and Cartesian N-command blending."""
+
+import numpy as np
+import pytest
+
+from parol6.motion.geometry import (
+ _blend_joint_path_into,
+ _linear_joint_segment_into,
+ build_composite_joint_path,
+)
+
+
+class TestLinearJointSegment:
+ """Tests for _linear_joint_segment_into helper."""
+
+ def test_full_segment(self):
+ start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+ end = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ out = np.empty((11, 6), dtype=np.float64)
+ _linear_joint_segment_into(start, end, out)
+ assert out.shape[0] == 11
+ assert np.allclose(out[0], start)
+ assert np.allclose(out[-1], end)
+ assert np.allclose(out[5], (start + end) / 2)
+
+ def test_partial_segment(self):
+ start = np.zeros(6)
+ end = np.ones(6)
+ out = np.empty((11, 6), dtype=np.float64)
+ _linear_joint_segment_into(start, end, out, s_start=0.25, s_end=0.75)
+ assert out.shape[0] == 11
+ assert np.allclose(out[0], np.full(6, 0.25))
+ assert np.allclose(out[-1], np.full(6, 0.75))
+
+
+class TestBlendJointPath:
+ """Tests for _blend_joint_path_into Bezier blend zone."""
+
+ def test_endpoints_match(self):
+ entry = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+ waypoint = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
+ exit_ = np.array([2.0, 0.0, 2.0, 0.0, 2.0, 0.0])
+
+ out = np.empty((21, 6), dtype=np.float64)
+ _blend_joint_path_into(entry, waypoint, exit_, out)
+ assert out.shape[0] == 21
+ assert np.allclose(out[0], entry)
+ assert np.allclose(out[-1], exit_)
+
+ def test_c1_continuity_at_entry(self):
+ """Tangent at t=0 should point from entry toward waypoint."""
+ entry = np.zeros(6)
+ waypoint = np.ones(6)
+ exit_ = np.array([2.0, 0.0, 2.0, 0.0, 2.0, 0.0])
+
+ out = np.empty((101, 6), dtype=np.float64)
+ _blend_joint_path_into(entry, waypoint, exit_, out)
+
+ # Numerical derivative at t=0
+ dt = 1.0 / 100
+ tangent_start = (out[1] - out[0]) / dt
+
+ # Expected tangent: d/dt[(1-t)^2 E + 2t(1-t)W + t^2 X] at t=0
+ # = 2(W - E)
+ expected_tangent = 2.0 * (waypoint - entry)
+ assert np.allclose(tangent_start, expected_tangent, atol=0.1)
+
+ def test_c1_continuity_at_exit(self):
+ """Tangent at t=1 should point from waypoint toward exit."""
+ entry = np.zeros(6)
+ waypoint = np.ones(6)
+ exit_ = np.array([2.0, 0.0, 2.0, 0.0, 2.0, 0.0])
+
+ out = np.empty((101, 6), dtype=np.float64)
+ _blend_joint_path_into(entry, waypoint, exit_, out)
+
+ dt = 1.0 / 100
+ tangent_end = (out[-1] - out[-2]) / dt
+
+ # Expected: 2(X - W)
+ expected_tangent = 2.0 * (exit_ - waypoint)
+ assert np.allclose(tangent_end, expected_tangent, atol=0.1)
+
+
+class TestBuildCompositeJointPath:
+ """Tests for build_composite_joint_path."""
+
+ def test_two_waypoints_no_blend(self):
+ """Two waypoints should produce a straight interpolation."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6)
+ result = build_composite_joint_path([w0, w1], [], samples_per_segment=11)
+ assert result.shape == (11, 6)
+ assert np.allclose(result[0], w0)
+ assert np.allclose(result[-1], w1)
+
+ def test_three_waypoints_with_blend(self):
+ """Three waypoints with a blend zone at the middle one."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6)
+ w2 = np.array([2.0, 0.0, 2.0, 0.0, 2.0, 0.0])
+
+ result = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.3, 0.3)],
+ samples_per_segment=20,
+ )
+
+ assert result.ndim == 2
+ assert result.shape[1] == 6
+ # Path should start at w0 and end at w2
+ assert np.allclose(result[0], w0)
+ assert np.allclose(result[-1], w2)
+
+ # Path should NOT pass exactly through w1 (it's blended)
+ dists_to_w1 = np.linalg.norm(result - w1, axis=1)
+ assert dists_to_w1.min() > 0.01, (
+ "Path should round the corner, not pass through w1"
+ )
+
+ def test_four_waypoints_two_blend_zones(self):
+ """Four waypoints with two blend zones."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6)
+ w2 = np.array([2.0, 0.0, 2.0, 0.0, 2.0, 0.0])
+ w3 = np.full(6, 3.0)
+
+ result = build_composite_joint_path(
+ [w0, w1, w2, w3],
+ [(0.2, 0.2), (0.2, 0.2)],
+ samples_per_segment=20,
+ )
+
+ assert result.ndim == 2
+ assert result.shape[1] == 6
+ assert np.allclose(result[0], w0)
+ assert np.allclose(result[-1], w3)
+
+ def test_zero_blend_fracs(self):
+ """Zero blend fractions should produce sharp corners (linear segments only)."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6)
+ w2 = np.full(6, 2.0)
+
+ result = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.0, 0.0)],
+ samples_per_segment=11,
+ )
+
+ # With zero blend, path should pass through w1
+ dists_to_w1 = np.linalg.norm(result - w1, axis=1)
+ assert dists_to_w1.min() < 1e-10, "Zero blend should pass through waypoint"
+
+ def test_large_blend_fracs_clamped(self):
+ """Blend fractions > 0.5 should be clamped."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6)
+ w2 = np.full(6, 2.0)
+
+ # Should not raise even with extreme fractions
+ result = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.9, 0.9)],
+ samples_per_segment=20,
+ )
+ assert result.ndim == 2
+ assert np.allclose(result[0], w0)
+ assert np.allclose(result[-1], w2)
+
+ def test_wrong_blend_fracs_count_raises(self):
+ """Mismatched blend_fracs count should raise ValueError."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6)
+ w2 = np.full(6, 2.0)
+
+ with pytest.raises(ValueError, match="Expected 1 blend_fracs"):
+ build_composite_joint_path([w0, w1, w2], [])
+
+ def test_single_waypoint_raises(self):
+ with pytest.raises(ValueError, match="Need at least 2"):
+ build_composite_joint_path([np.zeros(6)], [])
+
+ def test_path_continuity(self):
+ """Adjacent samples should be close (no jumps)."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6) * 0.5
+ w2 = np.ones(6)
+
+ result = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.3, 0.3)],
+ samples_per_segment=50,
+ )
+
+ # Max jump between consecutive samples should be small
+ diffs = np.diff(result, axis=0)
+ max_jump = np.max(np.abs(diffs))
+ assert max_jump < 0.05, f"Max jump {max_jump} too large — path is discontinuous"
+
+ def test_no_double_skip_at_junction(self):
+ """Max step at the blend-to-linear junction should stay below 3x the median step (no double-skip)."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6) * 0.5
+ w2 = np.ones(6)
+
+ result = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.3, 0.3)],
+ samples_per_segment=50,
+ )
+
+ # Compute per-step distances
+ diffs = np.linalg.norm(np.diff(result, axis=0), axis=1)
+ # The maximum step should not be more than 2x the median step
+ median_step = np.median(diffs)
+ assert diffs.max() < 3.0 * median_step, (
+ f"Max step {diffs.max():.6f} is >3x median {median_step:.6f} — "
+ "likely double-skip at junction"
+ )
+
+ def test_adaptive_blend_samples(self):
+ """Blend sample count should scale with blend fraction."""
+ w0 = np.zeros(6)
+ w1 = np.ones(6) * 0.5
+ w2 = np.ones(6)
+
+ small_blend = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.05, 0.05)],
+ samples_per_segment=50,
+ )
+ large_blend = build_composite_joint_path(
+ [w0, w1, w2],
+ [(0.4, 0.4)],
+ samples_per_segment=50,
+ )
+
+ # Larger blend fraction should produce more samples
+ assert large_blend.shape[0] > small_blend.shape[0], (
+ f"Large blend ({large_blend.shape[0]}) should have more samples "
+ f"than small blend ({small_blend.shape[0]})"
+ )
+
+
+class TestMaxBlendLookahead:
+ """Test the config constant exists."""
+
+ def test_config_exists(self):
+ from parol6.config import MAX_BLEND_LOOKAHEAD
+
+ assert isinstance(MAX_BLEND_LOOKAHEAD, int)
+ assert MAX_BLEND_LOOKAHEAD >= 1
diff --git a/tests/unit/test_controller_system_commands.py b/tests/unit/test_controller_system_commands.py
new file mode 100644
index 0000000..bc7e1f0
--- /dev/null
+++ b/tests/unit/test_controller_system_commands.py
@@ -0,0 +1,83 @@
+"""
+Unit tests for system command side-effect signaling.
+
+Tests verify that system commands set typed side-effect attributes
+(_switch_simulator, _switch_port, _sync_mock) which the controller
+reads to trigger infrastructure changes.
+"""
+
+from unittest.mock import patch
+
+from parol6.commands.base import ExecutionStatusCode
+from parol6.server.state import ControllerState
+
+
+class TestSystemCommandSideEffects:
+ """Test that system commands signal side-effects via typed attributes."""
+
+ def test_simulator_command_sets_switch_simulator(self):
+ """Verify SIMULATOR command sets _switch_simulator attribute."""
+ from parol6.commands.system_commands import SimulatorCommand
+ from parol6.protocol.wire import SimulatorCmd
+
+ cmd = SimulatorCommand(SimulatorCmd(on=True))
+ state = ControllerState()
+
+ code = cmd.execute_step(state)
+
+ assert code == ExecutionStatusCode.COMPLETED
+ assert cmd._switch_simulator is True
+
+ def test_simulator_command_off(self):
+ """Verify SIMULATOR command off sets _switch_simulator=False."""
+ from parol6.commands.system_commands import SimulatorCommand
+ from parol6.protocol.wire import SimulatorCmd
+
+ cmd = SimulatorCommand(SimulatorCmd(on=False))
+ state = ControllerState()
+
+ code = cmd.execute_step(state)
+
+ assert code == ExecutionStatusCode.COMPLETED
+ assert cmd._switch_simulator is False
+
+ def test_set_port_command_sets_switch_port(self):
+ """Verify CONNECT_HARDWARE command sets _switch_port attribute."""
+ from parol6.commands.system_commands import ConnectHardwareCommand
+ from parol6.protocol.wire import ConnectHardwareCmd
+
+ cmd = ConnectHardwareCommand(ConnectHardwareCmd(port_str="/dev/ttyUSB0"))
+ state = ControllerState()
+
+ with patch("parol6.commands.system_commands.save_com_port", return_value=True):
+ code = cmd.execute_step(state)
+
+ assert code == ExecutionStatusCode.COMPLETED
+ assert cmd._switch_port == "/dev/ttyUSB0"
+
+ def test_set_port_command_fail_leaves_no_side_effect(self):
+ """Verify CONNECT_HARDWARE does not set _switch_port on save failure."""
+ from parol6.commands.system_commands import ConnectHardwareCommand
+ from parol6.protocol.wire import ConnectHardwareCmd
+
+ cmd = ConnectHardwareCommand(ConnectHardwareCmd(port_str="/dev/ttyUSB0"))
+ state = ControllerState()
+
+ with patch("parol6.commands.system_commands.save_com_port", return_value=False):
+ code = cmd.execute_step(state)
+
+ assert code == ExecutionStatusCode.FAILED
+ assert cmd._switch_port is None
+
+ def test_reset_command_sets_sync_mock(self):
+ """Verify RESET command sets _sync_mock attribute."""
+ from parol6.commands.utility_commands import ResetCommand
+ from parol6.protocol.wire import ResetCmd
+
+ cmd = ResetCommand(ResetCmd())
+ state = ControllerState()
+
+ code = cmd.execute_step(state)
+
+ assert code == ExecutionStatusCode.COMPLETED
+ assert cmd._sync_mock is True
diff --git a/tests/unit/test_conversions.py b/tests/unit/test_conversions.py
index dcf2712..28182f9 100644
--- a/tests/unit/test_conversions.py
+++ b/tests/unit/test_conversions.py
@@ -1,19 +1,20 @@
from unittest.mock import AsyncMock
from parol6 import RobotClient
+from parol6.protocol.wire import PoseResultStruct
-def _pose_payload_from_matrix(m):
- # Flatten list of lists to comma string after prefix
+def _pose_result(matrix: list) -> PoseResultStruct:
+ """Create PoseResultStruct with flattened pose matrix."""
flat = []
- for row in m:
+ for row in matrix:
flat.extend(row)
- return "POSE|" + ",".join(str(x) for x in flat)
+ return PoseResultStruct(pose=flat)
-def test_get_pose_rpy_identity_translation(monkeypatch):
+def test_pose_identity_translation(monkeypatch):
"""
- Validate get_pose_rpy converts 4x4 pose matrix to [x,y,z,rx,ry,rz] (mm,deg).
+ Validate pose() converts 4x4 pose matrix to [x,y,z,rx,ry,rz] (mm,deg).
Use identity rotation with translation (10,20,30) mm.
"""
client = RobotClient()
@@ -25,13 +26,12 @@ def test_get_pose_rpy_identity_translation(monkeypatch):
[0, 0, 1, 30],
[0, 0, 0, 1],
]
- payload = _pose_payload_from_matrix(mat)
+ result = _pose_result(mat)
- # Patch the async client's _request coroutine used under the hood
- mock_request = AsyncMock(return_value=payload)
+ mock_request = AsyncMock(return_value=result)
monkeypatch.setattr(client.async_client, "_request", mock_request)
- pose_rpy = client.get_pose_rpy()
+ pose_rpy = client.pose()
assert pose_rpy is not None
# Translations
assert pose_rpy[0:3] == [10, 20, 30]
@@ -42,15 +42,14 @@ def test_get_pose_rpy_identity_translation(monkeypatch):
assert abs(rz) < 1e-6
-def test_get_pose_rpy_malformed_payload(monkeypatch):
+def test_pose_malformed_payload(monkeypatch):
"""
Malformed POSE payload (wrong length) should return None.
"""
client = RobotClient()
- # Not 16 elements
- mock_request = AsyncMock(return_value="POSE|1,2,3")
+ mock_request = AsyncMock(return_value=PoseResultStruct(pose=[1, 2, 3]))
monkeypatch.setattr(client.async_client, "_request", mock_request)
- pose_rpy = client.get_pose_rpy()
+ pose_rpy = client.pose()
assert pose_rpy is None
diff --git a/tests/unit/test_dry_run_blend.py b/tests/unit/test_dry_run_blend.py
new file mode 100644
index 0000000..ab38997
--- /dev/null
+++ b/tests/unit/test_dry_run_blend.py
@@ -0,0 +1,88 @@
+"""Unit tests for DryRunRobotClient blend buffering."""
+
+import numpy as np
+import pytest
+
+from parol6.client.dry_run_client import DryRunRobotClient
+
+# Valid PAROL6 joint angles (deg) within limits:
+# J1: [-123, 123], J2: [-145, -3.375], J3: [107.9, 287.9],
+# J4: [-105, 105], J5: [-90, 90], J6: [0, 360]
+# Home/standby is [90, -90, 180, 0, 0, 180].
+W0 = [90.0, -90.0, 180.0, 0.0, 0.0, 180.0]
+W1 = [80.0, -80.0, 190.0, 10.0, 10.0, 190.0]
+W2 = [70.0, -70.0, 200.0, 20.0, 20.0, 200.0]
+W3 = [60.0, -60.0, 210.0, 30.0, 30.0, 210.0]
+
+
+@pytest.fixture
+def client():
+ return DryRunRobotClient()
+
+
+class TestDryRunBlend:
+ """Tests for blend buffering in DryRunRobotClient."""
+
+ def test_blend_produces_composite(self, client):
+ """Two move_j calls with r > 0 should buffer; the final r=0 call flushes a single composite."""
+ r1 = client.move_j(angles=W1, speed=0.5, r=10)
+ assert r1 is None, "r > 0 should buffer, not return immediately"
+
+ r2 = client.move_j(angles=W2, speed=0.5, r=10)
+ assert r2 is None, "r > 0 should buffer"
+
+ # r=0 terminates the chain → flush returns composite result
+ r3 = client.move_j(angles=W3, speed=0.5, r=0)
+ assert r3 is not None, "r=0 after buffered commands should flush and return"
+ assert r3.tcp_poses.shape[0] > 0
+ assert r3.tcp_poses.shape[1] == 6
+ assert r3.error is None
+
+ def test_no_blend_without_radius(self, client):
+ """move_j with r=0 should return immediately (no buffering)."""
+ result = client.move_j(angles=W1, speed=0.5, r=0)
+ assert result is not None, "r=0 should return immediately"
+ assert result.tcp_poses.shape[0] > 0
+ assert result.error is None
+
+ def test_flush_returns_buffered(self, client):
+ """Explicit flush() after buffered commands should return results list."""
+ r1 = client.move_j(angles=W1, speed=0.5, r=10)
+ assert r1 is None
+
+ r2 = client.move_j(angles=W2, speed=0.5, r=10)
+ assert r2 is None
+
+ results = client.flush()
+ assert len(results) > 0, "flush() should return buffered results"
+ assert results[0].tcp_poses.shape[0] > 0
+ assert results[0].error is None
+
+ def test_flush_empty_returns_empty_list(self, client):
+ """flush() with no buffered commands should return empty list."""
+ assert client.flush() == []
+
+ def test_blended_trajectory_is_longer(self, client):
+ """Composite blended trajectory should have longer duration than a single move."""
+ single = DryRunRobotClient()
+ single_result = single.move_j(angles=W3, speed=0.3, r=0)
+ assert single_result is not None
+
+ # Blended chain of 3 moves
+ client.move_j(angles=W1, speed=0.3, r=10)
+ client.move_j(angles=W2, speed=0.3, r=10)
+ r3 = client.move_j(angles=W3, speed=0.3, r=0)
+ assert r3 is not None
+
+ assert r3.duration > single_result.duration, (
+ f"Blended ({r3.duration:.3f}s) should be longer than single ({single_result.duration:.3f}s)"
+ )
+
+ def test_state_updated_after_blend(self, client):
+ """Position should reflect the final waypoint after a blended chain."""
+ client.move_j(angles=W1, speed=0.5, r=10)
+ client.move_j(angles=W2, speed=0.5, r=0)
+
+ angles_after = client.angles()
+ assert len(angles_after) == 6
+ np.testing.assert_allclose(angles_after, W2, atol=0.5)
diff --git a/tests/unit/test_dry_run_script_compat.py b/tests/unit/test_dry_run_script_compat.py
new file mode 100644
index 0000000..e2271b6
--- /dev/null
+++ b/tests/unit/test_dry_run_script_compat.py
@@ -0,0 +1,120 @@
+"""Verify that all motion commands users write in scripts work through the dry run client.
+
+The dry run client uses __getattr__ + build_cmd to dispatch calls by mapping
+kwargs to wire struct fields. If the client API param names don't match the
+struct field names, the kwargs get silently dropped and the command fails.
+
+This test calls every user-facing motion method with the same signatures shown
+in the docs / editor auto-complete, ensuring the dry run path doesn't diverge
+from the real client.
+"""
+
+import pytest
+
+from parol6.client.dry_run_client import DryRunRobotClient
+
+HOME = [90.0, -90.0, 180.0, 0.0, 0.0, 180.0]
+POSE_A = [0.0, 280.0, 200.0, 90.0, 0.0, 90.0]
+POSE_B = [50.0, 280.0, 200.0, 90.0, 0.0, 90.0]
+POSE_C = [50.0, 280.0, 250.0, 90.0, 0.0, 90.0]
+ANGLES_A = [80.0, -80.0, 190.0, 10.0, 10.0, 190.0]
+ANGLES_B = [70.0, -70.0, 200.0, 20.0, 20.0, 200.0]
+
+
+@pytest.fixture
+def client():
+ return DryRunRobotClient()
+
+
+class TestDryRunScriptCompat:
+ """Every call signature a user can write in a script must work in dry run."""
+
+ def test_home(self, client):
+ result = client.home()
+ assert result is not None
+ assert result.error is None
+
+ def test_move_j_positional(self, client):
+ result = client.move_j(ANGLES_A, speed=0.5)
+ assert result is not None
+ assert result.error is None
+
+ def test_move_j_angles_kwarg(self, client):
+ result = client.move_j(angles=ANGLES_A, speed=0.5)
+ assert result is not None
+ assert result.error is None
+
+ def test_move_j_with_accel(self, client):
+ result = client.move_j(ANGLES_A, speed=0.5, accel=0.8)
+ assert result is not None
+ assert result.error is None
+
+ def test_move_j_with_duration(self, client):
+ result = client.move_j(ANGLES_A, duration=2.0)
+ assert result is not None
+ assert result.error is None
+
+ def test_move_j_relative(self, client):
+ result = client.move_j(ANGLES_A, speed=0.5, rel=True)
+ assert result is not None
+
+ def test_move_l_positional(self, client):
+ result = client.move_l(POSE_A, speed=0.5)
+ assert result is not None
+ assert result.error is None
+
+ def test_move_l_with_frame(self, client):
+ result = client.move_l(POSE_A, speed=0.5, frame="WRF")
+ assert result is not None
+
+ def test_move_c(self, client):
+ client.move_l(POSE_A, speed=0.5)
+ result = client.move_c(via=POSE_B, end=POSE_A, speed=0.5)
+ assert result is not None
+
+ def test_move_s(self, client):
+ client.move_l(POSE_A, speed=0.5)
+ waypoints = [POSE_A, POSE_B, POSE_C, POSE_A]
+ result = client.move_s(waypoints=waypoints, speed=0.5)
+ assert result is not None
+
+ def test_move_p(self, client):
+ client.move_l(POSE_A, speed=0.5)
+ waypoints = [POSE_A, POSE_B, POSE_C, POSE_A]
+ result = client.move_p(waypoints=waypoints, speed=0.5)
+ assert result is not None
+
+ def test_move_j_blend_radius(self, client):
+ """Blend radius queues commands; r=0 flushes."""
+ r1 = client.move_j(ANGLES_A, speed=0.5, r=10)
+ assert r1 is None # buffered
+ r2 = client.move_j(ANGLES_B, speed=0.5, r=0)
+ assert r2 is not None # flushed
+
+ def test_move_l_blend_radius(self, client):
+ r1 = client.move_l(POSE_A, speed=0.5, r=15)
+ assert r1 is None
+ r2 = client.move_l(POSE_B, speed=0.5, r=0)
+ assert r2 is not None
+
+ def test_angles(self, client):
+ angles = client.angles()
+ assert isinstance(angles, list)
+ assert len(angles) == 6
+
+ def test_pose(self, client):
+ pose = client.pose()
+ assert isinstance(pose, list)
+ assert len(pose) == 6
+
+ def test_flush(self, client):
+ results = client.flush()
+ assert isinstance(results, list)
+
+ def test_delay(self, client):
+ # Should be a no-op, not raise
+ client.delay(1.0)
+
+ def test_wait_motion(self, client):
+ client.move_j(ANGLES_A, speed=0.5)
+ client.wait_motion()
diff --git a/tests/unit/test_gripper_ramp.py b/tests/unit/test_gripper_ramp.py
new file mode 100644
index 0000000..f197a0e
--- /dev/null
+++ b/tests/unit/test_gripper_ramp.py
@@ -0,0 +1,200 @@
+"""Unit tests for electric gripper ramp simulation in mock transport."""
+
+import math
+
+import numpy as np
+import pytest
+
+from parol6.server.transports.mock_serial_transport import _simulate_gripper_ramp_jit
+from parol6.tools import ElectricGripperConfig, get_registry
+
+
+# SSG-48 constants (from firmware + physical measurement)
+SSG48_TICK_RANGE = 10_432.0 # 24mm / (pi * 12mm) * 16384
+SSG48_MIN_SPEED = 40.0
+SSG48_MAX_SPEED = 80_000.0
+
+# MSG constants
+MSG_GEAR_PD_MM = 16.67
+MSG_ENCODER_CPR = 16_384
+MSG_MIN_SPEED = 500.0
+MSG_MAX_SPEED = 60_000.0
+
+
+class TestGripperRampJit:
+ """Test _simulate_gripper_ramp_jit directly with known inputs."""
+
+ def _make_state(self, target=128.0, speed_byte=255.0, active=True, pos_f=0.0):
+ ramp = np.array([target, speed_byte, 1.0 if active else 0.0], dtype=np.float64)
+ data_in = np.zeros(6, dtype=np.int32)
+ return ramp, data_in, pos_f
+
+ def test_inactive_ramp_is_noop(self):
+ ramp, data_in, pos_f = self._make_state(active=False, pos_f=50.0)
+ result = _simulate_gripper_ramp_jit(
+ ramp,
+ data_in,
+ pos_f,
+ 0.01,
+ SSG48_TICK_RANGE,
+ SSG48_MIN_SPEED,
+ SSG48_MAX_SPEED,
+ )
+ assert result == 50.0
+ assert data_in[1] == 0 # unchanged
+
+ def test_ramp_is_gradual_not_instant(self):
+ """After one tick at moderate speed, position should not yet reach target."""
+ ramp, data_in, pos_f = self._make_state(
+ target=200.0, speed_byte=128.0, pos_f=0.0
+ )
+ result = _simulate_gripper_ramp_jit(
+ ramp,
+ data_in,
+ pos_f,
+ 0.01,
+ SSG48_TICK_RANGE,
+ SSG48_MIN_SPEED,
+ SSG48_MAX_SPEED,
+ )
+ # Should have moved but not arrived
+ assert result > 0.0
+ assert result < 200.0
+ assert data_in[1] == int(result + 0.5)
+
+ def test_ramp_converges_to_target(self):
+ """Running enough ticks should reach the target."""
+ ramp, data_in, pos_f = self._make_state(
+ target=200.0, speed_byte=255.0, pos_f=0.0
+ )
+ dt = 0.01 # 100 Hz
+ for _ in range(200): # 2 seconds — well beyond max travel time
+ pos_f = _simulate_gripper_ramp_jit(
+ ramp,
+ data_in,
+ pos_f,
+ dt,
+ SSG48_TICK_RANGE,
+ SSG48_MIN_SPEED,
+ SSG48_MAX_SPEED,
+ )
+ assert pos_f == pytest.approx(200.0)
+ assert ramp[2] < 0.5 # deactivated
+ assert data_in[1] == 200
+
+ def test_higher_speed_arrives_faster(self):
+ """Higher speed byte should reach target in fewer ticks."""
+ dt = 0.01
+
+ # Slow speed (byte=25)
+ ramp_slow, data_slow, pos_slow = self._make_state(target=200.0, speed_byte=25.0)
+ ticks_slow = 0
+ for _ in range(500):
+ pos_slow = _simulate_gripper_ramp_jit(
+ ramp_slow,
+ data_slow,
+ pos_slow,
+ dt,
+ SSG48_TICK_RANGE,
+ SSG48_MIN_SPEED,
+ SSG48_MAX_SPEED,
+ )
+ ticks_slow += 1
+ if ramp_slow[2] < 0.5:
+ break
+
+ # Fast speed (byte=255)
+ ramp_fast, data_fast, pos_fast = self._make_state(
+ target=200.0, speed_byte=255.0
+ )
+ ticks_fast = 0
+ for _ in range(500):
+ pos_fast = _simulate_gripper_ramp_jit(
+ ramp_fast,
+ data_fast,
+ pos_fast,
+ dt,
+ SSG48_TICK_RANGE,
+ SSG48_MIN_SPEED,
+ SSG48_MAX_SPEED,
+ )
+ ticks_fast += 1
+ if ramp_fast[2] < 0.5:
+ break
+
+ assert ticks_fast < ticks_slow
+
+ def test_ramp_moves_in_both_directions(self):
+ """Ramp should work for both increasing and decreasing positions."""
+ dt = 0.01
+ # Start at 200, target 50 (decreasing)
+ ramp, data_in, pos_f = self._make_state(
+ target=50.0, speed_byte=200.0, pos_f=200.0
+ )
+ result = _simulate_gripper_ramp_jit(
+ ramp, data_in, pos_f, dt, SSG48_TICK_RANGE, SSG48_MIN_SPEED, SSG48_MAX_SPEED
+ )
+ assert result < 200.0 # moved toward target
+
+ def test_ssg48_full_travel_time(self):
+ """SSG-48 full travel at max speed should take ~0.13s (13 ticks at 100Hz)."""
+ dt = 0.01
+ ramp, data_in, pos_f = self._make_state(
+ target=255.0, speed_byte=255.0, pos_f=0.0
+ )
+ ticks = 0
+ for _ in range(100):
+ pos_f = _simulate_gripper_ramp_jit(
+ ramp,
+ data_in,
+ pos_f,
+ dt,
+ SSG48_TICK_RANGE,
+ SSG48_MIN_SPEED,
+ SSG48_MAX_SPEED,
+ )
+ ticks += 1
+ if ramp[2] < 0.5:
+ break
+ # Should complete in roughly 13 ticks (0.13s) — allow some tolerance
+ assert 10 <= ticks <= 20
+
+
+class TestElectricGripperConfigSpecs:
+ """Verify physical specs on tool configs match expected values."""
+
+ def test_ssg48_config(self):
+ cfg = get_registry()["SSG-48"]
+ assert isinstance(cfg, ElectricGripperConfig)
+ assert cfg.encoder_cpr == 16_384
+ assert cfg.gear_pd_mm == 12.0
+ assert cfg.firmware_speed_range_tps == (40, 80_000)
+
+ def test_msg_config(self):
+ cfg = get_registry()["MSG"]
+ assert isinstance(cfg, ElectricGripperConfig)
+ assert cfg.encoder_cpr == 16_384
+ assert cfg.gear_pd_mm == pytest.approx(16.67, abs=0.01)
+ assert cfg.firmware_speed_range_tps == (500, 60_000)
+
+ def test_ssg48_tick_range_derivation(self):
+ """Tick range derived from gear PD + travel_m should match expected ~10,432."""
+ cfg = get_registry()["SSG-48"]
+ assert isinstance(cfg, ElectricGripperConfig)
+ from waldoctl.tools import LinearMotion
+
+ motion = next(m for m in cfg.motions if isinstance(m, LinearMotion))
+ travel_mm = motion.travel_m * 1000.0
+ tick_range = (travel_mm / (math.pi * cfg.gear_pd_mm)) * cfg.encoder_cpr
+ assert tick_range == pytest.approx(10_432, rel=0.01)
+
+ def test_msg_100mm_tick_range_derivation(self):
+ """MSG 100mm tick range should be ~8,353."""
+ cfg = get_registry()["MSG"]
+ assert isinstance(cfg, ElectricGripperConfig)
+ from waldoctl.tools import LinearMotion
+
+ motion = next(m for m in cfg.motions if isinstance(m, LinearMotion))
+ travel_mm = motion.travel_m * 1000.0
+ tick_range = (travel_mm / (math.pi * cfg.gear_pd_mm)) * cfg.encoder_cpr
+ assert tick_range == pytest.approx(8_353, rel=0.02)
diff --git a/tests/unit/test_messages.py b/tests/unit/test_messages.py
new file mode 100644
index 0000000..e8f8b2a
--- /dev/null
+++ b/tests/unit/test_messages.py
@@ -0,0 +1,181 @@
+"""
+Unit tests for binary protocol message helpers.
+"""
+
+import numpy as np
+import pytest
+import msgspec
+
+from parol6.utils.error_catalog import make_error
+from parol6.utils.error_codes import ErrorCode
+from waldoctl import ActionState, ToolStatus
+from waldoctl.tools import ToolState
+
+from parol6.protocol.wire import (
+ AnglesResultStruct,
+ ErrorMsg,
+ MsgType,
+ OkMsg,
+ PoseResultStruct,
+ ResponseMsg,
+ StatusBuffer,
+ decode,
+ decode_message,
+ decode_status_bin_into,
+ encode,
+ pack_error,
+ pack_ok,
+ pack_ok_index,
+ pack_response,
+ pack_status,
+)
+
+
+class TestPackUnpack:
+ """Test packing and unpacking roundtrips via decode_message."""
+
+ def test_pack_ok(self):
+ msg = decode_message(pack_ok())
+ assert isinstance(msg, OkMsg)
+ assert msg.index is None
+
+ def test_pack_ok_index(self):
+ msg = decode_message(pack_ok_index(42))
+ assert isinstance(msg, OkMsg)
+ assert msg.index == 42
+
+ def test_pack_error(self):
+ error = make_error(
+ ErrorCode.COMM_VALIDATION_ERROR, detail="Something went wrong"
+ )
+ msg = decode_message(pack_error(error))
+ assert isinstance(msg, ErrorMsg)
+ assert isinstance(msg.message, list)
+ from parol6.utils.error_catalog import RobotError
+
+ recovered = RobotError.from_wire(msg.message)
+ assert recovered.code == ErrorCode.COMM_VALIDATION_ERROR
+ assert "Something went wrong" in recovered.cause
+
+ def test_pack_response(self):
+ msg = decode_message(
+ pack_response(AnglesResultStruct(angles=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]))
+ )
+ assert isinstance(msg, ResponseMsg)
+ assert isinstance(msg.result, AnglesResultStruct)
+ assert msg.result.angles == [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
+
+ def test_pack_response_with_numpy(self):
+ arr = np.array([1.0, 2.0, 3.0], dtype=np.float64)
+ msg = decode_message(pack_response(PoseResultStruct(pose=arr)))
+ assert isinstance(msg, ResponseMsg)
+ assert isinstance(msg.result, PoseResultStruct)
+ assert msg.result.pose == [1.0, 2.0, 3.0]
+
+ def test_pack_status_roundtrip(self):
+ """Status broadcast (uses separate decode path, not decode_message)."""
+ pose = np.arange(16, dtype=np.float64)
+ angles = np.array([0.0, 10.0, 20.0, 30.0, 40.0, 50.0], dtype=np.float64)
+ speeds = np.array([100, 200, 300, 400, 500, 600], dtype=np.int32)
+ io = np.array([1, 0, 1, 0, 1], dtype=np.uint8)
+ joint_en = np.ones(12, dtype=np.uint8)
+ cart_en_wrf = np.ones(12, dtype=np.uint8)
+ cart_en_trf = np.ones(12, dtype=np.uint8)
+ tool_status = ToolStatus(
+ key="ssg48",
+ state=ToolState.ACTIVE,
+ engaged=True,
+ part_detected=True,
+ fault_code=0,
+ positions=(0.75, 0.25),
+ channels=(5.5, 3.14),
+ )
+
+ packed = pack_status(
+ pose,
+ angles,
+ speeds,
+ io,
+ "MoveJCommand",
+ ActionState.EXECUTING,
+ joint_en,
+ cart_en_wrf,
+ cart_en_trf,
+ action_params="speed=50 acc=100",
+ tool_status=tool_status,
+ tcp_speed=123.456,
+ )
+ unpacked = decode(packed)
+ assert unpacked[0] == MsgType.STATUS
+ assert unpacked[1] == list(pose)
+ assert unpacked[2] == list(angles)
+ assert unpacked[5] == "MoveJCommand"
+ assert unpacked[6] == ActionState.EXECUTING
+
+ # action_params at index 16
+ assert unpacked[16] == "speed=50 acc=100"
+
+ # tool_status at index 17 is a 7-element tuple:
+ # (key, state, engaged, part_detected, fault_code, positions, channels)
+ ts = unpacked[17]
+ assert ts[0] == "ssg48" # key
+ assert ts[1] == 2 # state (ToolState.ACTIVE)
+ assert ts[2] is True # engaged
+ assert ts[3] is True # part_detected
+ assert ts[4] == 0 # fault_code
+ assert ts[5] == [0.75, 0.25] # positions (tuple -> list via msgpack)
+ assert ts[6] == [5.5, 3.14] # channels (tuple -> list via msgpack)
+
+ # tcp_speed at index 18
+ assert unpacked[18] == pytest.approx(123.456)
+
+ def test_pack_decode_status_bin_roundtrip(self):
+ """pack_status -> decode_status_bin_into preserves all tool status fields."""
+ pose = np.eye(4, dtype=np.float64).ravel()
+ angles = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 60.0], dtype=np.float64)
+ speeds = np.zeros(6, dtype=np.float64)
+ io = np.array([1, 0, 1, 0, 0], dtype=np.uint8)
+ joint_en = np.ones(12, dtype=np.uint8)
+ cart_en_wrf = np.ones(12, dtype=np.uint8)
+ cart_en_trf = np.ones(12, dtype=np.uint8)
+ tool_status = ToolStatus(
+ key="electric_gripper",
+ state=ToolState.IDLE,
+ engaged=False,
+ part_detected=True,
+ fault_code=42,
+ positions=(0.5,),
+ channels=(1.2, 3.4),
+ )
+
+ packed = pack_status(
+ pose,
+ angles,
+ speeds,
+ io,
+ "HomeCommand",
+ ActionState.IDLE,
+ joint_en,
+ cart_en_wrf,
+ cart_en_trf,
+ tool_status=tool_status,
+ tcp_speed=55.5,
+ )
+
+ buf = StatusBuffer()
+ assert decode_status_bin_into(packed, buf) is True
+
+ ts = buf.tool_status
+ assert ts.key == "electric_gripper"
+ assert ts.state == ToolState.IDLE
+ assert isinstance(ts.state, ToolState)
+ assert ts.engaged is False
+ assert ts.part_detected is True
+ assert ts.fault_code == 42
+ assert ts.positions == (0.5,)
+ assert ts.channels == (1.2, 3.4)
+ assert buf.tcp_speed == pytest.approx(55.5)
+
+ def test_invalid_data_raises(self):
+ with pytest.raises(msgspec.ValidationError):
+ decode_message(encode(["not", "a", "valid", "message"]))
diff --git a/tests/unit/test_mock_transport.py b/tests/unit/test_mock_transport.py
deleted file mode 100644
index 10fd837..0000000
--- a/tests/unit/test_mock_transport.py
+++ /dev/null
@@ -1,398 +0,0 @@
-"""
-Unit tests for MockSerialTransport.
-
-Tests the mock serial transport implementation to ensure:
-1. Mock transport can be created and connected
-2. Mock transport simulates robot responses correctly
-3. Transport factory correctly selects mock when PAROL6_FAKE_SERIAL is set
-4. Mock transport is compatible with SerialTransport interface
-"""
-
-import os
-import threading
-import time
-
-import numpy as np
-import parol6.PAROL6_ROBOT as PAROL6_ROBOT
-import pytest
-from parol6.config import HOME_ANGLES_DEG
-from parol6.protocol.wire import CommandCode, unpack_rx_frame_into
-from parol6.server.transports import create_transport, is_simulation_mode
-from parol6.server.transports.mock_serial_transport import (
- MockRobotState,
- MockSerialTransport,
-)
-
-
-def _wait_for_latest_frame_and_decode(
- transport: MockSerialTransport, timeout_s: float = 0.5
-):
- """
- Helper: wait for a latest frame publication and decode into numpy arrays.
- Returns dict-like with arrays or None on timeout.
- """
- start = time.time()
- last_ver = -1
- # Ensure reader running
- shutdown = threading.Event()
- transport.start_reader(shutdown)
-
- pos = np.zeros(6, dtype=np.int32)
- spd = np.zeros(6, dtype=np.int32)
- homed = np.zeros(8, dtype=np.uint8)
- io_bits = np.zeros(8, dtype=np.uint8)
- temp = np.zeros(8, dtype=np.uint8)
- poserr = np.zeros(8, dtype=np.uint8)
- timing = np.zeros(1, dtype=np.int32)
- grip = np.zeros(6, dtype=np.int32)
-
- while time.time() - start < timeout_s:
- mv, ver, ts = transport.get_latest_frame_view()
- if mv is not None and ver != last_ver:
- ok = unpack_rx_frame_into(
- mv,
- pos_out=pos,
- spd_out=spd,
- homed_out=homed,
- io_out=io_bits,
- temp_out=temp,
- poserr_out=poserr,
- timing_out=timing,
- grip_out=grip,
- )
- if ok:
- return {
- "pos": pos.copy(),
- "spd": spd.copy(),
- "homed": homed.copy(),
- "io": io_bits.copy(),
- "temp": temp.copy(),
- "poserr": poserr.copy(),
- "timing": timing.copy(),
- "grip": grip.copy(),
- "ver": ver,
- "ts": ts,
- }
- time.sleep(0.005)
- return None
-
-
-class TestMockSerialTransport:
- """Test MockSerialTransport functionality."""
-
- def test_create_and_connect(self):
- """Test that MockSerialTransport can be created and connected."""
- transport = MockSerialTransport()
- assert transport is not None
- assert not transport.is_connected()
-
- # Connect should always succeed for mock
- assert transport.connect()
- assert transport.is_connected()
-
- # Disconnect
- transport.disconnect()
- assert not transport.is_connected()
-
- def test_auto_reconnect(self):
- """Test auto-reconnect functionality."""
- transport = MockSerialTransport()
-
- # Auto-reconnect should succeed when not connected
- assert transport.auto_reconnect()
- assert transport.is_connected()
-
- # Auto-reconnect should return False when already connected
- assert not transport.auto_reconnect()
-
- def test_write_frame(self):
- """Test writing command frames."""
- transport = MockSerialTransport()
- transport.connect()
-
- # Prepare command data
- position_out = [0, 0, 0, 0, 0, 0]
- speed_out = [100, 100, 100, 100, 100, 100]
- command_out = CommandCode.JOG
- affected_joint = [1, 1, 1, 1, 1, 1, 0, 0]
- inout = [0, 0, 0, 0, 0, 0, 0, 0]
- timeout = 0
- gripper_data = [0, 0, 0, 0, 0, 0]
-
- # Write should succeed when connected
- success = transport.write_frame(
- position_out,
- speed_out,
- command_out,
- affected_joint,
- inout,
- timeout,
- gripper_data,
- )
- assert success
-
- # Verify internal state updated
- assert transport._state.command_out == command_out
- assert np.array_equal(transport._state.position_out, position_out)
- assert np.array_equal(transport._state.speed_out, speed_out)
-
- # Disconnect and try again - should fail
- transport.disconnect()
- success = transport.write_frame(
- position_out,
- speed_out,
- command_out,
- affected_joint,
- inout,
- timeout,
- gripper_data,
- )
- assert not success
-
- def test_read_frames(self):
- """
- Test reading response frames using latest-frame API (no legacy queues).
- """
- transport = MockSerialTransport()
- transport.connect()
-
- decoded = _wait_for_latest_frame_and_decode(transport, timeout_s=0.5)
- assert decoded is not None, "No frame published by mock transport"
-
- # Check data shapes
- assert decoded["pos"].shape == (6,)
- assert decoded["spd"].shape == (6,)
- assert decoded["homed"].shape == (8,)
- assert decoded["io"].shape == (8,)
- assert decoded["temp"].shape == (8,)
- assert decoded["poserr"].shape == (8,)
- assert decoded["timing"].shape == (1,)
- assert decoded["grip"].shape == (6,)
-
- # E-stop should be released in simulation (io bit 4)
- assert int(decoded["io"][4]) == 1
-
- def test_motion_simulation_jog(self):
- """Test JOG command simulation via latest-frame API."""
- transport = MockSerialTransport()
- transport.connect()
-
- # Baseline
- baseline = _wait_for_latest_frame_and_decode(transport, timeout_s=0.5)
- assert baseline is not None
- initial_pos = int(baseline["pos"][0])
-
- # Send JOG command to move joint 1
- speed_out = [1000, 0, 0, 0, 0, 0]
- assert transport.write_frame(
- [0] * 6, speed_out, CommandCode.JOG, [1] * 8, [0] * 8, 0, [0] * 6
- )
-
- # Wait for movement
- moved = None
- t0 = time.time()
- while time.time() - t0 < 0.5:
- decoded = _wait_for_latest_frame_and_decode(transport, timeout_s=0.1)
- if decoded is None:
- continue
- if int(decoded["pos"][0]) != initial_pos:
- moved = decoded
- break
- assert moved is not None, "Joint didn't move during JOG"
-
- def test_motion_simulation_move(self):
- """Test MOVE command simulation via latest-frame API."""
- transport = MockSerialTransport()
- transport.connect()
-
- target_pos = [5000, 0, 0, 0, 0, 0]
- assert transport.write_frame(
- target_pos, [0] * 6, CommandCode.MOVE, [1] * 8, [0] * 8, 0, [0] * 6
- )
-
- # Poll until position moves toward target or timeout
- final = None
- t0 = time.time()
- while time.time() - t0 < 1.0:
- decoded = _wait_for_latest_frame_and_decode(transport, timeout_s=0.1)
- if decoded is None:
- continue
- current_pos = int(decoded["pos"][0])
- if abs(current_pos - target_pos[0]) < 2000:
- final = decoded
- break
- assert final is not None, "Didn't move toward target sufficiently"
-
- def test_homing_simulation(self):
- """Test HOME command simulation via latest-frame API."""
- transport = MockSerialTransport()
- transport.connect()
-
- # Expected home positions (steps) derived from config HOME_ANGLES_DEG
- expected_steps = [
- int(PAROL6_ROBOT.ops.deg_to_steps(float(deg), i))
- for i, deg in enumerate(HOME_ANGLES_DEG)
- ]
- tol_steps = 500 # tolerance in steps
-
- # Send HOME command
- assert transport.write_frame(
- [0] * 6, [0] * 6, CommandCode.HOME, [1] * 8, [0] * 8, 0, [0] * 6
- )
-
- homing_started = False
- homing_completed = False
- t0 = time.time()
- last_homed_bits = None
-
- while time.time() - t0 < 1.0:
- decoded = _wait_for_latest_frame_and_decode(transport, timeout_s=0.1)
- if decoded is None:
- continue
- homed_bits = decoded["homed"].tolist()
- if not all(h == 1 for h in homed_bits):
- homing_started = True
- if homing_started and all(h == 1 for h in homed_bits):
- # Verify positions near configured home posture
- pos_list = decoded["pos"].tolist()
- if all(
- abs(int(pos_list[i]) - expected_steps[i]) < tol_steps
- for i in range(6)
- ):
- homing_completed = True
- break
- last_homed_bits = homed_bits
-
- # Either already homed or homed sequence executed
- if not homing_started:
- assert last_homed_bits is not None
- assert all(h == 1 for h in last_homed_bits), "Robot should be homed"
- else:
- assert homing_completed, "Homing sequence started but did not complete"
-
- def test_gripper_simulation(self):
- """Test gripper command simulation."""
- transport = MockSerialTransport()
- transport.connect()
-
- # Test calibration mode
- gripper_data = [100, 150, 500, 0, 1, 42] # mode=1 for calibration, id=42
- transport.write_frame(
- [0] * 6, [0] * 6, CommandCode.IDLE, [0] * 8, [0] * 8, 0, gripper_data
- )
-
- # Check gripper state updated
- assert transport._state.gripper_data_in[0] == 42 # Device ID set
- assert transport._state.gripper_data_in[4] & 0x40 != 0 # Calibrated bit set
-
- # Test error clear mode
- gripper_data[4] = 2 # mode=2 for error clear
- transport.write_frame(
- [0] * 6, [0] * 6, CommandCode.IDLE, [0] * 8, [0] * 8, 0, gripper_data
- )
-
- # Error bit should be cleared
- assert transport._state.gripper_data_in[4] & 0x20 == 0
-
- def test_get_info(self):
- """Test get_info method."""
- transport = MockSerialTransport(port="TEST_PORT", baudrate=115200)
-
- info = transport.get_info()
- assert info["port"] == "TEST_PORT"
- assert info["baudrate"] == 115200
- assert not info["connected"]
- assert info["mode"] == "MOCK_SERIAL"
-
- transport.connect()
- info = transport.get_info()
- assert info["connected"]
- assert "frames_sent" in info
- assert "frames_received" in info
- assert "simulation_rate_hz" in info
- assert "robot_state" in info
-
-
-class TestTransportFactory:
- """Test transport factory with mock mode."""
-
- def test_simulation_mode_detection(self):
- """Test is_simulation_mode function."""
- # Should be False by default
- if "PAROL6_FAKE_SERIAL" in os.environ:
- del os.environ["PAROL6_FAKE_SERIAL"]
- assert not is_simulation_mode()
-
- # Test various true values
- for value in ["1", "true", "TRUE", "yes", "YES", "on", "ON"]:
- os.environ["PAROL6_FAKE_SERIAL"] = value
- assert is_simulation_mode()
-
- # Test false values
- for value in ["0", "false", "FALSE", "no", "NO", "off", "OFF", ""]:
- os.environ["PAROL6_FAKE_SERIAL"] = value
- assert not is_simulation_mode()
-
- # Clean up
- del os.environ["PAROL6_FAKE_SERIAL"]
-
- def test_create_transport_auto_detect(self):
- """Test transport factory auto-detection."""
- # Without FAKE_SERIAL, should create SerialTransport
- if "PAROL6_FAKE_SERIAL" in os.environ:
- del os.environ["PAROL6_FAKE_SERIAL"]
-
- from parol6.server.transports.serial_transport import SerialTransport
-
- transport = create_transport()
- assert isinstance(transport, SerialTransport)
-
- # With FAKE_SERIAL, should create MockSerialTransport
- os.environ["PAROL6_FAKE_SERIAL"] = "1"
- transport = create_transport()
- assert isinstance(transport, MockSerialTransport)
-
- # Clean up
- del os.environ["PAROL6_FAKE_SERIAL"]
-
- def test_create_transport_explicit(self):
- """Test explicit transport type selection."""
- # Explicit mock regardless of environment
- transport = create_transport(transport_type="mock")
- assert isinstance(transport, MockSerialTransport)
-
- # Explicit serial regardless of environment
- from parol6.server.transports.serial_transport import SerialTransport
-
- os.environ["PAROL6_FAKE_SERIAL"] = "1"
- transport = create_transport(transport_type="serial")
- assert isinstance(transport, SerialTransport)
-
- # Invalid type should raise
- with pytest.raises(ValueError):
- create_transport(transport_type="invalid")
-
- # Clean up
- if "PAROL6_FAKE_SERIAL" in os.environ:
- del os.environ["PAROL6_FAKE_SERIAL"]
-
-
-class TestMockRobotState:
- """Test MockRobotState initialization."""
-
- def test_initial_state(self):
- """Test initial robot state."""
- state = MockRobotState()
-
- # Should start at standby position
- for i in range(6):
- deg = float(PAROL6_ROBOT.joint.standby.deg[i])
- steps = int(PAROL6_ROBOT.ops.deg_to_steps(deg, i))
- assert state.position_in[i] == steps
-
- # E-stop should be released
- assert state.io_in[4] == 1
-
- # No errors initially
- assert all(e == 0 for e in state.temperature_error_in)
- assert all(e == 0 for e in state.position_error_in)
diff --git a/tests/unit/test_motion.py b/tests/unit/test_motion.py
new file mode 100644
index 0000000..531c0c4
--- /dev/null
+++ b/tests/unit/test_motion.py
@@ -0,0 +1,233 @@
+"""Unit tests for parol6.motion trajectory pipeline."""
+
+import numpy as np
+import pytest
+
+from parol6.config import INTERVAL_S, LIMITS, steps_to_rad
+from parol6.motion import JointPath, ProfileType, Trajectory, TrajectoryBuilder
+
+
+class TestJointPath:
+ """Tests for JointPath dataclass."""
+
+ def test_interpolate_two_points(self):
+ """Interpolate between two joint configurations."""
+ start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+ end = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ n_samples = 11
+
+ path = JointPath.interpolate(start, end, n_samples)
+
+ assert len(path) == n_samples
+ assert np.allclose(path.positions[0], start)
+ assert np.allclose(path.positions[-1], end)
+ # Midpoint should be average
+ assert np.allclose(path.positions[5], (start + end) / 2)
+
+ def test_interpolate_minimum_samples(self):
+ """Interpolate with n_samples < 2 should produce 2 samples."""
+ start = np.zeros(6)
+ end = np.ones(6)
+
+ path = JointPath.interpolate(start, end, n_samples=1)
+ assert len(path) == 2
+
+ path = JointPath.interpolate(start, end, n_samples=0)
+ assert len(path) == 2
+
+ def test_sample_at_boundaries(self):
+ """Sample at s=0 and s=1 should return exact endpoints."""
+ start = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
+ end = np.array([7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
+ path = JointPath.interpolate(start, end, n_samples=10)
+
+ assert np.allclose(path.sample(0.0), start)
+ assert np.allclose(path.sample(1.0), end)
+
+ def test_sample_clamps_out_of_bounds(self):
+ """Sample at s<0 or s>1 should clamp to boundaries."""
+ start = np.zeros(6)
+ end = np.ones(6)
+ path = JointPath.interpolate(start, end, n_samples=10)
+
+ assert np.allclose(path.sample(-0.5), start)
+ assert np.allclose(path.sample(1.5), end)
+
+ def test_append_concatenates_paths(self):
+ """Append should concatenate two paths."""
+ path1 = JointPath.interpolate(np.zeros(6), np.ones(6), n_samples=5)
+ path2 = JointPath.interpolate(np.ones(6), np.ones(6) * 2, n_samples=5)
+
+ combined = path1.append(path2)
+
+ assert len(combined) == 10
+ assert np.allclose(combined.positions[:5], path1.positions)
+ assert np.allclose(combined.positions[5:], path2.positions)
+
+
+class TestTrajectoryBuilder:
+ """Tests for TrajectoryBuilder."""
+
+ @pytest.fixture
+ def simple_joint_path(self) -> JointPath:
+ """Create a simple joint path for testing."""
+ start = np.array([0.0, -1.57, 3.14, 0.0, 0.0, 0.0]) # ~0, -90deg, 180deg
+ end = np.array([0.5, -1.0, 2.5, 0.5, 0.0, 0.5])
+ return JointPath.interpolate(start, end, n_samples=50)
+
+ def test_build_linear_profile(self, simple_joint_path):
+ """LINEAR profile should produce uniformly spaced trajectory."""
+ builder = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=ProfileType.LINEAR,
+ duration=1.0, # 1 second
+ dt=INTERVAL_S,
+ )
+
+ trajectory = builder.build()
+
+ assert isinstance(trajectory, Trajectory)
+ assert len(trajectory) > 0
+ assert trajectory.duration >= 1.0
+ # Steps should be int32
+ assert trajectory.steps.dtype == np.int32
+
+ def test_build_quintic_profile(self, simple_joint_path):
+ """QUINTIC profile should produce smooth trajectory."""
+ builder = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=ProfileType.QUINTIC,
+ duration=3.0, # Long duration to stay within velocity limits
+ dt=INTERVAL_S,
+ )
+
+ trajectory = builder.build()
+
+ assert isinstance(trajectory, Trajectory)
+ assert len(trajectory) > 0
+ assert trajectory.duration >= 3.0
+
+ def test_build_trapezoid_profile(self, simple_joint_path):
+ """TRAPEZOID profile should produce trajectory with plateau."""
+ builder = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=ProfileType.TRAPEZOID,
+ duration=3.0, # Long duration to stay within velocity limits
+ dt=INTERVAL_S,
+ )
+
+ trajectory = builder.build()
+
+ assert isinstance(trajectory, Trajectory)
+ assert len(trajectory) > 0
+ assert trajectory.duration >= 3.0
+
+ def test_build_ruckig_profile(self, simple_joint_path):
+ """RUCKIG profile should produce jerk-limited trajectory."""
+ builder = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=ProfileType.RUCKIG,
+ dt=INTERVAL_S,
+ )
+
+ trajectory = builder.build()
+
+ assert isinstance(trajectory, Trajectory)
+ assert len(trajectory) > 0
+ assert trajectory.duration > 0
+
+ def test_velocity_frac_scaling(self, simple_joint_path):
+ """Lower velocity_frac should increase duration."""
+ # Use TOPPRA which is time-optimal and respects velocity limits
+ builder_100 = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=ProfileType.TOPPRA,
+ velocity_frac=1.0,
+ )
+ builder_50 = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=ProfileType.TOPPRA,
+ velocity_frac=0.5,
+ )
+
+ traj_100 = builder_100.build()
+ traj_50 = builder_50.build()
+
+ # At 50% velocity, duration should be longer
+ assert traj_50.duration >= traj_100.duration
+
+ def test_single_point_path(self):
+ """Single-point path should produce single-step trajectory."""
+ path = JointPath(positions=np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]))
+
+ builder = TrajectoryBuilder(
+ joint_path=path,
+ profile=ProfileType.LINEAR,
+ )
+
+ trajectory = builder.build()
+
+ assert len(trajectory) == 1
+ assert trajectory.duration == 0.0
+
+ @pytest.mark.parametrize(
+ "profile",
+ [
+ ProfileType.LINEAR,
+ ProfileType.QUINTIC,
+ ProfileType.TRAPEZOID,
+ ProfileType.TOPPRA,
+ ProfileType.RUCKIG,
+ ],
+ )
+ def test_trajectory_respects_joint_velocity_limits(
+ self, simple_joint_path, profile
+ ):
+ """All profiles should produce trajectories within joint velocity limits."""
+ builder = TrajectoryBuilder(
+ joint_path=simple_joint_path,
+ profile=profile,
+ dt=INTERVAL_S,
+ )
+ traj = builder.build()
+
+ if len(traj) < 2:
+ return # Can't check velocity on single-point trajectory
+
+ # Convert steps back to radians for limit checking
+ trajectory_rad = np.empty(traj.steps.shape, dtype=np.float64)
+ for i in range(len(traj.steps)):
+ steps_to_rad(traj.steps[i], trajectory_rad[i])
+
+ # Compute velocities via finite difference
+ dt = traj.duration / (len(traj) - 1)
+ velocities_rad = np.diff(trajectory_rad, axis=0) / dt
+
+ # Check against velocity limits (with small tolerance for numerical error)
+ v_max_rad = LIMITS.joint.hard.velocity
+ max_vel_ratio = float(np.max(np.abs(velocities_rad) / v_max_rad))
+
+ assert max_vel_ratio <= 1.05, (
+ f"Profile {profile.name}: velocity exceeded limits by "
+ f"{(max_vel_ratio - 1) * 100:.1f}%"
+ )
+
+
+class TestTrajectory:
+ """Tests for Trajectory dataclass."""
+
+ def test_len_returns_step_count(self):
+ """len() should return number of steps."""
+ steps = np.zeros((100, 6), dtype=np.int32)
+ traj = Trajectory(steps=steps, duration=1.0)
+
+ assert len(traj) == 100
+
+ def test_getitem_returns_step(self):
+ """Indexing should return individual step."""
+ steps = np.arange(60, dtype=np.int32).reshape(10, 6)
+ traj = Trajectory(steps=steps, duration=1.0)
+
+ assert np.array_equal(traj[0], steps[0])
+ assert np.array_equal(traj[5], steps[5])
+ assert np.array_equal(traj[-1], steps[-1])
diff --git a/tests/unit/test_motion_pipeline.py b/tests/unit/test_motion_pipeline.py
new file mode 100644
index 0000000..9e56c37
--- /dev/null
+++ b/tests/unit/test_motion_pipeline.py
@@ -0,0 +1,502 @@
+"""Unit tests for the motion pipeline (MotionPlanner + SegmentPlayer).
+
+Tests the PlannerWorker directly (no subprocess) and the SegmentPlayer
+state machine with mock segments.
+"""
+
+import multiprocessing
+import time
+
+import numpy as np
+import pytest
+
+from parol6.config import deg_to_steps
+from parol6.protocol.wire import (
+ CheckpointCmd,
+ DelayCmd,
+ HomeCmd,
+ MoveJCmd,
+ SelectToolCmd,
+)
+from parol6.server.motion_planner import (
+ InlineSegment,
+ MotionPlanner,
+ PlanCommand,
+ PlannerWorker,
+ TrajectorySegment,
+)
+from parol6.server.segment_player import SegmentPlayer
+from parol6.server.state import ControllerState
+
+# Valid PAROL6 joint angles within limits (same as test_dry_run_blend.py)
+HOME = [90.0, -90.0, 180.0, 0.0, 0.0, 180.0]
+W1 = [80.0, -80.0, 190.0, 10.0, 10.0, 190.0]
+W2 = [70.0, -70.0, 200.0, 20.0, 20.0, 200.0]
+W3 = [60.0, -60.0, 210.0, 30.0, 30.0, 210.0]
+
+
+def _home_steps() -> np.ndarray:
+ """Get home position in motor steps."""
+ import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+
+ buf = np.zeros(6, dtype=np.int32)
+ home_deg = np.array(PAROL6_ROBOT.joint.standby_deg, dtype=np.float64)
+ deg_to_steps(home_deg, buf)
+ return buf
+
+
+def _deg_to_steps(angles: list[float]) -> np.ndarray:
+ buf = np.zeros(6, dtype=np.int32)
+ deg_to_steps(np.array(angles, dtype=np.float64), buf)
+ return buf
+
+
+def _make_movej_cmd(
+ angles: list[float], speed: float = 0.5, r: float = 0.0
+) -> MoveJCmd:
+ return MoveJCmd(angles=angles, speed=speed, accel=1.0, r=r)
+
+
+# ---------------------------------------------------------------------------
+# Planner unit tests (direct PlannerWorker calls, no subprocess)
+# ---------------------------------------------------------------------------
+
+
+class TestPlannerDirect:
+ """Test the planner's command routing and trajectory generation."""
+
+ @pytest.fixture
+ def worker(self):
+ q = multiprocessing.Queue()
+ w = PlannerWorker(q)
+ w.state.Position_in[:] = _home_steps()
+ return w
+
+ @pytest.fixture
+ def segment_queue(self, worker):
+ return worker._segment_queue
+
+ def test_single_movej_produces_trajectory_segment(self, worker, segment_queue):
+ """A single MoveJ command should produce a TrajectorySegment."""
+ params = _make_movej_cmd(W1)
+ msg = PlanCommand(command_index=0, params=params)
+
+ worker.process_command(msg)
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, TrajectorySegment)
+ assert seg.command_index == 0
+ assert seg.trajectory_steps.shape[0] > 0
+ assert seg.trajectory_steps.shape[1] == 6
+ assert seg.trajectory_steps.dtype == np.int32
+ assert seg.duration > 0
+ assert seg.blend_consumed_indices == []
+
+ def test_settool_produces_inline_segment(self, worker, segment_queue):
+ """SelectTool should produce an InlineSegment."""
+ params = SelectToolCmd(tool_name="PNEUMATIC")
+ msg = PlanCommand(command_index=0, params=params)
+
+ worker.process_command(msg)
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, InlineSegment)
+ assert seg.command_index == 0
+ assert isinstance(seg.params, SelectToolCmd)
+
+ def test_home_produces_inline_and_updates_position(self, worker, segment_queue):
+ """Home command should produce InlineSegment and update planner position."""
+ # Start from non-home position
+ worker.state.Position_in[:] = _deg_to_steps(W1)
+ home_steps = _home_steps()
+
+ params = HomeCmd()
+ msg = PlanCommand(command_index=0, params=params)
+
+ worker.process_command(msg)
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, InlineSegment)
+ # Planner's position should now be home
+ np.testing.assert_array_equal(worker.state.Position_in, home_steps)
+
+ def test_checkpoint_produces_inline_segment(self, worker, segment_queue):
+ """Checkpoint should produce an InlineSegment."""
+ params = CheckpointCmd(label="step1")
+ msg = PlanCommand(command_index=5, params=params)
+
+ worker.process_command(msg)
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, InlineSegment)
+ assert seg.command_index == 5
+
+ def test_delay_produces_inline_segment(self, worker, segment_queue):
+ """Delay should produce an InlineSegment."""
+ params = DelayCmd(seconds=1.5)
+ msg = PlanCommand(command_index=3, params=params)
+
+ worker.process_command(msg)
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, InlineSegment)
+ assert seg.command_index == 3
+
+ def test_ordering_preserved_movej_settool_movej(self, worker, segment_queue):
+ """MoveJ -> SelectTool -> MoveJ should produce segments in order."""
+ # 1. MoveJ to W1
+ worker.process_command(PlanCommand(command_index=0, params=_make_movej_cmd(W1)))
+
+ # 2. SelectTool (inline)
+ worker.process_command(
+ PlanCommand(command_index=1, params=SelectToolCmd(tool_name="PNEUMATIC"))
+ )
+
+ # 3. MoveJ to W2
+ worker.process_command(PlanCommand(command_index=2, params=_make_movej_cmd(W2)))
+
+ # Verify ordering
+ seg1 = segment_queue.get(timeout=1.0)
+ seg2 = segment_queue.get(timeout=1.0)
+ seg3 = segment_queue.get(timeout=1.0)
+
+ assert isinstance(seg1, TrajectorySegment)
+ assert seg1.command_index == 0
+
+ assert isinstance(seg2, InlineSegment)
+ assert seg2.command_index == 1
+
+ assert isinstance(seg3, TrajectorySegment)
+ assert seg3.command_index == 2
+
+ def test_blend_chain_produces_single_segment(self, worker, segment_queue):
+ """MoveJ r>0, MoveJ r>0, MoveJ r=0 should produce a blended segment."""
+ # r=10 -> buffer
+ worker.process_command(
+ PlanCommand(command_index=0, params=_make_movej_cmd(W1, r=10.0))
+ )
+ assert len(worker._planner._blend_buffer) == 1
+
+ # r=10 -> buffer
+ worker.process_command(
+ PlanCommand(command_index=1, params=_make_movej_cmd(W2, r=10.0))
+ )
+ assert len(worker._planner._blend_buffer) == 2
+
+ # r=0 -> flush
+ worker.process_command(
+ PlanCommand(command_index=2, params=_make_movej_cmd(W3, r=0.0))
+ )
+ assert len(worker._planner._blend_buffer) == 0
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, TrajectorySegment)
+ assert seg.command_index == 0
+ assert seg.trajectory_steps.shape[0] > 0
+ # Should have consumed commands 1 and 2
+ assert len(seg.blend_consumed_indices) > 0
+
+ def test_stale_blend_buffer_flushed_on_inline(self, worker, segment_queue):
+ """A pending blend buffer should be flushed when an inline command arrives."""
+ # MoveJ with r=10 -> buffers
+ worker.process_command(
+ PlanCommand(command_index=0, params=_make_movej_cmd(W1, r=10.0))
+ )
+ assert len(worker._planner._blend_buffer) == 1
+
+ # Inline command -> flush blend buffer first
+ worker.process_command(
+ PlanCommand(command_index=1, params=SelectToolCmd(tool_name="PNEUMATIC"))
+ )
+
+ # Should get: trajectory segment (flushed from buffer), then inline segment
+ seg1 = segment_queue.get(timeout=1.0)
+ seg2 = segment_queue.get(timeout=1.0)
+ assert isinstance(seg1, TrajectorySegment)
+ assert seg1.command_index == 0
+ assert isinstance(seg2, InlineSegment)
+ assert seg2.command_index == 1
+
+ def test_stale_blend_flushed_on_timeout(self, worker, segment_queue):
+ """flush_stale_blend should emit buffered trajectory."""
+ worker.process_command(
+ PlanCommand(command_index=0, params=_make_movej_cmd(W1, r=10.0))
+ )
+ assert len(worker._planner._blend_buffer) == 1
+
+ worker.flush_stale_blend()
+ assert len(worker._planner._blend_buffer) == 0
+
+ seg = segment_queue.get(timeout=1.0)
+ assert isinstance(seg, TrajectorySegment)
+ assert seg.command_index == 0
+
+ def test_cancel_clears_blend_buffer(self, worker, segment_queue):
+ """cancel() should clear the blend buffer."""
+ worker.process_command(
+ PlanCommand(command_index=0, params=_make_movej_cmd(W1, r=10.0))
+ )
+ assert len(worker._planner._blend_buffer) == 1
+
+ worker.cancel()
+ assert len(worker._planner._blend_buffer) == 0
+
+
+# ---------------------------------------------------------------------------
+# Planner subprocess test
+# ---------------------------------------------------------------------------
+
+
+class TestPlannerSubprocess:
+ """Test the planner running in a real subprocess."""
+
+ def test_planner_start_stop(self):
+ """Planner starts and stops cleanly."""
+ planner = MotionPlanner()
+ planner.start()
+ assert planner.alive
+ planner.stop()
+ assert not planner.alive
+
+ def test_planner_produces_segment(self):
+ """Submit a MoveJ and get a TrajectorySegment back."""
+ planner = MotionPlanner()
+ planner.start()
+ try:
+ position_in = _home_steps()
+ planner.submit(
+ PlanCommand(
+ command_index=0,
+ params=_make_movej_cmd(W1),
+ position_in=position_in,
+ )
+ )
+
+ # Poll for result (with timeout)
+ seg = None
+ deadline = time.time() + 10.0
+ while time.time() < deadline:
+ seg = planner.poll_segment()
+ if seg is not None:
+ break
+ time.sleep(0.05)
+
+ assert seg is not None, "Planner should produce a segment"
+ assert isinstance(seg, TrajectorySegment)
+ assert seg.command_index == 0
+ assert seg.trajectory_steps.shape[0] > 0
+ finally:
+ planner.stop()
+
+ def test_planner_cancel(self):
+ """CancelAll clears planner state."""
+ planner = MotionPlanner()
+ planner.start()
+ try:
+ planner.cancel()
+ time.sleep(0.2)
+ # Should still be alive after cancel
+ assert planner.alive
+ finally:
+ planner.stop()
+
+ def test_planner_sync_profile(self):
+ """SyncProfile message is accepted without error."""
+ planner = MotionPlanner()
+ planner.start()
+ try:
+ planner.sync_profile("QUINTIC")
+ # Submit a command to verify the planner is still working
+ planner.submit(
+ PlanCommand(
+ command_index=0,
+ params=_make_movej_cmd(W1),
+ position_in=_home_steps(),
+ )
+ )
+ seg = None
+ deadline = time.time() + 10.0
+ while time.time() < deadline:
+ seg = planner.poll_segment()
+ if seg is not None:
+ break
+ time.sleep(0.05)
+ assert seg is not None
+ finally:
+ planner.stop()
+
+ def test_planner_inline_command(self):
+ """Submit a SelectTool command, get InlineSegment back."""
+ planner = MotionPlanner()
+ planner.start()
+ try:
+ planner.submit(
+ PlanCommand(
+ command_index=0,
+ params=SelectToolCmd(tool_name="PNEUMATIC"),
+ position_in=_home_steps(),
+ )
+ )
+
+ seg = None
+ deadline = time.time() + 5.0
+ while time.time() < deadline:
+ seg = planner.poll_segment()
+ if seg is not None:
+ break
+ time.sleep(0.05)
+
+ assert seg is not None
+ assert isinstance(seg, InlineSegment)
+ assert seg.command_index == 0
+ finally:
+ planner.stop()
+
+
+# ---------------------------------------------------------------------------
+# SegmentPlayer unit tests
+# ---------------------------------------------------------------------------
+
+
+class TestSegmentPlayer:
+ """Test the SegmentPlayer state machine with mock data."""
+
+ @pytest.fixture
+ def state(self):
+ s = ControllerState()
+ s.Position_in[:] = _home_steps()
+ return s
+
+ def test_trajectory_playback(self, state):
+ """SegmentPlayer should play trajectory waypoints at 1 step per tick."""
+ planner = MotionPlanner() # not started -- we'll inject segments manually
+
+ player = SegmentPlayer(planner)
+
+ # Create a trajectory with 5 waypoints
+ steps = np.tile(_home_steps(), (5, 1))
+ for i in range(5):
+ steps[i, 0] += i * 100 # vary J1
+
+ seg = TrajectorySegment(
+ command_index=0,
+ trajectory_steps=steps,
+ duration=0.05,
+ )
+ player._buffer.append(seg)
+
+ # Tick 5 times -- should output each waypoint
+ for i in range(5):
+ assert player.tick(state) is True
+ np.testing.assert_array_equal(state.Position_out, steps[i])
+ # Simulate Position_in converging to Position_out (firmware tracking)
+ state.Position_in[:] = state.Position_out
+
+ # 6th tick -- settling detects convergence, completes -> returns False
+ assert player.tick(state) is False
+ assert state.completed_command_index == 0
+
+ def test_inline_command_execution(self, state):
+ """SegmentPlayer should execute inline commands via setup()/tick()."""
+ planner = MotionPlanner()
+ player = SegmentPlayer(planner)
+
+ # Checkpoint is an instant inline command (completes in 1 tick)
+ seg = InlineSegment(
+ command_index=1,
+ params=CheckpointCmd(label="test_checkpoint"),
+ )
+ player._buffer.append(seg)
+
+ # Should complete immediately (returns False since no more segments)
+ result = player.tick(state)
+ assert result is False
+ assert state.completed_command_index == 1
+ assert state.last_checkpoint == "test_checkpoint"
+
+ def test_segment_ordering(self, state):
+ """Segments should be processed in FIFO order."""
+ planner = MotionPlanner()
+ player = SegmentPlayer(planner)
+
+ # Trajectory segment
+ steps = np.tile(_home_steps(), (3, 1))
+ traj = TrajectorySegment(command_index=0, trajectory_steps=steps, duration=0.03)
+
+ # Inline segment
+ inline = InlineSegment(
+ command_index=1, params=CheckpointCmd(label="after_move")
+ )
+
+ player._buffer.append(traj)
+ player._buffer.append(inline)
+
+ # Play through trajectory (3 ticks)
+ for _ in range(3):
+ assert player.tick(state) is True
+
+ # Next tick processes inline, then returns False (empty)
+ assert player.tick(state) is False
+ assert state.completed_command_index == 1
+ assert state.last_checkpoint == "after_move"
+
+ def test_cancel_clears_everything(self, state):
+ """Cancel should clear active segment and buffer."""
+ planner = MotionPlanner()
+ player = SegmentPlayer(planner)
+
+ steps = np.tile(_home_steps(), (100, 1))
+ seg = TrajectorySegment(command_index=0, trajectory_steps=steps, duration=1.0)
+ player._buffer.append(seg)
+
+ # Start playing
+ assert player.tick(state) is True
+
+ # Cancel
+ player.cancel(state)
+ assert player._active is None
+ assert len(player._buffer) == 0
+
+ # Should return False now
+ assert player.tick(state) is False
+
+ def test_blend_consumed_indices(self, state):
+ """Blended segment should report max consumed index on completion."""
+ planner = MotionPlanner()
+ player = SegmentPlayer(planner)
+
+ steps = np.tile(_home_steps(), (2, 1))
+ seg = TrajectorySegment(
+ command_index=0,
+ trajectory_steps=steps,
+ duration=0.02,
+ blend_consumed_indices=[1, 2],
+ )
+ player._buffer.append(seg)
+
+ # Play 2 ticks
+ assert player.tick(state) is True
+ assert player.tick(state) is True
+
+ # Complete
+ assert player.tick(state) is False
+ assert state.completed_command_index == 2 # max of 0, 1, 2
+
+ def test_active_property(self, state):
+ """active should reflect whether there's work to do."""
+ planner = MotionPlanner()
+ player = SegmentPlayer(planner)
+
+ assert player.active is False
+
+ steps = np.tile(_home_steps(), (2, 1))
+ player._buffer.append(
+ TrajectorySegment(command_index=0, trajectory_steps=steps, duration=0.02)
+ )
+ assert player.active is True
+
+ # Play through
+ player.tick(state)
+ player.tick(state)
+ player.tick(state)
+ assert player.active is False
diff --git a/tests/unit/test_movecart_command.py b/tests/unit/test_movecart_command.py
new file mode 100644
index 0000000..a41f775
--- /dev/null
+++ b/tests/unit/test_movecart_command.py
@@ -0,0 +1,118 @@
+"""Tests for MoveLCommand parsing via msgspec structs.
+
+Commands now use msgspec Structs for parameter validation:
+Format: MoveLCmd(pose, frame, duration, speed, accel, r, rel)
+"""
+
+import msgspec
+import pytest
+
+from parol6.commands.cartesian_commands import MoveLCommand
+from parol6.protocol.wire import MoveLCmd
+
+
+class TestMoveLCommandParsing:
+ """Test MoveLCmd struct parsing and validation."""
+
+ def test_parse_with_speed(self):
+ """Parse with explicit speed."""
+ # Create params struct
+ params = MoveLCmd(
+ pose=[100.0, 200.0, 300.0, 0.0, 0.0, 0.0],
+ speed=0.5,
+ accel=0.75,
+ )
+
+ cmd = MoveLCommand(params)
+
+ assert cmd.p.pose == [100.0, 200.0, 300.0, 0.0, 0.0, 0.0]
+ assert cmd.p.duration == 0.0 # default
+ assert cmd.p.speed == 0.5
+ assert cmd.p.accel == 0.75
+
+ def test_parse_accel_default(self):
+ """Default acceleration should be 1.0."""
+ params = MoveLCmd(
+ pose=[100.0, 200.0, 300.0, 0.0, 0.0, 0.0],
+ speed=0.5,
+ )
+
+ cmd = MoveLCommand(params)
+
+ assert cmd.p.accel == 1.0 # default
+
+ def test_parse_with_duration(self):
+ """Parse with duration instead of speed."""
+ params = MoveLCmd(
+ pose=[100.0, 200.0, 300.0, 0.0, 0.0, 0.0],
+ duration=2.5,
+ accel=0.8,
+ )
+
+ cmd = MoveLCommand(params)
+
+ assert cmd.p.duration == 2.5
+ assert cmd.p.speed == 0.0 # default
+ assert cmd.p.accel == 0.8
+
+ def test_parse_full_accel_range(self):
+ """Test acceleration values at boundaries."""
+ # Min accel (must be > 0)
+ params1 = MoveLCmd(
+ pose=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ speed=0.5,
+ accel=0.001,
+ )
+ cmd1 = MoveLCommand(params1)
+ assert cmd1.p.accel == 0.001
+
+ # Max accel
+ params2 = MoveLCmd(
+ pose=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ speed=0.5,
+ accel=1.0,
+ )
+ cmd2 = MoveLCommand(params2)
+ assert cmd2.p.accel == 1.0
+
+ def test_validation_requires_duration_or_speed(self):
+ """Must have either duration > 0 or speed > 0."""
+ with pytest.raises((ValueError, msgspec.ValidationError)):
+ # Both zero (default) should fail __post_init__
+ MoveLCmd(pose=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
+
+ def test_validation_rejects_both_duration_and_speed(self):
+ """Cannot have both duration > 0 and speed > 0."""
+ with pytest.raises((ValueError, msgspec.ValidationError)):
+ MoveLCmd(
+ pose=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ duration=2.0,
+ speed=0.5,
+ )
+
+ def test_validation_pose_length(self):
+ """Pose must have exactly 6 elements when decoded from wire format."""
+ from parol6.protocol.wire import CmdType, decode_command
+
+ # Validation happens during decode from msgpack (wire format)
+ import msgspec
+
+ encoder = msgspec.msgpack.Encoder()
+ # Wire format: [tag, pose, frame, duration, speed, accel, r, rel]
+ raw = encoder.encode(
+ [int(CmdType.MOVEL), [0.0, 0.0, 0.0], "WRF", 0.0, 0.5, 1.0, 0.0, False]
+ )
+
+ with pytest.raises(msgspec.ValidationError):
+ decode_command(raw)
+
+ def test_command_init(self):
+ """Test that MoveLCommand initializes correctly."""
+ params = MoveLCmd(
+ pose=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ speed=0.5,
+ )
+ cmd = MoveLCommand(params)
+
+ assert cmd.p is params
+ assert not cmd.is_finished
diff --git a/tests/unit/test_pdeathsig.py b/tests/unit/test_pdeathsig.py
new file mode 100644
index 0000000..0219ac6
--- /dev/null
+++ b/tests/unit/test_pdeathsig.py
@@ -0,0 +1,120 @@
+"""Test that child processes exit when parent is killed (dirty shutdown).
+
+Verifies that set_pdeathsig() causes multiprocessing.Process children to
+receive SIGTERM when their parent is SIGKILL'd, preventing orphaned
+resource_tracker processes.
+"""
+
+import os
+import signal
+import subprocess
+import sys
+import time
+
+import pytest
+
+
+@pytest.mark.unit
+@pytest.mark.skipif(sys.platform == "win32", reason="SIGKILL not available on Windows")
+def test_child_exits_on_parent_death(tmp_path):
+ """Children calling set_pdeathsig() exit when parent is SIGKILL'd."""
+ # Child target — must be a separate module for spawn to import
+ (tmp_path / "_pdeathsig_child.py").write_text(
+ "import os, signal, time\n"
+ "def child_main(pid_file):\n"
+ " signal.signal(signal.SIGINT, signal.SIG_IGN)\n"
+ " from parol6.server import set_pdeathsig\n"
+ " set_pdeathsig()\n"
+ " with open(pid_file, 'w') as f: f.write(str(os.getpid()))\n"
+ " while True: time.sleep(0.1)\n"
+ )
+
+ # Parent script — spawns child then sleeps forever
+ pid_file = str(tmp_path / "child.pid")
+ (tmp_path / "_pdeathsig_parent.py").write_text(
+ "import multiprocessing, os, sys, time\n"
+ f"sys.path.insert(0, {str(tmp_path)!r})\n"
+ "from _pdeathsig_child import child_main\n"
+ "if __name__ == '__main__':\n"
+ " multiprocessing.set_start_method('spawn', force=True)\n"
+ f" p = multiprocessing.Process(target=child_main, args=({pid_file!r},), daemon=True)\n"
+ " p.start()\n"
+ " sys.stdout.write(str(os.getpid()) + '\\n')\n"
+ " sys.stdout.flush()\n"
+ " while True: time.sleep(60)\n"
+ )
+
+ proc = subprocess.Popen(
+ [sys.executable, str(tmp_path / "_pdeathsig_parent.py")],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+ # Read parent PID
+ assert proc.stdout is not None
+ line = proc.stdout.readline().decode().strip()
+ assert line, (
+ f"Parent failed to start. stderr: {proc.stderr.read().decode()[:500] if proc.stderr else ''}"
+ )
+ parent_pid = int(line)
+
+ # Wait for child to write its PID
+ deadline = time.monotonic() + 10.0
+ child_pid = None
+ while time.monotonic() < deadline:
+ if os.path.exists(pid_file):
+ content = open(pid_file).read().strip()
+ if content:
+ child_pid = int(content)
+ break
+ time.sleep(0.2)
+ assert child_pid is not None, "Child never wrote its PID"
+
+ # Both should be alive
+ os.kill(parent_pid, 0)
+ os.kill(child_pid, 0)
+
+ # Kill parent (dirty shutdown)
+ os.kill(parent_pid, signal.SIGKILL)
+ proc.wait()
+
+ # Child should exit within a few seconds
+ deadline = time.monotonic() + 5.0
+ child_alive = True
+ while time.monotonic() < deadline:
+ try:
+ os.kill(child_pid, 0)
+ except OSError:
+ child_alive = False
+ break
+ time.sleep(0.1)
+
+ if child_alive:
+ os.kill(child_pid, signal.SIGKILL)
+ pytest.fail("Child process did not exit within 5s after parent was killed")
+
+ # Verify no freshly-orphaned resource_tracker processes
+ time.sleep(0.5)
+ result = subprocess.run(
+ ["pgrep", "-f", "resource_tracker"], capture_output=True, text=True
+ )
+ for pid_str in result.stdout.strip().splitlines():
+ if not pid_str.strip():
+ continue
+ tracker_pid = int(pid_str.strip())
+ try:
+ with open(f"/proc/{tracker_pid}/status") as f:
+ for status_line in f:
+ if status_line.startswith("PPid:"):
+ ppid = int(status_line.split()[1])
+ if ppid == 1:
+ age = time.time() - os.stat(f"/proc/{tracker_pid}").st_ctime
+ if age < 10:
+ os.kill(tracker_pid, signal.SIGKILL)
+ pytest.fail(
+ f"Orphaned resource_tracker (PID={tracker_pid}) "
+ f"found {age:.1f}s after test"
+ )
+ break
+ except (FileNotFoundError, ProcessLookupError):
+ pass
diff --git a/tests/unit/test_protocol_status_decode_enablement.py b/tests/unit/test_protocol_status_decode_enablement.py
deleted file mode 100644
index 9395cac..0000000
--- a/tests/unit/test_protocol_status_decode_enablement.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import pytest
-
-from parol6.protocol import wire
-
-
-@pytest.mark.unit
-def test_decode_status_with_enablement_arrays():
- # Minimal valid STATUS with ANGLES present to satisfy parser
- joint_en = ",".join(["1", "0"] * 6)
- cart_wrf = ",".join(["1"] * 12)
- cart_trf = ",".join(["0"] * 12)
- status = (
- "STATUS|ANGLES=0,0,0,0,0,0"
- f"|JOINT_EN={joint_en}"
- f"|CART_EN_WRF={cart_wrf}"
- f"|CART_EN_TRF={cart_trf}"
- )
-
- decoded = wire.decode_status(status)
- assert decoded is not None
- assert isinstance(decoded.get("joint_en"), list)
- assert isinstance(decoded.get("cart_en_wrf"), list)
- assert isinstance(decoded.get("cart_en_trf"), list)
- je = decoded.get("joint_en")
- wrf = decoded.get("cart_en_wrf")
- trf = decoded.get("cart_en_trf")
- assert isinstance(je, list) and len(je) == 12
- assert isinstance(wrf, list) and len(wrf) == 12
- assert isinstance(trf, list) and len(trf) == 12
-
- # Spot-check values
- assert je[0] == 1 and je[1] == 0
- assert all(v == 1 for v in wrf) # all ones
- assert all(v == 0 for v in trf) # all zeros
diff --git a/tests/unit/test_query_commands_actions.py b/tests/unit/test_query_commands_actions.py
index 678703a..d5ef7b8 100644
--- a/tests/unit/test_query_commands_actions.py
+++ b/tests/unit/test_query_commands_actions.py
@@ -1,217 +1,148 @@
"""
Unit tests for action-related query commands.
-Tests GET_CURRENT_ACTION and GET_QUEUE query commands without requiring a running server.
-Uses stub UDP transport and minimal state objects to test command logic in isolation.
+Tests ACTIVITY and QUEUE query commands without requiring a running server.
+Uses minimal state objects to test command logic in isolation.
"""
-import json
from types import SimpleNamespace
-from parol6.commands.base import CommandContext
-from parol6.commands.query_commands import GetCurrentActionCommand, GetQueueCommand
+from waldoctl import ActionState
+from parol6.commands.query_commands import ActivityCommand, QueueCommand
+from parol6.protocol.wire import (
+ CurrentActionResultStruct,
+ ActivityCmd,
+ QueueCmd,
+ QueryType,
+ QueueResultStruct,
+ ResponseMsg,
+ decode_message,
+)
-class StubUDPTransport:
- """Stub UDP transport that captures sent responses."""
- def __init__(self):
- self.sent = []
+def _unpack_response(data: bytes):
+ """Decode packed bytes into a typed result struct."""
+ msg = decode_message(data)
+ assert isinstance(msg, ResponseMsg)
+ return msg.result
- def send_response(self, message: str, addr: tuple):
- """Capture sent responses for verification."""
- self.sent.append((message, addr))
+def test_activity_command_init():
+ """Test that ACTIVITY command initializes correctly."""
+ cmd = ActivityCommand(ActivityCmd())
-def test_get_current_action_command_match():
- """Test that GET_CURRENT_ACTION command matches correctly."""
- cmd = GetCurrentActionCommand()
+ assert not cmd.is_finished
+ assert cmd.PARAMS_TYPE is not None
+ assert cmd.QUERY_TYPE == QueryType.CURRENT_ACTION
- # Should match
- can_handle, error = cmd.do_match(["GET_CURRENT_ACTION"])
- assert can_handle
- assert error is None
- # Should not match other commands
- can_handle, error = cmd.do_match(["GET_QUEUE"])
- assert not can_handle
-
- can_handle, error = cmd.do_match(["UNKNOWN"])
- assert not can_handle
-
-
-def test_get_current_action_replies_json():
- """Test that GET_CURRENT_ACTION returns correct JSON response."""
- # Setup
- udp = StubUDPTransport()
- ctx = CommandContext(
- udp_transport=udp, addr=("127.0.0.1", 5001), gcode_interpreter=None, dt=0.01
- )
-
- # Create minimal state with action tracking fields
+def test_activity_returns_details():
+ """Test that ACTIVITY compute() returns correct data."""
state = SimpleNamespace(
- action_current="MovePoseCommand",
- action_state="EXECUTING",
+ action_current="MoveJPoseCommand",
+ action_state=ActionState.EXECUTING,
action_next="HomeCommand",
+ action_params="angles=[10,20,30,40,50,60]",
)
- # Execute command
- cmd = GetCurrentActionCommand()
- cmd.bind(ctx)
+ cmd = ActivityCommand(ActivityCmd())
cmd.setup(state)
- status = cmd.tick(state)
-
- # Verify response sent
- assert len(udp.sent) == 1
- message, addr = udp.sent[0]
-
- # Verify message format
- assert message.startswith("ACTION|")
+ result = _unpack_response(cmd.compute(state))
- # Parse and verify JSON payload
- json_str = message.split("|", 1)[1]
- payload = json.loads(json_str)
+ assert isinstance(result, CurrentActionResultStruct)
+ assert result.current == "MoveJPoseCommand"
+ assert result.state == "EXECUTING"
+ assert result.next == "HomeCommand"
+ assert result.params == "angles=[10,20,30,40,50,60]"
- assert payload["current"] == "MovePoseCommand"
- assert payload["state"] == "EXECUTING"
- assert payload["next"] == "HomeCommand"
- # Verify command completed
- assert status.code.value == "COMPLETED"
- assert cmd.is_finished
-
-
-def test_get_current_action_with_idle_state():
- """Test GET_CURRENT_ACTION when robot is idle."""
- udp = StubUDPTransport()
- ctx = CommandContext(
- udp_transport=udp, addr=("127.0.0.1", 5001), gcode_interpreter=None, dt=0.01
+def test_activity_with_idle_state():
+ """Test ACTIVITY when robot is idle."""
+ state = SimpleNamespace(
+ action_current="",
+ action_state=ActionState.IDLE,
+ action_next="",
+ action_params="",
)
- # Idle state - no current action
- state = SimpleNamespace(action_current="", action_state="IDLE", action_next="")
-
- cmd = GetCurrentActionCommand()
- cmd.bind(ctx)
+ cmd = ActivityCommand(ActivityCmd())
cmd.setup(state)
- cmd.tick(state)
-
- # Verify response
- assert len(udp.sent) == 1
- message, _ = udp.sent[0]
- json_str = message.split("|", 1)[1]
- payload = json.loads(json_str)
-
- assert payload["current"] == ""
- assert payload["state"] == "IDLE"
- assert payload["next"] == ""
+ result = _unpack_response(cmd.compute(state))
+ assert isinstance(result, CurrentActionResultStruct)
+ assert result.current == ""
+ assert result.state == "IDLE"
+ assert result.next == ""
+ assert result.params == ""
-def test_get_queue_command_match():
- """Test that GET_QUEUE command matches correctly."""
- cmd = GetQueueCommand()
- # Should match
- can_handle, error = cmd.do_match(["GET_QUEUE"])
- assert can_handle
- assert error is None
+def test_queue_command_init():
+ """Test that QUEUE command initializes correctly."""
+ cmd = QueueCommand(QueueCmd())
- # Should not match other commands
- can_handle, error = cmd.do_match(["GET_CURRENT_ACTION"])
- assert not can_handle
+ assert not cmd.is_finished
+ assert cmd.PARAMS_TYPE is not None
+ assert cmd.QUERY_TYPE == QueryType.QUEUE
-def test_get_queue_replies_json():
- """Test that GET_QUEUE returns correct JSON response."""
- # Setup
- udp = StubUDPTransport()
- ctx = CommandContext(
- udp_transport=udp, addr=("127.0.0.1", 5001), gcode_interpreter=None, dt=0.01
- )
-
- # Create state with queued commands
+def test_queue_returns_details():
+ """Test that QUEUE compute() returns correct data."""
state = SimpleNamespace(
- queue_nonstreamable=["MovePoseCommand", "HomeCommand", "MoveJointCommand"]
+ queue_nonstreamable=["MoveJPoseCommand", "HomeCommand", "MoveJCommand"],
+ executing_command_index=1,
+ completed_command_index=0,
+ last_checkpoint="cp1",
+ queued_duration=3.5,
)
- # Execute command
- cmd = GetQueueCommand()
- cmd.bind(ctx)
+ cmd = QueueCommand(QueueCmd())
cmd.setup(state)
- status = cmd.tick(state)
-
- # Verify response sent
- assert len(udp.sent) == 1
- message, addr = udp.sent[0]
-
- # Verify message format
- assert message.startswith("QUEUE|")
+ result = _unpack_response(cmd.compute(state))
- # Parse and verify JSON payload
- json_str = message.split("|", 1)[1]
- payload = json.loads(json_str)
+ assert isinstance(result, QueueResultStruct)
+ assert result.queue == ["MoveJPoseCommand", "HomeCommand", "MoveJCommand"]
+ assert result.executing_index == 1
+ assert result.completed_index == 0
+ assert result.last_checkpoint == "cp1"
+ assert result.queued_duration == 3.5
- assert payload["non_streamable"] == [
- "MovePoseCommand",
- "HomeCommand",
- "MoveJointCommand",
- ]
- assert payload["size"] == 3
- # Verify command completed
- assert status.code.value == "COMPLETED"
- assert cmd.is_finished
-
-
-def test_get_queue_with_empty_queue():
- """Test GET_QUEUE when queue is empty."""
- udp = StubUDPTransport()
- ctx = CommandContext(
- udp_transport=udp, addr=("127.0.0.1", 5001), gcode_interpreter=None, dt=0.01
+def test_queue_with_empty_queue():
+ """Test QUEUE when queue is empty."""
+ state = SimpleNamespace(
+ queue_nonstreamable=[],
+ executing_command_index=-1,
+ completed_command_index=-1,
+ last_checkpoint="",
+ queued_duration=0.0,
)
- # Empty queue
- state = SimpleNamespace(queue_nonstreamable=[])
-
- cmd = GetQueueCommand()
- cmd.bind(ctx)
+ cmd = QueueCommand(QueueCmd())
cmd.setup(state)
- cmd.tick(state)
-
- # Verify response
- assert len(udp.sent) == 1
- message, _ = udp.sent[0]
- json_str = message.split("|", 1)[1]
- payload = json.loads(json_str)
+ result = _unpack_response(cmd.compute(state))
- assert payload["non_streamable"] == []
- assert payload["size"] == 0
+ assert isinstance(result, QueueResultStruct)
+ assert result.queue == []
+ assert result.executing_index == -1
+ assert result.completed_index == -1
-def test_get_queue_excludes_streamable():
+def test_queue_excludes_streamable():
"""Test that queue only contains non-streamable commands (by design)."""
- # This test verifies the API contract - the queue_nonstreamable field
- # should already have streamable commands filtered out by the controller
-
- udp = StubUDPTransport()
- ctx = CommandContext(
- udp_transport=udp, addr=("127.0.0.1", 5001), gcode_interpreter=None, dt=0.01
+ state = SimpleNamespace(
+ queue_nonstreamable=["MoveJPoseCommand", "HomeCommand"],
+ executing_command_index=2,
+ completed_command_index=1,
+ last_checkpoint="",
+ queued_duration=1.0,
)
- # State should only contain non-streamable commands
- # (streamable commands like JogJointCommand are filtered by controller)
- state = SimpleNamespace(queue_nonstreamable=["MovePoseCommand", "HomeCommand"])
-
- cmd = GetQueueCommand()
- cmd.bind(ctx)
+ cmd = QueueCommand(QueueCmd())
cmd.setup(state)
- cmd.tick(state)
-
- message, _ = udp.sent[0]
- json_str = message.split("|", 1)[1]
- payload = json.loads(json_str)
+ result = _unpack_response(cmd.compute(state))
- # Verify only non-streamable commands in response
- assert "MovePoseCommand" in payload["non_streamable"]
- assert "HomeCommand" in payload["non_streamable"]
- assert payload["size"] == 2
+ assert isinstance(result, QueueResultStruct)
+ assert "MoveJPoseCommand" in result.queue
+ assert "HomeCommand" in result.queue
+ assert len(result.queue) == 2
diff --git a/tests/unit/test_reset_command.py b/tests/unit/test_reset_command.py
new file mode 100644
index 0000000..4c6fc34
--- /dev/null
+++ b/tests/unit/test_reset_command.py
@@ -0,0 +1,107 @@
+"""Tests for RESET command."""
+
+import numpy as np
+import pytest
+
+from parol6.commands.utility_commands import ResetCommand
+from parol6.protocol.wire import ResetCmd
+from parol6.server.state import ControllerState
+
+
+class TestResetCommandParsing:
+ """Test ResetCommand initialization."""
+
+ def test_init(self):
+ """RESET takes no parameters."""
+ cmd = ResetCommand(ResetCmd())
+
+ assert not cmd.is_finished
+ assert cmd.p is not None
+
+ def test_struct_has_no_params(self):
+ """ResetCmd struct should have no fields."""
+ params = ResetCmd()
+ # Struct should be valid with no fields
+ assert params is not None
+
+
+class TestResetCommandExecution:
+ """Test ResetCommand.tick resets state correctly."""
+
+ def test_reset_clears_positions(self):
+ """Reset should zero out position buffers."""
+ state = ControllerState()
+ state.Position_in = np.array(
+ [1000, 2000, 3000, 4000, 5000, 6000], dtype=np.int32
+ )
+ state.Speed_in = np.array([10, 20, 30, 40, 50, 60], dtype=np.int32)
+
+ cmd = ResetCommand(ResetCmd())
+ cmd.tick(state) # Reset executes in tick
+
+ assert np.all(state.Position_in == 0)
+ assert np.all(state.Speed_in == 0)
+
+ def test_reset_clears_errors(self):
+ """Reset should clear error states."""
+ state = ControllerState()
+ state.e_stop_active = True
+ state.soft_error = True
+ state.disabled_reason = "some error"
+
+ cmd = ResetCommand(ResetCmd())
+ cmd.tick(state)
+
+ assert state.e_stop_active is False
+ assert state.soft_error is False
+ assert state.disabled_reason == ""
+
+ def test_reset_clears_tool(self):
+ """Reset should reset tool to NONE."""
+ state = ControllerState()
+ state._current_tool = "GRIPPER"
+
+ cmd = ResetCommand(ResetCmd())
+ cmd.tick(state)
+
+ assert state._current_tool == "NONE"
+
+ def test_reset_preserves_connection_state(self):
+ """Reset should NOT reset connection-related state."""
+ state = ControllerState()
+ state.ip = "192.168.1.100"
+ state.port = 9999
+ state.start_time = 12345.0
+ state.ser = "mock_serial"
+
+ cmd = ResetCommand(ResetCmd())
+ cmd.tick(state)
+
+ assert state.ip == "192.168.1.100"
+ assert state.port == 9999
+ assert state.start_time == 12345.0
+ assert state.ser == "mock_serial"
+
+ def test_reset_finishes_immediately(self):
+ """Reset command should complete in single tick."""
+ state = ControllerState()
+ cmd = ResetCommand(ResetCmd())
+ cmd.tick(state)
+
+ assert cmd.is_finished is True
+
+
+@pytest.mark.integration
+class TestResetIntegration:
+ """Integration tests for RESET command via client."""
+
+ def test_reset_command_succeeds(self, client, server_proc):
+ """Test reset command executes successfully via client."""
+ result = client.reset()
+ assert result > 0
+
+ def test_reset_multiple_times(self, client, server_proc):
+ """Test reset can be called multiple times."""
+ for _ in range(3):
+ result = client.reset()
+ assert result > 0
diff --git a/tests/unit/test_ruckig_bypass.py b/tests/unit/test_ruckig_bypass.py
new file mode 100644
index 0000000..901504f
--- /dev/null
+++ b/tests/unit/test_ruckig_bypass.py
@@ -0,0 +1,255 @@
+"""
+Tests for trajectory execution:
+1. Precomputed trajectories go directly to controller (no streaming executor)
+2. QUINTIC/TRAPEZOID profiles validate velocity/acceleration limits
+3. TOPPRA/RUCKIG profiles are inherently limit-respecting
+"""
+
+import numpy as np
+import pytest
+
+from parol6.commands.joint_commands import MoveJCommand
+from parol6.config import rad_to_steps, steps_to_rad
+from parol6.motion import JointPath, TrajectoryBuilder, ProfileType
+
+pytestmark = pytest.mark.unit
+
+
+# Minimal ControllerState mock
+class MockState:
+ def __init__(self):
+ # Start position: actual home position from config
+ from parol6.config import HOME_ANGLES_DEG
+
+ home_rad = np.deg2rad(HOME_ANGLES_DEG)
+ self.Position_in = np.zeros(6, dtype=np.int32)
+ rad_to_steps(home_rad, self.Position_in)
+ self.Position_out = np.zeros(6, dtype=np.int32)
+ self.Speed_out = np.zeros(6, dtype=np.int32)
+ self.Command_out = 0
+ self.motion_profile = "TOPPRA"
+
+
+class TestPrecomputedTrajectories:
+ """Tests for precomputed trajectory execution (no streaming executor)."""
+
+ def test_ruckig_trajectory_builds_successfully(self):
+ """RUCKIG trajectory builder creates valid trajectory."""
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ end_rad = np.deg2rad([10, -50, 180, 15, 10, 5]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.RUCKIG,
+ velocity_frac=0.5,
+ accel_frac=0.5,
+ )
+
+ trajectory = builder.build()
+
+ assert len(trajectory.steps) > 0, "Trajectory should have steps"
+ # Trajectory has no smooth attribute anymore - goes direct to controller
+
+ def test_toppra_trajectory_builds_successfully(self):
+ """TOPPRA trajectory builder creates valid trajectory."""
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ end_rad = np.deg2rad([10, -50, 180, 15, 10, 5]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.TOPPRA,
+ velocity_frac=0.5,
+ accel_frac=0.5,
+ )
+
+ trajectory = builder.build()
+
+ assert len(trajectory.steps) > 0, "Trajectory should have steps"
+
+ def test_linear_trajectory_builds_successfully(self):
+ """LINEAR trajectory builder creates valid trajectory."""
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ end_rad = np.deg2rad([10, -50, 180, 15, 10, 5]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.LINEAR,
+ velocity_frac=0.5,
+ accel_frac=0.5,
+ )
+
+ trajectory = builder.build()
+
+ assert len(trajectory.steps) > 0, "Trajectory should have steps"
+
+
+class TestLimitValidation:
+ """Tests for QUINTIC/TRAPEZOID limit validation."""
+
+ def test_quintic_extends_duration_when_explicit_duration_too_short(self, caplog):
+ """QUINTIC automatically extends duration when user-specified 0.5s would violate joint velocity limits.
+
+ Scenario: Large joint move (80° on J1, 40° on J2) with explicit 0.5s duration.
+ Expected: Builder detects velocity violation, extends duration, logs warning about
+ exceeding the user-requested duration.
+ """
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ end_rad = np.deg2rad([10, -50, 180, 15, 10, 5]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.QUINTIC,
+ velocity_frac=0.5,
+ accel_frac=0.5,
+ duration=0.5, # Too short for this move - will be extended
+ )
+
+ trajectory = builder.build()
+ # Duration should have been extended beyond the requested 0.5s
+ assert trajectory.duration > 0.5
+ # Should have logged a warning about extension
+ assert "Extending duration" in caplog.text
+
+ def test_quintic_accepts_safe_velocity(self):
+ """QUINTIC trajectory builds successfully with safe parameters."""
+ # Small move with long duration stays within limits
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ # Small move - just 5 degrees
+ end_rad = np.deg2rad([90, -85, 180, 0, 0, 180]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.QUINTIC,
+ velocity_frac=0.5,
+ accel_frac=0.5,
+ duration=2.0, # Long duration for small move
+ )
+
+ trajectory = builder.build()
+ assert len(trajectory.steps) > 0
+
+ def test_trapezoid_extends_duration_when_explicit_duration_too_short(self, caplog):
+ """TRAPEZOID automatically extends duration when user-specified 0.3s would violate joint velocity limits.
+
+ Scenario: Large joint move (80° on J1, 40° on J2) with explicit 0.3s duration.
+ Expected: Builder detects velocity violation, extends duration, logs warning about
+ exceeding the user-requested duration.
+ """
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ end_rad = np.deg2rad([10, -50, 180, 15, 10, 5]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.TRAPEZOID,
+ velocity_frac=0.5,
+ accel_frac=0.5,
+ duration=0.3, # Too short for this move - will be extended
+ )
+
+ trajectory = builder.build()
+ # Duration should have been extended beyond the requested 0.3s
+ assert trajectory.duration > 0.3
+ # Should have logged a warning about extension
+ assert "Extending duration" in caplog.text
+
+
+class TestRuckigExecution:
+ """Tests for RUCKIG joint move execution."""
+
+ def test_ruckig_reaches_target(self):
+ """RUCKIG trajectory reaches target accurately."""
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ target_angles = [10, -50, 180, 15, 10, 5]
+ end_rad = np.deg2rad(target_angles).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=50)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.RUCKIG,
+ velocity_frac=1.0,
+ accel_frac=1.0,
+ )
+
+ trajectory = builder.build()
+
+ # Verify final step reaches target
+ final_steps = trajectory.steps[-1]
+ final_rad = np.zeros(6, dtype=np.float64)
+ steps_to_rad(final_steps, final_rad)
+
+ error_deg = np.rad2deg(np.abs(final_rad - end_rad))
+ max_error = np.max(error_deg)
+
+ assert max_error < 0.1, (
+ f"RUCKIG should reach target within 0.1 deg, got {max_error:.3f} deg"
+ )
+
+ def test_ruckig_joint_move_command_setup(self):
+ """MoveJCommand with RUCKIG profile sets up trajectory."""
+ from parol6.protocol.wire import MoveJCmd
+
+ # Create params struct with speed (duration=0 means use speed)
+ params = MoveJCmd(
+ angles=[10.0, -50.0, 180.0, 15.0, 10.0, 5.0],
+ speed=0.5,
+ accel=0.5,
+ )
+ cmd = MoveJCommand(params)
+
+ state = MockState()
+ state.motion_profile = "RUCKIG"
+
+ cmd.do_setup(state)
+
+ # Command should have trajectory steps ready for direct execution
+ assert hasattr(cmd, "trajectory_steps")
+ assert len(cmd.trajectory_steps) > 0
+
+
+class TestQuinticGeometry:
+ """Tests for QUINTIC trajectory geometry."""
+
+ def test_quintic_samples_path_correctly(self):
+ """QUINTIC trajectory samples all path waypoints correctly."""
+ # Use a small move that respects limits
+ start_rad = np.deg2rad([90, -90, 180, 0, 0, 180]).astype(np.float64)
+ end_rad = np.deg2rad([90, -85, 180, 0, 0, 180]).astype(np.float64)
+
+ joint_path = JointPath.interpolate(start_rad, end_rad, n_samples=100)
+ builder = TrajectoryBuilder(
+ joint_path=joint_path,
+ profile=ProfileType.QUINTIC,
+ velocity_frac=0.5,
+ duration=3.0, # Long duration for safe velocity
+ )
+
+ trajectory = builder.build()
+
+ # Check intermediate points follow the path geometry
+ n_steps = len(trajectory.steps)
+ mid_idx = n_steps // 2
+
+ mid_steps = trajectory.steps[mid_idx]
+ mid_rad = np.zeros(6, dtype=np.float64)
+ steps_to_rad(mid_steps, mid_rad)
+
+ # Quintic timing at t=0.5 gives s = 10*(0.5)^3 - 15*(0.5)^4 + 6*(0.5)^5 = 0.5
+ # So midpoint should be halfway along path
+ expected_mid_rad = (start_rad + end_rad) / 2
+ error_deg = np.rad2deg(np.abs(mid_rad - expected_mid_rad))
+ max_error = np.max(error_deg)
+
+ # Allow some error from step quantization
+ assert max_error < 2.0, (
+ f"QUINTIC midpoint should be near path midpoint, error={max_error:.2f} deg"
+ )
+
+
+if __name__ == "__main__":
+ pytest.main([__file__, "-v"])
diff --git a/tests/unit/test_server_manager.py b/tests/unit/test_server_manager.py
index 341a1a5..e578117 100644
--- a/tests/unit/test_server_manager.py
+++ b/tests/unit/test_server_manager.py
@@ -1,62 +1,40 @@
import pytest
-from parol6.client.manager import ServerManager, is_server_running, manage_server
+from parol6 import Robot
-@pytest.mark.asyncio
-async def test_is_server_running_false_when_no_server(monkeypatch):
- # Use an unlikely high port to avoid collisions
- assert not is_server_running(host="127.0.0.1", port=59999, timeout=0.2)
+def test_is_available_false_when_no_server():
+ robot = Robot(port=59999)
+ assert not robot.is_available()
-@pytest.mark.asyncio
-async def test_manage_server_starts_and_reports_running(monkeypatch):
- # Choose a high-numbered UDP port to minimize collision risk
+def test_robot_start_and_stop():
host = "127.0.0.1"
port = 59998
- # Ensure no server is running before we start
- assert not is_server_running(host=host, port=port, timeout=0.2)
+ robot = Robot(host=host, port=port, timeout=15.0)
+ assert not robot.is_available()
- manager: ServerManager | None = None
try:
- manager = manage_server(
- host=host, port=port, com_port=None, extra_env=None, normalize_logs=False
- )
- assert isinstance(manager, ServerManager)
-
- # After manage_server, the UDP endpoint should respond to PING
- assert is_server_running(host=host, port=port, timeout=1.0)
+ robot.start()
+ assert robot.is_available()
finally:
- if manager is not None:
- manager.stop_controller()
+ robot.stop()
- # After stop_controller returns, the server should no longer respond to PING
- assert not is_server_running(host=host, port=port, timeout=0.5)
+ assert not robot.is_available()
-@pytest.mark.asyncio
-async def test_manage_server_fast_fails_when_already_running(monkeypatch):
+def test_robot_start_fast_fails_when_already_running():
host = "127.0.0.1"
port = 59997
- manager: ServerManager | None = None
+ robot = Robot(host=host, port=port, timeout=15.0)
try:
- # First start a server
- manager = manage_server(
- host=host, port=port, com_port=None, extra_env=None, normalize_logs=False
- )
- assert is_server_running(host=host, port=port, timeout=1.0)
+ robot.start()
+ assert robot.is_available()
- # Second attempt should raise RuntimeError because the port is taken by an existing server
+ robot2 = Robot(host=host, port=port)
with pytest.raises(RuntimeError):
- manage_server(
- host=host,
- port=port,
- com_port=None,
- extra_env=None,
- normalize_logs=False,
- )
+ robot2.start()
finally:
- if manager is not None:
- manager.stop_controller()
+ robot.stop()
diff --git a/tests/unit/test_smooth_motion_api.py b/tests/unit/test_smooth_motion_api.py
deleted file mode 100644
index 21f647c..0000000
--- a/tests/unit/test_smooth_motion_api.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import importlib
-import inspect
-
-
-def test_smooth_motion_reexports_exist():
- sm = importlib.import_module("parol6.smooth_motion")
-
- # Required re-exports
- for name in [
- "CircularMotion",
- "SplineMotion",
- "HelixMotion",
- "WaypointTrajectoryPlanner",
- ]:
- assert hasattr(sm, name), f"parol6.smooth_motion missing {name}"
- obj = getattr(sm, name)
- assert inspect.isclass(obj), f"{name} should be a class"
-
- # Optional blenders (presence depends on legacy module)
- # If present, they should be classes as well
- for opt_name in ["MotionBlender", "AdvancedMotionBlender"]:
- if hasattr(sm, opt_name):
- opt = getattr(sm, opt_name)
- assert inspect.isclass(opt), f"{opt_name} should be a class if present"
diff --git a/tests/unit/test_status_cache_enablement_ascii.py b/tests/unit/test_status_cache_enablement_ascii.py
index dcb4036..b1d7ac0 100644
--- a/tests/unit/test_status_cache_enablement_ascii.py
+++ b/tests/unit/test_status_cache_enablement_ascii.py
@@ -1,74 +1,128 @@
+"""
+Integration tests for enablement detection via IK worker subprocess.
+"""
+
+import time
+
import numpy as np
import pytest
from parol6.server.status_cache import StatusCache
-from parol6.server.state import ControllerState
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+
+
+@pytest.mark.integration
+def test_ik_worker_detects_joint_limits():
+ """
+ Test that the IK worker correctly detects when a joint is near its limits.
+
+ Joint enablement format: [J1+, J1-, J2+, J2-, ..., J6+, J6-]
+ - When near max limit: + direction disabled (0), - direction enabled (1)
+ - When near min limit: + direction enabled (1), - direction disabled (0)
+ """
+ cache = StatusCache()
+
+ try:
+ time.sleep(0.5) # Allow more time for subprocess startup on Windows
+ assert cache._ik_process.is_alive(), "IK worker failed to start"
+
+ # Start from home position and move J1 near its max limit
+ from parol6.config import HOME_ANGLES_DEG
+
+ qlim = PAROL6_ROBOT.robot.qlim
+
+ q = np.deg2rad(HOME_ANGLES_DEG)
+ # Delta is 0.2 degrees = 0.0035 rad, so we need to be within that
+ q[0] = qlim[1, 0] - 0.001 # J1 very near max limit
+
+ T = PAROL6_ROBOT.robot.fkine(q)
+ T_matrix = T
+
+ cache._submit_ik_request(q, T_matrix)
+
+ ready = False
+ for _ in range(200): # Longer timeout for CI - IK worker does 24 IK solves
+ ready = cache._poll_ik_results()
+ if ready:
+ break
+ time.sleep(0.02)
+
+ assert ready, "IK worker did not return results"
+ joint_en = cache.joint_en
+
+ # J1 near max: J1+ should be disabled, J1- should be enabled
+ assert joint_en[0] == 0, (
+ f"J1+ should be disabled near max limit, got {joint_en[0]}"
+ )
+ assert joint_en[1] == 1, (
+ f"J1- should be enabled near max limit, got {joint_en[1]}"
+ )
+ # Now test J1 near min limit
+ q[0] = qlim[0, 0] + 0.001 # J1 very near min limit
-@pytest.mark.unit
-def test_status_cache_includes_enablement_fields(monkeypatch):
+ T = PAROL6_ROBOT.robot.fkine(q)
+ T_matrix = T
+
+ cache._submit_ik_request(q, T_matrix)
+
+ ready = False
+ for _ in range(200): # Longer timeout for CI - IK worker does 24 IK solves
+ ready = cache._poll_ik_results()
+ if ready:
+ break
+ time.sleep(0.02)
+
+ assert ready, "IK worker did not return results for min limit test"
+ joint_en = cache.joint_en
+
+ # J1 near min: J1+ should be enabled, J1- should be disabled
+ assert joint_en[0] == 1, (
+ f"J1+ should be enabled near min limit, got {joint_en[0]}"
+ )
+ assert joint_en[1] == 0, (
+ f"J1- should be disabled near min limit, got {joint_en[1]}"
+ )
+
+ finally:
+ cache.close()
+
+
+@pytest.mark.integration
+def test_ik_worker_all_enabled_in_safe_position():
+ """
+    Test that all joint directions are enabled when the robot is at the home (safe) position.
+ """
cache = StatusCache()
- # Patch heavy dependencies to be deterministic and fast
- monkeypatch.setattr(
- "parol6.server.status_cache.PAROL6_ROBOT",
- type(
- "_Dummy",
- (),
- {
- "ops": type(
- "_Ops",
- (),
- {
- "steps_to_deg": staticmethod(lambda steps: [0.0] * 6),
- "steps_to_rad": staticmethod(lambda steps: [0.0] * 6),
- },
- )(),
- "robot": type(
- "_Robot", (), {"qlim": np.vstack([[-3.14] * 6, [3.14] * 6])}
- )(),
- },
- ),
- raising=True,
- )
-
- # Short-circuit fkine to avoid spatialmath calls
- monkeypatch.setattr(
- "parol6.server.status_cache.get_fkine_flat_mm",
- lambda state: np.zeros((16,), dtype=float),
- raising=True,
- )
-
- # Patch enablement calculators to fixed values
- def _fake_joint(cache_self, q_rad): # type: ignore[no-redef]
- bits = [1, 0] * 6
- cache_self.joint_en[:] = np.asarray(bits, dtype=np.uint8)
- cache_self._joint_en_ascii = ",".join(str(int(v)) for v in bits)
-
- def _fake_cart(cache_self, T, frame, q_rad): # type: ignore[no-redef]
- bits = [1] * 12 if frame == "WRF" else [0] * 12
- ascii_bits = ",".join(str(b) for b in bits)
- if frame == "WRF":
- cache_self.cart_en_wrf[:] = np.asarray(bits, dtype=np.uint8)
- cache_self._cart_en_wrf_ascii = ascii_bits
- else:
- cache_self.cart_en_trf[:] = np.asarray(bits, dtype=np.uint8)
- cache_self._cart_en_trf_ascii = ascii_bits
-
- monkeypatch.setattr(StatusCache, "_compute_joint_enable", _fake_joint, raising=True)
- monkeypatch.setattr(StatusCache, "_compute_cart_enable", _fake_cart, raising=True)
-
- # Trigger an update with a fresh state
- state = ControllerState()
- # Change Position_in so StatusCache treats it as an update (pos_changed=True)
- arr = np.zeros((6,), dtype=np.int32)
- arr[0] = 1
- state.Position_in[:] = arr
- cache.update_from_state(state)
-
- txt = cache.to_ascii()
- assert "JOINT_EN=" in txt
- assert "CART_EN_WRF=" in txt and "CART_EN_TRF=" in txt
- assert "JOINT_EN=1,0,1,0,1,0,1,0,1,0,1,0" in txt
- assert "CART_EN_WRF=" + ",".join(["1"] * 12) in txt
- assert "CART_EN_TRF=" + ",".join(["0"] * 12) in txt
+ try:
+ time.sleep(0.5) # Allow more time for subprocess startup on Windows
+ assert cache._ik_process.is_alive()
+
+ # Use home position - a known safe position
+ from parol6.config import HOME_ANGLES_DEG
+
+ q_home = np.deg2rad(HOME_ANGLES_DEG)
+
+ T = PAROL6_ROBOT.robot.fkine(q_home)
+ T_matrix = T
+
+ cache._submit_ik_request(q_home, T_matrix)
+
+ ready = False
+ for _ in range(200): # Longer timeout for CI - IK worker does 24 IK solves
+ ready = cache._poll_ik_results()
+ if ready:
+ break
+ time.sleep(0.02)
+
+ assert ready, "IK worker did not return results in time"
+ joint_en = cache.joint_en
+
+        # All joint directions should be enabled at the home (safe) position
+ assert np.all(joint_en == 1), (
+ f"All joints should be enabled at center, got {joint_en}"
+ )
+
+ finally:
+ cache.close()
diff --git a/tests/unit/test_tcp_offset.py b/tests/unit/test_tcp_offset.py
new file mode 100644
index 0000000..c93e669
--- /dev/null
+++ b/tests/unit/test_tcp_offset.py
@@ -0,0 +1,98 @@
+"""Tests for the TCP offset API."""
+
+import numpy as np
+
+from parol6.protocol.wire import SetTcpOffsetCmd, TcpOffsetResultStruct
+
+
+# ── Wire round-trip ──────────────────────────────────────────────────────
+
+
+def test_set_tcp_offset_cmd_fields():
+ cmd = SetTcpOffsetCmd(x=1.0, y=2.0, z=-190.0)
+ assert cmd.x == 1.0
+ assert cmd.y == 2.0
+ assert cmd.z == -190.0
+
+
+def test_set_tcp_offset_cmd_defaults():
+ cmd = SetTcpOffsetCmd()
+ assert cmd.x == 0.0
+ assert cmd.y == 0.0
+ assert cmd.z == 0.0
+
+
+def test_tcp_offset_result_struct():
+ result = TcpOffsetResultStruct(x=10.0, y=20.0, z=-190.0)
+ assert result.x == 10.0
+ assert result.y == 20.0
+ assert result.z == -190.0
+
+
+# ── apply_tool with tcp_offset_m ─────────────────────────────────────────
+
+
+def test_apply_tool_with_zero_offset():
+ """apply_tool with zero offset should produce same FK as without offset."""
+ import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+
+ q = np.zeros(6, dtype=np.float64)
+
+ PAROL6_ROBOT.apply_tool("SSG-48")
+ fk_without = PAROL6_ROBOT.robot.fkine(q).copy()
+
+ PAROL6_ROBOT.apply_tool("SSG-48", tcp_offset_m=(0.0, 0.0, 0.0))
+ fk_with_zero = PAROL6_ROBOT.robot.fkine(q).copy()
+
+ np.testing.assert_allclose(fk_without, fk_with_zero, atol=1e-12)
+ PAROL6_ROBOT.apply_tool("NONE")
+
+
+def test_apply_tool_with_nonzero_offset():
+ """apply_tool with offset should shift the FK TCP position."""
+ import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+
+ q = np.zeros(6, dtype=np.float64)
+
+ PAROL6_ROBOT.apply_tool("SSG-48")
+ fk_base = PAROL6_ROBOT.robot.fkine(q).copy()
+
+ PAROL6_ROBOT.apply_tool("SSG-48", tcp_offset_m=(0.0, 0.0, -0.190))
+ fk_offset = PAROL6_ROBOT.robot.fkine(q).copy()
+
+ # FK results should differ (TCP position shifted)
+ assert not np.allclose(fk_base[:3, 3], fk_offset[:3, 3], atol=1e-6)
+
+ PAROL6_ROBOT.apply_tool("NONE")
+
+
+# ── DryRunRobotClient TCP offset ─────────────────────────────────────────
+
+
+def test_dry_run_set_tcp_offset():
+ """DryRunRobotClient should track TCP offset."""
+ from parol6.client.dry_run_client import DryRunRobotClient
+
+ client = DryRunRobotClient()
+ assert client.tcp_offset() == [0.0, 0.0, 0.0]
+
+ client.set_tcp_offset(x=0, y=0, z=-190)
+ assert client.tcp_offset() == [0.0, 0.0, -190.0]
+
+ # Reset
+ client.set_tcp_offset(x=0, y=0, z=0)
+ assert client.tcp_offset() == [0.0, 0.0, 0.0]
+
+
+def test_dry_run_select_tool_resets_tcp_offset():
+ """Selecting a new tool should reset TCP offset to zero."""
+ from parol6.client.dry_run_client import DryRunRobotClient
+
+ client = DryRunRobotClient()
+ client.select_tool("SSG-48")
+ client.set_tcp_offset(x=0, y=0, z=-190)
+ assert client.tcp_offset() == [0.0, 0.0, -190.0]
+
+ # Selecting a tool resets offset
+ client.select_tool("SSG-48")
+ assert client.tcp_offset() == [0.0, 0.0, 0.0]
diff --git a/tests/unit/test_trajectory.py b/tests/unit/test_trajectory.py
deleted file mode 100644
index 2f9daf1..0000000
--- a/tests/unit/test_trajectory.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import math
-
-import numpy as np
-import pytest
-from parol6.config import CONTROL_RATE_HZ
-from parol6.utils import trajectory as traj
-
-
-def approx_equal(a, b, tol=1e-6):
- return abs(a - b) <= tol
-
-
-def test_plan_linear_quintic_endpoints_and_shape():
- start = [0.0, 0.0]
- end = [10.0, 20.0]
- duration = 1.0 # seconds
-
- path = traj.plan_linear_quintic(start, end, duration)
- # Expected sample count: duration * rate + 1
- expected_n = int(round(duration * CONTROL_RATE_HZ)) + 1
- assert path.shape == (expected_n, 2)
-
- # Endpoints
- assert np.allclose(path[0], start)
- assert np.allclose(path[-1], end)
-
- # Monotonic progression on each axis
- diffs = np.diff(path, axis=0)
- assert np.all(diffs[:, 0] >= -1e-9)
- assert np.all(diffs[:, 1] >= -1e-9)
-
-
-def test_plan_linear_cubic_endpoints_and_shape():
- start = [5.0, -5.0, 2.5]
- end = [6.0, 0.0, 4.5]
- duration = 0.5
-
- path = traj.plan_linear_cubic(start, end, duration)
- expected_n = int(round(duration * CONTROL_RATE_HZ)) + 1
- assert path.shape == (expected_n, 3)
-
- assert np.allclose(path[0], start)
- assert np.allclose(path[-1], end)
-
- diffs = np.diff(path, axis=0)
- # Allow tiny numerical noise
- assert np.all(diffs >= -1e-9)
-
-
-def _timings(distance, v_max, a_max):
- # Mirror of _trapezoid_timings logic for test-side expectation
- if distance <= 0 or v_max <= 0 or a_max <= 0:
- return 0.0, 0.0, 0.0, 0.0, True
- t_a = v_max / a_max
- s_a = 0.5 * a_max * t_a**2
- if 2 * s_a < distance:
- s_c = distance - 2 * s_a
- t_c = s_c / v_max
- T = 2 * t_a + t_c
- return T, t_a, t_c, v_max, False
- else:
- v_peak = math.sqrt(a_max * distance)
- t_a = v_peak / a_max
- T = 2 * t_a
- return T, t_a, 0.0, v_peak, True
-
-
-@pytest.mark.parametrize(
- "start,end,v_max,a_max",
- [
- (0.0, 1.0, 0.5, 1.0), # trapezoidal (2*s_a < distance)
- (0.0, 0.02, 1.0, 10.0), # triangular (cannot reach cruise)
- (10.0, 0.0, 0.5, 1.0), # reverse direction trapezoidal
- ],
-)
-def test_plan_trapezoid_position_1d_shapes_and_endpoints(start, end, v_max, a_max):
- positions = traj.plan_trapezoid_position_1d(start, end, v_max, a_max)
- assert positions.ndim == 1
- assert approx_equal(positions[0], start, tol=1e-9)
- assert approx_equal(positions[-1], end, tol=1e-9)
-
- # Monotonic in the proper direction
- diffs = np.diff(positions)
- if end >= start:
- assert np.all(diffs >= -1e-9)
- else:
- assert np.all(diffs <= 1e-9)
-
- # Sample count should match expected duration discretization
- dist = abs(end - start)
- T, _ta, _tc, _vp, _tri = _timings(dist, v_max, a_max)
- if T > 0:
- expected_n = int(round(T * CONTROL_RATE_HZ)) + 1
- assert len(positions) == expected_n
- else:
- assert len(positions) == 2
diff --git a/tests/unit/test_wire.py b/tests/unit/test_wire.py
deleted file mode 100644
index e91dc98..0000000
--- a/tests/unit/test_wire.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import pytest
-from parol6.protocol import wire
-
-
-def test_encode_move_joint_with_none():
- s = wire.encode_move_joint([1, 2, 3, 4, 5, 6], None, None)
- assert s == "MOVEJOINT|1|2|3|4|5|6|NONE|NONE"
-
-
-def test_encode_move_joint_with_values():
- s = wire.encode_move_joint([0, -10.5, 20, 30.25, -40, 50], 2.5, 75)
- assert s == "MOVEJOINT|0|-10.5|20|30.25|-40|50|2.5|75"
-
-
-def test_encode_move_pose():
- s = wire.encode_move_pose([1, 2, 3, 4, 5, 6], 1.0, None)
- assert s == "MOVEPOSE|1|2|3|4|5|6|1.0|NONE"
-
-
-def test_encode_move_cartesian():
- s = wire.encode_move_cartesian([10, 20, 30, 1, 2, 3], None, 50)
- assert s == "MOVECART|10|20|30|1|2|3|NONE|50"
-
-
-def test_encode_move_cartesian_rel_trf_variants():
- # Profile/tracking should be upper-cased and None becomes NONE
- s = wire.encode_move_cartesian_rel_trf(
- deltas=[1, 2, 3, 4, 5, 6],
- duration=None,
- speed=50,
- accel=100,
- profile="s_curve",
- tracking="queued",
- )
- assert s == "MOVECARTRELTRF|1|2|3|4|5|6|NONE|50|100|S_CURVE|QUEUED"
-
- s2 = wire.encode_move_cartesian_rel_trf(
- deltas=[0, 0, 0, 0, 0, 0],
- duration=2.0,
- speed=None,
- accel=None,
- profile=None,
- tracking=None,
- )
- assert s2 == "MOVECARTRELTRF|0|0|0|0|0|0|2.0|NONE|NONE|NONE|NONE"
-
-
-def test_encode_jog_joint():
- s = wire.encode_jog_joint(3, 80, 0.25, None)
- assert s == "JOG|3|80|0.25|NONE"
-
- s2 = wire.encode_jog_joint(0, 10, None, 5.5)
- assert s2 == "JOG|0|10|NONE|5.5"
-
-
-def test_encode_cart_jog():
- s = wire.encode_cart_jog("WRF", "X+", 50, 0.5)
- assert s == "CARTJOG|WRF|X+|50|0.5"
-
-
-def test_encode_gcode():
- s = wire.encode_gcode("G0 X0 Y0 Z0")
- assert s == "GCODE|G0 X0 Y0 Z0"
-
-
-def test_encode_gcode_program_inline():
- s = wire.encode_gcode_program_inline(["G21", "G90", "G0 X0 Y0", "G1 X10 F1000"])
- assert s == "GCODE_PROGRAM|INLINE|G21;G90;G0 X0 Y0;G1 X10 F1000"
-
-
-@pytest.mark.parametrize(
- "resp,prefix,expected",
- [
- ("ANGLES|0,1,2,3,4,5", "ANGLES", [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]),
- ("IO|1,0,1,0,1", "IO", [1, 0, 1, 0, 1]),
- ("GRIPPER|1,255,150,500,3,1", "GRIPPER", [1, 255, 150, 500, 3, 1]),
- ("SPEEDS|0,0.5,-1,2.5,3,4", "SPEEDS", [0.0, 0.5, -1.0, 2.5, 3.0, 4.0]),
- (
- "POSE|1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16",
- "POSE",
- [float(i) for i in range(1, 17)],
- ),
- ],
-)
-def test_decode_simple_success(resp, prefix, expected):
- out = wire.decode_simple(resp, prefix)
- assert out == expected
-
-
-@pytest.mark.parametrize(
- "resp,prefix",
- [
- ("ANGLES|a,b,c", "ANGLES"),
- ("IO|1,2,x", "IO"),
- ("WRONG|1,2,3", "ANGLES"),
- ("", "ANGLES"),
- (None, "ANGLES"),
- ],
-)
-def test_decode_simple_fail(resp, prefix):
- out = wire.decode_simple(resp, prefix)
- assert out is None
-
-
-def test_decode_status_success():
- resp = (
- "STATUS|"
- "POSE=1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|"
- "ANGLES=0,10,20,30,40,50|"
- "IO=1,1,0,0,1|"
- "GRIPPER=1,20,150,500,3,1"
- )
- result = wire.decode_status(resp)
- assert result is not None
- assert isinstance(result, dict)
- assert isinstance(result.get("pose"), list)
- assert len(result["pose"]) == 16
- assert result["angles"] == [0.0, 10.0, 20.0, 30.0, 40.0, 50.0]
- assert result["io"] == [1, 1, 0, 0, 1]
- assert result["gripper"] == [1, 20, 150, 500, 3, 1]
-
-
-def test_decode_status_invalid_returns_none():
- assert wire.decode_status("STATUS|") is None
- assert wire.decode_status("") is None
- assert wire.decode_status("NOTSTATUS|whatever") is None
diff --git a/tests/unit/test_wire_actions.py b/tests/unit/test_wire_actions.py
deleted file mode 100644
index bf43772..0000000
--- a/tests/unit/test_wire_actions.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""
-Unit tests for wire protocol action field parsing.
-
-Tests that decode_status correctly parses ACTION_CURRENT, ACTION_STATE, and ACTION_NEXT fields.
-"""
-
-from parol6.protocol import wire
-
-
-def test_decode_status_with_action_fields():
- """Test that decode_status parses ACTION_* fields from status string."""
- # Build status string with action fields
- resp = (
- "STATUS|"
- "POSE=" + ",".join(str(i) for i in range(1, 17)) + "|"
- "ANGLES=0,10,20,30,40,50|"
- "IO=1,1,0,0,1|"
- "GRIPPER=1,20,150,500,3,1|"
- "ACTION_CURRENT=MovePoseCommand|"
- "ACTION_STATE=EXECUTING|"
- "ACTION_NEXT=HomeCommand"
- )
-
- result = wire.decode_status(resp)
-
- assert result is not None
- assert isinstance(result, dict)
-
- # Verify traditional fields still work
- assert len(result["pose"]) == 16
- assert result["angles"] == [0.0, 10.0, 20.0, 30.0, 40.0, 50.0]
- assert result["io"] == [1, 1, 0, 0, 1]
- assert result["gripper"] == [1, 20, 150, 500, 3, 1]
-
- # Verify new action fields
- assert result["action_current"] == "MovePoseCommand"
- assert result["action_state"] == "EXECUTING"
-
-
-def test_decode_status_with_empty_action_fields():
- """Test parsing when action fields are present but empty."""
- resp = (
- "STATUS|"
- "POSE=" + ",".join(str(i) for i in range(1, 17)) + "|"
- "ANGLES=0,0,0,0,0,0|"
- "IO=1,1,0,0,1|"
- "GRIPPER=0,0,0,0,0,0|"
- "ACTION_CURRENT=|"
- "ACTION_STATE=IDLE|"
- "ACTION_NEXT="
- )
-
- result = wire.decode_status(resp)
-
- assert result is not None
- assert result["action_current"] == ""
- assert result["action_state"] == "IDLE"
-
-
-def test_decode_status_backward_compatible_without_actions():
- """Test that status without ACTION_* fields still decodes (backward compat)."""
- # Old-style status without action fields
- resp = (
- "STATUS|"
- "POSE=" + ",".join(str(i) for i in range(1, 17)) + "|"
- "ANGLES=0,10,20,30,40,50|"
- "IO=1,1,0,0,1|"
- "GRIPPER=1,20,150,500,3,1"
- )
-
- result = wire.decode_status(resp)
-
- assert result is not None
- # Traditional fields should work
- assert result["angles"] == [0.0, 10.0, 20.0, 30.0, 40.0, 50.0]
-
- # Action fields should be None when not present
- assert result.get("action_current") is None
- assert result.get("action_state") is None
-
-
-def test_decode_status_with_various_action_states():
- """Test parsing with different action state values."""
- states = ["IDLE", "EXECUTING", "COMPLETED", "FAILED"]
-
- for state_value in states:
- resp = (
- "STATUS|"
- "POSE=" + ",".join("0" for _ in range(16)) + "|"
- "ANGLES=0,0,0,0,0,0|"
- "IO=1,1,0,0,1|"
- "GRIPPER=0,0,0,0,0,0|"
- f"ACTION_CURRENT=TestCommand|"
- f"ACTION_STATE={state_value}|"
- f"ACTION_NEXT=NextCommand"
- )
-
- result = wire.decode_status(resp)
- assert result is not None
- assert result["action_state"] == state_value
diff --git a/tests/unit/test_wrist_flip_velocity_limit.py b/tests/unit/test_wrist_flip_velocity_limit.py
new file mode 100644
index 0000000..5cbc518
--- /dev/null
+++ b/tests/unit/test_wrist_flip_velocity_limit.py
@@ -0,0 +1,72 @@
+"""Test _max_vel_ratio_jit velocity ratio computation.
+
+When IK crosses the wrist singularity (J5≈0), J4/J6 can jump ~90° in one tick.
+_max_vel_ratio_jit returns the ratio of the worst-case joint delta to its
+per-tick hardware velocity limit. A ratio >1.0 triggers CSE speed reduction.
+"""
+
+import numpy as np
+
+import parol6.PAROL6_ROBOT as PAROL6_ROBOT
+from parol6.commands.servo_commands import _max_vel_ratio_jit
+from parol6.config import INTERVAL_S, LIMITS
+
+_HARD_VEL = np.array(LIMITS.joint.hard.velocity, dtype=np.float64)
+_MAX_STEP_RAD = _HARD_VEL * INTERVAL_S
+
+
+def _home_rad() -> np.ndarray:
+ return np.deg2rad(
+ np.ascontiguousarray(PAROL6_ROBOT.joint.standby_deg, dtype=np.float64)
+ )
+
+
+class TestMaxVelRatioJit:
+ def test_large_jump_exceeds_limit(self) -> None:
+ home = _home_rad()
+ target = home.copy()
+ target[3] += np.deg2rad(90.0)
+
+ assert _max_vel_ratio_jit(target, home) > 1.0
+
+ def test_small_move_within_limit(self) -> None:
+ home = _home_rad()
+ target = home.copy()
+ target[0] += _MAX_STEP_RAD[0] * 0.5
+
+ assert _max_vel_ratio_jit(target, home) < 1.0
+
+ def test_exactly_at_limit(self) -> None:
+ home = _home_rad()
+ target = home.copy()
+ target[2] += _MAX_STEP_RAD[2] * 0.999
+
+ assert _max_vel_ratio_jit(target, home) < 1.0
+
+ def test_just_over_limit(self) -> None:
+ home = _home_rad()
+ target = home.copy()
+ target[2] += _MAX_STEP_RAD[2] * 1.001
+
+ ratio = _max_vel_ratio_jit(target, home)
+ assert ratio > 1.0
+ assert abs(ratio - 1.001) < 0.01
+
+ def test_multi_joint_returns_worst_case(self) -> None:
+ """Ratio reflects the worst-case joint, not the sum."""
+ home = _home_rad()
+ target = home.copy()
+ target[0] += _MAX_STEP_RAD[0] * 0.5 # ratio 0.5
+ target[3] += np.deg2rad(90.0) # ratio >> 1
+ target[5] -= np.deg2rad(45.0) # ratio >> 1
+
+ ratio = _max_vel_ratio_jit(target, home)
+ assert ratio > 1.0
+ # Should be driven by whichever of J4/J6 has the higher ratio
+ expected_j4 = np.deg2rad(90.0) / _MAX_STEP_RAD[3]
+ expected_j6 = np.deg2rad(45.0) / _MAX_STEP_RAD[5]
+ assert abs(ratio - max(expected_j4, expected_j6)) < 0.1
+
+ def test_identical_positions_zero_ratio(self) -> None:
+ home = _home_rad()
+ assert _max_vel_ratio_jit(home, home) == 0.0
diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py
deleted file mode 100644
index 7cc9d38..0000000
--- a/tests/utils/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""
-Test utilities package.
-
-Provides helper functions and classes for testing the PAROL6 Python API.
-"""
-
-from .process import HeadlessCommanderProc, find_available_ports, wait_for_completion
-
-__all__ = [
- "HeadlessCommanderProc",
- "wait_for_completion",
- "find_available_ports",
-]
diff --git a/tests/utils/process.py b/tests/utils/process.py
deleted file mode 100644
index 546636d..0000000
--- a/tests/utils/process.py
+++ /dev/null
@@ -1,329 +0,0 @@
-"""
-Process management utilities for testing.
-
-Provides classes and functions to manage the controller.py subprocess
-during integration tests, including startup, readiness checks, and cleanup.
-"""
-
-import logging
-import os
-import socket
-import subprocess
-import sys
-import threading
-import time
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-
-class HeadlessCommanderProc:
- """
- Manages a controller.py subprocess for integration testing.
-
- Handles starting, stopping, and checking readiness of the commander process
- with configurable ports and environment variables.
- """
-
- def __init__(
- self,
- ip: str = "127.0.0.1",
- port: int = 5001,
- ack_port: int = 5002,
- env: dict[str, str] | None = None,
- ):
- """
- Initialize the process manager.
-
- Args:
- ip: IP address for the server to bind to
- port: UDP port for command reception
- ack_port: UDP port for acknowledgment sending
- env: Additional environment variables to set
- """
- self.ip = ip
- self.port = port
- self.ack_port = ack_port
- self.env = env or {}
-
- self.process: subprocess.Popen | None = None
- self._output_lines: list[str] = []
- self._error_lines: list[str] = []
- self._output_thread: threading.Thread | None = None
- self._error_thread: threading.Thread | None = None
-
- def start(self, log_level: str = "WARNING", timeout: float = 10.0) -> bool:
- """
- Start the headless commander process.
-
- Args:
- log_level: Logging level for the subprocess
- timeout: Maximum time to wait for process startup
-
- Returns:
- True if started successfully, False otherwise
- """
- if self.process and self.process.poll() is None:
- logger.warning("Process already running")
- return True
-
- # Prepare environment
- process_env = os.environ.copy()
- process_env.update(
- {
- "PAROL6_FAKE_SERIAL": "1", # Enable fake serial simulation
- "PAROL6_NOAUTOHOME": "1", # Disable auto-homing for tests
- "PAROL6_SERVER_IP": self.ip,
- "PAROL6_SERVER_PORT": str(self.port),
- "PAROL6_ACK_PORT": str(self.ack_port),
- }
- )
- process_env.update(self.env)
-
- # Find the controller.py script
- script_path = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "controller.py"
- )
-
- if not os.path.exists(script_path):
- logger.error(f"controller.py not found at {script_path}")
- return False
-
- # Prepare command
- cmd = [sys.executable, script_path, "--log-level", log_level, "--no-auto-home"]
-
- try:
- logger.info(f"Starting headless commander: {' '.join(cmd)}")
- logger.debug(
- f"Environment: FAKE_SERIAL=1, NOAUTOHOME=1, IP={self.ip}, PORT={self.port}, ACK_PORT={self.ack_port}"
- )
-
- self.process = subprocess.Popen(
- cmd,
- env=process_env,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True,
- bufsize=1, # Line buffered
- universal_newlines=True,
- )
-
- # Start output capture threads
- self._start_output_capture()
-
- # Wait for process to be ready
- if self.wait_ready(timeout):
- logger.info(
- f"Headless commander started successfully (PID: {self.process.pid})"
- )
- return True
- else:
- logger.error("Process failed to become ready within timeout")
- self.stop()
- return False
-
- except Exception as e:
- logger.error(f"Failed to start process: {e}")
- if self.process:
- self.process.terminate()
- self.process = None
- return False
-
- def _start_output_capture(self):
- """Start threads to capture stdout and stderr."""
- if not self.process:
- return
-
- def capture_output(stream, output_list):
- try:
- for line in iter(stream.readline, ""):
- if line:
- line = line.rstrip("\n\r")
- output_list.append(line)
- # Also log to our test logger for debugging
- logger.debug(f"PROC: {line}")
- except Exception as e:
- logger.debug(f"Output capture error: {e}")
-
- self._output_thread = threading.Thread(
- target=capture_output,
- args=(self.process.stdout, self._output_lines),
- daemon=True,
- )
- self._error_thread = threading.Thread(
- target=capture_output,
- args=(self.process.stderr, self._error_lines),
- daemon=True,
- )
-
- self._output_thread.start()
- self._error_thread.start()
-
- def wait_ready(self, timeout: float = 10.0) -> bool:
- """
- Wait for the process to be ready by sending PING commands.
-
- Args:
- timeout: Maximum time to wait in seconds
-
- Returns:
- True if process responds to PING, False otherwise
- """
- start_time = time.time()
-
- while time.time() - start_time < timeout:
- if self.process and self.process.poll() is not None:
- # Process died
- logger.error("Process terminated during startup")
- return False
-
- # Try to ping the server
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.settimeout(1.0)
- sock.sendto(b"PING", (self.ip, self.port))
-
- try:
- data, _ = sock.recvfrom(1024)
- if data.decode("utf-8").strip() == "PONG":
- return True
- except TimeoutError:
- pass # No response yet
-
- except Exception as e:
- logger.debug(f"Ping attempt failed: {e}")
-
- time.sleep(0.5) # Wait before retry
-
- return False
-
- def stop(self) -> bool:
- """
- Stop the headless commander process.
-
- Returns:
- True if stopped successfully, False otherwise
- """
- if not self.process:
- return True
-
- try:
- # Try graceful shutdown first
- logger.debug("Attempting graceful shutdown...")
- self.process.terminate()
-
- try:
- self.process.wait(timeout=5.0)
- logger.info(
- f"Process terminated gracefully (exit code: {self.process.returncode})"
- )
- except subprocess.TimeoutExpired:
- logger.warning(
- "Process did not terminate gracefully, forcing shutdown..."
- )
- self.process.kill()
- self.process.wait()
- logger.info(f"Process killed (exit code: {self.process.returncode})")
-
- except Exception as e:
- logger.error(f"Error stopping process: {e}")
- return False
- finally:
- self.process = None
-
- return True
-
- def is_running(self) -> bool:
- """Check if the process is currently running."""
- return self.process is not None and self.process.poll() is None
-
- def get_output_lines(self) -> list[str]:
- """Get captured stdout lines."""
- return self._output_lines.copy()
-
- def get_error_lines(self) -> list[str]:
- """Get captured stderr lines."""
- return self._error_lines.copy()
-
- def clear_output(self):
- """Clear captured output lines."""
- self._output_lines.clear()
- self._error_lines.clear()
-
-
-def wait_for_completion(result_or_id: Any, timeout: float = 10.0) -> dict[str, Any]:
- """
- Unified waiting logic for acknowledgment-tracked results in tests.
-
- Handles both direct result dictionaries and command IDs that need polling.
-
- Args:
- result_or_id: Either a result dict with status info, or a command ID string
- timeout: Maximum time to wait for completion
-
- Returns:
- Dictionary with status information
- """
- if isinstance(result_or_id, dict):
- # Already a result dictionary
- return result_or_id
-
- # If it's a string, it might be a command ID - for now just return a placeholder
- # In a real implementation, this would poll the robot_api for status
- return {
- "status": "TIMEOUT",
- "details": "wait_for_completion not fully implemented for command IDs",
- "completed": True,
- "command_id": result_or_id,
- }
-
-
-def find_available_ports(start_port: int = 5001, count: int = 2) -> list[int]:
- """
- Find available UDP ports starting from the given port.
-
- Args:
- start_port: Port to start searching from
- count: Number of consecutive ports needed
-
- Returns:
- List of available port numbers
- """
- available_ports: list[int] = []
- current_port = start_port
-
- while len(available_ports) < count:
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.bind(("127.0.0.1", current_port))
- available_ports.append(current_port)
- except OSError:
- # Port in use, reset search if we were building a consecutive sequence
- available_ports.clear()
-
- current_port += 1
-
- # Prevent infinite loop
- if current_port > start_port + 1000:
- break
-
- return available_ports
-
-
-def check_port_available(port: int, host: str = "127.0.0.1") -> bool:
- """
- Check if a UDP port is available.
-
- Args:
- port: Port number to check
- host: Host address to check
-
- Returns:
- True if port is available, False otherwise
- """
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.bind((host, port))
- return True
- except OSError:
- return False
diff --git a/tests/utils/udp.py b/tests/utils/udp.py
deleted file mode 100644
index a408a8b..0000000
--- a/tests/utils/udp.py
+++ /dev/null
@@ -1,379 +0,0 @@
-"""
-UDP utilities for testing.
-
-Provides helper functions for UDP communication during tests,
-including acknowledgment listening and command sending utilities.
-"""
-
-import logging
-import queue
-import socket
-import threading
-import time
-from typing import Any
-
-logger = logging.getLogger(__name__)
-
-
-class UDPClient:
- """
- Simple UDP client for sending commands to the robot server during tests.
- """
-
- def __init__(self, server_ip: str = "127.0.0.1", server_port: int = 5001):
- """
- Initialize UDP client.
-
- Args:
- server_ip: IP address of the robot server
- server_port: Port of the robot server
- """
- self.server_ip = server_ip
- self.server_port = server_port
-
- def send_command(self, command: str, timeout: float = 2.0) -> str | None:
- """
- Send a command and wait for immediate response (for GET commands).
-
- Args:
- command: Command string to send
- timeout: Timeout for waiting for response
-
- Returns:
- Response string if received, None otherwise
- """
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.settimeout(timeout)
-
- # Send command
- sock.sendto(command.encode("utf-8"), (self.server_ip, self.server_port))
- logger.debug(f"Sent command: {command}")
-
- # Wait for response (for GET commands)
- try:
- data, _ = sock.recvfrom(2048)
- response = data.decode("utf-8")
- logger.debug(f"Received response: {response}")
- return response
- except TimeoutError:
- logger.debug(f"No response received for command: {command}")
- return None
-
- except Exception as e:
- logger.error(f"Error sending command '{command}': {e}")
- return None
-
- def send_command_no_response(self, command: str) -> bool:
- """
- Send a command without waiting for response (for motion commands).
-
- Args:
- command: Command string to send
-
- Returns:
- True if sent successfully, False otherwise
- """
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
- sock.sendto(command.encode("utf-8"), (self.server_ip, self.server_port))
- logger.debug(f"Sent command (no response): {command}")
- return True
- except Exception as e:
- logger.error(f"Error sending command '{command}': {e}")
- return False
-
-
-class AckListener:
- """
- Listens for UDP acknowledgment messages during tests.
-
- Provides functionality to capture and wait for specific acknowledgments
- from the robot controller during command execution.
- """
-
- def __init__(self, listen_port: int = 5002):
- """
- Initialize acknowledgment listener.
-
- Args:
- listen_port: Port to listen on for acknowledgments
- """
- self.listen_port = listen_port
- self.socket: socket.socket | None = None
- self.thread: threading.Thread | None = None
- self.running = False
-
- # Storage for received acknowledgments
- self.ack_queue: queue.Queue[dict[str, Any]] = queue.Queue()
- self.ack_history: list[dict[str, Any]] = []
-
- def start(self) -> bool:
- """
- Start listening for acknowledgments.
-
- Returns:
- True if started successfully, False otherwise
- """
- if self.running:
- logger.warning("AckListener already running")
- return True
-
- try:
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- self.socket.bind(("127.0.0.1", self.listen_port))
- self.socket.settimeout(0.1) # Short timeout for non-blocking operation
-
- self.running = True
- self.thread = threading.Thread(target=self._listen_loop, daemon=True)
- self.thread.start()
-
- logger.debug(f"AckListener started on port {self.listen_port}")
- return True
-
- except Exception as e:
- logger.error(f"Failed to start AckListener: {e}")
- self.stop()
- return False
-
- def stop(self):
- """Stop listening for acknowledgments."""
- self.running = False
-
- if self.thread:
- self.thread.join(timeout=1.0)
- self.thread = None
-
- if self.socket:
- self.socket.close()
- self.socket = None
-
- logger.debug("AckListener stopped")
-
- def _listen_loop(self):
- """Main listening loop (runs in separate thread)."""
- while self.running and self.socket:
- try:
- data, addr = self.socket.recvfrom(1024)
- message = data.decode("utf-8")
-
- # Parse acknowledgment message: ACK|cmd_id|status|details
- parts = message.split("|", 3)
- if parts[0] == "ACK" and len(parts) >= 3:
- ack_info = {
- "cmd_id": parts[1],
- "status": parts[2],
- "details": parts[3] if len(parts) > 3 else "",
- "timestamp": time.time(),
- "sender": addr,
- }
-
- # Add to both queue and history
- self.ack_queue.put(ack_info)
- self.ack_history.append(ack_info)
-
- logger.debug(f"Received ACK: {ack_info}")
-
- except TimeoutError:
- continue # Normal timeout, keep listening
- except Exception as e:
- if self.running: # Only log if we should still be running
- logger.debug(f"Listen loop error: {e}")
-
- def wait_for_ack(
- self, cmd_id: str, timeout: float = 5.0, expected_status: str | None = None
- ) -> dict[str, Any] | None:
- """
- Wait for a specific acknowledgment.
-
- Args:
- cmd_id: Command ID to wait for
- timeout: Maximum time to wait
- expected_status: Specific status to wait for (optional)
-
- Returns:
- Acknowledgment info dict if received, None if timeout
- """
- start_time = time.time()
-
- while time.time() - start_time < timeout:
- try:
- # Check queue with short timeout
- ack_info = self.ack_queue.get(timeout=0.1)
-
- if ack_info["cmd_id"] == cmd_id:
- if expected_status is None or ack_info["status"] == expected_status:
- return ack_info
- else:
- # Put back in queue if status doesn't match
- self.ack_queue.put(ack_info)
- else:
- # Put back in queue if cmd_id doesn't match
- self.ack_queue.put(ack_info)
-
- except queue.Empty:
- continue
-
- logger.debug(
- f"Timeout waiting for ACK: cmd_id={cmd_id}, expected_status={expected_status}"
- )
- return None
-
- def get_all_acks_for_command(self, cmd_id: str) -> list[dict[str, Any]]:
- """
- Get all acknowledgments received for a specific command ID.
-
- Args:
- cmd_id: Command ID to search for
-
- Returns:
- List of acknowledgment info dicts
- """
- return [ack for ack in self.ack_history if ack["cmd_id"] == cmd_id]
-
- def get_recent_acks(self, count: int = 10) -> list[dict[str, Any]]:
- """
- Get the most recent acknowledgments.
-
- Args:
- count: Number of recent acknowledgments to return
-
- Returns:
- List of recent acknowledgment info dicts
- """
- return self.ack_history[-count:] if self.ack_history else []
-
- def clear_history(self):
- """Clear acknowledgment history and queue."""
- self.ack_history.clear()
- while not self.ack_queue.empty():
- try:
- self.ack_queue.get_nowait()
- except queue.Empty:
- break
-
-
-def send_command_with_ack(
- command: str,
- server_ip: str = "127.0.0.1",
- server_port: int = 5001,
- ack_port: int = 5002,
- timeout: float = 5.0,
-) -> tuple[str | None, dict[str, Any] | None]:
- """
- Send a command with acknowledgment tracking.
-
- This is a convenience function that sets up an acknowledgment listener,
- sends a command, and waits for the acknowledgment.
-
- Args:
- command: Command to send
- server_ip: Server IP address
- server_port: Server command port
- ack_port: Acknowledgment port
- timeout: Timeout for acknowledgment
-
- Returns:
- Tuple of (immediate_response, final_ack_info)
- """
- # Set up acknowledgment listener
- ack_listener = AckListener(ack_port)
- if not ack_listener.start():
- return None, None
-
- try:
- # Send command
- client = UDPClient(server_ip, server_port)
- response = client.send_command(command, timeout=1.0)
-
- # For tracked commands, the response might be empty and we need to wait for ACK
- # For immediate commands (GET_*), we get response right away
- if response and not response.startswith("ACK"):
- # Got immediate response, no ACK expected
- return response, None
-
- # Wait for acknowledgment (extract command ID if present)
- # This is a simplified version - in practice, you'd extract the actual command ID
- # from the command string if it contains one
- parts = command.split("|", 1)
- if (
- len(parts) > 1
- and len(parts[0]) == 8
- and parts[0].replace("-", "").isalnum()
- ):
- cmd_id = parts[0]
- ack_info = ack_listener.wait_for_ack(cmd_id, timeout)
- return response, ack_info
- else:
- # No command ID in command, can't track ACK
- return response, None
-
- finally:
- ack_listener.stop()
-
-
-def create_tracked_command(base_command: str, cmd_id: str | None = None) -> str:
- """
- Create a command with tracking ID.
-
- Args:
- base_command: Base command string
- cmd_id: Command ID to use (generates one if None)
-
- Returns:
- Command string with tracking ID prepended
- """
- import uuid
-
- if cmd_id is None:
- cmd_id = str(uuid.uuid4())[:8]
-
- return f"{cmd_id}|{base_command}"
-
-
-def parse_server_response(response: str) -> dict[str, Any]:
- """
- Parse a server response into a structured format.
-
- Args:
- response: Raw response string from server
-
- Returns:
- Dictionary with parsed response data
- """
- if not response:
- return {"type": "empty", "data": None}
-
- parts = response.split("|", 1)
- response_type = parts[0]
-
- parsed: dict[str, Any] = {"type": response_type, "raw": response}
-
- if len(parts) > 1:
- data = parts[1]
-
- if response_type in ["POSE", "ANGLES", "SPEEDS"]:
- # Numeric array data
- try:
- parsed["data"] = [float(x) for x in data.split(",")]
- except ValueError:
- parsed["data"] = data
- elif response_type in ["IO", "GRIPPER"]:
- # Integer array data
- try:
- parsed["data"] = [int(x) for x in data.split(",")]
- except ValueError:
- parsed["data"] = data
- elif response_type == "STATUS":
- # Complex status data
- parsed["data"] = {}
- for item in data.split("|"):
- if "=" in item:
- key, value = item.split("=", 1)
- parsed["data"][key] = value
- else:
- parsed["data"] = data
- else:
- parsed["data"] = None
-
- return parsed