From 14364b15902d581931c4a99a72a49541cfde1e58 Mon Sep 17 00:00:00 2001 From: MaximEdogawa Date: Tue, 14 Apr 2026 20:49:14 +0200 Subject: [PATCH 1/8] refactor: streamline tool image management and enhance catalog structure - Removed the `ghcr-verify.yml` workflow and associated Dockerfile as part of the cleanup. - Updated `detect-matrix.sh` to compare tool blobs instead of versions for more accurate change detection. - Enhanced `read-tool-manifest.sh` to support both npm and PyPI package definitions, ensuring tools can specify dependencies correctly. - Introduced new Dockerfiles for various tools, allowing for better integration with upstream packages from npm and PyPI. - Updated `mcp-tools.json` and `tools.json` to reflect new tools and their configurations, including version bumps and additional metadata. - Added a script to build local images for tools, improving the development workflow. --- .github/ghcr-verify-context/Dockerfile | 3 - .../scripts/tools-publish/detect-matrix.sh | 8 +- .../tools-publish/read-tool-manifest.sh | 38 +++- .../scripts/tools-publish/smoke-test-mcp.sh | 11 +- .github/workflows/ghcr-verify.yml | 67 ------ .github/workflows/tools-publish.yml | 9 +- src-tauri/src/modules/tool_engine/service.rs | 48 ++++- src-tauri/src/modules/tool_engine/tools.json | 200 +++++++++++++++++- src-tauri/src/modules/tool_engine/types.rs | 14 ++ src/shared/ui/SpecMockup.tsx | 2 +- tools/build-local-images.sh | 54 +++++ tools/fetch/Dockerfile | 11 + tools/git/Dockerfile | 15 ++ tools/mcp-tools.json | 200 +++++++++++++++++- tools/memory/Dockerfile | 13 ++ tools/sequential-thinking/Dockerfile | 13 ++ tools/time/Dockerfile | 11 + tools/update-upstream.sh | 137 +++++++----- 18 files changed, 703 insertions(+), 151 deletions(-) delete mode 100644 .github/ghcr-verify-context/Dockerfile delete mode 100644 .github/workflows/ghcr-verify.yml create mode 100755 tools/build-local-images.sh create mode 100644 tools/fetch/Dockerfile create mode 100644 tools/git/Dockerfile 
create mode 100644 tools/memory/Dockerfile create mode 100644 tools/sequential-thinking/Dockerfile create mode 100644 tools/time/Dockerfile diff --git a/.github/ghcr-verify-context/Dockerfile b/.github/ghcr-verify-context/Dockerfile deleted file mode 100644 index 0eef1a6..0000000 --- a/.github/ghcr-verify-context/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -# Minimal image to verify Actions can push to ghcr.io/pengine-ai/... -FROM scratch -COPY README.md /README.md diff --git a/.github/scripts/tools-publish/detect-matrix.sh b/.github/scripts/tools-publish/detect-matrix.sh index c187956..7e4f932 100755 --- a/.github/scripts/tools-publish/detect-matrix.sh +++ b/.github/scripts/tools-publish/detect-matrix.sh @@ -27,10 +27,10 @@ else continue fi if echo "$changed" | grep -q "^tools/mcp-tools.json$"; then - old_ver=$(git show HEAD~1:tools/mcp-tools.json 2>/dev/null \ - | jq -r --arg s "$s" '.tools[] | select(.id | endswith("/" + $s)) | .current // ""' 2>/dev/null || echo "") - new_ver=$(jq -r --arg s "$s" '.tools[] | select(.id | endswith("/" + $s)) | .current' "$REGISTRY") - if [[ -n "$old_ver" && "$old_ver" != "$new_ver" ]]; then + old_blob=$(git show HEAD~1:tools/mcp-tools.json 2>/dev/null \ + | jq -c --arg s "$s" '.tools[]? | select(.id | endswith("/" + $s))' 2>/dev/null || echo "") + new_blob=$(jq -c --arg s "$s" '.tools[] | select(.id | endswith("/" + $s))' "$REGISTRY") + if [[ "$old_blob" != "$new_blob" ]]; then slugs="$slugs $s" fi fi diff --git a/.github/scripts/tools-publish/read-tool-manifest.sh b/.github/scripts/tools-publish/read-tool-manifest.sh index 654c1ff..5edf506 100755 --- a/.github/scripts/tools-publish/read-tool-manifest.sh +++ b/.github/scripts/tools-publish/read-tool-manifest.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Writes image, version, npm_pkg, npm_ver to GITHUB_OUTPUT for one tool slug. +# Writes image, version, and multiline build_args to GITHUB_OUTPUT for one tool slug. 
+# Each tool must define either upstream_mcp_npm or upstream_mcp_pypi (not both). # Usage: TOOL_SLUG=file-manager (env) or first argument. set -euo pipefail @@ -15,11 +16,34 @@ VERSION=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | echo "image=$IMAGE" >> "$GITHUB_OUTPUT" echo "version=$VERSION" >> "$GITHUB_OUTPUT" -PKG=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | .upstream_mcp_npm.package // ""' "$REGISTRY") -NPM_VER=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | .upstream_mcp_npm.version // ""' "$REGISTRY") -if [[ -z "$PKG" || -z "$NPM_VER" ]]; then - echo "::error::${REGISTRY}: tool '${SUFFIX}' must define non-empty upstream_mcp_npm.package and upstream_mcp_npm.version" >&2 +npm_pkg=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | .upstream_mcp_npm.package // ""' "$REGISTRY") +npm_ver=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | .upstream_mcp_npm.version // ""' "$REGISTRY") +pypi_pkg=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | .upstream_mcp_pypi.package // ""' "$REGISTRY") +pypi_ver=$(jq -r --arg s "$SUFFIX" '.tools[] | select(.id | endswith("/" + $s)) | .upstream_mcp_pypi.version // ""' "$REGISTRY") + +has_npm=0 +[[ -n "$npm_pkg" && -n "$npm_ver" ]] && has_npm=1 +has_pypi=0 +[[ -n "$pypi_pkg" && -n "$pypi_ver" ]] && has_pypi=1 + +if [[ "$has_npm" -eq 1 && "$has_pypi" -eq 1 ]]; then + echo "::error::${REGISTRY}: tool '${SUFFIX}' must not set both upstream_mcp_npm and upstream_mcp_pypi" >&2 exit 1 fi -echo "npm_pkg=$PKG" >> "$GITHUB_OUTPUT" -echo "npm_ver=$NPM_VER" >> "$GITHUB_OUTPUT" +if [[ "$has_npm" -eq 0 && "$has_pypi" -eq 0 ]]; then + echo "::error::${REGISTRY}: tool '${SUFFIX}' must define upstream_mcp_npm or upstream_mcp_pypi" >&2 + exit 1 +fi + +{ + echo 'build_args<<BUILD_ARGS_EOF' + if [[ "$has_npm" -eq 1 ]]; then + echo "UPSTREAM_MCP_NPM_PACKAGE=${npm_pkg}" + echo "UPSTREAM_MCP_NPM_VERSION=${npm_ver}" + else + echo "UPSTREAM_MCP_PYPI_PACKAGE=${pypi_pkg}" + echo "UPSTREAM_MCP_PYPI_VERSION=${pypi_ver}" + fi + echo 'BUILD_ARGS_EOF' +} >> "$GITHUB_OUTPUT" diff --git a/.github/scripts/tools-publish/smoke-test-mcp.sh b/.github/scripts/tools-publish/smoke-test-mcp.sh
index 9286c74..dc70ad0 100755 --- a/.github/scripts/tools-publish/smoke-test-mcp.sh +++ b/.github/scripts/tools-publish/smoke-test-mcp.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Pull image by digest and run one MCP initialize JSON-RPC round-trip. -# Env: IMAGE_WITH_DIGEST (e.g. ghcr.io/org/img@sha256:...). +# Env: IMAGE_WITH_DIGEST (e.g. ghcr.io/org/img@sha256:...). Optional TOOL_SLUG for argv quirks. set -euo pipefail if [[ -z "${IMAGE_WITH_DIGEST:-}" ]]; then @@ -9,8 +9,15 @@ if [[ -z "${IMAGE_WITH_DIGEST:-}" ]]; then fi docker pull "$IMAGE_WITH_DIGEST" + +# Filesystem MCP expects at least one allowed root on argv; others ignore extra args. +extra=() +if [[ "${TOOL_SLUG:-}" == "file-manager" ]]; then + extra=(/tmp) +fi + RESP=$(echo '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"smoke","version":"0.0.1"}}}' \ - | timeout 15 docker run --rm -i --network=none "$IMAGE_WITH_DIGEST" /tmp \ + | timeout 15 docker run --rm -i --network=none "$IMAGE_WITH_DIGEST" "${extra[@]}" \ | head -1) echo "$RESP" | jq -e '.result.serverInfo' > /dev/null \ || { echo "::error::MCP init failed: $RESP"; exit 1; } diff --git a/.github/workflows/ghcr-verify.yml b/.github/workflows/ghcr-verify.yml deleted file mode 100644 index 0d87bc4..0000000 --- a/.github/workflows/ghcr-verify.yml +++ /dev/null @@ -1,67 +0,0 @@ -name: Verify GHCR login - -# Manual-only: push a tiny image to ghcr.io/... to debug GITHUB_TOKEN + GHCR. - -on: - workflow_dispatch: - inputs: - image: - description: 'Full image name without tag (e.g. ghcr.io/pengine-ai/pengine-file-manager)' - required: false - default: 'ghcr.io/pengine-ai/pengine-ghcr-verify' - -permissions: - contents: read - packages: write - -jobs: - verify: - runs-on: ubuntu-latest - env: - # workflow_dispatch input can be cleared in UI; fall back to default. 
- VERIFY_IMAGE: ${{ github.event.inputs.image || 'ghcr.io/pengine-ai/pengine-ghcr-verify' }} - steps: - - uses: actions/checkout@v4 - - - uses: docker/setup-buildx-action@v3 - - - name: Log in to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Push probe image - id: build - uses: docker/build-push-action@v6 - with: - context: .github/ghcr-verify-context - file: .github/ghcr-verify-context/Dockerfile - platforms: linux/amd64 - push: true - provenance: false - sbom: false - tags: ${{ env.VERIFY_IMAGE }}:verify-${{ github.run_id }} - - - name: Pull probe (read check) - env: - REF: ${{ env.VERIFY_IMAGE }}@${{ steps.build.outputs.digest }} - run: | - set -euo pipefail - docker pull "$REF" - - - name: Summary - env: - REF: ${{ env.VERIFY_IMAGE }}@${{ steps.build.outputs.digest }} - TAG: ${{ env.VERIFY_IMAGE }}:verify-${{ github.run_id }} - run: | - { - echo '### GHCR verify' - echo "Push succeeded for **\`$TAG\`** (digest below)." - echo "Read check: \`docker pull\` by digest succeeded." - echo "" - echo "**Digest:** \`$REF\`" - echo "" - echo 'You can delete the probe tag in **Packages** when finished.' 
- } >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/tools-publish.yml b/.github/workflows/tools-publish.yml index e23a503..d315aeb 100644 --- a/.github/workflows/tools-publish.yml +++ b/.github/workflows/tools-publish.yml @@ -81,9 +81,7 @@ jobs: push: true provenance: false sbom: false - build-args: | - UPSTREAM_MCP_NPM_PACKAGE=${{ steps.cfg.outputs.npm_pkg }} - UPSTREAM_MCP_NPM_VERSION=${{ steps.cfg.outputs.npm_ver }} + build-args: ${{ steps.cfg.outputs.build_args }} tags: ${{ steps.cfg.outputs.image }}:${{ steps.cfg.outputs.version }}-ci-arm64-${{ github.run_id }} - name: Build and push (linux/amd64) @@ -95,9 +93,7 @@ jobs: push: true provenance: false sbom: false - build-args: | - UPSTREAM_MCP_NPM_PACKAGE=${{ steps.cfg.outputs.npm_pkg }} - UPSTREAM_MCP_NPM_VERSION=${{ steps.cfg.outputs.npm_ver }} + build-args: ${{ steps.cfg.outputs.build_args }} tags: ${{ steps.cfg.outputs.image }}:${{ steps.cfg.outputs.version }}-ci-amd64-${{ github.run_id }} - name: Merge multi-arch manifest @@ -117,6 +113,7 @@ jobs: - name: Smoke test (MCP init handshake) env: IMAGE_WITH_DIGEST: ${{ steps.cfg.outputs.image }}@${{ steps.merge.outputs.digest }} + TOOL_SLUG: ${{ matrix.slug }} run: bash .github/scripts/tools-publish/smoke-test-mcp.sh - name: Summary diff --git a/src-tauri/src/modules/tool_engine/service.rs b/src-tauri/src/modules/tool_engine/service.rs index 009f8e8..4ffbe7a 100644 --- a/src-tauri/src/modules/tool_engine/service.rs +++ b/src-tauri/src/modules/tool_engine/service.rs @@ -3,7 +3,7 @@ use super::types::{ToolCatalog, ToolEntry, VersionEntry}; use crate::modules::mcp::service as mcp_service; use crate::modules::mcp::types::{CustomToolEntry, McpConfig, ServerEntry}; use std::collections::{HashMap, HashSet}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process::Stdio; use tokio::io::{AsyncBufReadExt, BufReader}; @@ -41,9 +41,46 @@ pub fn load_embedded_catalog() -> Result { serde_json::from_str(EMBEDDED_CATALOG).map_err(|e| format!("parse 
embedded tools.json: {e}")) } -/// Fetch the remote catalog from GitHub, falling back to the embedded catalog -/// on network errors, timeouts, or parse failures. +/// Try repo `tools/mcp-tools.json` before the remote catalog (used by `bun run tauri dev` and +/// any run where the file exists next to the workspace). Release builds from CI point at paths +/// that do not exist on end-user machines, so this safely no-ops there. +fn try_load_local_tools_catalog() -> Option<ToolCatalog> { + let mut paths: Vec<PathBuf> = Vec::new(); + paths.push(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../tools/mcp-tools.json")); + if let Ok(mut cwd) = std::env::current_dir() { + for _ in 0..8 { + paths.push(cwd.join("tools/mcp-tools.json")); + if !cwd.pop() { + break; + } + } + } + for p in paths { + if let Ok(json) = std::fs::read_to_string(&p) { + if let Some(cat) = parse_catalog(&json) { + log::info!("loaded tool catalog from {}", p.display()); + return Some(cat); + } + log::warn!( + "found {} but it did not parse as catalog schema v1", + p.display() + ); + } + } + None +} + +/// Resolve the tool catalog: prefer repo `tools/mcp-tools.json` when present, then remote, +/// then embedded fallback.
pub async fn load_catalog() -> Result { + if let Some(cat) = try_load_local_tools_catalog() { + log::info!( + "using local tools/mcp-tools.json (revision {}); remote fetch skipped", + cat.catalog_revision + ); + return Ok(cat); + } + match fetch_remote_catalog().await { Ok(cat) => { log::info!("using remote catalog (revision {})", cat.catalog_revision); @@ -139,11 +176,14 @@ pub fn podman_run_argv_for_tool( "run".into(), "--rm".into(), "-i".into(), - "--network=none".into(), format!("--cpus={}", entry.limits.cpus), format!("--memory={}", entry.limits.memory), ]; + if entry.network_isolated { + args.insert(3, "--network=none".into()); + } + if entry.container_read_only_rootfs { args.push("--read-only".into()); } diff --git a/src-tauri/src/modules/tool_engine/tools.json b/src-tauri/src/modules/tool_engine/tools.json index d6ee129..cb038bf 100644 --- a/src-tauri/src/modules/tool_engine/tools.json +++ b/src-tauri/src/modules/tool_engine/tools.json @@ -1,8 +1,8 @@ { "schema_version": 1, - "generated_at": "2026-04-13T00:00:00Z", - "catalog_revision": 2, - "valid_until": "2026-05-13T00:00:00Z", + "generated_at": "2026-04-14T00:00:00Z", + "catalog_revision": 3, + "valid_until": "2026-05-14T00:00:00Z", "minimum_pengine_version": "0.5.0", "tools": [ { @@ -51,6 +51,200 @@ "memory": "256m", "timeout_secs": 30 } + }, + { + "id": "pengine/fetch", + "name": "Fetch", + "description": "Official MCP fetch server: download URLs and return markdown-friendly content for the model.", + "image": "ghcr.io/pengine-ai/pengine-fetch", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": true, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": true, + "network_isolated": false, + "upstream_mcp_pypi": { + "package": "mcp-server-fetch", + "version": 
"2025.4.7" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "fetch", "description": "Fetch a URL and return extracted content (markdown by default)" } + ], + "limits": { + "cpus": "0.5", + "memory": "512m", + "timeout_secs": 60 + } + }, + { + "id": "pengine/git", + "name": "Git", + "description": "Official MCP git server: status, diff, log, branches, and commits against repos under your mounted folders (use paths like /app/).", + "image": "ghcr.io/pengine-ai/pengine-git", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": false, + "mount_read_only": false, + "mount_workspace": true, + "append_workspace_roots": false, + "direct_return": true, + "upstream_mcp_pypi": { + "package": "mcp-server-git", + "version": "2026.1.14" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "git_status", "description": "Working tree status for a repository" }, + { "name": "git_diff_unstaged", "description": "Diff unstaged changes" }, + { "name": "git_diff_staged", "description": "Diff staged changes" }, + { "name": "git_diff", "description": "Diff against a branch or commit" }, + { "name": "git_commit", "description": "Create a commit" }, + { "name": "git_add", "description": "Stage files" }, + { "name": "git_reset", "description": "Unstage all staged changes" }, + { "name": "git_log", "description": "Commit history with optional date filters" }, + { "name": "git_branch", "description": "List branches" }, + { "name": "git_checkout", "description": "Switch branches" } + ], + "limits": { + "cpus": "0.5", + "memory": "512m", + "timeout_secs": 60 + } + }, + { + "id": "pengine/sequential-thinking", + "name": "Sequential Thinking", + "description": "Official MCP sequential-thinking server for structured step-by-step reasoning.", + "image": "ghcr.io/pengine-ai/pengine-sequential-thinking", + 
"current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": true, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": false, + "upstream_mcp_npm": { + "package": "@modelcontextprotocol/server-sequential-thinking", + "version": "2025.12.18" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "sequentialthinking", "description": "Record and refine a sequence of reasoning steps" } + ], + "limits": { + "cpus": "0.25", + "memory": "256m", + "timeout_secs": 30 + } + }, + { + "id": "pengine/time", + "name": "Time", + "description": "Official MCP time server: current time and timezone conversions.", + "image": "ghcr.io/pengine-ai/pengine-time", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": true, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": true, + "upstream_mcp_pypi": { + "package": "mcp-server-time", + "version": "2026.1.26" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "get_current_time", "description": "Current time in a timezone" }, + { "name": "convert_time", "description": "Convert a time between timezones" } + ], + "limits": { + "cpus": "0.25", + "memory": "256m", + "timeout_secs": 30 + } + }, + { + "id": "pengine/memory", + "name": "Memory", + "description": "Official MCP memory server: knowledge-graph style entities, relations, and observations (in-container persistence).", + "image": "ghcr.io/pengine-ai/pengine-memory", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + 
"yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": false, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": false, + "upstream_mcp_npm": { + "package": "@modelcontextprotocol/server-memory", + "version": "2026.1.26" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "create_entities", "description": "Create entities in the knowledge graph" }, + { "name": "create_relations", "description": "Create relations between entities" }, + { "name": "add_observations", "description": "Add observations to entities" }, + { "name": "read_graph", "description": "Read the knowledge graph" }, + { "name": "search_nodes", "description": "Search nodes in the graph" }, + { "name": "open_nodes", "description": "Open nodes by name" }, + { "name": "delete_entities", "description": "Delete entities" }, + { "name": "delete_observations", "description": "Delete observations" }, + { "name": "delete_relations", "description": "Delete relations" } + ], + "limits": { + "cpus": "0.25", + "memory": "256m", + "timeout_secs": 30 + } } ] } diff --git a/src-tauri/src/modules/tool_engine/types.rs b/src-tauri/src/modules/tool_engine/types.rs index e033f75..5685b6c 100644 --- a/src-tauri/src/modules/tool_engine/types.rs +++ b/src-tauri/src/modules/tool_engine/types.rs @@ -43,6 +43,13 @@ pub struct UpstreamMcpNpm { pub version: String, } +/// PyPI package pinned inside a container image (Python MCP servers). +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UpstreamMcpPypi { + pub package: String, + pub version: String, +} + /// One entry in the tool catalog (`tools.json`). #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ToolEntry { @@ -84,6 +91,13 @@ pub struct ToolEntry { /// When set, image build (`tools-publish.yml`) installs this npm package at this version. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub upstream_mcp_npm: Option<UpstreamMcpNpm>, + /// When set, image build installs this PyPI package at this version (Python MCP servers). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub upstream_mcp_pypi: Option<UpstreamMcpPypi>, + /// When true (default), run the tool container with `--network=none`. Set false for servers + /// that need outbound network (e.g. web fetch). + #[serde(default = "default_true")] + pub network_isolated: bool, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/src/shared/ui/SpecMockup.tsx b/src/shared/ui/SpecMockup.tsx index 6bd4abb..cc88e7e 100644 --- a/src/shared/ui/SpecMockup.tsx +++ b/src/shared/ui/SpecMockup.tsx @@ -1,6 +1,6 @@ const items = [ ["Interface", "Telegram chat on your phone"], - ["Runtime", "WASM now, Tauri desktop shell next"], + ["Runtime", "Rust + WASM"], ["Models", "Ollama local inference by default"], ["Tools", "Docker containers become agent abilities"], ["Loop", "Plan -> execute -> reflect"], diff --git a/tools/build-local-images.sh b/tools/build-local-images.sh new file mode 100755 index 0000000..44133a5 --- /dev/null +++ b/tools/build-local-images.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# Build every tool image from tools/<slug>/Dockerfile and tag as in tools/mcp-tools.json +# (image:current) so the dashboard / podman run resolve them without GHCR. +# +# Uses **podman** when available (or set PENGINE_CONTAINER_RUNTIME explicitly, e.g. `docker`). +# +# Usage (repo root): +# ./tools/build-local-images.sh +set -euo pipefail + +ROOT="$(cd "$(dirname "$0")/.."
&& pwd)" +REG="${ROOT}/tools/mcp-tools.json" + +if [[ -n "${PENGINE_CONTAINER_RUNTIME:-}" ]]; then + RUNTIME="${PENGINE_CONTAINER_RUNTIME}" +elif command -v podman &>/dev/null; then + RUNTIME=podman +elif command -v docker &>/dev/null; then + RUNTIME=docker +else + echo "error: install podman or docker, or set PENGINE_CONTAINER_RUNTIME" >&2 + exit 1 +fi +echo "Using container runtime: ${RUNTIME}" + +# Prefer host CPU arch so base images match (avoids linux/amd64 on Apple Silicon, etc.). +BUILD_PLATFORM="${PENGINE_CONTAINER_PLATFORM:-}" +if [[ -z "$BUILD_PLATFORM" ]]; then + case "$(uname -m)" in + arm64|aarch64) BUILD_PLATFORM=linux/arm64 ;; + x86_64|amd64) BUILD_PLATFORM=linux/amd64 ;; + esac +fi +PLATFORM_ARGS=() +if [[ -n "${BUILD_PLATFORM:-}" ]]; then + PLATFORM_ARGS=(--platform "${BUILD_PLATFORM}") + echo "Using --platform ${BUILD_PLATFORM} (set PENGINE_CONTAINER_PLATFORM= to disable)" +fi + +# Space-separated (avoid @tsv + split quirks in some jq versions). +while read -r slug image current; do + [[ -z "$slug" ]] && continue + ctx="${ROOT}/tools/${slug}" + df="${ctx}/Dockerfile" + if [[ ! -f "$df" ]]; then + echo "skip $slug: no $df" >&2 + continue + fi + tag="${image}:${current}" + echo "=== build $slug -> $tag ===" + "${RUNTIME}" build "${PLATFORM_ARGS[@]}" -f "$df" -t "$tag" "$ctx" +done < <(jq -r '.tools[] | "\(.id | split("/")[1]) \(.image) \(.current)"' "$REG") + +echo "Done. Images tagged as :." diff --git a/tools/fetch/Dockerfile b/tools/fetch/Dockerfile new file mode 100644 index 0000000..b9800a1 --- /dev/null +++ b/tools/fetch/Dockerfile @@ -0,0 +1,11 @@ +# Upstream PyPI package + version: CI passes --build-arg from tools/mcp-tools.json (`upstream_mcp_pypi`). 
+FROM python:3.12-slim-bookworm +ARG UPSTREAM_MCP_PYPI_PACKAGE=mcp-server-fetch +ARG UPSTREAM_MCP_PYPI_VERSION=2025.4.7 +ENV PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PYTHONUNBUFFERED=1 +RUN pip install --no-cache-dir "${UPSTREAM_MCP_PYPI_PACKAGE}==${UPSTREAM_MCP_PYPI_VERSION}" +RUN useradd -r -u 10001 -m -d /home/mcp mcp +USER mcp +ENTRYPOINT ["mcp-server-fetch"] diff --git a/tools/git/Dockerfile b/tools/git/Dockerfile new file mode 100644 index 0000000..4a9700a --- /dev/null +++ b/tools/git/Dockerfile @@ -0,0 +1,15 @@ +# Upstream PyPI package + version: CI passes --build-arg from tools/mcp-tools.json (`upstream_mcp_pypi`). +FROM python:3.12-slim-bookworm +RUN apt-get update \ + && apt-get install -y --no-install-recommends git git-lfs \ + && rm -rf /var/lib/apt/lists/* \ + && git lfs install --system +ARG UPSTREAM_MCP_PYPI_PACKAGE=mcp-server-git +ARG UPSTREAM_MCP_PYPI_VERSION=2026.1.14 +ENV PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PYTHONUNBUFFERED=1 +RUN pip install --no-cache-dir "${UPSTREAM_MCP_PYPI_PACKAGE}==${UPSTREAM_MCP_PYPI_VERSION}" +RUN useradd -r -u 10001 -m -d /home/mcp mcp +USER mcp +ENTRYPOINT ["mcp-server-git"] diff --git a/tools/mcp-tools.json b/tools/mcp-tools.json index d6ee129..cb038bf 100644 --- a/tools/mcp-tools.json +++ b/tools/mcp-tools.json @@ -1,8 +1,8 @@ { "schema_version": 1, - "generated_at": "2026-04-13T00:00:00Z", - "catalog_revision": 2, - "valid_until": "2026-05-13T00:00:00Z", + "generated_at": "2026-04-14T00:00:00Z", + "catalog_revision": 3, + "valid_until": "2026-05-14T00:00:00Z", "minimum_pengine_version": "0.5.0", "tools": [ { @@ -51,6 +51,200 @@ "memory": "256m", "timeout_secs": 30 } + }, + { + "id": "pengine/fetch", + "name": "Fetch", + "description": "Official MCP fetch server: download URLs and return markdown-friendly content for the model.", + "image": "ghcr.io/pengine-ai/pengine-fetch", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + 
"released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": true, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": true, + "network_isolated": false, + "upstream_mcp_pypi": { + "package": "mcp-server-fetch", + "version": "2025.4.7" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "fetch", "description": "Fetch a URL and return extracted content (markdown by default)" } + ], + "limits": { + "cpus": "0.5", + "memory": "512m", + "timeout_secs": 60 + } + }, + { + "id": "pengine/git", + "name": "Git", + "description": "Official MCP git server: status, diff, log, branches, and commits against repos under your mounted folders (use paths like /app/).", + "image": "ghcr.io/pengine-ai/pengine-git", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": false, + "mount_read_only": false, + "mount_workspace": true, + "append_workspace_roots": false, + "direct_return": true, + "upstream_mcp_pypi": { + "package": "mcp-server-git", + "version": "2026.1.14" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "git_status", "description": "Working tree status for a repository" }, + { "name": "git_diff_unstaged", "description": "Diff unstaged changes" }, + { "name": "git_diff_staged", "description": "Diff staged changes" }, + { "name": "git_diff", "description": "Diff against a branch or commit" }, + { "name": "git_commit", "description": "Create a commit" }, + { "name": "git_add", "description": "Stage files" }, + { "name": "git_reset", "description": "Unstage all staged changes" }, + { "name": "git_log", "description": "Commit history with optional date filters" }, + { "name": "git_branch", "description": "List branches" }, + { "name": "git_checkout", 
"description": "Switch branches" } + ], + "limits": { + "cpus": "0.5", + "memory": "512m", + "timeout_secs": 60 + } + }, + { + "id": "pengine/sequential-thinking", + "name": "Sequential Thinking", + "description": "Official MCP sequential-thinking server for structured step-by-step reasoning.", + "image": "ghcr.io/pengine-ai/pengine-sequential-thinking", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": true, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": false, + "upstream_mcp_npm": { + "package": "@modelcontextprotocol/server-sequential-thinking", + "version": "2025.12.18" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "sequentialthinking", "description": "Record and refine a sequence of reasoning steps" } + ], + "limits": { + "cpus": "0.25", + "memory": "256m", + "timeout_secs": 30 + } + }, + { + "id": "pengine/time", + "name": "Time", + "description": "Official MCP time server: current time and timezone conversions.", + "image": "ghcr.io/pengine-ai/pengine-time", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": true, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": true, + "upstream_mcp_pypi": { + "package": "mcp-server-time", + "version": "2026.1.26" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "get_current_time", "description": "Current time in a timezone" }, + { "name": "convert_time", "description": "Convert a time between timezones" } + ], + "limits": { + "cpus": "0.25", + "memory": "256m", + "timeout_secs": 30 + } + }, + { + "id": 
"pengine/memory", + "name": "Memory", + "description": "Official MCP memory server: knowledge-graph style entities, relations, and observations (in-container persistence).", + "image": "ghcr.io/pengine-ai/pengine-memory", + "current": "0.1.0", + "versions": [ + { + "version": "0.1.0", + "digest": "sha256:placeholder", + "released_at": "2026-04-14T00:00:00Z", + "yanked": false, + "revoked": false, + "security": false + } + ], + "container_read_only_rootfs": false, + "mount_read_only": true, + "mount_workspace": false, + "append_workspace_roots": false, + "direct_return": false, + "upstream_mcp_npm": { + "package": "@modelcontextprotocol/server-memory", + "version": "2026.1.26" + }, + "mcp_server_cmd": [], + "commands": [ + { "name": "create_entities", "description": "Create entities in the knowledge graph" }, + { "name": "create_relations", "description": "Create relations between entities" }, + { "name": "add_observations", "description": "Add observations to entities" }, + { "name": "read_graph", "description": "Read the knowledge graph" }, + { "name": "search_nodes", "description": "Search nodes in the graph" }, + { "name": "open_nodes", "description": "Open nodes by name" }, + { "name": "delete_entities", "description": "Delete entities" }, + { "name": "delete_observations", "description": "Delete observations" }, + { "name": "delete_relations", "description": "Delete relations" } + ], + "limits": { + "cpus": "0.25", + "memory": "256m", + "timeout_secs": 30 + } } ] } diff --git a/tools/memory/Dockerfile b/tools/memory/Dockerfile new file mode 100644 index 0000000..fbfe0c3 --- /dev/null +++ b/tools/memory/Dockerfile @@ -0,0 +1,13 @@ +# Upstream MCP npm package + version: CI passes --build-arg from tools/mcp-tools.json (`upstream_mcp_npm`). 
+FROM node:22-alpine +ARG UPSTREAM_MCP_NPM_PACKAGE=@modelcontextprotocol/server-memory +ARG UPSTREAM_MCP_NPM_VERSION=2026.1.26 +RUN addgroup -S mcp && adduser -S -G mcp -H mcp +WORKDIR /mcp +RUN npm install --omit=dev --prefix /mcp "${UPSTREAM_MCP_NPM_PACKAGE}@${UPSTREAM_MCP_NPM_VERSION}" \ + && npm cache clean --force \ + && rm -rf /root/.npm \ + && chown -R mcp:mcp /mcp +USER mcp +ENV NODE_ENV=production +ENTRYPOINT ["node", "/mcp/node_modules/@modelcontextprotocol/server-memory/dist/index.js"] diff --git a/tools/sequential-thinking/Dockerfile b/tools/sequential-thinking/Dockerfile new file mode 100644 index 0000000..7605097 --- /dev/null +++ b/tools/sequential-thinking/Dockerfile @@ -0,0 +1,13 @@ +# Upstream MCP npm package + version: CI passes --build-arg from tools/mcp-tools.json (`upstream_mcp_npm`). +FROM node:22-alpine +ARG UPSTREAM_MCP_NPM_PACKAGE=@modelcontextprotocol/server-sequential-thinking +ARG UPSTREAM_MCP_NPM_VERSION=2025.12.18 +RUN addgroup -S mcp && adduser -S -G mcp -H mcp +WORKDIR /mcp +RUN npm install --omit=dev --prefix /mcp "${UPSTREAM_MCP_NPM_PACKAGE}@${UPSTREAM_MCP_NPM_VERSION}" \ + && npm cache clean --force \ + && rm -rf /root/.npm \ + && chown -R mcp:mcp /mcp +USER mcp +ENV NODE_ENV=production +ENTRYPOINT ["node", "/mcp/node_modules/@modelcontextprotocol/server-sequential-thinking/dist/index.js"] diff --git a/tools/time/Dockerfile b/tools/time/Dockerfile new file mode 100644 index 0000000..967e566 --- /dev/null +++ b/tools/time/Dockerfile @@ -0,0 +1,11 @@ +# Upstream PyPI package + version: CI passes --build-arg from tools/mcp-tools.json (`upstream_mcp_pypi`). 
+FROM python:3.12-slim-bookworm +ARG UPSTREAM_MCP_PYPI_PACKAGE=mcp-server-time +ARG UPSTREAM_MCP_PYPI_VERSION=2026.1.26 +ENV PIP_NO_CACHE_DIR=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PYTHONUNBUFFERED=1 +RUN pip install --no-cache-dir "${UPSTREAM_MCP_PYPI_PACKAGE}==${UPSTREAM_MCP_PYPI_VERSION}" +RUN useradd -r -u 10001 -m -d /home/mcp mcp +USER mcp +ENTRYPOINT ["mcp-server-time"] diff --git a/tools/update-upstream.sh b/tools/update-upstream.sh index 272bfc8..d8cb2ab 100755 --- a/tools/update-upstream.sh +++ b/tools/update-upstream.sh @@ -1,16 +1,11 @@ #!/usr/bin/env bash -# update-upstream.sh — check npm registry for newer upstream MCP packages +# update-upstream.sh — check npm / PyPI for newer upstream MCP packages # and bump versions in mcp-tools.json (like `npm update` for tool images). # # Usage: # ./tools/update-upstream.sh # check all tools # ./tools/update-upstream.sh file-manager # check one tool # -# What it does: -# 1. For each tool with upstream_mcp_npm, query npm for the latest version -# 2. If newer, update mcp-tools.json (upstream version + tool patch bump) -# 3. Print a summary of what changed -# # After running, commit the changes and push — CI builds only the affected images. set -euo pipefail @@ -27,6 +22,11 @@ if ! command -v npm &>/dev/null; then exit 1 fi +if ! command -v curl &>/dev/null; then + echo "error: curl is required" >&2 + exit 1 +fi + if [[ ! -f "$TOOLS_FILE" ]]; then echo "error: $TOOLS_FILE not found" >&2 exit 1 @@ -37,74 +37,109 @@ CHANGED=0 tool_count=$(jq '.tools | length' "$TOOLS_FILE") +bump_tool() { + local idx="$1" + local kind="$2" + local new_upstream_ver="$3" + local current_tool + local new_tool_version + current_tool=$(jq -r ".tools[$idx].current" "$TOOLS_FILE") + IFS='.' 
read -r major minor patch <<< "$current_tool" + new_tool_version="${major}.${minor}.$((patch + 1))" + echo "[$slug] bumping tool version: $current_tool → $new_tool_version" + tmp=$(mktemp) + if [[ "$kind" == "npm" ]]; then + jq --arg idx "$idx" \ + --arg npm_ver "$new_upstream_ver" \ + --arg tool_ver "$new_tool_version" \ + --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" ' + .tools[($idx | tonumber)].upstream_mcp_npm.version = $npm_ver | + .tools[($idx | tonumber)].current = $tool_ver | + .tools[($idx | tonumber)].versions += [{ + version: $tool_ver, + digest: "sha256:placeholder", + released_at: $now, + yanked: false, + revoked: false, + security: false + }] + ' "$TOOLS_FILE" > "$tmp" && mv "$tmp" "$TOOLS_FILE" + else + jq --arg idx "$idx" \ + --arg pypi_ver "$new_upstream_ver" \ + --arg tool_ver "$new_tool_version" \ + --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" ' + .tools[($idx | tonumber)].upstream_mcp_pypi.version = $pypi_ver | + .tools[($idx | tonumber)].current = $tool_ver | + .tools[($idx | tonumber)].versions += [{ + version: $tool_ver, + digest: "sha256:placeholder", + released_at: $now, + yanked: false, + revoked: false, + security: false + }] + ' "$TOOLS_FILE" > "$tmp" && mv "$tmp" "$TOOLS_FILE" + fi + CHANGED=$((CHANGED + 1)) +} + for i in $(seq 0 $((tool_count - 1))); do tool_id=$(jq -r ".tools[$i].id" "$TOOLS_FILE") slug=$(echo "$tool_id" | cut -d/ -f2) - # Skip if user asked for a specific tool if [[ -n "$FILTER" && "$slug" != "$FILTER" ]]; then continue fi npm_pkg=$(jq -r ".tools[$i].upstream_mcp_npm.package // empty" "$TOOLS_FILE") - if [[ -z "$npm_pkg" ]]; then - echo "[$slug] no upstream_mcp_npm — skipped" + pypi_pkg=$(jq -r ".tools[$i].upstream_mcp_pypi.package // empty" "$TOOLS_FILE") + + if [[ -n "$npm_pkg" ]]; then + current_npm=$(jq -r ".tools[$i].upstream_mcp_npm.version" "$TOOLS_FILE") + echo -n "[$slug] npm $npm_pkg@$current_npm … " + latest_npm=$(npm view "$npm_pkg" version 2>/dev/null || echo "") + if [[ -z "$latest_npm" ]]; then + echo 
"failed to query npm registry" + continue + fi + if [[ "$latest_npm" == "$current_npm" ]]; then + echo "up to date ($current_npm)" + continue + fi + echo "new version: $current_npm → $latest_npm" + bump_tool "$i" "npm" "$latest_npm" continue fi - current_npm=$(jq -r ".tools[$i].upstream_mcp_npm.version" "$TOOLS_FILE") - current_tool=$(jq -r ".tools[$i].current" "$TOOLS_FILE") - - echo -n "[$slug] checking $npm_pkg@$current_npm … " - - latest_npm=$(npm view "$npm_pkg" version 2>/dev/null || echo "") - if [[ -z "$latest_npm" ]]; then - echo "failed to query npm registry" + if [[ -n "$pypi_pkg" ]]; then + current_pypi=$(jq -r ".tools[$i].upstream_mcp_pypi.version" "$TOOLS_FILE") + echo -n "[$slug] PyPI $pypi_pkg@$current_pypi … " + latest_pypi=$(curl -fsSL "https://pypi.org/pypi/${pypi_pkg}/json" | jq -r '.info.version' 2>/dev/null || echo "") + if [[ -z "$latest_pypi" || "$latest_pypi" == "null" ]]; then + echo "failed to query PyPI" + continue + fi + if [[ "$latest_pypi" == "$current_pypi" ]]; then + echo "up to date ($current_pypi)" + continue + fi + echo "new version: $current_pypi → $latest_pypi" + bump_tool "$i" "pypi" "$latest_pypi" continue fi - if [[ "$latest_npm" == "$current_npm" ]]; then - echo "up to date ($current_npm)" - continue - fi - - echo "new version available: $current_npm → $latest_npm" - - # Bump the tool's patch version (0.1.0 → 0.1.1, 0.2.3 → 0.2.4) - IFS='.' 
read -r major minor patch <<< "$current_tool" - new_tool_version="${major}.${minor}.$((patch + 1))" - - echo "[$slug] bumping tool version: $current_tool → $new_tool_version" - - # Update mcp-tools.json in place - tmp=$(mktemp) - jq --arg idx "$i" \ - --arg npm_ver "$latest_npm" \ - --arg tool_ver "$new_tool_version" \ - --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" ' - .tools[($idx | tonumber)].upstream_mcp_npm.version = $npm_ver | - .tools[($idx | tonumber)].current = $tool_ver | - .tools[($idx | tonumber)].versions += [{ - version: $tool_ver, - digest: "sha256:placeholder", - released_at: $now, - yanked: false, - revoked: false, - security: false - }] - ' "$TOOLS_FILE" > "$tmp" && mv "$tmp" "$TOOLS_FILE" - - CHANGED=$((CHANGED + 1)) + echo "[$slug] no upstream_mcp_npm or upstream_mcp_pypi — skipped" done echo "" if [[ $CHANGED -gt 0 ]]; then echo "$CHANGED tool(s) updated. Review the diff, then commit and push:" - echo " git add tools/mcp-tools.json" + echo " git add tools/mcp-tools.json src-tauri/src/modules/tool_engine/tools.json" echo " git commit -m 'chore: bump upstream MCP packages'" echo " git push" echo "" - echo "CI will build only the tools whose version changed." + echo "CI will build tools whose catalog entry changed." else echo "All tools are up to date." 
fi From 217052de29626b2926a043a5a6b41266d4a2a3ba Mon Sep 17 00:00:00 2001 From: MaximEdogawa Date: Thu, 16 Apr 2026 16:18:39 +0200 Subject: [PATCH 2/8] update: changed default EMBEDDED_CATALOG location --- doc/tool-engine/manual-publish.md | 7 +- src-tauri/src/modules/tool_engine/service.rs | 5 +- src-tauri/src/modules/tool_engine/tools.json | 250 ------------------- tools/build-local-images.sh | 14 +- tools/update-upstream.sh | 58 ++--- 5 files changed, 39 insertions(+), 295 deletions(-) delete mode 100644 src-tauri/src/modules/tool_engine/tools.json diff --git a/doc/tool-engine/manual-publish.md b/doc/tool-engine/manual-publish.md index 5a40b03..b71820f 100644 --- a/doc/tool-engine/manual-publish.md +++ b/doc/tool-engine/manual-publish.md @@ -79,7 +79,7 @@ After a successful push, get the digest: podman image inspect "${IMAGE}:${VERSION}" --format '{{index .RepoDigests 0}}' ``` -Update the `sha256:…` value in the matching `versions[]` entry in **`tools/mcp-tools.json`**. The app fetches this file from GitHub at runtime; the embedded `src-tauri/src/modules/tool_engine/tools.json` is the offline fallback. +Update the `sha256:…` value in the matching `versions[]` entry in **`tools/mcp-tools.json`**. The app fetches this file from GitHub at runtime and embeds the same file at compile time as the offline fallback. --- @@ -132,8 +132,7 @@ CI passes these as `docker build` args so you bump the npm version in the regist ## Files -- **`tools/mcp-tools.json`** — tool registry (all tools, versions, digests, npm). CI and the app read this. +- **`tools/mcp-tools.json`** — single-source tool registry (all tools, versions, digests, upstream). CI, the app at runtime, and the embedded offline fallback (`include_str!`) all read this file. - **`tools//Dockerfile`** — image build context. -- **`tools/update-upstream.sh`** — bump upstream npm versions (like `npm update`). -- **`src-tauri/src/modules/tool_engine/tools.json`** — embedded catalog (offline fallback). 
Update after publish. +- **`tools/update-upstream.sh`** — bump upstream npm/PyPI versions (like `npm update`). - **`.github/workflows/tools-publish.yml`** — CI workflow. diff --git a/src-tauri/src/modules/tool_engine/service.rs b/src-tauri/src/modules/tool_engine/service.rs index 4ffbe7a..c0f2f2e 100644 --- a/src-tauri/src/modules/tool_engine/service.rs +++ b/src-tauri/src/modules/tool_engine/service.rs @@ -7,7 +7,7 @@ use std::path::{Path, PathBuf}; use std::process::Stdio; use tokio::io::{AsyncBufReadExt, BufReader}; -const EMBEDDED_CATALOG: &str = include_str!("tools.json"); +const EMBEDDED_CATALOG: &str = include_str!("../../../../tools/mcp-tools.json"); /// Remote registry URL — raw GitHub content. The app fetches this at runtime so /// users get new tools / version bumps without waiting for a Pengine app update. @@ -38,7 +38,8 @@ fn parse_catalog(json: &str) -> Option { /// Load the embedded (compile-time) catalog. Always succeeds on a valid build. pub fn load_embedded_catalog() -> Result { - serde_json::from_str(EMBEDDED_CATALOG).map_err(|e| format!("parse embedded tools.json: {e}")) + serde_json::from_str(EMBEDDED_CATALOG) + .map_err(|e| format!("parse embedded mcp-tools.json: {e}")) } /// Try repo `tools/mcp-tools.json` before the remote catalog (used by `bun run tauri dev` and diff --git a/src-tauri/src/modules/tool_engine/tools.json b/src-tauri/src/modules/tool_engine/tools.json deleted file mode 100644 index cb038bf..0000000 --- a/src-tauri/src/modules/tool_engine/tools.json +++ /dev/null @@ -1,250 +0,0 @@ -{ - "schema_version": 1, - "generated_at": "2026-04-14T00:00:00Z", - "catalog_revision": 3, - "valid_until": "2026-05-14T00:00:00Z", - "minimum_pengine_version": "0.5.0", - "tools": [ - { - "id": "pengine/file-manager", - "name": "File Manager", - "description": "Filesystem MCP in a container. Add folders in MCP Tools; each mounts at /app/. 
Install works before any folder is set.", - "image": "ghcr.io/pengine-ai/pengine-file-manager", - "current": "0.1.0", - "versions": [ - { - "version": "0.1.0", - "digest": "sha256:placeholder", - "released_at": "2026-04-12T00:00:00Z", - "yanked": false, - "revoked": false, - "security": false - } - ], - "container_read_only_rootfs": false, - "mount_read_only": true, - "mount_workspace": true, - "append_workspace_roots": true, - "direct_return": true, - "upstream_mcp_npm": { - "package": "@modelcontextprotocol/server-filesystem", - "version": "2026.1.14" - }, - "mcp_server_cmd": [], - "commands": [ - { "name": "read_text_file", "description": "Read a file as UTF-8 text; optional head/tail line limits" }, - { "name": "read_media_file", "description": "Read image or audio as base64 with MIME type" }, - { "name": "read_multiple_files", "description": "Read several files in one call" }, - { "name": "write_file", "description": "Create or overwrite a file" }, - { "name": "edit_file", "description": "Pattern-based selective edits with optional dry run" }, - { "name": "create_directory", "description": "Create a directory (and parents)" }, - { "name": "list_directory", "description": "List entries with [FILE]/[DIR] prefixes" }, - { "name": "list_directory_with_sizes", "description": "List directory with sizes and optional sort" }, - { "name": "move_file", "description": "Move or rename a file or directory" }, - { "name": "search_files", "description": "Recursive glob search under a path" }, - { "name": "directory_tree", "description": "Recursive JSON tree of directory contents" }, - { "name": "get_file_info", "description": "Metadata: size, times, type, permissions" }, - { "name": "list_allowed_directories", "description": "List MCP roots currently allowed" } - ], - "limits": { - "cpus": "0.5", - "memory": "256m", - "timeout_secs": 30 - } - }, - { - "id": "pengine/fetch", - "name": "Fetch", - "description": "Official MCP fetch server: download URLs and return 
markdown-friendly content for the model.", - "image": "ghcr.io/pengine-ai/pengine-fetch", - "current": "0.1.0", - "versions": [ - { - "version": "0.1.0", - "digest": "sha256:placeholder", - "released_at": "2026-04-14T00:00:00Z", - "yanked": false, - "revoked": false, - "security": false - } - ], - "container_read_only_rootfs": true, - "mount_read_only": true, - "mount_workspace": false, - "append_workspace_roots": false, - "direct_return": true, - "network_isolated": false, - "upstream_mcp_pypi": { - "package": "mcp-server-fetch", - "version": "2025.4.7" - }, - "mcp_server_cmd": [], - "commands": [ - { "name": "fetch", "description": "Fetch a URL and return extracted content (markdown by default)" } - ], - "limits": { - "cpus": "0.5", - "memory": "512m", - "timeout_secs": 60 - } - }, - { - "id": "pengine/git", - "name": "Git", - "description": "Official MCP git server: status, diff, log, branches, and commits against repos under your mounted folders (use paths like /app/).", - "image": "ghcr.io/pengine-ai/pengine-git", - "current": "0.1.0", - "versions": [ - { - "version": "0.1.0", - "digest": "sha256:placeholder", - "released_at": "2026-04-14T00:00:00Z", - "yanked": false, - "revoked": false, - "security": false - } - ], - "container_read_only_rootfs": false, - "mount_read_only": false, - "mount_workspace": true, - "append_workspace_roots": false, - "direct_return": true, - "upstream_mcp_pypi": { - "package": "mcp-server-git", - "version": "2026.1.14" - }, - "mcp_server_cmd": [], - "commands": [ - { "name": "git_status", "description": "Working tree status for a repository" }, - { "name": "git_diff_unstaged", "description": "Diff unstaged changes" }, - { "name": "git_diff_staged", "description": "Diff staged changes" }, - { "name": "git_diff", "description": "Diff against a branch or commit" }, - { "name": "git_commit", "description": "Create a commit" }, - { "name": "git_add", "description": "Stage files" }, - { "name": "git_reset", "description": "Unstage all 
staged changes" }, - { "name": "git_log", "description": "Commit history with optional date filters" }, - { "name": "git_branch", "description": "List branches" }, - { "name": "git_checkout", "description": "Switch branches" } - ], - "limits": { - "cpus": "0.5", - "memory": "512m", - "timeout_secs": 60 - } - }, - { - "id": "pengine/sequential-thinking", - "name": "Sequential Thinking", - "description": "Official MCP sequential-thinking server for structured step-by-step reasoning.", - "image": "ghcr.io/pengine-ai/pengine-sequential-thinking", - "current": "0.1.0", - "versions": [ - { - "version": "0.1.0", - "digest": "sha256:placeholder", - "released_at": "2026-04-14T00:00:00Z", - "yanked": false, - "revoked": false, - "security": false - } - ], - "container_read_only_rootfs": true, - "mount_read_only": true, - "mount_workspace": false, - "append_workspace_roots": false, - "direct_return": false, - "upstream_mcp_npm": { - "package": "@modelcontextprotocol/server-sequential-thinking", - "version": "2025.12.18" - }, - "mcp_server_cmd": [], - "commands": [ - { "name": "sequentialthinking", "description": "Record and refine a sequence of reasoning steps" } - ], - "limits": { - "cpus": "0.25", - "memory": "256m", - "timeout_secs": 30 - } - }, - { - "id": "pengine/time", - "name": "Time", - "description": "Official MCP time server: current time and timezone conversions.", - "image": "ghcr.io/pengine-ai/pengine-time", - "current": "0.1.0", - "versions": [ - { - "version": "0.1.0", - "digest": "sha256:placeholder", - "released_at": "2026-04-14T00:00:00Z", - "yanked": false, - "revoked": false, - "security": false - } - ], - "container_read_only_rootfs": true, - "mount_read_only": true, - "mount_workspace": false, - "append_workspace_roots": false, - "direct_return": true, - "upstream_mcp_pypi": { - "package": "mcp-server-time", - "version": "2026.1.26" - }, - "mcp_server_cmd": [], - "commands": [ - { "name": "get_current_time", "description": "Current time in a timezone" 
}, - { "name": "convert_time", "description": "Convert a time between timezones" } - ], - "limits": { - "cpus": "0.25", - "memory": "256m", - "timeout_secs": 30 - } - }, - { - "id": "pengine/memory", - "name": "Memory", - "description": "Official MCP memory server: knowledge-graph style entities, relations, and observations (in-container persistence).", - "image": "ghcr.io/pengine-ai/pengine-memory", - "current": "0.1.0", - "versions": [ - { - "version": "0.1.0", - "digest": "sha256:placeholder", - "released_at": "2026-04-14T00:00:00Z", - "yanked": false, - "revoked": false, - "security": false - } - ], - "container_read_only_rootfs": false, - "mount_read_only": true, - "mount_workspace": false, - "append_workspace_roots": false, - "direct_return": false, - "upstream_mcp_npm": { - "package": "@modelcontextprotocol/server-memory", - "version": "2026.1.26" - }, - "mcp_server_cmd": [], - "commands": [ - { "name": "create_entities", "description": "Create entities in the knowledge graph" }, - { "name": "create_relations", "description": "Create relations between entities" }, - { "name": "add_observations", "description": "Add observations to entities" }, - { "name": "read_graph", "description": "Read the knowledge graph" }, - { "name": "search_nodes", "description": "Search nodes in the graph" }, - { "name": "open_nodes", "description": "Open nodes by name" }, - { "name": "delete_entities", "description": "Delete entities" }, - { "name": "delete_observations", "description": "Delete observations" }, - { "name": "delete_relations", "description": "Delete relations" } - ], - "limits": { - "cpus": "0.25", - "memory": "256m", - "timeout_secs": 30 - } - } - ] -} diff --git a/tools/build-local-images.sh b/tools/build-local-images.sh index 44133a5..9241223 100755 --- a/tools/build-local-images.sh +++ b/tools/build-local-images.sh @@ -38,7 +38,8 @@ if [[ -n "${BUILD_PLATFORM:-}" ]]; then fi # Space-separated (avoid @tsv + split quirks in some jq versions). 
-while read -r slug image current; do +# "-" sentinels stand in for absent upstream_mcp_{npm,pypi} fields so `read` gets a fixed column count. +while read -r slug image current npm_pkg npm_ver pypi_pkg pypi_ver; do [[ -z "$slug" ]] && continue ctx="${ROOT}/tools/${slug}" df="${ctx}/Dockerfile" @@ -48,7 +49,14 @@ while read -r slug image current; do fi tag="${image}:${current}" echo "=== build $slug -> $tag ===" - "${RUNTIME}" build "${PLATFORM_ARGS[@]}" -f "$df" -t "$tag" "$ctx" -done < <(jq -r '.tools[] | "\(.id | split("/")[1]) \(.image) \(.current)"' "$REG") + build_args=() + [[ "$npm_pkg" != "-" ]] && build_args+=(--build-arg "UPSTREAM_MCP_NPM_PACKAGE=$npm_pkg") + [[ "$npm_ver" != "-" ]] && build_args+=(--build-arg "UPSTREAM_MCP_NPM_VERSION=$npm_ver") + [[ "$pypi_pkg" != "-" ]] && build_args+=(--build-arg "UPSTREAM_MCP_PYPI_PACKAGE=$pypi_pkg") + [[ "$pypi_ver" != "-" ]] && build_args+=(--build-arg "UPSTREAM_MCP_PYPI_VERSION=$pypi_ver") + "${RUNTIME}" build "${PLATFORM_ARGS[@]}" "${build_args[@]}" -f "$df" -t "$tag" "$ctx" +done < <(jq -r ' + .tools[] | "\(.id | split("/")[1]) \(.image) \(.current) \(.upstream_mcp_npm.package // "-") \(.upstream_mcp_npm.version // "-") \(.upstream_mcp_pypi.package // "-") \(.upstream_mcp_pypi.version // "-")" +' "$REG") echo "Done. Images tagged as :." diff --git a/tools/update-upstream.sh b/tools/update-upstream.sh index d8cb2ab..bf82a27 100755 --- a/tools/update-upstream.sh +++ b/tools/update-upstream.sh @@ -41,46 +41,32 @@ bump_tool() { local idx="$1" local kind="$2" local new_upstream_ver="$3" + local slug="$4" + local upstream_key="upstream_mcp_${kind}" local current_tool local new_tool_version current_tool=$(jq -r ".tools[$idx].current" "$TOOLS_FILE") IFS='.' 
read -r major minor patch <<< "$current_tool" new_tool_version="${major}.${minor}.$((patch + 1))" echo "[$slug] bumping tool version: $current_tool → $new_tool_version" + local tmp tmp=$(mktemp) - if [[ "$kind" == "npm" ]]; then - jq --arg idx "$idx" \ - --arg npm_ver "$new_upstream_ver" \ - --arg tool_ver "$new_tool_version" \ - --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" ' - .tools[($idx | tonumber)].upstream_mcp_npm.version = $npm_ver | - .tools[($idx | tonumber)].current = $tool_ver | - .tools[($idx | tonumber)].versions += [{ - version: $tool_ver, - digest: "sha256:placeholder", - released_at: $now, - yanked: false, - revoked: false, - security: false - }] - ' "$TOOLS_FILE" > "$tmp" && mv "$tmp" "$TOOLS_FILE" - else - jq --arg idx "$idx" \ - --arg pypi_ver "$new_upstream_ver" \ - --arg tool_ver "$new_tool_version" \ - --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" ' - .tools[($idx | tonumber)].upstream_mcp_pypi.version = $pypi_ver | - .tools[($idx | tonumber)].current = $tool_ver | - .tools[($idx | tonumber)].versions += [{ - version: $tool_ver, - digest: "sha256:placeholder", - released_at: $now, - yanked: false, - revoked: false, - security: false - }] - ' "$TOOLS_FILE" > "$tmp" && mv "$tmp" "$TOOLS_FILE" - fi + jq --arg idx "$idx" \ + --arg upstream_key "$upstream_key" \ + --arg upstream_ver "$new_upstream_ver" \ + --arg tool_ver "$new_tool_version" \ + --arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" ' + .tools[($idx | tonumber)][$upstream_key].version = $upstream_ver | + .tools[($idx | tonumber)].current = $tool_ver | + .tools[($idx | tonumber)].versions += [{ + version: $tool_ver, + digest: "sha256:placeholder", + released_at: $now, + yanked: false, + revoked: false, + security: false + }] + ' "$TOOLS_FILE" > "$tmp" && mv "$tmp" "$TOOLS_FILE" CHANGED=$((CHANGED + 1)) } @@ -108,7 +94,7 @@ for i in $(seq 0 $((tool_count - 1))); do continue fi echo "new version: $current_npm → $latest_npm" - bump_tool "$i" "npm" "$latest_npm" + bump_tool "$i" "npm" "$latest_npm" 
"$slug" continue fi @@ -125,7 +111,7 @@ for i in $(seq 0 $((tool_count - 1))); do continue fi echo "new version: $current_pypi → $latest_pypi" - bump_tool "$i" "pypi" "$latest_pypi" + bump_tool "$i" "pypi" "$latest_pypi" "$slug" continue fi @@ -135,7 +121,7 @@ done echo "" if [[ $CHANGED -gt 0 ]]; then echo "$CHANGED tool(s) updated. Review the diff, then commit and push:" - echo " git add tools/mcp-tools.json src-tauri/src/modules/tool_engine/tools.json" + echo " git add tools/mcp-tools.json" echo " git commit -m 'chore: bump upstream MCP packages'" echo " git push" echo "" From 879a1fa8f84500d474091e1bc4d4d12a3c05f77e Mon Sep 17 00:00:00 2001 From: MaximEdogawa Date: Thu, 16 Apr 2026 21:43:54 +0200 Subject: [PATCH 3/8] feat: implement private folder support for tools in MCP - Added `private_folder` configuration to tools, allowing them to persist state in a designated host directory. - Introduced new API endpoint to set the host path for tools that declare a private folder. - Enhanced the MCP server entry to include `private_host_path` for tools utilizing private folders. - Updated the UI components to manage private folder settings, including folder selection and error handling. - Revised tool catalog to reflect changes in memory server description and private folder configuration. 
--- src-tauri/src/infrastructure/http_server.rs | 195 +++++++- src-tauri/src/modules/bot/agent.rs | 458 ++++++++++++++++++- src-tauri/src/modules/bot/service.rs | 6 + src-tauri/src/modules/mcp/registry.rs | 6 + src-tauri/src/modules/mcp/service.rs | 13 +- src-tauri/src/modules/mcp/types.rs | 4 + src-tauri/src/modules/memory/mod.rs | 286 ++++++++++++ src-tauri/src/modules/mod.rs | 1 + src-tauri/src/modules/tool_engine/service.rs | 237 +++++++++- src-tauri/src/modules/tool_engine/types.rs | 16 + src-tauri/src/shared/state.rs | 15 + src/modules/mcp/components/McpServerCard.tsx | 117 +++++ src/modules/mcp/index.ts | 2 + src/modules/toolengine/index.ts | 40 ++ tools/mcp-tools.json | 9 +- tools/memory/Dockerfile | 1 + 16 files changed, 1374 insertions(+), 32 deletions(-) create mode 100644 src-tauri/src/modules/memory/mod.rs diff --git a/src-tauri/src/infrastructure/http_server.rs b/src-tauri/src/infrastructure/http_server.rs index 590a584..cf1bb21 100644 --- a/src-tauri/src/infrastructure/http_server.rs +++ b/src-tauri/src/infrastructure/http_server.rs @@ -104,6 +104,10 @@ pub async fn start_server(state: AppState) { "/v1/toolengine/uninstall", post(handle_toolengine_uninstall), ) + .route( + "/v1/toolengine/private-folder", + put(handle_toolengine_private_folder_put), + ) .route("/v1/toolengine/custom", get(handle_toolengine_custom_list)) .route("/v1/toolengine/custom", post(handle_toolengine_custom_add)) .route( @@ -379,11 +383,21 @@ async fn handle_mcp_filesystem_put( mcp_service::set_filesystem_allowed_paths(&mut cfg, &paths); let mut note = None::; + let bot_id = state + .connection + .lock() + .await + .as_ref() + .map(|c| c.bot_id.clone()); match &catalog_result { Ok(cat) => { - if let Err(e) = - te_service::sync_workspace_mounted_tools_for_catalog(&mut cfg, &paths, cat) - { + if let Err(e) = te_service::sync_workspace_mounted_tools_for_catalog( + &mut cfg, + &paths, + cat, + &state.mcp_config_path, + bot_id, + ) { note = Some(e); } } @@ -621,10 +635,25 @@ async 
fn handle_toolengine_catalog( let installed_ids = te_service::installed_tool_ids(&state.mcp_config_path); + let cfg_snap = state + .mcp_config_path + .exists() + .then(|| mcp_service::read_config(&state.mcp_config_path).ok()) + .flatten(); + let tools: Vec = catalog .tools .iter() .map(|t| { + let stored_pf = cfg_snap.as_ref().and_then(|c| { + let k = te_service::server_key(&t.id); + match c.servers.get(&k)? { + crate::modules::mcp::types::ServerEntry::Stdio { + private_host_path, .. + } => private_host_path.as_deref(), + _ => None, + } + }); let commands: Vec = t .commands .iter() @@ -635,6 +664,18 @@ async fn handle_toolengine_catalog( }) }) .collect(); + let private_folder_json = t.private_folder.as_ref().map(|pf| { + serde_json::json!({ + "container_path": pf.container_path, + "file_env_var": pf.file_env_var, + "file_extension": pf.file_extension, + }) + }); + let private_host_resolved: Option = t.private_folder.as_ref().map(|_| { + te_service::resolve_private_host_path(&state.mcp_config_path, &t.id, stored_pf) + .to_string_lossy() + .into_owned() + }); serde_json::json!({ "id": t.id, "name": t.name, @@ -642,6 +683,8 @@ async fn handle_toolengine_catalog( "description": t.description, "installed": installed_ids.contains(&t.id), "commands": commands, + "private_folder": private_folder_json, + "private_host_path": private_host_resolved, }) }) .collect(); @@ -659,6 +702,12 @@ struct ToolEngineActionBody { tool_id: String, } +#[derive(Deserialize)] +struct PutToolPrivateFolderBody { + tool_id: String, + path: String, +} + async fn handle_toolengine_install( State(state): State, Json(body): Json, @@ -725,6 +774,146 @@ async fn handle_toolengine_install( Ok((StatusCode::OK, Json(serde_json::json!({ "ok": true })))) } +async fn handle_toolengine_private_folder_put( + State(state): State, + Json(body): Json, +) -> Result<(StatusCode, Json), (StatusCode, Json)> { + let tool_id = body.tool_id.trim().to_string(); + let path = body.path.trim().to_string(); + if 
tool_id.is_empty() || path.is_empty() { + return Err(( + StatusCode::BAD_REQUEST, + Json(ErrorResponse { + error: "tool_id and path are required".into(), + }), + )); + } + + let catalog = te_service::load_catalog().await.map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let entry = catalog + .tools + .iter() + .find(|t| t.id == tool_id) + .ok_or_else(|| { + ( + StatusCode::NOT_FOUND, + Json(ErrorResponse { + error: format!("unknown tool '{tool_id}'"), + }), + ) + })?; + + if entry.private_folder.is_none() { + return Err(( + StatusCode::BAD_REQUEST, + Json(ErrorResponse { + error: "this catalog tool does not declare private_folder".into(), + }), + )); + } + + std::fs::create_dir_all(&path).map_err(|e| { + ( + StatusCode::BAD_REQUEST, + Json(ErrorResponse { + error: format!("cannot create directory: {e}"), + }), + ) + })?; + + let bot_id = state + .connection + .lock() + .await + .as_ref() + .map(|c| c.bot_id.clone()); + + { + let _guard = state.mcp_config_mutex.lock().await; + let mut cfg = mcp_service::load_or_init_config(&state.mcp_config_path).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + let key = te_service::server_key(&tool_id); + { + let Some(server_ent) = cfg.servers.get_mut(&key) else { + return Err(( + StatusCode::NOT_FOUND, + Json(ErrorResponse { + error: format!("tool '{tool_id}' is not installed"), + }), + )); + }; + match server_ent { + crate::modules::mcp::types::ServerEntry::Stdio { + private_host_path, .. 
+ } => { + *private_host_path = Some(path.clone()); + } + _ => { + return Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { + error: "tool server entry is not stdio".into(), + }), + )); + } + } + } + + let host_paths = mcp_service::filesystem_allowed_paths(&cfg); + te_service::sync_workspace_mounted_tools_for_catalog( + &mut cfg, + &host_paths, + &catalog, + &state.mcp_config_path, + bot_id, + ) + .map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + + mcp_service::save_config(&state.mcp_config_path, &cfg).map_err(|e| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(ErrorResponse { error: e }), + ) + })?; + } + + state + .emit_log( + "toolengine", + &format!("private data folder for {tool_id} set to {path}"), + ) + .await; + + let bg = state.clone(); + tokio::spawn(async move { + if let Err(e) = mcp_service::rebuild_registry_into_state(&bg).await { + bg.emit_log( + "mcp", + &format!("ERROR: MCP registry rebuild failed after private-folder update: {e}"), + ) + .await; + } + }); + + Ok((StatusCode::OK, Json(serde_json::json!({ "ok": true })))) +} + async fn handle_toolengine_uninstall( State(state): State, Json(body): Json, diff --git a/src-tauri/src/modules/bot/agent.rs b/src-tauri/src/modules/bot/agent.rs index ea51f1a..188224c 100644 --- a/src-tauri/src/modules/bot/agent.rs +++ b/src-tauri/src/modules/bot/agent.rs @@ -1,11 +1,59 @@ +use crate::modules::memory::{ + self, MemoryProvider, SessionCommand, DIARY_END_PHRASES, DIARY_START_PHRASES, + SESSION_END_PHRASES, SESSION_START_PHRASES, +}; use crate::modules::ollama::service as ollama; use crate::modules::tool_engine::service::workspace_app_bind_pairs; -use crate::shared::state::AppState; +use crate::shared::state::{AppState, MemorySession}; +use chrono::Utc; use serde_json::json; use std::time::{Duration, Instant}; const MAX_STEPS: usize = 3; +/// Hint appended to the system prompt when a memory server is connected. 
Generic on +/// purpose — specific tool names live in `modules::memory` so swapping backends doesn't +/// drift the prompt. +fn memory_hint(session_active: Option<&str>, diary_active: bool) -> String { + let starts = SESSION_START_PHRASES.join("\", \""); + let diary_starts = DIARY_START_PHRASES.join("\", \""); + let diary_ends = DIARY_END_PHRASES.join("\", \""); + let ends = SESSION_END_PHRASES.join("\", \""); + let base = format!( + "\n\n\ +You have long-term memory via a connected Memory MCP server (see the available tools list). \ +Memory recording is controlled by the HOST, not by you:\n\ +- When the user says \"{starts}\", the host opens a **chat** memory session and saves **each \ +user message and your reply** after every turn.\n\ +- When the user says \"{diary_starts}\", the host opens **diary** mode: only the user's lines \ +are saved; the host does **not** run you for those lines — you will not see them as normal chat.\n\ +- Diary mode stops on \"{diary_ends}\" or any session end phrase (e.g. \"{ends}\").\n\ +- \"over and out\" always ends the memory session (chat or diary).\n\ +- While a **chat** session is active (not diary), each user message + your reply is persisted by \ +the host after your response. You do NOT need to call memory write tools yourself.\n\ +- When the user says \"{ends}\", or signs off Starfleet-style (\"Commander out\" / \ +\"Captain out\"), the host closes the session.\n\ +\n\ +Feel free to call the server's read tools (e.g. `read_graph`, `search_nodes`, `open_nodes`) \ +when recalling prior context helps the user. Outside an active session you may also write \ +facts on your own when the user explicitly asks you to remember something specific." + ); + match session_active { + Some(name) if diary_active => format!( + "{base}\n\n\ +A **diary** memory session is ACTIVE (`{name}`). 
The user may be sending lines that are saved \ +without invoking you — if you receive a normal user message in this chat, answer as usual when \ +not in diary-only flow." + ), + Some(name) => format!( + "{base}\n\n\ +A **chat** memory session is ACTIVE (`{name}`). The host is recording — do not call memory \ +write tools; just answer clearly and helpfully." + ), + None => base, + } +} + /// Ollama sometimes returns `function.arguments` as a JSON string; normalize to an object. fn tool_call_arguments(call: &serde_json::Value) -> serde_json::Value { let raw = call.get("function").and_then(|f| f.get("arguments")); @@ -36,20 +84,412 @@ pub enum ReplySource { pub struct TurnResult { pub text: String, pub source: ReplySource, + /// When true, the Telegram layer must not send `text` to the user (diary lines). + pub suppress_telegram_reply: bool, } pub async fn run_turn(state: &AppState, user_message: &str) -> Result { + // Session keyword commands short-circuit the model — they're host-level controls. + if let Some(cmd) = memory::detect_session_command(user_message) { + return match cmd { + SessionCommand::Start => handle_session_start(state).await, + SessionCommand::End => handle_session_end(state).await, + SessionCommand::DiaryStart => handle_diary_start(state).await, + SessionCommand::DiaryEnd => handle_diary_end(state).await, + }; + } + + if let Some(s) = state.memory_session.read().await.clone() { + if s.diary_only { + return handle_diary_line(state, user_message).await; + } + } + + let result = run_model_turn(state, user_message).await?; + spawn_memory_save(state, user_message, &result.text).await; + Ok(result) +} + +/// If a session is active and a memory server is connected, persist this turn in the +/// background. The MCP transport allows up to 2 minutes per call, so doing the append +/// inline would leak that latency into the user's reply path. 
+async fn spawn_memory_save(state: &AppState, user_message: &str, reply: &str) { + let Some(session) = state.memory_session.read().await.clone() else { + return; + }; + if session.diary_only { + return; + } + let Some(mem) = MemoryProvider::detect(&*state.mcp.read().await) else { + state + .emit_log( + "memory", + &format!( + "session `{}` active but no memory server connected — turn dropped", + session.entity_name + ), + ) + .await; + return; + }; + + let content = format!("[user] {user_message}\n[assistant] {reply}"); + let state_bg = state.clone(); + let entity = session.entity_name; + tokio::spawn(async move { + state_bg + .emit_log("memory", &format!("saving turn to `{entity}`…")) + .await; + match mem.append(&entity, &content).await { + Ok(()) => { + if let Some(s) = state_bg.memory_session.write().await.as_mut() { + if s.entity_name == entity { + s.turn_count += 1; + } + } + state_bg + .emit_log("memory", &format!("saved turn to `{entity}`")) + .await; + } + Err(e) => { + state_bg + .emit_log("memory", &format!("save turn to `{entity}` failed: {e}")) + .await; + } + } + }); +} + +async fn handle_session_start(state: &AppState) -> Result { + let memory = MemoryProvider::detect(&*state.mcp.read().await); + let Some(memory) = memory else { + return Ok(TurnResult { + text: "No memory server is connected. Install a memory tool in Dashboard → MCP Tools first." + .into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + }; + + if let Some(existing) = state.memory_session.read().await.clone() { + if existing.diary_only { + return Ok(TurnResult { + text: "Diary recording is active — say \"record end\" or \"over and out\" before starting a chat memory session." + .into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + return Ok(TurnResult { + text: format!( + "Already recording to memory as `{}`. 
Say \"close session\" to end it.", + existing.entity_name + ), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + + let now = Utc::now(); + let entity_name = memory::session_entity_name(now); + let description = format!( + "Chat session opened at {} UTC.", + now.format("%Y-%m-%d %H:%M:%S") + ); + + if let Err(e) = memory.start_session(&entity_name, &description).await { + state + .emit_log("memory", &format!("failed to open session: {e}")) + .await; + return Ok(TurnResult { + text: format!("Could not open memory session: {e}"), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + + *state.memory_session.write().await = Some(MemorySession { + entity_name: entity_name.clone(), + started_at: now, + turn_count: 0, + diary_only: false, + }); + + state + .emit_log( + "memory", + &format!("session opened on {}: {entity_name}", memory.server_name()), + ) + .await; + + Ok(TurnResult { + text: format!( + "Captain's log opened as `{entity_name}`. Every message from here is saved to memory. \ + Say \"close session\", \"end log\", or sign off Starfleet-style (\"Commander out\") \ + to close it." + ), + source: ReplySource::Model, + suppress_telegram_reply: false, + }) +} + +async fn handle_diary_start(state: &AppState) -> Result { + let memory = MemoryProvider::detect(&*state.mcp.read().await); + let Some(memory) = memory else { + return Ok(TurnResult { + text: "No memory server is connected. Install a memory tool in Dashboard → MCP Tools first." + .into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + }; + + if let Some(existing) = state.memory_session.read().await.clone() { + if existing.diary_only { + return Ok(TurnResult { + text: format!( + "Diary recording is already active (`{}`). 
Say \"record end\" or \"over and out\" to stop.", + existing.entity_name + ), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + return Ok(TurnResult { + text: "A chat memory session is active — say \"close session\" or \"over and out\" first, then send \"record\" for diary-only mode." + .into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + + let now = Utc::now(); + let entity_name = memory::session_entity_name(now); + let description = format!( + "Diary recording opened at {} UTC (user lines only; no assistant replies).", + now.format("%Y-%m-%d %H:%M:%S") + ); + + if let Err(e) = memory.start_session(&entity_name, &description).await { + state + .emit_log("memory", &format!("failed to open diary session: {e}")) + .await; + return Ok(TurnResult { + text: format!("Could not open diary session: {e}"), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + + *state.memory_session.write().await = Some(MemorySession { + entity_name: entity_name.clone(), + started_at: now, + turn_count: 0, + diary_only: true, + }); + + state + .emit_log( + "memory", + &format!( + "diary session opened on {}: {entity_name}", + memory.server_name() + ), + ) + .await; + + Ok(TurnResult { + text: format!( + "Diary recording started (`{entity_name}`). Your lines are saved silently. \ + Say \"record end\" or \"over and out\" to stop." + ), + source: ReplySource::Model, + suppress_telegram_reply: false, + }) +} + +async fn handle_diary_end(state: &AppState) -> Result { + let taken = state.memory_session.write().await.take(); + let Some(session) = taken else { + return Ok(TurnResult { + text: "No diary recording is active. Send \"record\" to start.".into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + }; + + if !session.diary_only { + // Put chat session back — user said "record end" while in captain's log mode. 
+ *state.memory_session.write().await = Some(session); + return Ok(TurnResult { + text: "Not in diary mode — you're in a chat memory session. Say \"close session\" or \"over and out\" to end that." + .into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + } + + let memory = MemoryProvider::detect(&*state.mcp.read().await); + if let Some(memory) = memory { + let now = Utc::now(); + let note = format!( + "Diary recording stopped at {} UTC after {} line(s).", + now.format("%Y-%m-%d %H:%M:%S"), + session.turn_count + ); + if let Err(e) = memory.append(&session.entity_name, ¬e).await { + state + .emit_log("memory", &format!("diary close note not saved: {e}")) + .await; + } + } + + state + .emit_log( + "memory", + &format!( + "diary closed: {} ({} line(s))", + session.entity_name, session.turn_count + ), + ) + .await; + + Ok(TurnResult { + text: format!( + "Diary recording stopped (`{}`, {} line(s) saved).", + session.entity_name, session.turn_count + ), + source: ReplySource::Model, + suppress_telegram_reply: false, + }) +} + +async fn handle_diary_line(state: &AppState, user_message: &str) -> Result { + let Some(session) = state.memory_session.read().await.clone() else { + return Ok(TurnResult { + text: String::new(), + source: ReplySource::Model, + suppress_telegram_reply: true, + }); + }; + let Some(mem) = MemoryProvider::detect(&*state.mcp.read().await) else { + state + .emit_log( + "memory", + "diary active but no memory server — line dropped; closing session", + ) + .await; + *state.memory_session.write().await = None; + return Ok(TurnResult { + text: "Memory server disconnected — diary recording stopped.".into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + }; + + let entity = session.entity_name.clone(); + let line = format!("[diary] {user_message}"); + let state_bg = state.clone(); + tokio::spawn(async move { + state_bg + .emit_log("memory", &format!("saving diary line to `{entity}`…")) + .await; + match 
mem.append(&entity, &line).await { + Ok(()) => { + if let Some(s) = state_bg.memory_session.write().await.as_mut() { + if s.entity_name == entity { + s.turn_count += 1; + } + } + state_bg + .emit_log("memory", &format!("saved diary line to `{entity}`")) + .await; + } + Err(e) => { + state_bg + .emit_log("memory", &format!("diary append to `{entity}` failed: {e}")) + .await; + } + } + }); + + Ok(TurnResult { + text: String::new(), + source: ReplySource::Model, + suppress_telegram_reply: true, + }) +} + +async fn handle_session_end(state: &AppState) -> Result { + let taken = state.memory_session.write().await.take(); + let Some(session) = taken else { + return Ok(TurnResult { + text: "No memory session is active.".into(), + source: ReplySource::Model, + suppress_telegram_reply: false, + }); + }; + + let memory = MemoryProvider::detect(&*state.mcp.read().await); + if let Some(memory) = memory { + let now = Utc::now(); + let kind = if session.diary_only { + "Diary" + } else { + "Session" + }; + let note = format!( + "{kind} closed at {} UTC after {} turn(s).", + now.format("%Y-%m-%d %H:%M:%S"), + session.turn_count + ); + if let Err(e) = memory.append(&session.entity_name, ¬e).await { + state + .emit_log("memory", &format!("close note not saved: {e}")) + .await; + } + } + + state + .emit_log( + "memory", + &format!( + "session closed: {} ({} turn(s))", + session.entity_name, session.turn_count + ), + ) + .await; + + Ok(TurnResult { + text: format!( + "Memory session `{}` closed after {} turn(s).", + session.entity_name, session.turn_count + ), + source: ReplySource::Model, + suppress_telegram_reply: false, + }) +} + +async fn run_model_turn(state: &AppState, user_message: &str) -> Result { let model = if let Some(selected) = state.preferred_ollama_model.read().await.clone() { selected } else { ollama::active_model().await? 
}; - let (ollama_tools, has_tools) = { + let (ollama_tools, has_tools, has_memory) = { let reg = state.mcp.read().await; - (reg.ollama_tools(), !reg.is_empty()) + ( + reg.ollama_tools(), + !reg.is_empty(), + MemoryProvider::detect(®).is_some(), + ) }; + let mem_snapshot = state.memory_session.read().await.clone(); + let session_active_name = mem_snapshot.as_ref().map(|s| s.entity_name.clone()); + let diary_active = mem_snapshot.as_ref().is_some_and(|s| s.diary_only); + let fs_context = { let paths = state.cached_filesystem_paths.read().await.clone(); let host_lines: String = workspace_app_bind_pairs(&paths) @@ -73,12 +513,17 @@ pub async fn run_turn(state: &AppState, user_message: &str) -> Result Result Result Result Result Result ResponseResult match result { Ok(turn) => { + if turn.suppress_telegram_reply { + state + .emit_log("reply", "[diary line saved; no Telegram reply]") + .await; + return Ok(()); + } let reply = if turn.text.trim().is_empty() { "(no reply)".to_string() } else { diff --git a/src-tauri/src/modules/mcp/registry.rs b/src-tauri/src/modules/mcp/registry.rs index 4a1ed44..605fb2a 100644 --- a/src-tauri/src/modules/mcp/registry.rs +++ b/src-tauri/src/modules/mcp/registry.rs @@ -181,6 +181,12 @@ impl ToolRegistry { self.cached_tool_names.is_empty() } + /// Read-only access to the registered providers. Used by capability detectors + /// (e.g. `memory::MemoryProvider::detect`) that pick a server by its tool shape. + pub fn providers(&self) -> &[Provider] { + &self.providers + } + pub async fn call_tool(&self, name: &str, args: Value) -> Result<(String, bool), String> { let (provider, tool, direct) = self.resolve_tool(name)?; let args = match &provider { diff --git a/src-tauri/src/modules/mcp/service.rs b/src-tauri/src/modules/mcp/service.rs index ee780be..45762eb 100644 --- a/src-tauri/src/modules/mcp/service.rs +++ b/src-tauri/src/modules/mcp/service.rs @@ -158,6 +158,7 @@ pub async fn connect_one_server( args, env, direct_return, + .. 
} => match McpClient::connect( server_key.to_string(), command.clone(), @@ -224,11 +225,21 @@ pub async fn rebuild_registry_into_state( }; let paths = filesystem_allowed_paths(&cfg); + let bot_id = state + .connection + .lock() + .await + .as_ref() + .map(|c| c.bot_id.clone()); let mut ws_changed = false; match &catalog_result { Ok(cat) => { match crate::modules::tool_engine::service::sync_workspace_mounted_tools_for_catalog( - &mut cfg, &paths, cat, + &mut cfg, + &paths, + cat, + &state.mcp_config_path, + bot_id, ) { Ok(changed) => ws_changed |= changed, Err(e) => { diff --git a/src-tauri/src/modules/mcp/types.rs b/src-tauri/src/modules/mcp/types.rs index 9b62f4a..084e4be 100644 --- a/src-tauri/src/modules/mcp/types.rs +++ b/src-tauri/src/modules/mcp/types.rs @@ -33,6 +33,10 @@ pub enum ServerEntry { /// sending them back to the model for summarisation. #[serde(default)] direct_return: bool, + /// For catalog tools that declare `private_folder`: the host directory currently mounted + /// into the container. Defaults to `$APP_DATA/tool-data//`; user overrides land here. + #[serde(default, skip_serializing_if = "Option::is_none")] + private_host_path: Option, }, } diff --git a/src-tauri/src/modules/memory/mod.rs b/src-tauri/src/modules/memory/mod.rs new file mode 100644 index 0000000..327d8b9 --- /dev/null +++ b/src-tauri/src/modules/memory/mod.rs @@ -0,0 +1,286 @@ +//! Generic, backend-agnostic memory capability. +//! +//! The agent talks to `MemoryProvider` without knowing which MCP server is behind it. +//! Detection is by **tool shape** (what commands the server exposes), not by catalog id, +//! so any memory MCP server that speaks a known shape is picked up automatically. +//! +//! ## Session policy lives here, not in the agent +//! +//! Keyword phrases ([`SESSION_START_PHRASES`], [`SESSION_END_PHRASES`], [`DIARY_START_PHRASES`], +//! [`DIARY_END_PHRASES`]) and the `session-` naming are defined at the top of this +//! module. 
Swapping the backing MCP server never touches them. +//! +//! ## Adding a new memory backend +//! +//! 1. Add a [`Backend`] variant. +//! 2. Add a detection arm in [`MemoryProvider::detect`] (match on the new tool shape). +//! 3. Add match arms in [`MemoryProvider::start_session`] / [`MemoryProvider::append`] +//! translating the generic op into that backend's tool calls. +//! +//! The agent, session keywords, and entity-naming scheme do not change. + +use crate::modules::mcp::registry::{Provider, ToolRegistry}; +use chrono::{DateTime, Utc}; +use serde_json::json; +use std::collections::HashSet; + +/// Exact (case-insensitive, trimmed, trailing punctuation stripped) phrases that open a +/// recording session. Stable across backends. +pub const SESSION_START_PHRASES: &[&str] = &[ + "remember this session", + "save this session", + // Star Trek flavor — starting the captain's log opens the session. + "captain's log", + "captains log", + "begin log", +]; + +/// Phrases that end the recording session (full chat log or diary — host always closes). +pub const SESSION_END_PHRASES: &[&str] = &[ + "close session", + "leave session", + "over and out", + "quit", + "exit", + "end log", +]; + +/// Start **diary-only** recording: user lines only, no assistant reply (exact message `record`). +pub const DIARY_START_PHRASES: &[&str] = &["record"]; + +/// Stop diary-only recording without necessarily using a full session end phrase. +pub const DIARY_END_PHRASES: &[&str] = &["record end"]; + +/// Starfleet sign-off: ` out`. Matches e.g. `Commander Worf out`, +/// `Captain Picard out`. Rank must be present so casual phrases like "logging out" +/// never trigger. 
+fn is_starfleet_signoff(normalized: &str) -> bool { + let toks: Vec<&str> = normalized.split_whitespace().collect(); + if toks.len() < 3 { + return false; + } + let first = toks.first().copied().unwrap_or(""); + let last = toks.last().copied().unwrap_or(""); + matches!(first, "commander" | "captain") && last == "out" +} + +/// Abstract command a user message can request of the memory subsystem. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SessionCommand { + /// Captain's log / remember this session — saves user + assistant each turn. + Start, + /// Close any active memory session (including diary). `over and out` maps here. + End, + /// Diary mode — only user messages persisted, no Telegram reply on each line. + DiaryStart, + /// Leave diary mode (must be in diary session). + DiaryEnd, +} + +/// Match a user message against the session keyword lists. Case-insensitive, +/// whitespace-trimmed, trailing `.!?,;` stripped. Only exact matches count — casual +/// substrings like "I want to quit my job" do **not** end the session. +pub fn detect_session_command(msg: &str) -> Option { + let normalized = msg + .trim() + .trim_end_matches(['.', '!', '?', ',', ';']) + .to_ascii_lowercase(); + // More specific phrases first (`record end` vs `record`). + if DIARY_END_PHRASES.iter().any(|p| normalized == *p) { + return Some(SessionCommand::DiaryEnd); + } + if DIARY_START_PHRASES.iter().any(|p| normalized == *p) { + return Some(SessionCommand::DiaryStart); + } + if SESSION_START_PHRASES.iter().any(|p| normalized == *p) { + return Some(SessionCommand::Start); + } + if SESSION_END_PHRASES.iter().any(|p| normalized == *p) { + return Some(SessionCommand::End); + } + if is_starfleet_signoff(&normalized) { + return Some(SessionCommand::End); + } + None +} + +/// Build a session entity name: `session-YYYYMMDDThhmmssZ`. Deterministic format so +/// humans can sort sessions chronologically in the knowledge graph. 
+pub fn session_entity_name(at: DateTime) -> String { + format!("session-{}", at.format("%Y%m%dT%H%M%SZ")) +} + +/// Which MCP tool shape backs this provider. One variant per supported memory style. +/// The agent never inspects this — it's private to the provider impl. +enum Backend { + /// Official `@modelcontextprotocol/server-memory` shape: entities + observations. + KnowledgeGraph, +} + +/// Memory capability bound to a concrete MCP provider in the registry. +/// +/// Each generic op (`start_session`, `append`) dispatches on the backend to the right +/// tool names — so the agent stays decoupled from any specific MCP server. +pub struct MemoryProvider { + backend: Backend, + provider: Provider, +} + +impl MemoryProvider { + /// Find a memory-capable server in the registry. Picks the first match. Returns + /// `None` if no connected MCP server exposes a known memory tool shape. + pub fn detect(reg: &ToolRegistry) -> Option { + for p in reg.providers() { + let tools: HashSet<&str> = p.tools().iter().map(|t| t.name.as_str()).collect(); + if tools.contains("create_entities") && tools.contains("add_observations") { + return Some(Self { + backend: Backend::KnowledgeGraph, + provider: p.clone(), + }); + } + } + None + } + + /// MCP server key hosting this memory (e.g. `te_pengine-memory`). Useful for logs. + pub fn server_name(&self) -> &str { + self.provider.server_name() + } + + /// Create the root session entity. `description` becomes the first observation. + pub async fn start_session(&self, name: &str, description: &str) -> Result<(), String> { + match self.backend { + Backend::KnowledgeGraph => { + let args = json!({ + "entities": [{ + "name": name, + "entityType": "ChatSession", + "observations": [description], + }] + }); + self.provider.call_tool("create_entities", args).await?; + } + } + Ok(()) + } + + /// Append a free-form observation to an existing session entity. 
+ pub async fn append(&self, entity_name: &str, content: &str) -> Result<(), String> { + match self.backend { + Backend::KnowledgeGraph => { + let args = json!({ + "observations": [{ + "entityName": entity_name, + "contents": [content], + }] + }); + self.provider.call_tool("add_observations", args).await?; + } + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn start_phrases_match_exactly_ignoring_case_and_punctuation() { + for p in SESSION_START_PHRASES { + assert_eq!(detect_session_command(p), Some(SessionCommand::Start)); + assert_eq!( + detect_session_command(&p.to_uppercase()), + Some(SessionCommand::Start) + ); + assert_eq!( + detect_session_command(&format!(" {p}.")), + Some(SessionCommand::Start) + ); + } + } + + #[test] + fn end_phrases_match_exactly() { + for p in SESSION_END_PHRASES { + assert_eq!(detect_session_command(p), Some(SessionCommand::End)); + } + } + + #[test] + fn diary_phrases() { + assert_eq!( + detect_session_command("record"), + Some(SessionCommand::DiaryStart) + ); + assert_eq!( + detect_session_command("record end"), + Some(SessionCommand::DiaryEnd) + ); + assert_eq!( + detect_session_command("record."), + Some(SessionCommand::DiaryStart) + ); + } + + /// Casual use of single-word end phrases must not trigger a session close — keywords + /// are full-message-only to avoid accidental terminations. + #[test] + fn casual_mentions_do_not_trigger() { + assert_eq!(detect_session_command("I want to quit my job"), None); + assert_eq!(detect_session_command("exit the building safely"), None); + assert_eq!( + detect_session_command("please remember this session later"), + None + ); + // "logging out" should not fire the Starfleet matcher — it requires a rank prefix. 
+ assert_eq!(detect_session_command("logging out"), None); + assert_eq!(detect_session_command("I need to head out"), None); + } + + #[test] + fn starfleet_signoff_closes_session() { + assert_eq!( + detect_session_command("Commander Worf out"), + Some(SessionCommand::End) + ); + assert_eq!( + detect_session_command("Captain Picard out."), + Some(SessionCommand::End) + ); + assert_eq!( + detect_session_command("commander data out"), + Some(SessionCommand::End) + ); + // No rank — don't fire. + assert_eq!(detect_session_command("Kirk out"), None); + } + + #[test] + fn captains_log_opens_session() { + assert_eq!( + detect_session_command("Captain's Log"), + Some(SessionCommand::Start) + ); + assert_eq!( + detect_session_command("captains log"), + Some(SessionCommand::Start) + ); + } + + #[test] + fn session_entity_name_is_sortable() { + let a = session_entity_name( + chrono::DateTime::parse_from_rfc3339("2026-04-16T10:00:00Z") + .unwrap() + .with_timezone(&Utc), + ); + let b = session_entity_name( + chrono::DateTime::parse_from_rfc3339("2026-04-16T11:30:00Z") + .unwrap() + .with_timezone(&Utc), + ); + assert!(a < b); + assert!(a.starts_with("session-")); + } +} diff --git a/src-tauri/src/modules/mod.rs b/src-tauri/src/modules/mod.rs index 0493163..1252ce7 100644 --- a/src-tauri/src/modules/mod.rs +++ b/src-tauri/src/modules/mod.rs @@ -1,4 +1,5 @@ pub mod bot; pub mod mcp; +pub mod memory; pub mod ollama; pub mod tool_engine; diff --git a/src-tauri/src/modules/tool_engine/service.rs b/src-tauri/src/modules/tool_engine/service.rs index c0f2f2e..a0bfa5a 100644 --- a/src-tauri/src/modules/tool_engine/service.rs +++ b/src-tauri/src/modules/tool_engine/service.rs @@ -1,5 +1,5 @@ use super::runtime::RuntimeInfo; -use super::types::{ToolCatalog, ToolEntry, VersionEntry}; +use super::types::{PrivateFolderConfig, ToolCatalog, ToolEntry, VersionEntry}; use crate::modules::mcp::service as mcp_service; use crate::modules::mcp::types::{CustomToolEntry, McpConfig, ServerEntry}; use 
std::collections::{HashMap, HashSet}; @@ -7,6 +7,10 @@ use std::path::{Path, PathBuf}; use std::process::Stdio; use tokio::io::{AsyncBufReadExt, BufReader}; +/// Sentinel used in `.` when no bot is connected yet. The file gets rewritten to +/// `.` on the next sync after connect, so this name is only ever live when unused. +const BOT_ID_FALLBACK: &str = "default"; + const EMBEDDED_CATALOG: &str = include_str!("../../../../tools/mcp-tools.json"); /// Remote registry URL — raw GitHub content. The app fetches this at runtime so @@ -120,6 +124,116 @@ pub fn server_key(tool_id: &str) -> String { format!("{TE_PREFIX}{}", tool_id.replace('/', "-")) } +/// Default host directory for a catalog tool's `private_folder` (`/tool-data//`). +pub fn default_private_data_dir(mcp_config_path: &Path, tool_id: &str) -> PathBuf { + let base = mcp_config_path.parent().unwrap_or_else(|| Path::new(".")); + base.join("tool-data").join(tool_id.replace('/', "-")) +} + +/// Resolve the host path for private tool data: explicit `mcp.json` override, else [`default_private_data_dir`]. +pub fn resolve_private_host_path( + mcp_config_path: &Path, + tool_id: &str, + stored: Option<&str>, +) -> PathBuf { + if let Some(s) = stored.map(str::trim).filter(|s| !s.is_empty()) { + PathBuf::from(s) + } else { + default_private_data_dir(mcp_config_path, tool_id) + } +} + +fn ensure_private_data_dir(path: &Path) -> Result<(), String> { + std::fs::create_dir_all(path) + .map_err(|e| format!("create private tool data dir {}: {e}", path.display())) +} + +/// Per-container env entry that points the MCP server at its bot-scoped state file +/// inside the bind-mounted private folder. 
+fn private_folder_container_env(pf: &PrivateFolderConfig, bot_id: &str) -> (String, String) { + let root = pf.container_path.trim_end_matches('/'); + let value = format!("{root}/{bot_id}.{}", pf.file_extension); + (pf.file_env_var.clone(), value) +} + +/// Everything the container needs to mount and address the private folder in one bundle. +pub struct PrivateBind<'a> { + pub host_path: &'a Path, + pub config: &'a PrivateFolderConfig, + pub bot_id: &'a str, +} + +fn catalog_tool_stdio_eq(a: &ServerEntry, b: &ServerEntry) -> bool { + match (a, b) { + ( + ServerEntry::Stdio { + command: c1, + args: a1, + env: e1, + direct_return: d1, + private_host_path: p1, + }, + ServerEntry::Stdio { + command: c2, + args: a2, + env: e2, + direct_return: d2, + private_host_path: p2, + }, + ) => c1 == c2 && a1 == a2 && e1 == e2 && d1 == d2 && p1 == p2, + _ => false, + } +} + +/// Rebuild argv for one installed catalog tool from `mcp.json` + catalog entry. +/// The container env is baked into argv via `-e` flags, so `ServerEntry.env` stays empty +/// (host-process env does not propagate into the container). +fn rebuild_installed_catalog_tool_stdio( + entry: &ToolEntry, + host_paths: &[String], + mcp_config_path: &Path, + prev: &ServerEntry, + bot_id: Option<&str>, +) -> Result { + let ServerEntry::Stdio { + command, + direct_return, + private_host_path, + .. 
+ } = prev + else { + return Err("internal: expected stdio server entry for tool engine catalog tool".into()); + }; + + let pb_buf = if entry.private_folder.is_some() { + let pb = + resolve_private_host_path(mcp_config_path, &entry.id, private_host_path.as_deref()); + ensure_private_data_dir(&pb)?; + Some(pb) + } else { + None + }; + let bid = bot_id.unwrap_or(BOT_ID_FALLBACK); + let private_bind: Option = match (&pb_buf, &entry.private_folder) { + (Some(pb), Some(pf)) => Some(PrivateBind { + host_path: pb.as_path(), + config: pf, + bot_id: bid, + }), + _ => None, + }; + + let args = podman_run_argv_for_tool(entry, host_paths, private_bind.as_ref())?; + + Ok(ServerEntry::Stdio { + command: command.clone(), + args, + env: HashMap::new(), + direct_return: *direct_return, + private_host_path: private_host_path.clone(), + }) +} + fn sanitize_mount_label(name: &str) -> String { let s: String = name .chars() @@ -166,6 +280,7 @@ pub fn workspace_app_bind_pairs(host_paths: &[String]) -> Vec<(String, String)> pub fn podman_run_argv_for_tool( entry: &ToolEntry, host_paths: &[String], + private_bind: Option<&PrivateBind<'_>>, ) -> Result, String> { if entry.append_workspace_roots && !entry.mount_workspace { return Err("catalog: append_workspace_roots requires mount_workspace".into()); @@ -205,6 +320,21 @@ pub fn podman_run_argv_for_tool( ); } + if let Some(pb) = private_bind { + let host_s = pb.host_path.to_str().ok_or_else(|| { + format!( + "private data path must be valid UTF-8: {}", + pb.host_path.display() + ) + })?; + args.push(format!( + "-v={host_s}:{}:rw", + pb.config.container_path.trim_end_matches('/') + )); + let (k, v) = private_folder_container_env(pb.config, pb.bot_id); + args.push(format!("--env={k}={v}")); + } + args.push(image_ref); args.extend(entry.mcp_server_cmd.iter().cloned()); @@ -389,37 +519,30 @@ pub fn installed_tool_ids(mcp_config_path: &Path) -> Vec { /// /// Pass the catalog from [`load_catalog`] (or tests) so callers can fetch **before** holding 
/// `mcp_config_mutex`, avoiding network I/O under that lock. +/// +/// `mcp_config_path` and `bot_id` refresh `private_folder` bind mounts and per-bot env paths. pub fn sync_workspace_mounted_tools_for_catalog( cfg: &mut McpConfig, host_paths: &[String], catalog: &ToolCatalog, + mcp_config_path: &Path, + bot_id: Option, ) -> Result { + let bid = bot_id.as_deref(); let mut changed = false; for entry in &catalog.tools { let key = server_key(&entry.id); - let Some(ServerEntry::Stdio { - command, - args, - env, - direct_return, - }) = cfg.servers.get(&key) - else { + let Some(prev) = cfg.servers.get(&key) else { continue; }; - let new_args = podman_run_argv_for_tool(entry, host_paths)?; - if args == &new_args { - continue; - } + let new_entry = + rebuild_installed_catalog_tool_stdio(entry, host_paths, mcp_config_path, prev, bid)?; - let new_entry = ServerEntry::Stdio { - command: command.clone(), - args: new_args, - env: env.clone(), - direct_return: *direct_return, - }; - cfg.servers.insert(key, new_entry); - changed = true; + if !catalog_tool_stdio_eq(prev, &new_entry) { + cfg.servers.insert(key, new_entry); + changed = true; + } } Ok(changed) } @@ -444,13 +567,31 @@ pub async fn install_tool( let _cfg_guard = mcp_cfg_lock.lock().await; let mut cfg = mcp_service::load_or_init_config(mcp_config_path)?; let host_paths = mcp_service::filesystem_allowed_paths(&cfg); - let args = podman_run_argv_for_tool(entry, &host_paths)?; + + let pb_buf = if entry.private_folder.is_some() { + let pb = resolve_private_host_path(mcp_config_path, tool_id, None); + ensure_private_data_dir(&pb)?; + Some(pb) + } else { + None + }; + let private_bind: Option = match (&pb_buf, &entry.private_folder) { + (Some(pb), Some(pf)) => Some(PrivateBind { + host_path: pb.as_path(), + config: pf, + bot_id: BOT_ID_FALLBACK, + }), + _ => None, + }; + + let args = podman_run_argv_for_tool(entry, &host_paths, private_bind.as_ref())?; let server_entry = ServerEntry::Stdio { command: runtime.binary.clone(), 
args, env: HashMap::new(), direct_return: entry.direct_return, + private_host_path: None, }; cfg.servers.insert(server_key(tool_id), server_entry); @@ -603,6 +744,7 @@ pub async fn add_custom_tool( args, env: HashMap::new(), direct_return: entry.direct_return, + private_host_path: None, }; cfg.servers @@ -650,6 +792,7 @@ pub fn sync_custom_tools_if_installed(cfg: &mut McpConfig, host_paths: &[String] args, env, direct_return, + private_host_path, }) = cfg.servers.get(&key) else { continue; @@ -665,6 +808,7 @@ pub fn sync_custom_tools_if_installed(cfg: &mut McpConfig, host_paths: &[String] args: new_args, env: env.clone(), direct_return: *direct_return, + private_host_path: private_host_path.clone(), }; cfg.servers.insert(key, new_entry); changed = true; @@ -707,6 +851,16 @@ mod tests { .expect("file-manager catalog pins upstream MCP npm"); assert!(u.package.contains("server-filesystem")); assert!(!u.version.is_empty()); + let mem = catalog + .tools + .iter() + .find(|t| t.id == "pengine/memory") + .expect("memory in embedded catalog"); + let mp = mem + .private_folder + .as_ref() + .expect("memory declares private_folder"); + assert_eq!(mp.file_env_var, "MEMORY_FILE_PATH"); } #[test] @@ -731,7 +885,7 @@ mod tests { .find(|v| v.version == fm.current) .unwrap(); assert_eq!(ver.digest, "sha256:placeholder"); - let argv = podman_run_argv_for_tool(fm, &[]).expect("argv"); + let argv = podman_run_argv_for_tool(fm, &[], None).expect("argv"); let tagged = format!("{}:{}", fm.image, fm.current); let image_ref = argv .iter() @@ -742,4 +896,43 @@ mod tests { "placeholder must not use @digest: {image_ref}" ); } + + #[test] + fn memory_catalog_has_private_folder_and_argv_includes_bind_and_env() { + let catalog = load_embedded_catalog().unwrap(); + let mem = catalog + .tools + .iter() + .find(|t| t.id == "pengine/memory") + .expect("memory in catalog"); + let pf = mem + .private_folder + .as_ref() + .expect("memory declares private_folder"); + assert_eq!(pf.container_path, 
"/mcp/data"); + assert_eq!(pf.file_env_var, "MEMORY_FILE_PATH"); + + let tmp = std::env::temp_dir().join("pengine-mem-test-data"); + let _ = std::fs::remove_dir_all(&tmp); + std::fs::create_dir_all(&tmp).unwrap(); + let pb = PrivateBind { + host_path: tmp.as_path(), + config: pf, + bot_id: "12345", + }; + let argv = podman_run_argv_for_tool(mem, &[], Some(&pb)).expect("argv"); + + let want_mount = format!("-v={}:/mcp/data:rw", tmp.to_str().expect("utf8 tmp path")); + assert!( + argv.iter().any(|a| a == &want_mount), + "missing mount: argv={argv:?}" + ); + + // Container env must be passed as `-e` argv — not host env, which podman does not forward. + let want_env = "--env=MEMORY_FILE_PATH=/mcp/data/12345.json".to_string(); + assert!( + argv.iter().any(|a| a == &want_env), + "missing -e flag: argv={argv:?}" + ); + } } diff --git a/src-tauri/src/modules/tool_engine/types.rs b/src-tauri/src/modules/tool_engine/types.rs index 5685b6c..b7ecf25 100644 --- a/src-tauri/src/modules/tool_engine/types.rs +++ b/src-tauri/src/modules/tool_engine/types.rs @@ -50,6 +50,18 @@ pub struct UpstreamMcpPypi { pub version: String, } +/// Declares that a tool keeps mutable state on disk. The app bind-mounts a host +/// directory to `container_path` and sets `file_env_var` on the container to +/// `/.` so state is scoped per connected bot. +/// Host directory defaults to `$APP_DATA/tool-data//` and can be overridden by +/// `PUT /v1/toolengine/private-folder` (`{ "tool_id", "path" }`). +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PrivateFolderConfig { + pub container_path: String, + pub file_env_var: String, + pub file_extension: String, +} + /// One entry in the tool catalog (`tools.json`). #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ToolEntry { @@ -98,6 +110,10 @@ pub struct ToolEntry { /// that need outbound network (e.g. web fetch). 
    #[serde(default = "default_true")]
    pub network_isolated: bool,
+    /// When set, the app bind-mounts a host folder into the container and passes a per-bot file
+    /// path via env so the tool can persist state (e.g. the Memory server's knowledge-graph JSON).
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub private_folder: Option<PrivateFolderConfig>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
diff --git a/src-tauri/src/shared/state.rs b/src-tauri/src/shared/state.rs
index d069e06..a12f7c8 100644
--- a/src-tauri/src/shared/state.rs
+++ b/src-tauri/src/shared/state.rs
@@ -21,6 +21,18 @@ pub struct LogEntry {
     pub message: String,
 }
 
+/// Active "remember this session" recording state. While set, every completed turn is
+/// appended as an observation on the session entity in the Memory server.
+#[derive(Debug, Clone)]
+pub struct MemorySession {
+    /// Entity name in the knowledge graph, e.g. `session-20260416T183000Z`.
+    pub entity_name: String,
+    pub started_at: DateTime<Utc>,
+    pub turn_count: u32,
+    /// When true (`record` command), only user lines are saved — no model reply or tool loop.
+    pub diary_only: bool,
+}
+
 #[derive(Clone)]
 pub struct AppState {
     pub connection: Arc>>,
@@ -38,6 +50,8 @@ pub struct AppState {
     pub preferred_ollama_model: Arc>>,
     pub cached_filesystem_paths: Arc>>,
     pub tool_engine_mutex: Arc>,
+    /// Active memory-session recording (toggled by keyword commands; see `bot::agent`).
+ pub memory_session: Arc>>, } impl AppState { @@ -58,6 +72,7 @@ impl AppState { preferred_ollama_model: Arc::new(RwLock::new(None)), cached_filesystem_paths: Arc::new(RwLock::new(Vec::new())), tool_engine_mutex: Arc::new(Mutex::new(())), + memory_session: Arc::new(RwLock::new(None)), } } diff --git a/src/modules/mcp/components/McpServerCard.tsx b/src/modules/mcp/components/McpServerCard.tsx index 2887e61..a3e8acf 100644 --- a/src/modules/mcp/components/McpServerCard.tsx +++ b/src/modules/mcp/components/McpServerCard.tsx @@ -1,4 +1,5 @@ import { useEffect, useState } from "react"; +import { fetchToolCatalog, putToolPrivateFolder } from "../../toolengine"; import { workspaceAppContainerMountPaths } from "../../../shared/workspaceMounts"; import { fetchMcpConfig, @@ -8,6 +9,11 @@ import { type ServerEntryStdio, } from ".."; +/** `pengine/memory` → `te_pengine-memory` (matches Rust `server_key`). */ +function teServerKeyForToolId(toolId: string): string { + return `te_${toolId.replace(/\//g, "-")}`; +} + type Props = { name: string; entry: ServerEntry; @@ -224,6 +230,13 @@ function InlineEditForm({ const [teApplyError, setTeApplyError] = useState(null); const [teApplyBusy, setTeApplyBusy] = useState(false); + /** Catalog tool id (e.g. `pengine/memory`) when this server uses `private_folder`. 
*/ + const [tePrivateToolId, setTePrivateToolId] = useState(null); + const [tePrivatePathInput, setTePrivatePathInput] = useState(""); + const [tePrivatePickError, setTePrivatePickError] = useState(null); + const [tePrivateApplyError, setTePrivateApplyError] = useState(null); + const [tePrivateApplyBusy, setTePrivateApplyBusy] = useState(false); + useEffect(() => { if (!isTeFileManager) return; void (async () => { @@ -232,6 +245,25 @@ function InlineEditForm({ })(); }, [isTeFileManager, name]); + useEffect(() => { + if (!name.startsWith("te_")) { + setTePrivateToolId(null); + setTePrivatePathInput(""); + return; + } + void (async () => { + const cat = await fetchToolCatalog(5000); + const t = cat?.find((x) => teServerKeyForToolId(x.id) === name && x.private_folder != null); + if (t) { + setTePrivateToolId(t.id); + setTePrivatePathInput(t.private_host_path ?? ""); + } else { + setTePrivateToolId(null); + setTePrivatePathInput(""); + } + })(); + }, [name]); + const isFs = argsTextLooksLikeFilesystem(argsText); // ── Filesystem folder helpers (read/write the args textarea) ────── @@ -327,6 +359,42 @@ function InlineEditForm({ onCancel(); }; + const pickTePrivateFolder = async () => { + setTePrivatePickError(null); + try { + const { invoke } = await import("@tauri-apps/api/core"); + try { + const picked = await invoke("pick_mcp_filesystem_folder"); + if (picked) setTePrivatePathInput(picked); + } catch (invokeErr) { + setTePrivatePickError( + invokeErr instanceof Error ? 
invokeErr.message : "Could not open folder picker", + ); + } + } catch { + setTePrivatePickError("Folder picker needs the desktop app (Tauri)."); + } + }; + + const applyTePrivateFolder = async () => { + if (!tePrivateToolId) return; + setTePrivateApplyError(null); + const path = tePrivatePathInput.trim(); + if (!path) { + setTePrivateApplyError("Enter a host folder path or use Choose folder."); + return; + } + setTePrivateApplyBusy(true); + const result = await putToolPrivateFolder(tePrivateToolId, path, 120_000); + setTePrivateApplyBusy(false); + if (!result.ok) { + setTePrivateApplyError(result.error ?? "Could not save"); + return; + } + await onReloadServers?.(); + onCancel(); + }; + // ── Submit ──────────────────────────────────────────────────────── const handleSubmit = async () => { @@ -348,6 +416,7 @@ function InlineEditForm({ args, env, direct_return: directReturn, + private_host_path: entry.private_host_path ?? null, }); }; @@ -414,6 +483,54 @@ function InlineEditForm({ )} + {tePrivateToolId && ( +
+

+ Private data folder (host) +

+

+ This tool keeps state on disk in a single host directory (bind-mounted into the + container). Use Choose folder or paste a path, then Apply — same idea as File + Manager's shared folders, but only for this tool's data file(s). +

+ {tePrivatePickError && ( +

+ {tePrivatePickError} +

+ )} +
+ setTePrivatePathInput(e.target.value)} + placeholder="/path/to/memory-data" + className="min-w-0 flex-1 rounded-md border border-white/15 bg-white/5 px-2 py-1.5 font-mono text-[11px] text-white outline-none placeholder:text-white/20 focus:border-white/30" + /> + +
+ {tePrivateApplyError && ( +

+ {tePrivateApplyError} +

+ )} + +
+ )} + {/* Filesystem folder helper (npx server-filesystem) */} {isFs && ( ; direct_return: boolean; + /** Host dir for Tool Engine tools that declare `private_folder` in the catalog (e.g. Memory). */ + private_host_path?: string | null; }; export type ServerEntryNative = { diff --git a/src/modules/toolengine/index.ts b/src/modules/toolengine/index.ts index a5e2bb2..7b4c13d 100644 --- a/src/modules/toolengine/index.ts +++ b/src/modules/toolengine/index.ts @@ -12,6 +12,12 @@ export type CatalogToolCommand = { description: string; }; +export type PrivateFolderConfig = { + container_path: string; + file_env_var: string; + file_extension: string; +}; + export type CatalogTool = { id: string; name: string; @@ -19,6 +25,9 @@ export type CatalogTool = { description: string; installed: boolean; commands: CatalogToolCommand[]; + private_folder?: PrivateFolderConfig | null; + /** Resolved host directory (default under app data or user override). */ + private_host_path?: string | null; }; function makeTimeoutSignal(timeoutMs: number): { signal: AbortSignal; cleanup: () => void } { @@ -121,6 +130,37 @@ export async function installTool( } } +/** PUT `/v1/toolengine/private-folder` — set host folder for a tool that declares `private_folder`. */ +export async function putToolPrivateFolder( + toolId: string, + path: string, + timeoutMs = 120_000, +): Promise<{ ok: boolean; error?: string }> { + const { signal, cleanup } = makeTimeoutSignal(timeoutMs); + try { + const resp = await fetch(`${PENGINE_API_BASE}/v1/toolengine/private-folder`, { + method: "PUT", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ tool_id: toolId, path }), + signal, + }); + if (resp.ok) return { ok: true }; + const raw = await resp.text(); + let message = `Request failed (HTTP ${resp.status})`; + try { + const body = JSON.parse(raw) as { error?: string }; + message = body.error ?? 
raw.trim(); + } catch { + message = raw.trim() || message; + } + return { ok: false, error: message }; + } catch (e) { + return { ok: false, error: fetchErrorMessage(e) }; + } finally { + cleanup(); + } +} + /** POST `/v1/toolengine/uninstall` — remove a container image. */ export async function uninstallTool( toolId: string, diff --git a/tools/mcp-tools.json b/tools/mcp-tools.json index cb038bf..1cc21ee 100644 --- a/tools/mcp-tools.json +++ b/tools/mcp-tools.json @@ -1,7 +1,7 @@ { "schema_version": 1, "generated_at": "2026-04-14T00:00:00Z", - "catalog_revision": 3, + "catalog_revision": 4, "valid_until": "2026-05-14T00:00:00Z", "minimum_pengine_version": "0.5.0", "tools": [ @@ -206,7 +206,7 @@ { "id": "pengine/memory", "name": "Memory", - "description": "Official MCP memory server: knowledge-graph style entities, relations, and observations (in-container persistence).", + "description": "Official MCP memory server: knowledge-graph style entities, relations, and observations; state file is stored in a dedicated folder (default under app data, or pick your own).", "image": "ghcr.io/pengine-ai/pengine-memory", "current": "0.1.0", "versions": [ @@ -228,6 +228,11 @@ "package": "@modelcontextprotocol/server-memory", "version": "2026.1.26" }, + "private_folder": { + "container_path": "/mcp/data", + "file_env_var": "MEMORY_FILE_PATH", + "file_extension": "json" + }, "mcp_server_cmd": [], "commands": [ { "name": "create_entities", "description": "Create entities in the knowledge graph" }, diff --git a/tools/memory/Dockerfile b/tools/memory/Dockerfile index fbfe0c3..5660e1b 100644 --- a/tools/memory/Dockerfile +++ b/tools/memory/Dockerfile @@ -3,6 +3,7 @@ FROM node:22-alpine ARG UPSTREAM_MCP_NPM_PACKAGE=@modelcontextprotocol/server-memory ARG UPSTREAM_MCP_NPM_VERSION=2026.1.26 RUN addgroup -S mcp && adduser -S -G mcp -H mcp +RUN mkdir -p /mcp/data && chown mcp:mcp /mcp/data WORKDIR /mcp RUN npm install --omit=dev --prefix /mcp 
"${UPSTREAM_MCP_NPM_PACKAGE}@${UPSTREAM_MCP_NPM_VERSION}" \ && npm cache clean --force \ From 882adaea2a1c5bdfb8dc29a367ab9844b848cf98 Mon Sep 17 00:00:00 2001 From: MaximEdogawa Date: Thu, 16 Apr 2026 22:55:05 +0200 Subject: [PATCH 4/8] chore: enhance documentation and improve error handling in tool engine - Updated the manual-publish documentation to clarify the update process for both npm and PyPI versions. - Refactored error handling in the tool engine to use a dedicated function for parsing API errors, improving code readability and maintainability. - Added cancellation logic in the InlineEditForm component to prevent state updates after unmounting. - Enhanced the UI to provide feedback when unsaved changes exist, ensuring better user experience. - Improved the local tools catalog loading mechanism with caching based on modification time, optimizing performance. --- doc/tool-engine/manual-publish.md | 8 ++-- src-tauri/src/infrastructure/http_server.rs | 18 +++++--- src-tauri/src/modules/tool_engine/service.rs | 47 ++++++++++++++++---- src/modules/mcp/components/McpServerCard.tsx | 21 +++++++-- src/modules/toolengine/index.ts | 46 ++++++++----------- 5 files changed, 93 insertions(+), 47 deletions(-) diff --git a/doc/tool-engine/manual-publish.md b/doc/tool-engine/manual-publish.md index b71820f..efa82b5 100644 --- a/doc/tool-engine/manual-publish.md +++ b/doc/tool-engine/manual-publish.md @@ -83,16 +83,16 @@ Update the `sha256:…` value in the matching `versions[]` entry in **`tools/mcp --- -## Updating upstream npm versions +## Updating upstream npm and PyPI versions -Run the update script (like `npm update` for tool images): +Run the update script (like `npm update` / PyPI bump for tool images): ```bash ./tools/update-upstream.sh # check all tools ./tools/update-upstream.sh file-manager # check one tool ``` -This checks the npm registry for newer versions, bumps `mcp-tools.json`, and prints a summary. Commit, push, and CI builds only the affected tools. 
+`./tools/update-upstream.sh` checks the **npm** registry for tools that declare `upstream_mcp_npm` and the **PyPI** registry for tools that declare `upstream_mcp_pypi`, bumps `mcp-tools.json` when a newer version exists, and prints a summary. Commit, push, and CI builds only the affected tools. In PR descriptions, note that the script may have changed either npm or PyPI pins (or both) depending on the tool. --- @@ -134,5 +134,5 @@ CI passes these as `docker build` args so you bump the npm version in the regist - **`tools/mcp-tools.json`** — single-source tool registry (all tools, versions, digests, upstream). CI, the app at runtime, and the embedded offline fallback (`include_str!`) all read this file. - **`tools//Dockerfile`** — image build context. -- **`tools/update-upstream.sh`** — bump upstream npm/PyPI versions (like `npm update`). +- **`tools/update-upstream.sh`** — bump upstream **npm** and **PyPI** package versions (registry checks for each tool’s ecosystem). - **`.github/workflows/tools-publish.yml`** — CI workflow. diff --git a/src-tauri/src/infrastructure/http_server.rs b/src-tauri/src/infrastructure/http_server.rs index cf1bb21..10e25ca 100644 --- a/src-tauri/src/infrastructure/http_server.rs +++ b/src-tauri/src/infrastructure/http_server.rs @@ -818,14 +818,14 @@ async fn handle_toolengine_private_folder_put( )); } - std::fs::create_dir_all(&path).map_err(|e| { - ( + if !std::path::Path::new(&path).is_absolute() { + return Err(( StatusCode::BAD_REQUEST, Json(ErrorResponse { - error: format!("cannot create directory: {e}"), + error: "path must be an absolute host directory".into(), }), - ) - })?; + )); + } let bot_id = state .connection @@ -857,6 +857,14 @@ async fn handle_toolengine_private_folder_put( crate::modules::mcp::types::ServerEntry::Stdio { private_host_path, .. 
            } => {
+                if let Err(e) = tokio::fs::create_dir_all(&path).await {
+                    return Err((
+                        StatusCode::BAD_REQUEST,
+                        Json(ErrorResponse {
+                            error: format!("cannot create directory: {e}"),
+                        }),
+                    ));
+                }
                 *private_host_path = Some(path.clone());
             }
             _ => {
diff --git a/src-tauri/src/modules/tool_engine/service.rs b/src-tauri/src/modules/tool_engine/service.rs
index a0bfa5a..b4629ea 100644
--- a/src-tauri/src/modules/tool_engine/service.rs
+++ b/src-tauri/src/modules/tool_engine/service.rs
@@ -5,6 +5,8 @@ use crate::modules::mcp::types::{CustomToolEntry, McpConfig, ServerEntry};
 use std::collections::{HashMap, HashSet};
 use std::path::{Path, PathBuf};
 use std::process::Stdio;
+use std::sync::{Mutex, OnceLock};
+use std::time::SystemTime;
 use tokio::io::{AsyncBufReadExt, BufReader};
 
 /// Sentinel used in `.` when no bot is connected yet. The file gets rewritten to
@@ -40,6 +42,15 @@ fn parse_catalog(json: &str) -> Option<ToolCatalog> {
     Some(cat)
 }
 
+static LOCAL_TOOLS_CATALOG_MTIME_CACHE: OnceLock<
+    Mutex<HashMap<PathBuf, (SystemTime, ToolCatalog)>>,
+> = OnceLock::new();
+
+fn local_tools_catalog_mtime_cache() -> &'static Mutex<HashMap<PathBuf, (SystemTime, ToolCatalog)>>
+{
+    LOCAL_TOOLS_CATALOG_MTIME_CACHE.get_or_init(|| Mutex::new(HashMap::new()))
+}
+
 /// Load the embedded (compile-time) catalog. Always succeeds on a valid build.
pub fn load_embedded_catalog() -> Result { serde_json::from_str(EMBEDDED_CATALOG) @@ -60,17 +71,37 @@ fn try_load_local_tools_catalog() -> Option { } } } + let cache = local_tools_catalog_mtime_cache(); for p in paths { - if let Ok(json) = std::fs::read_to_string(&p) { - if let Some(cat) = parse_catalog(&json) { - log::info!("loaded tool catalog from {}", p.display()); - return Some(cat); + let mtime = match std::fs::metadata(&p) { + Ok(m) => m.modified().unwrap_or(SystemTime::UNIX_EPOCH), + Err(_) => continue, + }; + { + let map = cache + .lock() + .expect("local tools catalog cache mutex poisoned"); + if let Some((cached_mtime, cat)) = map.get(&p) { + if *cached_mtime == mtime { + return Some(cat.clone()); + } } - log::warn!( - "found {} but it did not parse as catalog schema v1", - p.display() - ); } + let Ok(json) = std::fs::read_to_string(&p) else { + continue; + }; + if let Some(cat) = parse_catalog(&json) { + log::info!("loaded tool catalog from {}", p.display()); + let mut map = cache + .lock() + .expect("local tools catalog cache mutex poisoned"); + map.insert(p, (mtime, cat.clone())); + return Some(cat); + } + log::warn!( + "found {} but it did not parse as catalog schema v1", + p.display() + ); } None } diff --git a/src/modules/mcp/components/McpServerCard.tsx b/src/modules/mcp/components/McpServerCard.tsx index a3e8acf..f6c7c4a 100644 --- a/src/modules/mcp/components/McpServerCard.tsx +++ b/src/modules/mcp/components/McpServerCard.tsx @@ -251,8 +251,10 @@ function InlineEditForm({ setTePrivatePathInput(""); return; } + let cancelled = false; void (async () => { const cat = await fetchToolCatalog(5000); + if (cancelled) return; const t = cat?.find((x) => teServerKeyForToolId(x.id) === name && x.private_folder != null); if (t) { setTePrivateToolId(t.id); @@ -262,6 +264,9 @@ function InlineEditForm({ setTePrivatePathInput(""); } })(); + return () => { + cancelled = true; + }; }, [name]); const isFs = argsTextLooksLikeFilesystem(argsText); @@ -397,7 +402,12 
@@ function InlineEditForm({ // ── Submit ──────────────────────────────────────────────────────── + const privatePathBaseline = entry.private_host_path ?? ""; + const hasUnsavedPrivate = + tePrivateToolId != null && tePrivatePathInput.trim() !== privatePathBaseline.trim(); + const handleSubmit = async () => { + if (hasUnsavedPrivate || tePrivateApplyBusy) return; const args = argsText .split("\n") .map((l) => l.trim()) @@ -588,15 +598,20 @@ function InlineEditForm({ Direct return (skip model summary) -
+
+ {hasUnsavedPrivate ? ( +

+ Apply data folder first (or revert the path field) before Save. +

+ ) : null}