diff --git a/.github/BRANCH_PROTECTION.md b/.github/BRANCH_PROTECTION.md index 6873f6e0..a54d0ee3 100644 --- a/.github/BRANCH_PROTECTION.md +++ b/.github/BRANCH_PROTECTION.md @@ -65,16 +65,16 @@ cd control-plane go build ./... # Test cross-compilation -GOOS=linux GOARCH=amd64 go build ./cmd/haxen-server -GOOS=darwin GOARCH=amd64 go build ./cmd/haxen-server -GOOS=windows GOARCH=amd64 go build ./cmd/haxen-server +GOOS=linux GOARCH=amd64 go build ./cmd/agentfield-server +GOOS=darwin GOARCH=amd64 go build ./cmd/agentfield-server +GOOS=windows GOARCH=amd64 go build ./cmd/agentfield-server ``` ### Docker Build Failures ```bash # Test Docker build locally -docker build -f deployments/docker/Dockerfile.control-plane -t haxen-control-plane:test . -docker run --rm haxen-control-plane:test --help +docker build -f deployments/docker/Dockerfile.control-plane -t agentfield-control-plane:test . +docker run --rm agentfield-control-plane:test --help ``` ## Emergency Merges diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index e0dd7eb3..9103460a 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,6 +1,6 @@ --- name: Feature Request -about: Suggest an idea for Haxen +about: Suggest an idea for AgentField labels: enhancement --- diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index ab0253a9..5eac387e 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -1,6 +1,6 @@ --- name: Question -about: Ask a question about using Haxen +about: Ask a question about using AgentField labels: question --- diff --git a/.github/workflows/control-plane.yml b/.github/workflows/control-plane.yml index acaea0cf..b00f95d0 100644 --- a/.github/workflows/control-plane.yml +++ b/.github/workflows/control-plane.yml @@ -113,7 +113,7 @@ jobs: working-directory: control-plane run: | echo "Building for ${GOOS}/${GOARCH}" - go build -o 
/tmp/haxen-server-${GOOS}-${GOARCH} ./cmd/haxen-server + go build -o /tmp/agentfield-server-${GOOS}-${GOARCH} ./cmd/agentfield-server # Summary job that depends on all critical checks required-checks: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 30fcd270..127b0af5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,10 +30,10 @@ jobs: context: . file: deployments/docker/Dockerfile.control-plane load: true - tags: haxen-control-plane:test + tags: agentfield-control-plane:test - name: Smoke test control plane image - run: docker run --rm haxen-control-plane:test --help + run: docker run --rm agentfield-control-plane:test --help # Summary job that depends on all critical checks required-checks: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b54071f7..f29d717e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -150,7 +150,7 @@ jobs: OWNER: ${{ github.repository_owner }} run: | owner="$(echo "${OWNER}" | tr '[:upper:]' '[:lower:]')" - image="ghcr.io/${owner}/haxen-control-plane" + image="ghcr.io/${owner}/agentfield-control-plane" if [ "${EVENT_NAME}" = "push" ]; then tag="${REF_NAME}" else diff --git a/.goreleaser.yml b/.goreleaser.yml index e22f7bc9..908e3d61 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,4 +1,4 @@ -project_name: haxen-control-plane +project_name: agentfield-control-plane before: hooks: @@ -6,10 +6,10 @@ before: - npm --prefix control-plane/web/client run build builds: - - id: haxen-control-plane + - id: agentfield-control-plane dir: control-plane - main: ./cmd/server - binary: haxen-server + main: ./cmd/agentfield-server + binary: agentfield-server env: - CGO_ENABLED=0 goos: @@ -26,9 +26,9 @@ builds: - -s -w archives: - - id: haxen-control-plane + - id: agentfield-control-plane builds: - - haxen-control-plane + - agentfield-control-plane name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" 
format_overrides: - goos: windows diff --git a/CLAUDE.md b/CLAUDE.md index d827f9d5..c2642717 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Project Overview -Haxen is a Kubernetes-style control plane for AI agents. It provides production infrastructure for deploying, orchestrating, and observing multi-agent systems with cryptographic identity and audit trails. +AgentField is a Kubernetes-style control plane for AI agents. It provides production infrastructure for deploying, orchestrating, and observing multi-agent systems with cryptographic identity and audit trails. **Architecture:** Three-tier monorepo - **Control Plane** (Go): Orchestration server providing REST/gRPC APIs, workflow execution, observability, and cryptographic identity @@ -36,21 +36,21 @@ make build **Local mode** (uses SQLite + BoltDB, no external dependencies): ```bash cd control-plane -go run ./cmd/haxen dev -# Or: go run ./cmd/haxen-server +go run ./cmd/af dev +# Or: go run ./cmd/agentfield-server ``` **Cloud mode** (requires PostgreSQL): ```bash # Run migrations first cd control-plane -export HAXEN_DATABASE_URL="postgres://haxen:haxen@localhost:5432/haxen?sslmode=disable" -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" up +export AGENTFIELD_DATABASE_URL="postgres://agentfield:agentfield@localhost:5432/agentfield?sslmode=disable" +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" up # Start server -HAXEN_STORAGE_MODE=postgresql \ -HAXEN_DATABASE_URL="postgres://haxen:haxen@localhost:5432/haxen?sslmode=disable" \ -go run ./cmd/haxen-server +AGENTFIELD_STORAGE_MODE=postgresql \ +AGENTFIELD_DATABASE_URL="postgres://agentfield:agentfield@localhost:5432/agentfield?sslmode=disable" \ +go run ./cmd/agentfield-server ``` **Docker Compose** (includes PostgreSQL): @@ -82,7 +82,7 @@ cd sdk/go && go test ./... 
cd sdk/python && pytest # Python tests with coverage: -cd sdk/python && pytest --cov=haxen_sdk --cov-report=term-missing +cd sdk/python && pytest --cov=agentfield --cov-report=term-missing # Web UI linting: cd control-plane/web/client && npm run lint @@ -103,13 +103,13 @@ cd sdk/python && ruff format . ### Database Migrations ```bash cd control-plane -export HAXEN_DATABASE_URL="postgres://haxen:haxen@localhost:5432/haxen?sslmode=disable" +export AGENTFIELD_DATABASE_URL="postgres://agentfield:agentfield@localhost:5432/agentfield?sslmode=disable" # Check migration status -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" status +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" status # Apply all pending migrations -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" up +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" up # Create new migration goose -dir ./migrations create sql @@ -123,7 +123,7 @@ npm run dev # Runs on http://localhost:5173 # In parallel, run the control plane server to handle API calls cd control-plane -go run ./cmd/haxen-server +go run ./cmd/agentfield-server ``` The UI dev server proxies API requests to the control plane. In production, the UI is embedded via Go's `embed` package. @@ -133,8 +133,8 @@ The UI dev server proxies API requests to the control plane. ### Control Plane Structure (`control-plane/`) **Entry Points:** -- `cmd/haxen/` - Unified CLI with server + dev/init commands -- `cmd/haxen-server/` - Standalone server binary +- `cmd/af/` - Unified CLI with server + dev/init commands +- `cmd/agentfield-server/` - Standalone server binary **Core Packages (`internal/`):** - `cli/` - CLI command definitions and routing @@ -149,16 +149,16 @@ The UI dev server proxies API requests to the control plane. 
In production, the - `mcp/` - Model Context Protocol integration - `logger/` - Structured logging (zerolog) - `config/` - Configuration management (Viper) -- `templates/` - Code generation templates for `haxen init` +- `templates/` - Code generation templates for `af init` - `utils/` - Shared utilities - `encryption/` - Cryptographic primitives for DID/VC - `packages/` - Shared internal packages - `embedded/` - Embedded assets (web UI dist) **Configuration:** -- Environment variables take precedence over `config/haxen.yaml` +- Environment variables take precedence over `config/agentfield.yaml` - See `control-plane/.env.example` for all options -- Key modes: `HAXEN_MODE=local` (SQLite/BoltDB) vs `HAXEN_STORAGE_MODE=postgresql` (cloud) +- Key modes: `AGENTFIELD_MODE=local` (SQLite/BoltDB) vs `AGENTFIELD_STORAGE_MODE=postgresql` (cloud) **Database Schema:** - `migrations/` - SQL migrations managed by Goose @@ -166,9 +166,9 @@ The UI dev server proxies API requests to the control plane. In production, the ### SDK Structure -**Python SDK (`sdk/python/haxen_sdk/`):** +**Python SDK (`sdk/python/agentfield/`):** - Built on FastAPI/Uvicorn for agent HTTP servers -- Key modules: `Agent`, `agent_haxen`, `client`, `execution_context`, `memory`, `ai` +- Key modules: `Agent`, `agent_field_handler`, `client`, `execution_context`, `memory`, `ai` - Agents register "reasoners" (decorated functions) that become REST endpoints - Test with: `pytest` (see `pyproject.toml` for test markers: unit, functional, integration) - Install locally: `pip install -e .[dev]` @@ -189,21 +189,21 @@ The UI dev server proxies API requests to the control plane. 
In production, the ### Creating a New Agent (Python) ```bash # Generate agent scaffold (run from repo root or any directory) -haxen init my-agent +af init my-agent cd my-agent # Edit agent code (auto-generated template) -# Run agent locally (connects to control plane at HAXEN_SERVER env var or --server flag) -haxen run +# Run agent locally (connects to control plane at AGENTFIELD_SERVER env var or --server flag) +af run ``` ### Creating a New Agent (Go) ```go -import haxenagent "github.com/agentfield/haxen/sdk/go/agent" +import agentfieldagent "github.com/Agent-Field/agentfield/sdk/go/agent" -agent, _ := haxenagent.New(haxenagent.Config{ +agent, _ := agentfieldagent.New(agentfieldagent.Config{ NodeID: "my-agent", - HaxenURL: "http://localhost:8080", + AgentFieldURL: "http://localhost:8080", }) agent.RegisterSkill("greet", func(ctx context.Context, input map[string]any) (any, error) { return map[string]any{"message": "hello"}, nil @@ -249,7 +249,7 @@ Storage interface is unifiedβ€”services call storage layer methods, storage laye ### Configuration Precedence 1. Environment variables (highest priority) -2. Config file (`config/haxen.yaml` or `HAXEN_CONFIG_FILE` path) +2. Config file (`config/agentfield.yaml` or `AGENTFIELD_CONFIG_FILE` path) 3. Defaults in code ### Agent-to-Agent Communication @@ -269,17 +269,17 @@ Automatically synced by control plane. 
Agents access via SDK methods: `agent.mem - Opt-in per agent: Set `app.vc_generator.set_enabled(True)` in Python or equivalent in Go - Control plane generates W3C Verifiable Credentials for each execution - Export audit trails: `GET /api/v1/did/workflow/{workflow_id}/vc-chain` -- Verify offline: `haxen verify audit.json` +- Verify offline: `af verify audit.json` ## Module Naming **Control Plane (Go):** -- Use `github.com/your-org/haxen/control-plane` as module path -- Internal packages: `github.com/your-org/haxen/control-plane/internal/` +- Use `github.com/Agent-Field/agentfield/control-plane` as module path +- Internal packages: `github.com/Agent-Field/agentfield/control-plane/internal/` **SDKs:** -- Python: `haxen_sdk` (PyPI package) -- Go: `github.com/agentfield/haxen/sdk/go` (import path) +- Python: `agentfield` (PyPI package) +- Go: `github.com/Agent-Field/agentfield/sdk/go` (import path) ## Release Process @@ -290,23 +290,23 @@ Releases are automated via `.github/workflows/release.yml` and `.goreleaser.yml` ## Debugging Tips -- **Control plane not starting:** Check `HAXEN_DATABASE_URL` is set correctly (PostgreSQL mode) or ensure SQLite file path is writable (local mode) +- **Control plane not starting:** Check `AGENTFIELD_DATABASE_URL` is set correctly (PostgreSQL mode) or ensure SQLite file path is writable (local mode) - **Migrations failing:** Ensure PostgreSQL is running and connection string is correct. Check migration status with `goose status` -- **Agent can't connect:** Verify `HAXEN_SERVER` env var points to control plane (default: `http://localhost:8080`) +- **Agent can't connect:** Verify `AGENTFIELD_SERVER` env var points to control plane (default: `http://localhost:8080`) - **UI not loading:** In dev, ensure both Vite dev server (`npm run dev`) and control plane server are running. In prod, ensure `make build` was run to embed UI in binary - **Agent execution stuck:** Check workflow DAG in UI (`/ui/workflows`) for errors. 
Check agent logs for exceptions. -- **Database connection pool exhausted:** Increase `HAXEN_STORAGE_POSTGRES_MAX_CONNECTIONS` in config +- **Database connection pool exhausted:** Increase `AGENTFIELD_STORAGE_POSTGRES_MAX_CONNECTIONS` in config ## Environment Variables Reference See `control-plane/.env.example` for comprehensive list. Key vars: -- `HAXEN_PORT` - HTTP server port (default: 8080) -- `HAXEN_MODE` - `local` or `cloud` -- `HAXEN_STORAGE_MODE` - `local`, `postgresql`, or `cloud` -- `HAXEN_DATABASE_URL` - PostgreSQL connection string -- `HAXEN_UI_ENABLED` - Enable/disable web UI -- `HAXEN_UI_MODE` - `embedded` (production) or `development` (Vite proxy) -- `HAXEN_CONFIG_FILE` - Path to config YAML +- `AGENTFIELD_PORT` - HTTP server port (default: 8080) +- `AGENTFIELD_MODE` - `local` or `cloud` +- `AGENTFIELD_STORAGE_MODE` - `local`, `postgresql`, or `cloud` +- `AGENTFIELD_DATABASE_URL` - PostgreSQL connection string +- `AGENTFIELD_UI_ENABLED` - Enable/disable web UI +- `AGENTFIELD_UI_MODE` - `embedded` (production) or `development` (Vite proxy) +- `AGENTFIELD_CONFIG_FILE` - Path to config YAML - `GIN_MODE` - `debug` or `release` - `LOG_LEVEL` - `debug`, `info`, `warn`, `error` diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index bd711610..80957601 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,7 +2,7 @@ ## Our Pledge -We as members, contributors, and leaders pledge to make participation in the Haxen project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+We as members, contributors, and leaders pledge to make participation in the AgentField project a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. diff --git a/README.md b/README.md index 5989de1e..0fc9e2b0 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ **A Kubernetes-style control plane that runs AI agents like microservices: REST/gRPC APIs, async webhooks, and cryptographic identity for every agent and execution.** -Write agents. Haxen deploys, scales, observes, and proves what happened. +Write agents. AgentField deploys, scales, observes, and proves what happened. [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](LICENSE) [![Go](https://img.shields.io/badge/go-1.21+-00ADD8.svg)](https://go.dev/) @@ -12,7 +12,7 @@ Write agents. Haxen deploys, scales, observes, and proves what happened. [![Deploy with Docker](https://img.shields.io/badge/deploy-docker-2496ED.svg)](https://docs.docker.com/) [![Discord](https://img.shields.io/badge/discord-join-5865F2.svg)](https://discord.gg/your-discord) -**[πŸ“š Docs](https://haxen.ai/docs)** β€’ **[⚑ Quickstart](#-try-haxen-in-2-minutes)** β€’ **[πŸ’¬ Discord](https://discord.gg/your-discord)** +**[πŸ“š Docs](https://agentfield.ai/docs)** β€’ **[⚑ Quickstart](#-try-agentfield-in-2-minutes)** β€’ **[πŸ’¬ Discord](https://discord.gg/your-discord)** @@ -26,35 +26,35 @@ Write agents. Haxen deploys, scales, observes, and proves what happened. 
- **Run anywhere**: local dev, Docker, Kubernetes, cloud ```bash -curl -fsSL https://haxen.ai/install.sh | bash && haxen init my-agents +curl -fsSL https://agentfield.ai/install.sh | bash && af init my-agents ``` --- -## πŸš€ Try Haxen in 2 Minutes +## πŸš€ Try AgentField in 2 Minutes ### Option 1: Local Install ```bash # macOS/Linux - install CLI -curl -fsSL https://haxen.ai/install.sh | bash +curl -fsSL https://agentfield.ai/install.sh | bash # Start control plane + create your first agent -haxen dev -haxen init my-agents && cd my-agents -haxen run +af dev +af init my-agents && cd my-agents +af run ``` ### Option 2: Docker Compose ```bash -git clone https://github.com/agentfield/haxen -cd haxen && docker compose up +git clone https://github.com/Agent-Field/agentfield +cd agentfield && docker compose up ``` Your control plane is running at `http://localhost:8080` -**[πŸ“š Full quickstart guide β†’](https://haxen.ai/docs/quick-start)** β€’ **[πŸ’¬ Need help? Discord](https://discord.gg/your-discord)** +**[πŸ“š Full quickstart guide β†’](https://agentfield.ai/docs/quick-start)** β€’ **[πŸ’¬ Need help? Discord](https://discord.gg/your-discord)** --- @@ -63,7 +63,7 @@ Your control plane is running at `http://localhost:8080` Write your first agentβ€”automatically get a REST API: ```python -from haxen_sdk import Agent +from agentfield import Agent # Create an agent app = Agent("greeting-agent") @@ -77,7 +77,7 @@ async def say_hello(name: str) -> dict: **Deploy:** ```bash -haxen run +af run ``` **Call from anywhere** (REST API auto-generated): @@ -96,17 +96,17 @@ curl -X POST http://localhost:8080/api/v1/execute/greeting-agent.say_hello \ **That's it.** One function = production-ready service. 
-**[πŸ“š Docs](https://haxen.ai/docs)** β€’ **[⚑ More examples](https://github.com/agentfield/haxen-examples)** β€’ **[πŸ’¬ Discord](https://discord.gg/your-discord)** +**[πŸ“š Docs](https://agentfield.ai/docs)** β€’ **[⚑ More examples](https://github.com/agentfield/agentfield-examples)** β€’ **[πŸ’¬ Discord](https://discord.gg/your-discord)** --- -## Why Haxen? +## Why AgentField? -Agent frameworks are great for **prototypes**. Haxen builds agents **and** runs them at production scale. +Agent frameworks are great for **prototypes**. AgentField builds agents **and** runs them at production scale. -### What Hurts Today β†’ What Haxen Does Automatically +### What Hurts Today β†’ What AgentField Does Automatically -| πŸ”΄ **Without Haxen** | 🟒 **With Haxen** | +| πŸ”΄ **Without AgentField** | 🟒 **With AgentField** | | ----------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | | **Monolithic deployments** β€” one team's change forces everyone to redeploy | **Independent deployment** β€” teams ship agents on their own schedule, zero coordination | | **No native APIs** β€” your React app needs custom wrappers to call agents | **REST & OpenAPI by default** (gRPC optional) β€” every function is an endpoint, auto-documented | @@ -119,12 +119,12 @@ Agent frameworks are great for **prototypes**. Haxen builds agents **and** runs ``` Traditional Frameworks = Flask (single app) -Haxen = Kubernetes + Auth0 for AI (distributed infrastructure + identity) +AgentField = Kubernetes + Auth0 for AI (distributed infrastructure + identity) ``` -> **Haxen isn't a framework you extend with infrastructure. It IS the infrastructure.** +> **AgentField isn't a framework you extend with infrastructure. 
It IS the infrastructure.** -Bring your own model/tooling; Haxen handles runtime, scale, and proof. +Bring your own model/tooling; AgentField handles runtime, scale, and proof. --- @@ -133,7 +133,7 @@ Bring your own model/tooling; Haxen handles runtime, scale, and proof. **The scenario:** Customer support system with 3 coordinating agents. ```python -from haxen_sdk import Agent +from agentfield import Agent # Agent 1: Support orchestrator (Team: Customer Success) support = Agent("support-agent") @@ -157,8 +157,8 @@ async def handle_ticket(ticket: dict) -> dict: **Deploy:** ```bash -haxen dev # Start control plane -haxen run # Deploy your agent +af dev # Start control plane +af run # Deploy your agent ``` **You get automatically:** @@ -174,14 +174,14 @@ haxen run # Deploy your agent **From your React app:** ```javascript // Call agents via REST API (no custom SDK needed) -const response = await fetch('http://haxen:8080/api/v1/execute/support-agent.handle_ticket', { +const response = await fetch('http://agentfield:8080/api/v1/execute/support-agent.handle_ticket', { method: 'POST', body: JSON.stringify({ input: { ticket: {...} } }) }); // Stream real-time updates const eventSource = new EventSource( - `http://haxen:8080/api/v1/workflows/runs/${runId}/events/stream` + `http://agentfield:8080/api/v1/workflows/runs/${runId}/events/stream` ); eventSource.onmessage = (e) => { console.log('Agent update:', JSON.parse(e.data)); @@ -189,7 +189,7 @@ eventSource.onmessage = (e) => { ``` **🎨 UI SCREENSHOT #1 (HERO): Add here** -> **What to show:** Workflow DAG visualization from the Haxen UI showing the 3 agents (support-agent β†’ sentiment-agent, support-agent β†’ kb-agent, support-agent β†’ escalation-agent) with execution times, status indicators (green checkmarks), and the visual graph. This is the "wow" moment that shows developers the automatic observability. 
+> **What to show:** Workflow DAG visualization from the AgentField UI showing the 3 agents (support-agent β†’ sentiment-agent, support-agent β†’ kb-agent, support-agent β†’ escalation-agent) with execution times, status indicators (green checkmarks), and the visual graph. This is the "wow" moment that shows developers the automatic observability. > > **Recommended dimensions:** 1200x700px, annotate with arrows pointing to: "Auto-generated DAG", "Execution times", "Agent-to-agent calls" @@ -201,7 +201,7 @@ kubectl scale deployment sentiment-agent --replicas=10 ### That's the difference. -Haxen **is** the infrastructure you'd otherwise spend 3 months building. +AgentField **is** the infrastructure you'd otherwise spend 3 months building. --- @@ -222,7 +222,7 @@ Haxen **is** the infrastructure you'd otherwise spend 3 months building. ...and many more ! -**πŸ“š [Runtime docs β†’](https://haxen.ai/docs/runtime)** +**πŸ“š [Runtime docs β†’](https://agentfield.ai/docs/runtime)** ### πŸ“Š Scale & Ops @@ -240,7 +240,7 @@ Haxen **is** the infrastructure you'd otherwise spend 3 months building. **How Prometheus metrics are injected:** The control plane acts as a reverse proxy for agent traffic. All agent-to-agent calls and executions flow through the control plane, which records latency, error rates, and throughput **without requiring agents to instrument their code**. Metrics are exposed at `/metrics` in Prometheus format. -**πŸ“š [Scale & ops docs β†’](https://haxen.ai/docs/observability)** +**πŸ“š [Scale & ops docs β†’](https://agentfield.ai/docs/observability)** ### πŸ”’ Identity & Audit @@ -250,7 +250,7 @@ Haxen **is** the infrastructure you'd otherwise spend 3 months building. 
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------- | | **W3C DIDs** | Every agent gets a Decentralized Identifier (`did:web` or `did:key`); cryptographic identity for non-repudiation | | **W3C Verifiable Credentials** | Opt-in per agent; each execution generates a VC (JSON-LD format) with signed input/output hashes | -| **Tamper-proof audit trails** | Export full VC chains for regulators; verify offline with `haxen verify audit.json` (no access to your systems needed) | +| **Tamper-proof audit trails** | Export full VC chains for regulators; verify offline with `af verify audit.json` (no access to your systems needed) | | **Non-repudiation** | Agents cryptographically sign decisions; can't deny their actions | | **Policy engine** | Define rules for which executions require VCs (e.g., "all financial decisions > $10K") | | **Export formats** | W3C VC JSON-LD (standard); import into compliance tools | @@ -271,21 +271,21 @@ async def approve_loan(application: dict) -> Decision: **For auditors/compliance:** ```bash # Export cryptographic proof chain -curl http://haxen:8080/api/v1/did/workflow/wf_abc123/vc-chain > audit.json +curl http://agentfield:8080/api/v1/did/workflow/wf_abc123/vc-chain > audit.json # Verify offline (no access to your systems needed) -haxen verify audit.json +af verify audit.json # βœ“ All signatures valid (W3C VC spec) # βœ“ No tampering detected # βœ“ Complete provenance chain ``` **🎨 UI SCREENSHOT #2: Add here** -> **What to show:** Haxen UI showing the DID/VC verification interface. Display a workflow with DIDs for each agent, the VC chain visualization, and the verification status (green checkmarks showing "All signatures valid"). +> **What to show:** AgentField UI showing the DID/VC verification interface. 
Display a workflow with DIDs for each agent, the VC chain visualization, and the verification status (green checkmarks showing "All signatures valid"). > > **Caption:** "W3C DIDs and Verifiable Credentialsβ€”tamper-proof audit trails for compliance" -**πŸ“š [Identity & audit docs β†’](https://haxen.ai/docs/identity)** +**πŸ“š [Identity & audit docs β†’](https://agentfield.ai/docs/identity)** --- @@ -293,7 +293,7 @@ haxen verify audit.json ### Deploy AI Agents Like Kubernetes Deploys Containers -Haxen uses a **two-layer design**: a stateless **control plane** (like K8s control plane) and independent **agent nodes** (like pods): +AgentField uses a **two-layer design**: a stateless **control plane** (like K8s control plane) and independent **agent nodes** (like pods):
AgentField Architecture - Control Plane and Agent Nodes @@ -301,7 +301,7 @@ Haxen uses a **two-layer design**: a stateless **control plane** (like K8s contr ``` β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ HAXEN CONTROL PLANE β”‚ +β”‚ AGENTFIELD CONTROL PLANE β”‚ β”‚ (Stateless Go Services - Scale Horizontally) β”‚ β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ β”‚ β”‚ @@ -341,11 +341,11 @@ Haxen uses a **two-layer design**: a stateless **control plane** (like K8s contr ### Think Kubernetes, But for AI -| Kubernetes | Haxen | What It Means | +| Kubernetes | AgentField | What It Means | | ---------------------- | --------------------- | ---------------------------------------------- | | **Pods** | **Agent Nodes** | Your AI agent runs in a container | | **Services** | **Agent Registry** | Control plane discovers agents automatically | -| **kubectl apply** | **haxen run** | Deploy your agent independently | +| **kubectl apply** | **af run** | Deploy your agent independently | | **Horizontal scaling** | **Scale agent nodes** | Add more replicas per agent | | **Service mesh** | **Control plane** | Automatic routing, observability, state | | **Ingress** | **API Gateway** | Every agent function is a REST endpoint | @@ -354,7 +354,7 @@ Haxen uses a **two-layer design**: a stateless **control plane** (like K8s contr ### How It Works 1. **Write agents in any language** β€” Python SDK, Go SDK, or raw REST/gRPC -2. **Deploy as containers** β€” `docker build` + `haxen run` or `kubectl apply` +2. **Deploy as containers** β€” `docker build` + `af run` or `kubectl apply` 3. 
**Control plane orchestrates** β€” routing, state, workflows, identity, observability 4. **Agent nodes scale independently** β€” each team owns their nodes, deploys on their schedule 5. **Everything auto-coordinates** β€” agents call each other via control plane, memory syncs, workflows track @@ -363,13 +363,13 @@ Haxen uses a **two-layer design**: a stateless **control plane** (like K8s contr **Language Flexibility:** Use our Python/Go SDKs for convenience, or implement the REST/gRPC protocol directly in any language. The control plane is language-agnostic by design. -**[πŸ“š Detailed architecture docs β†’](https://haxen.ai/docs/architecture)** +**[πŸ“š Detailed architecture docs β†’](https://agentfield.ai/docs/architecture)** --- -## When to Use Haxen (And When Not To) +## When to Use AgentField (And When Not To) -### βœ… Use Haxen If: +### βœ… Use AgentField If: - You're building **multi-agent systems** that need to coordinate - You need **independent deployment**β€”multiple teams, different schedules @@ -380,7 +380,7 @@ Haxen uses a **two-layer design**: a stateless **control plane** (like K8s contr ### ❌ Start with a Framework If: -- You're **learning agent concepts** and want the simplest possible start (try LangChain or CrewAI first, then migrate to Haxen when you need production features) +- You're **learning agent concepts** and want the simplest possible start (try LangChain or CrewAI first, then migrate to AgentField when you need production features) - You're building a **single-agent chatbot** that will never scale beyond one service - You don't need REST APIs, observability, or multi-agent coordination - You're prototyping and don't plan to deploy to production @@ -388,9 +388,9 @@ Haxen uses a **two-layer design**: a stateless **control plane** (like K8s contr ### The Bottom Line **Frameworks = Build agents** (perfect for learning) -**Haxen = Build and run agents at any scale** (perfect from prototype to production) +**AgentField = Build and run 
agents at any scale** (perfect from prototype to production) -You can start with Haxen and skip migration pain later. Or start with a framework and migrate when you hit the pain points above. +You can start with AgentField and skip migration pain later. Or start with a framework and migrate when you hit the pain points above. --- @@ -398,25 +398,25 @@ You can start with Haxen and skip migration pain later. Or start with a framewor **Local dev:** ```bash -haxen dev && haxen run +af dev && af run ``` **Docker Compose:** ```yaml services: - haxen-server: - image: haxen/server:latest + agentfield-server: + image: agentfield/server:latest ports: ["8080:8080"] my-agent: build: ./agents/my-agent environment: - - HAXEN_SERVER=http://haxen-server:8080 + - AGENTFIELD_SERVER=http://agentfield-server:8080 ``` **Kubernetes:** ```bash -kubectl apply -f haxen-control-plane.yaml +kubectl apply -f agentfield-control-plane.yaml kubectl apply -f my-agent-deployment.yaml kubectl scale deployment my-agent --replicas=10 ``` @@ -425,18 +425,18 @@ kubectl scale deployment my-agent --replicas=10 Each agent deploys independently. Control plane coordinates automatically. -**[πŸ“š Full deployment guides β†’](https://haxen.ai/docs/deployment)** +**[πŸ“š Full deployment guides β†’](https://agentfield.ai/docs/deployment)** --- ## 🌍 Community & Contributing -We're building Haxen in the open. Join us: +We're building AgentField in the open. 
Join us: - **[πŸ’¬ Discord](https://discord.gg/your-discord)** β€” Get help, share projects, discuss architecture -- **[πŸ“š Documentation](https://haxen.ai/docs)** β€” Guides, API reference, examples -- **[πŸ’‘ GitHub Discussions](https://github.com/agentfield/haxen/discussions)** β€” Feature requests, Q&A -- **[🐦 Twitter/X](https://x.com/haxen_dev)** β€” Updates and announcements +- **[πŸ“š Documentation](https://agentfield.ai/docs)** β€” Guides, API reference, examples +- **[πŸ’‘ GitHub Discussions](https://github.com/Agent-Field/agentfield/discussions)** β€” Feature requests, Q&A +- **[🐦 Twitter/X](https://x.com/agentfield_dev)** β€” Updates and announcements ### Contributing @@ -453,7 +453,7 @@ See [CONTRIBUTING.md](CONTRIBUTING.md) for setup and guidelines. Agents authenticate to the control plane via API keys (configurable per environment). The control plane handles all inter-agent routingβ€”agents don't need to authenticate to each other. -For **end-user auth** (e.g., React app calling agents), you can integrate your existing auth system (JWT, OAuth) at the API gateway layer. Haxen respects your auth headers and passes them to agents via context. +For **end-user auth** (e.g., React app calling agents), you can integrate your existing auth system (JWT, OAuth) at the API gateway layer. AgentField respects your auth headers and passes them to agents via context. **W3C DIDs** are for **identity** (proving which agent made a decision), not access control. @@ -473,7 +473,7 @@ Benchmark: 10K requests/sec sustained on a single control plane instance (4 core
Can I use my own observability stack? -Yes. Haxen exposes: +Yes. AgentField exposes: - **Prometheus metrics** at `/metrics` (scrape with your existing Prometheus) - **Structured logs** (JSON) to stdout/stderr (ship to your log aggregator) - **OpenTelemetry traces** (opt-in, export to Jaeger/Datadog/etc.) @@ -485,13 +485,13 @@ The built-in workflow DAG UI is optionalβ€”you can disable it and use your own d
Is this vendor-neutral? Can I switch models/providers? -**100% vendor-neutral.** Haxen is infrastructure, not a model provider. +**100% vendor-neutral.** AgentField is infrastructure, not a model provider. - Use **any LLM**: OpenAI, Anthropic, local Ollama, Hugging Face, etc. - Use **any framework**: Call LangChain, CrewAI, raw model APIsβ€”your choice - Use **any language**: Python SDK, Go SDK, or raw REST/gRPC -Haxen handles deployment, orchestration, and observability. You control the AI logic. +AgentField handles deployment, orchestration, and observability. You control the AI logic.
@@ -499,11 +499,11 @@ Haxen handles deployment, orchestration, and observability. You control the AI l ## πŸ“– Resources -- **[πŸ“š Documentation](https://haxen.ai/docs)** β€” Complete guides and API reference -- **[⚑ Quick Start Tutorial](https://haxen.ai/docs/quick-start)** β€” Build your first agent in 5 minutes -- **[πŸ—οΈ Architecture Deep Dive](https://haxen.ai/docs/architecture)** β€” How Haxen works under the hood -- **[πŸ“¦ Examples Repository](https://github.com/agentfield/haxen-examples)** β€” Production-ready agent templates -- **[πŸ“ Blog](https://haxen.ai/blog)** β€” Tutorials, case studies, best practices +- **[πŸ“š Documentation](https://agentfield.ai/docs)** β€” Complete guides and API reference +- **[⚑ Quick Start Tutorial](https://agentfield.ai/docs/quick-start)** β€” Build your first agent in 5 minutes +- **[πŸ—οΈ Architecture Deep Dive](https://agentfield.ai/docs/architecture)** β€” How AgentField works under the hood +- **[πŸ“¦ Examples Repository](https://github.com/agentfield/agentfield-examples)** β€” Production-ready agent templates +- **[πŸ“ Blog](https://agentfield.ai/blog)** β€” Tutorials, case studies, best practices --- @@ -515,7 +515,7 @@ Haxen handles deployment, orchestration, and observability. 
You control the AI l **Join the future of autonomous software** -**[🌐 Website](https://haxen.ai) β€’ [πŸ“š Docs](https://haxen.ai/docs) β€’ [πŸ’¬ Discord](https://discord.gg/your-discord) β€’ [🐦 Twitter](https://x.com/haxen_dev)** +**[🌐 Website](https://agentfield.ai) β€’ [πŸ“š Docs](https://agentfield.ai/docs) β€’ [πŸ’¬ Discord](https://discord.gg/your-discord) β€’ [🐦 Twitter](https://x.com/agentfield_dev)** **License:** [Apache 2.0](LICENSE) diff --git a/control-plane/.env.dev b/control-plane/.env.dev index 3e639e02..cd52cebd 100644 --- a/control-plane/.env.dev +++ b/control-plane/.env.dev @@ -1,7 +1,7 @@ -HAXEN_STORAGE_MODE="postgresql" -HAXEN_STORAGE_POSTGRES_URL="postgres://postgres@localhost:5432/testdb?sslmode=disable" -HAXEN_STORAGE_POSTGRES_MAX_CONNECTIONS="10" -HAXEN_STORAGE_POSTGRES_MAX_IDLE_CONNECTIONS="2" -HAXEN_STORAGE_POSTGRES_CONNECTION_TIMEOUT="30s" -HAXEN_STORAGE_POSTGRES_QUERY_TIMEOUT="30s" -HAXEN_STORAGE_POSTGRES_ENABLE_AUTO_MIGRATION="true" +AGENTFIELD_STORAGE_MODE="postgresql" +AGENTFIELD_STORAGE_POSTGRES_URL="postgres://postgres@localhost:5432/testdb?sslmode=disable" +AGENTFIELD_STORAGE_POSTGRES_MAX_CONNECTIONS="10" +AGENTFIELD_STORAGE_POSTGRES_MAX_IDLE_CONNECTIONS="2" +AGENTFIELD_STORAGE_POSTGRES_CONNECTION_TIMEOUT="30s" +AGENTFIELD_STORAGE_POSTGRES_QUERY_TIMEOUT="30s" +AGENTFIELD_STORAGE_POSTGRES_ENABLE_AUTO_MIGRATION="true" diff --git a/control-plane/.env.example b/control-plane/.env.example index 67053330..a2e4b9ab 100644 --- a/control-plane/.env.example +++ b/control-plane/.env.example @@ -1,52 +1,52 @@ -# Haxen Server Environment Variables +# AgentField Server Environment Variables # Copy this file to .env and modify values as needed -# Core Haxen Configuration -HAXEN_PORT=8080 -HAXEN_MODE=local -HAXEN_CONFIG_FILE=./config/haxen.yaml +# Core AgentField Configuration +AGENTFIELD_PORT=8080 +AGENTFIELD_MODE=local +AGENTFIELD_CONFIG_FILE=./config/agentfield.yaml # Database Configuration (for local mode) 
-HAXEN_STORAGE_LOCAL_DATABASE_PATH=./haxen_local.db -HAXEN_STORAGE_LOCAL_KV_STORE_PATH=./haxen_local.bolt +AGENTFIELD_STORAGE_LOCAL_DATABASE_PATH=./agentfield_local.db +AGENTFIELD_STORAGE_LOCAL_KV_STORE_PATH=./agentfield_local.bolt # UI Configuration -HAXEN_UI_ENABLED=true -HAXEN_UI_MODE=embedded -HAXEN_UI_SOURCE_PATH=./web/client -HAXEN_UI_DIST_PATH=./web/client/dist -HAXEN_UI_DEV_PORT=5173 +AGENTFIELD_UI_ENABLED=true +AGENTFIELD_UI_MODE=embedded +AGENTFIELD_UI_SOURCE_PATH=./web/client +AGENTFIELD_UI_DIST_PATH=./web/client/dist +AGENTFIELD_UI_DEV_PORT=5173 # API Configuration -HAXEN_API_CORS_ALLOWED_ORIGINS=http://localhost:3000,http://localhost:5173,http://localhost:8080 -HAXEN_API_CORS_ALLOWED_METHODS=GET,POST,PUT,DELETE,OPTIONS -HAXEN_API_CORS_ALLOWED_HEADERS=Origin,Content-Type,Accept,Authorization,X-Requested-With -HAXEN_API_CORS_EXPOSED_HEADERS=Content-Length,X-Total-Count -HAXEN_API_CORS_ALLOW_CREDENTIALS=true +AGENTFIELD_API_CORS_ALLOWED_ORIGINS=http://localhost:3000,http://localhost:5173,http://localhost:8080 +AGENTFIELD_API_CORS_ALLOWED_METHODS=GET,POST,PUT,DELETE,OPTIONS +AGENTFIELD_API_CORS_ALLOWED_HEADERS=Origin,Content-Type,Accept,Authorization,X-Requested-With +AGENTFIELD_API_CORS_EXPOSED_HEADERS=Content-Length,X-Total-Count +AGENTFIELD_API_CORS_ALLOW_CREDENTIALS=true # Cloud Configuration (if using cloud mode) -# HAXEN_CLOUD_ENABLED=false -# HAXEN_CLOUD_API_KEY=your-api-key-here +# AGENTFIELD_CLOUD_ENABLED=false +# AGENTFIELD_CLOUD_API_KEY=your-api-key-here # Storage Configuration -HAXEN_STORAGE_MODE=local - -# PostgreSQL Storage Configuration (when HAXEN_STORAGE_MODE=postgresql) -# HAXEN_STORAGE_POSTGRES_URL=postgresql://user:password@localhost:5432/haxen?sslmode=disable -# HAXEN_STORAGE_POSTGRES_MAX_CONNECTIONS=25 -# HAXEN_STORAGE_POSTGRES_MAX_IDLE_CONNECTIONS=5 -# HAXEN_STORAGE_POSTGRES_CONNECTION_TIMEOUT=30s -# HAXEN_STORAGE_POSTGRES_QUERY_TIMEOUT=30s -# HAXEN_STORAGE_POSTGRES_ENABLE_MEMORY_FALLBACK=true -# 
HAXEN_STORAGE_POSTGRES_ENABLE_DID_FALLBACK=true -# HAXEN_STORAGE_POSTGRES_ENABLE_VC_FALLBACK=true -# HAXEN_STORAGE_POSTGRES_ENABLE_AUTO_MIGRATION=true - -# Cloud Storage Configuration (when HAXEN_STORAGE_MODE=cloud) -# HAXEN_STORAGE_CLOUD_POSTGRES_URL=postgresql://user:password@localhost:5432/haxen -# HAXEN_STORAGE_CLOUD_MAX_CONNECTIONS=50 -# HAXEN_STORAGE_CLOUD_CONNECTION_POOL=true -# HAXEN_STORAGE_CLOUD_REPLICATION_MODE=async +AGENTFIELD_STORAGE_MODE=local + +# PostgreSQL Storage Configuration (when AGENTFIELD_STORAGE_MODE=postgresql) +# AGENTFIELD_STORAGE_POSTGRES_URL=postgresql://user:password@localhost:5432/agentfield?sslmode=disable +# AGENTFIELD_STORAGE_POSTGRES_MAX_CONNECTIONS=25 +# AGENTFIELD_STORAGE_POSTGRES_MAX_IDLE_CONNECTIONS=5 +# AGENTFIELD_STORAGE_POSTGRES_CONNECTION_TIMEOUT=30s +# AGENTFIELD_STORAGE_POSTGRES_QUERY_TIMEOUT=30s +# AGENTFIELD_STORAGE_POSTGRES_ENABLE_MEMORY_FALLBACK=true +# AGENTFIELD_STORAGE_POSTGRES_ENABLE_DID_FALLBACK=true +# AGENTFIELD_STORAGE_POSTGRES_ENABLE_VC_FALLBACK=true +# AGENTFIELD_STORAGE_POSTGRES_ENABLE_AUTO_MIGRATION=true + +# Cloud Storage Configuration (when AGENTFIELD_STORAGE_MODE=cloud) +# AGENTFIELD_STORAGE_CLOUD_POSTGRES_URL=postgresql://user:password@localhost:5432/agentfield +# AGENTFIELD_STORAGE_CLOUD_MAX_CONNECTIONS=50 +# AGENTFIELD_STORAGE_CLOUD_CONNECTION_POOL=true +# AGENTFIELD_STORAGE_CLOUD_REPLICATION_MODE=async # Development/Debug # GIN_MODE=debug diff --git a/control-plane/README.md b/control-plane/README.md index 22b21a07..965e09c8 100644 --- a/control-plane/README.md +++ b/control-plane/README.md @@ -1,6 +1,6 @@ -# Haxen Control Plane +# AgentField Control Plane -The Haxen control plane orchestrates agent workflows, manages verifiable credentials, serves the admin UI, and exposes REST/gRPC APIs consumed by the SDKs. +The AgentField control plane orchestrates agent workflows, manages verifiable credentials, serves the admin UI, and exposes REST/gRPC APIs consumed by the SDKs. 
## Requirements @@ -15,11 +15,11 @@ The Haxen control plane orchestrates agent workflows, manages verifiable credent go mod download npm --prefix web/client install -# Run database migrations (requires HAXEN_DATABASE_URL) -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" up +# Run database migrations (requires AGENTFIELD_DATABASE_URL) +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" up # Start the control plane -HAXEN_DATABASE_URL=postgres://haxen:haxen@localhost:5432/haxen?sslmode=disable \ +AGENTFIELD_DATABASE_URL=postgres://agentfield:agentfield@localhost:5432/agentfield?sslmode=disable \ go run ./cmd/server ``` @@ -27,11 +27,11 @@ Visit `http://localhost:8080/ui/` to access the embedded admin UI. ## Configuration -Environment variables override `config/haxen.yaml`. Common options: +Environment variables override `config/agentfield.yaml`. Common options: -- `HAXEN_DATABASE_URL` – PostgreSQL DSN -- `HAXEN_HTTP_ADDR` – HTTP listen address (`0.0.0.0:8080` by default) -- `HAXEN_LOG_LEVEL` – log verbosity (`info`, `debug`, etc.) +- `AGENTFIELD_DATABASE_URL` – PostgreSQL DSN +- `AGENTFIELD_HTTP_ADDR` – HTTP listen address (`0.0.0.0:8080` by default) +- `AGENTFIELD_LOG_LEVEL` – log verbosity (`info`, `debug`, etc.) Sample config files live in `config/`. @@ -50,8 +50,8 @@ Run the Go server alongside the UI so API calls resolve locally. 
During producti Migrations use [Goose](https://github.com/pressly/goose): ```bash -HAXEN_DATABASE_URL=postgres://haxen:haxen@localhost:5432/haxen?sslmode=disable \ -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" status +AGENTFIELD_DATABASE_URL=postgres://agentfield:agentfield@localhost:5432/agentfield?sslmode=disable \ +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" status ``` ## Testing diff --git a/control-plane/build-single-binary.sh b/control-plane/build-single-binary.sh index 32deb2f2..178e731d 100755 --- a/control-plane/build-single-binary.sh +++ b/control-plane/build-single-binary.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Haxen Single Binary Builder +# AgentField Single Binary Builder # This script creates a single, portable binary that includes: # - Go backend with universal path management # - Embedded UI @@ -142,21 +142,21 @@ package main import ( "fmt" "strings" - "github.com/your-org/haxen/control-plane/internal/utils" + "github.com/Agent-Field/agentfield/control-plane/internal/utils" ) func main() { - dirs, err := utils.GetHaxenDataDirectories() + dirs, err := utils.GetAgentFieldDataDirectories() if err != nil { fmt.Printf("ERROR: %v\n", err) return } - fmt.Printf("Haxen Home: %s\n", dirs.HaxenHome) + fmt.Printf("AgentField Home: %s\n", dirs.AgentFieldHome) - // Verify that Haxen Home points to ~/.haxen - if !strings.HasSuffix(dirs.HaxenHome, ".haxen") { - fmt.Printf("ERROR: Haxen Home should end with .haxen, got: %s\n", dirs.HaxenHome) + // Verify that AgentField Home points to ~/.agentfield + if !strings.HasSuffix(dirs.AgentFieldHome, ".agentfield") { + fmt.Printf("ERROR: AgentField Home should end with .agentfield, got: %s\n", dirs.AgentFieldHome) return } @@ -168,9 +168,9 @@ func main() { } fmt.Printf("Database Path: %s\n", dbPath) - // Verify database path is in ~/.haxen/data/ - if !strings.Contains(dbPath, ".haxen/data/haxen.db") { - fmt.Printf("ERROR: Database path should be in ~/.haxen/data/, got: %s\n", dbPath) + // Verify 
database path is in ~/.agentfield/data/ + if !strings.Contains(dbPath, ".agentfield/data/agentfield.db") { + fmt.Printf("ERROR: Database path should be in ~/.agentfield/data/, got: %s\n", dbPath) return } @@ -181,7 +181,7 @@ func main() { return } - fmt.Println("SUCCESS: Path management system working correctly - database will be stored in ~/.haxen/") + fmt.Println("SUCCESS: Path management system working correctly - database will be stored in ~/.agentfield/") } EOF @@ -266,7 +266,7 @@ build_binary() { -ldflags "$LDFLAGS" \ -tags "embedded sqlite_fts5" \ -o "$OUTPUT_DIR/$output_name" \ - ./cmd/haxen-server + ./cmd/agentfield-server if [ $? -eq 0 ]; then # Get file size @@ -305,10 +305,10 @@ build_all_binaries() { # Define platforms to build declare -a platforms=( - # "linux:amd64:haxen-linux-amd64" - # "linux:arm64:haxen-linux-arm64" - # "darwin:amd64:haxen-darwin-amd64" - "darwin:arm64:haxen-darwin-arm64" + # "linux:amd64:agentfield-linux-amd64" + # "linux:arm64:agentfield-linux-arm64" + # "darwin:amd64:agentfield-darwin-amd64" + "darwin:arm64:agentfield-darwin-arm64" ) for platform in "${platforms[@]}"; do @@ -336,10 +336,10 @@ generate_metadata() { # Generate SHA256 checksums if command_exists sha256sum; then - sha256sum haxen-* > checksums.txt 2>/dev/null || true + sha256sum agentfield-* > checksums.txt 2>/dev/null || true print_success "Generated checksums.txt" elif command_exists shasum; then - shasum -a 256 haxen-* > checksums.txt 2>/dev/null || true + shasum -a 256 agentfield-* > checksums.txt 2>/dev/null || true print_success "Generated checksums.txt" else print_warning "No checksum utility found, skipping checksum generation" @@ -347,7 +347,7 @@ generate_metadata() { # Generate build info cat > build-info.txt << EOF -Haxen Single Binary Build Information +AgentField Single Binary Build Information ==================================== Build Version: $VERSION @@ -358,31 +358,31 @@ Build OS: $(uname -s) Build Arch: $(uname -m) Features: -- Universal Path 
Management (stores data in ~/.haxen/) +- Universal Path Management (stores data in ~/.agentfield/) - Embedded Web UI - Cross-platform compatibility - Single binary deployment Usage: - ./haxen- # Start Haxen server with UI - ./haxen- --help # Show help - ./haxen- --backend-only # Start without UI + ./agentfield- # Start AgentField server with UI + ./agentfield- --help # Show help + ./agentfield- --backend-only # Start without UI Data Storage: -All Haxen data is stored in ~/.haxen/ directory: -- ~/.haxen/data/haxen.db # Main database -- ~/.haxen/data/haxen.bolt # Cache/KV store -- ~/.haxen/data/keys/ # DID cryptographic keys -- ~/.haxen/data/did_registries/ # DID registries -- ~/.haxen/data/vcs/ # Verifiable credentials -- ~/.haxen/agents/ # Installed agents -- ~/.haxen/logs/ # Application logs -- ~/.haxen/config/ # User configurations +All AgentField data is stored in ~/.agentfield/ directory: +- ~/.agentfield/data/agentfield.db # Main database +- ~/.agentfield/data/agentfield.bolt # Cache/KV store +- ~/.agentfield/data/keys/ # DID cryptographic keys +- ~/.agentfield/data/did_registries/ # DID registries +- ~/.agentfield/data/vcs/ # Verifiable credentials +- ~/.agentfield/agents/ # Installed agents +- ~/.agentfield/logs/ # Application logs +- ~/.agentfield/config/ # User configurations Environment Variables: -- HAXEN_HOME: Override default ~/.haxen directory -- HAXEN_PORT: Override default port (8080) -- HAXEN_CONFIG_FILE: Override config file location +- AGENTFIELD_HOME: Override default ~/.agentfield directory +- AGENTFIELD_PORT: Override default port (8080) +- AGENTFIELD_CONFIG_FILE: Override config file location EOF @@ -397,27 +397,27 @@ create_distribution() { # Create a README for the distribution cat > "$OUTPUT_DIR/README.md" << 'EOF' -# Haxen Single Binary Distribution +# AgentField Single Binary Distribution -This package contains pre-built Haxen binaries for multiple platforms. +This package contains pre-built AgentField binaries for multiple platforms. 
## Quick Start 1. Download the appropriate binary for your platform: - - `haxen-linux-amd64` - Linux (Intel/AMD 64-bit) - - `haxen-linux-arm64` - Linux (ARM 64-bit) - - `haxen-darwin-amd64` - macOS (Intel) - - `haxen-darwin-arm64` - macOS (Apple Silicon) - - `haxen-windows-amd64.exe` - Windows (64-bit) + - `agentfield-linux-amd64` - Linux (Intel/AMD 64-bit) + - `agentfield-linux-arm64` - Linux (ARM 64-bit) + - `agentfield-darwin-amd64` - macOS (Intel) + - `agentfield-darwin-arm64` - macOS (Apple Silicon) + - `agentfield-windows-amd64.exe` - Windows (64-bit) 2. Make the binary executable (Linux/macOS): ```bash - chmod +x haxen-* + chmod +x agentfield-* ``` -3. Run Haxen: +3. Run AgentField: ```bash - ./haxen-linux-amd64 + ./agentfield-linux-amd64 ``` 4. Open your browser to http://localhost:8080 @@ -425,26 +425,26 @@ This package contains pre-built Haxen binaries for multiple platforms. ## Features - **Single Binary**: Everything bundled in one executable -- **Universal Storage**: All data stored in `~/.haxen/` directory +- **Universal Storage**: All data stored in `~/.agentfield/` directory - **Embedded UI**: Web interface included in binary - **Cross-Platform**: Works on Linux, macOS, and Windows - **Portable**: Run from anywhere, data stays consistent ## Configuration -Haxen can be configured via: -- Environment variables (HAXEN_HOME, HAXEN_PORT, etc.) -- Configuration file (`~/.haxen/haxen.yaml`) +AgentField can be configured via: +- Environment variables (AGENTFIELD_HOME, AGENTFIELD_PORT, etc.) +- Configuration file (`~/.agentfield/agentfield.yaml`) - Command line flags (`--port`, `--backend-only`, etc.) 
## Data Directory -All Haxen data is stored in `~/.haxen/`: +All AgentField data is stored in `~/.agentfield/`: ``` -~/.haxen/ +~/.agentfield/ β”œβ”€β”€ data/ -β”‚ β”œβ”€β”€ haxen.db # Main database -β”‚ β”œβ”€β”€ haxen.bolt # Cache +β”‚ β”œβ”€β”€ agentfield.db # Main database +β”‚ β”œβ”€β”€ agentfield.bolt # Cache β”‚ β”œβ”€β”€ keys/ # Cryptographic keys β”‚ β”œβ”€β”€ did_registries/ # DID registries β”‚ └── vcs/ # Verifiable credentials @@ -455,7 +455,7 @@ All Haxen data is stored in `~/.haxen/`: ## Support -For issues and documentation, visit: https://github.com/your-org/haxen +For issues and documentation, visit: https://github.com/Agent-Field/agentfield EOF print_success "Created distribution README.md" @@ -473,7 +473,7 @@ show_summary() { if [ -d "$OUTPUT_DIR" ]; then print_status "Output directory: $OUTPUT_DIR" print_status "Built files:" - ls -la "$OUTPUT_DIR" | grep -E "(haxen-|checksums|build-info|README)" + ls -la "$OUTPUT_DIR" | grep -E "(agentfield-|checksums|build-info|README)" # Calculate total size if command_exists du; then @@ -487,21 +487,21 @@ show_summary() { echo "" print_status "To test your binary:" echo " cd $OUTPUT_DIR" - echo " ./haxen-$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m | sed 's/x86_64/amd64/')" + echo " ./agentfield-$(uname -s | tr '[:upper:]' '[:lower:]')-$(uname -m | sed 's/x86_64/amd64/')" echo "" print_status "The binary includes:" echo " βœ… Go backend with universal path management" echo " βœ… Embedded web UI" echo " βœ… All dependencies bundled" echo " βœ… Cross-platform compatibility" - echo " βœ… Portable deployment (stores data in ~/.haxen/)" + echo " βœ… Portable deployment (stores data in ~/.agentfield/)" } # Main build function main() { - print_header "Haxen Single Binary Builder" + print_header "AgentField Single Binary Builder" - echo "Building Haxen single binary with:" + echo "Building AgentField single binary with:" echo " β€’ Universal path management" echo " β€’ Embedded web UI" echo " β€’ 
Cross-platform support" @@ -531,7 +531,7 @@ case "${1:-}" in test_path_system ;; "help"|"-h"|"--help") - echo "Haxen Single Binary Builder" + echo "AgentField Single Binary Builder" echo "" echo "Usage:" echo " $0 Build complete single binary package" diff --git a/control-plane/cmd/haxen/main.go b/control-plane/cmd/af/main.go similarity index 78% rename from control-plane/cmd/haxen/main.go rename to control-plane/cmd/af/main.go index 8af26439..186eae95 100644 --- a/control-plane/cmd/haxen/main.go +++ b/control-plane/cmd/af/main.go @@ -1,12 +1,6 @@ package main import ( - "github.com/your-org/haxen/control-plane/internal/cli" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/server" - "github.com/your-org/haxen/control-plane/internal/utils" - "github.com/your-org/haxen/control-plane/web/client" "fmt" "log" "os" @@ -16,6 +10,13 @@ import ( "strconv" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/cli" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/server" + "github.com/Agent-Field/agentfield/control-plane/internal/utils" + "github.com/Agent-Field/agentfield/control-plane/web/client" + "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -31,7 +32,7 @@ func main() { // runServer contains the server startup logic for unified CLI func runServer(cmd *cobra.Command, args []string) { - logger.Logger.Debug().Msg("Haxen server starting...") + logger.Logger.Debug().Msg("AgentField server starting...") // Load configuration with better defaults cfgFilePath, _ := cmd.Flags().GetString("config") @@ -43,13 +44,13 @@ func runServer(cmd *cobra.Command, args []string) { // Override port from flag if provided if cmd.Flags().Lookup("port").Changed { port, _ := cmd.Flags().GetInt("port") - 
cfg.Haxen.Port = port + cfg.AgentField.Port = port } // Override from environment variables - if envPort := os.Getenv("HAXEN_PORT"); envPort != "" { + if envPort := os.Getenv("AGENTFIELD_PORT"); envPort != "" { if port, err := strconv.Atoi(envPort); err == nil { - cfg.Haxen.Port = port + cfg.AgentField.Port = port } } @@ -62,7 +63,7 @@ func runServer(cmd *cobra.Command, args []string) { } if !storageModeExplicit { - if envMode := os.Getenv("HAXEN_STORAGE_MODE"); envMode != "" { + if envMode := os.Getenv("AGENTFIELD_STORAGE_MODE"); envMode != "" { cfg.Storage.Mode = envMode } } @@ -72,9 +73,9 @@ func runServer(cmd *cobra.Command, args []string) { postgresURL, _ = cmd.Flags().GetString("postgres-url") } if postgresURL == "" { - if env := os.Getenv("HAXEN_POSTGRES_URL"); env != "" { + if env := os.Getenv("AGENTFIELD_POSTGRES_URL"); env != "" { postgresURL = env - } else if env := os.Getenv("HAXEN_STORAGE_POSTGRES_URL"); env != "" { + } else if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_URL"); env != "" { postgresURL = env } } @@ -87,24 +88,24 @@ func runServer(cmd *cobra.Command, args []string) { } } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_HOST"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_HOST"); env != "" { cfg.Storage.Postgres.Host = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_PORT"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_PORT"); env != "" { if port, err := strconv.Atoi(env); err == nil { cfg.Storage.Postgres.Port = port } } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_DATABASE"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_DATABASE"); env != "" { cfg.Storage.Postgres.Database = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_USER"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_USER"); env != "" { cfg.Storage.Postgres.User = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_PASSWORD"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_PASSWORD"); 
env != "" { cfg.Storage.Postgres.Password = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_SSLMODE"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_SSLMODE"); env != "" { cfg.Storage.Postgres.SSLMode = env } @@ -142,17 +143,17 @@ func runServer(cmd *cobra.Command, args []string) { fmt.Println("UI is already embedded in binary, skipping build.") } - // Create Haxen server instance - haxenServer, err := server.NewHaxenServer(cfg) + // Create AgentField server instance + agentfieldServer, err := server.NewAgentFieldServer(cfg) if err != nil { - log.Fatalf("Failed to create Haxen server: %v", err) + log.Fatalf("Failed to create AgentField server: %v", err) } // Start the server in a goroutine so we can open the browser go func() { - fmt.Printf("Haxen server attempting to start on port %d...\n", cfg.Haxen.Port) - if err := haxenServer.Start(); err != nil { - log.Fatalf("Failed to start Haxen server: %v", err) + fmt.Printf("AgentField server attempting to start on port %d...\n", cfg.AgentField.Port) + if err := agentfieldServer.Start(); err != nil { + log.Fatalf("Failed to start AgentField server: %v", err) } }() @@ -161,7 +162,7 @@ func runServer(cmd *cobra.Command, args []string) { openBrowserFlag, _ := cmd.Flags().GetBool("open") if cfg.UI.Enabled && openBrowserFlag && !backendOnly { - uiTargetURL := fmt.Sprintf("http://localhost:%d", cfg.Haxen.Port) + uiTargetURL := fmt.Sprintf("http://localhost:%d", cfg.AgentField.Port) if cfg.UI.Mode == "dev" { // Use configured dev port or environment variable devPort := cfg.UI.DevPort @@ -179,7 +180,7 @@ func runServer(cmd *cobra.Command, args []string) { openBrowser(uiTargetURL) } - fmt.Printf("Haxen server running on http://localhost:%d\n", cfg.Haxen.Port) + fmt.Printf("AgentField server running on http://localhost:%d\n", cfg.AgentField.Port) fmt.Printf("Press Ctrl+C to exit.\n") // Keep main goroutine alive select {} @@ -188,7 +189,7 @@ func runServer(cmd *cobra.Command, args []string) { // loadConfig 
loads configuration with sensible defaults for user experience func loadConfig(configFile string) (*config.Config, error) { // Set environment variable prefixes - viper.SetEnvPrefix("HAXEN") + viper.SetEnvPrefix("AGENTFIELD") viper.AutomaticEnv() // Get the directory where the binary is located for UI paths @@ -203,16 +204,16 @@ func loadConfig(configFile string) (*config.Config, error) { viper.SetConfigFile(configFile) } else { // Check for config file path from environment - if envConfigFile := os.Getenv("HAXEN_CONFIG_FILE"); envConfigFile != "" { + if envConfigFile := os.Getenv("AGENTFIELD_CONFIG_FILE"); envConfigFile != "" { viper.SetConfigFile(envConfigFile) } else { // Look for config in user's home directory first, then relative to exec dir, then local homeDir, _ := os.UserHomeDir() - viper.AddConfigPath(filepath.Join(homeDir, ".haxen")) + viper.AddConfigPath(filepath.Join(homeDir, ".agentfield")) viper.AddConfigPath(filepath.Join(execDir, "config")) viper.AddConfigPath("./config") viper.AddConfigPath(".") - viper.SetConfigName("haxen") + viper.SetConfigName("agentfield") viper.SetConfigType("yaml") } } @@ -231,8 +232,8 @@ func loadConfig(configFile string) (*config.Config, error) { } // Apply sensible defaults for user experience - if cfg.Haxen.Port == 0 { - cfg.Haxen.Port = 8080 + if cfg.AgentField.Port == 0 { + cfg.AgentField.Port = 8080 } // Enable UI by default unless explicitly disabled if cfg.UI.Mode == "" { @@ -248,8 +249,8 @@ func loadConfig(configFile string) (*config.Config, error) { if cfg.UI.SourcePath == "" { candidateSourcePaths := []string{ filepath.Join(execDir, "web", "client"), - filepath.Join(filepath.Dir(execDir), "apps", "platform", "haxen", "web", "client"), - filepath.Join("apps", "platform", "haxen", "web", "client"), + filepath.Join(filepath.Dir(execDir), "apps", "platform", "agentfield", "web", "client"), + filepath.Join("apps", "platform", "agentfield", "web", "client"), filepath.Join("web", "client"), } for _, candidate := range 
candidateSourcePaths { @@ -266,8 +267,8 @@ func loadConfig(configFile string) (*config.Config, error) { candidateDistPaths := []string{ filepath.Join(cfg.UI.SourcePath, "dist"), filepath.Join(execDir, "web", "client", "dist"), - filepath.Join(filepath.Dir(execDir), "apps", "platform", "haxen", "web", "client", "dist"), - filepath.Join("apps", "platform", "haxen", "web", "client", "dist"), + filepath.Join(filepath.Dir(execDir), "apps", "platform", "agentfield", "web", "client", "dist"), + filepath.Join("apps", "platform", "agentfield", "web", "client", "dist"), filepath.Join("web", "client", "dist"), } for _, candidate := range candidateDistPaths { @@ -324,14 +325,14 @@ func loadConfig(configFile string) (*config.Config, error) { } cfg.Storage.Local.KVStorePath = kvPath } - // Ensure all Haxen data directories exist + // Ensure all AgentField data directories exist if _, err := utils.EnsureDataDirectories(); err != nil { - return nil, fmt.Errorf("failed to create Haxen data directories: %w", err) + return nil, fmt.Errorf("failed to create AgentField data directories: %w", err) } } - fmt.Printf("Loaded config - Storage mode: %s, Haxen Port: %d, UI Mode: %s, UI Enabled: %t\n", - cfg.Storage.Mode, cfg.Haxen.Port, cfg.UI.Mode, cfg.UI.Enabled) + fmt.Printf("Loaded config - Storage mode: %s, AgentField Port: %d, UI Mode: %s, UI Enabled: %t\n", + cfg.Storage.Mode, cfg.AgentField.Port, cfg.UI.Mode, cfg.UI.Enabled) return &cfg, nil } @@ -357,7 +358,7 @@ func buildUI(cfg *config.Config) error { buildEnv = append(buildEnv, fmt.Sprintf("VITE_BUILD_OUT_DIR=%s", filepath.Base(cfg.UI.DistPath))) } - buildEnv = append(buildEnv, fmt.Sprintf("VITE_API_PROXY_TARGET=http://localhost:%d", cfg.Haxen.Port)) + buildEnv = append(buildEnv, fmt.Sprintf("VITE_API_PROXY_TARGET=http://localhost:%d", cfg.AgentField.Port)) // Install dependencies cmdInstall := exec.Command("npm", "install", "--force") diff --git a/control-plane/cmd/haxen-server/main.go b/control-plane/cmd/agentfield-server/main.go 
similarity index 77% rename from control-plane/cmd/haxen-server/main.go rename to control-plane/cmd/agentfield-server/main.go index 13b68b3f..23031982 100644 --- a/control-plane/cmd/haxen-server/main.go +++ b/control-plane/cmd/agentfield-server/main.go @@ -1,11 +1,6 @@ package main import ( - "github.com/your-org/haxen/control-plane/internal/cli" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/server" - "github.com/your-org/haxen/control-plane/internal/utils" - "github.com/your-org/haxen/control-plane/web/client" "fmt" "log" "os" @@ -15,20 +10,26 @@ import ( "strconv" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/cli" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/server" + "github.com/Agent-Field/agentfield/control-plane/internal/utils" + "github.com/Agent-Field/agentfield/control-plane/web/client" + "github.com/spf13/cobra" // Import cobra "github.com/spf13/viper" ) var ( - loadConfigFunc = loadConfig - newHaxenServerFunc = server.NewHaxenServer - buildUIFunc = buildUI - openBrowserFunc = openBrowser - sleepFunc = time.Sleep - waitForShutdownFunc = func() { select {} } - commandRunner = defaultCommandRunner - browserLauncher = defaultBrowserLauncher - startHaxenServerFunc = defaultStartHaxenServer + loadConfigFunc = loadConfig + newAgentFieldServerFunc = server.NewAgentFieldServer + buildUIFunc = buildUI + openBrowserFunc = openBrowser + sleepFunc = time.Sleep + waitForShutdownFunc = func() { select {} } + commandRunner = defaultCommandRunner + browserLauncher = defaultBrowserLauncher + startAgentFieldServerFunc = defaultStartAgentFieldServer ) // main function now acts as the entry point for the Cobra CLI. @@ -45,7 +46,7 @@ func main() { // runServer contains the original server startup logic. // This function will be called by the Cobra command's Run field. 
func runServer(cmd *cobra.Command, args []string) { - fmt.Println("Haxen server starting...") + fmt.Println("AgentField server starting...") // Load configuration cfgFilePath, _ := cmd.Flags().GetString("config") @@ -57,13 +58,13 @@ func runServer(cmd *cobra.Command, args []string) { // Override port from flag if provided if cmd.Flags().Lookup("port").Changed { port, _ := cmd.Flags().GetInt("port") - cfg.Haxen.Port = port + cfg.AgentField.Port = port } // Override from environment variables - if envPort := os.Getenv("HAXEN_PORT"); envPort != "" { + if envPort := os.Getenv("AGENTFIELD_PORT"); envPort != "" { if port, err := strconv.Atoi(envPort); err == nil { - cfg.Haxen.Port = port + cfg.AgentField.Port = port } } @@ -76,7 +77,7 @@ func runServer(cmd *cobra.Command, args []string) { } if !storageModeExplicit { - if envMode := os.Getenv("HAXEN_STORAGE_MODE"); envMode != "" { + if envMode := os.Getenv("AGENTFIELD_STORAGE_MODE"); envMode != "" { cfg.Storage.Mode = envMode } } @@ -86,9 +87,9 @@ func runServer(cmd *cobra.Command, args []string) { postgresURL, _ = cmd.Flags().GetString("postgres-url") } if postgresURL == "" { - if env := os.Getenv("HAXEN_POSTGRES_URL"); env != "" { + if env := os.Getenv("AGENTFIELD_POSTGRES_URL"); env != "" { postgresURL = env - } else if env := os.Getenv("HAXEN_STORAGE_POSTGRES_URL"); env != "" { + } else if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_URL"); env != "" { postgresURL = env } } @@ -101,24 +102,24 @@ func runServer(cmd *cobra.Command, args []string) { } } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_HOST"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_HOST"); env != "" { cfg.Storage.Postgres.Host = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_PORT"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_PORT"); env != "" { if port, err := strconv.Atoi(env); err == nil { cfg.Storage.Postgres.Port = port } } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_DATABASE"); env != "" { + if env := 
os.Getenv("AGENTFIELD_STORAGE_POSTGRES_DATABASE"); env != "" { cfg.Storage.Postgres.Database = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_USER"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_USER"); env != "" { cfg.Storage.Postgres.User = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_PASSWORD"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_PASSWORD"); env != "" { cfg.Storage.Postgres.Password = env } - if env := os.Getenv("HAXEN_STORAGE_POSTGRES_SSLMODE"); env != "" { + if env := os.Getenv("AGENTFIELD_STORAGE_POSTGRES_SSLMODE"); env != "" { cfg.Storage.Postgres.SSLMode = env } @@ -156,17 +157,17 @@ func runServer(cmd *cobra.Command, args []string) { fmt.Println("UI is already embedded in binary, skipping build.") } - // Create Haxen server instance - haxenServer, err := newHaxenServerFunc(cfg) + // Create AgentField server instance + agentfieldServer, err := newAgentFieldServerFunc(cfg) if err != nil { - log.Fatalf("Failed to create Haxen server: %v", err) + log.Fatalf("Failed to create AgentField server: %v", err) } // Start the server in a goroutine so we can open the browser go func() { - fmt.Printf("Haxen server attempting to start on port %d...\n", cfg.Haxen.Port) - if err := startHaxenServerFunc(haxenServer); err != nil { - log.Fatalf("Failed to start Haxen server: %v", err) + fmt.Printf("AgentField server attempting to start on port %d...\n", cfg.AgentField.Port) + if err := startAgentFieldServerFunc(agentfieldServer); err != nil { + log.Fatalf("Failed to start AgentField server: %v", err) } }() @@ -175,7 +176,7 @@ func runServer(cmd *cobra.Command, args []string) { openBrowserFlag, _ := cmd.Flags().GetBool("open") if cfg.UI.Enabled && openBrowserFlag && !backendOnly { - uiTargetURL := fmt.Sprintf("http://localhost:%d", cfg.Haxen.Port) + uiTargetURL := fmt.Sprintf("http://localhost:%d", cfg.AgentField.Port) if cfg.UI.Mode == "dev" { // Use configured dev port or environment variable devPort := cfg.UI.DevPort 
@@ -193,7 +194,7 @@ func runServer(cmd *cobra.Command, args []string) { openBrowserFunc(uiTargetURL) } - fmt.Printf("Haxen server running. Press Ctrl+C to exit.\n") + fmt.Printf("AgentField server running. Press Ctrl+C to exit.\n") // Keep main goroutine alive waitForShutdownFunc() @@ -203,7 +204,7 @@ func runServer(cmd *cobra.Command, args []string) { // loadConfig loads configuration from file and environment variables. func loadConfig(configFile string) (*config.Config, error) { // Set environment variable prefixes - viper.SetEnvPrefix("HAXEN") + viper.SetEnvPrefix("AGENTFIELD") viper.AutomaticEnv() // read in environment variables that match // Skip config file reading if explicitly set to /dev/null or empty @@ -214,13 +215,13 @@ func loadConfig(configFile string) (*config.Config, error) { } } else if configFile == "" { // Check for config file path from environment - if envConfigFile := os.Getenv("HAXEN_CONFIG_FILE"); envConfigFile != "" { + if envConfigFile := os.Getenv("AGENTFIELD_CONFIG_FILE"); envConfigFile != "" { viper.SetConfigFile(envConfigFile) } else { - viper.SetConfigName("haxen") // name of config file (without extension) - viper.SetConfigType("yaml") // type of the config file - viper.AddConfigPath("./config") // path to look for the config file in - viper.AddConfigPath(".") // optionally look for config in the working directory + viper.SetConfigName("agentfield") // name of config file (without extension) + viper.SetConfigType("yaml") // type of the config file + viper.AddConfigPath("./config") // path to look for the config file in + viper.AddConfigPath(".") // optionally look for config in the working directory } if err := viper.ReadInConfig(); err != nil { @@ -240,8 +241,8 @@ func loadConfig(configFile string) (*config.Config, error) { } // Apply defaults if not set - if cfg.Haxen.Port == 0 { - cfg.Haxen.Port = 8080 // Default port + if cfg.AgentField.Port == 0 { + cfg.AgentField.Port = 8080 // Default port } if cfg.Storage.Mode == "" { 
cfg.Storage.Mode = "local" // Default storage mode @@ -316,24 +317,24 @@ func loadConfig(configFile string) (*config.Config, error) { // Get the executable path and find UI relative to it execPath, err := os.Executable() if err != nil { - cfg.UI.SourcePath = filepath.Join("apps", "platform", "haxen", "web", "client") + cfg.UI.SourcePath = filepath.Join("apps", "platform", "agentfield", "web", "client") if _, statErr := os.Stat(cfg.UI.SourcePath); os.IsNotExist(statErr) { cfg.UI.SourcePath = filepath.Join("web", "client") } } else { execDir := filepath.Dir(execPath) // Look for web/client relative to the executable directory - // This assumes the binary is built in the haxen/ directory + // This assumes the binary is built in the agentfield/ directory cfg.UI.SourcePath = filepath.Join(execDir, "web", "client") - // If that doesn't exist, try going up one level (if binary is in haxen/) + // If that doesn't exist, try going up one level (if binary is in agentfield/) if _, err := os.Stat(cfg.UI.SourcePath); os.IsNotExist(err) { - cfg.UI.SourcePath = filepath.Join(filepath.Dir(execDir), "apps", "platform", "haxen", "web", "client") + cfg.UI.SourcePath = filepath.Join(filepath.Dir(execDir), "apps", "platform", "agentfield", "web", "client") } // Final fallback to current working directory if _, err := os.Stat(cfg.UI.SourcePath); os.IsNotExist(err) { - altPath := filepath.Join("apps", "platform", "haxen", "web", "client") + altPath := filepath.Join("apps", "platform", "agentfield", "web", "client") if _, altErr := os.Stat(altPath); altErr == nil { cfg.UI.SourcePath = altPath } else { @@ -362,14 +363,14 @@ func loadConfig(configFile string) (*config.Config, error) { } cfg.Storage.Local.KVStorePath = kvPath } - // Ensure all Haxen data directories exist + // Ensure all AgentField data directories exist if _, err := utils.EnsureDataDirectories(); err != nil { - return nil, fmt.Errorf("failed to create Haxen data directories: %w", err) + return nil, fmt.Errorf("failed to 
create AgentField data directories: %w", err) } } - fmt.Printf("Loaded config - Storage mode: %s, Haxen Port: %d, UI Mode: %s, UI Enabled: %t, DID Enabled: %t\n", - cfg.Storage.Mode, cfg.Haxen.Port, cfg.UI.Mode, cfg.UI.Enabled, cfg.Features.DID.Enabled) + fmt.Printf("Loaded config - Storage mode: %s, AgentField Port: %d, UI Mode: %s, UI Enabled: %t, DID Enabled: %t\n", + cfg.Storage.Mode, cfg.AgentField.Port, cfg.UI.Mode, cfg.UI.Enabled, cfg.Features.DID.Enabled) return &cfg, nil } @@ -397,7 +398,7 @@ func buildUI(cfg *config.Config) error { } // Set API proxy target for development builds - buildEnv = append(buildEnv, fmt.Sprintf("VITE_API_PROXY_TARGET=http://localhost:%d", cfg.Haxen.Port)) + buildEnv = append(buildEnv, fmt.Sprintf("VITE_API_PROXY_TARGET=http://localhost:%d", cfg.AgentField.Port)) // Install dependencies if err := commandRunner(uiDir, buildEnv, "npm", "install", "--force"); err != nil { @@ -426,7 +427,7 @@ func defaultBrowserLauncher(name string, args ...string) error { return exec.Command(name, args...).Start() } -func defaultStartHaxenServer(s *server.HaxenServer) error { +func defaultStartAgentFieldServer(s *server.AgentFieldServer) error { return s.Start() } diff --git a/control-plane/cmd/haxen-server/main_test.go b/control-plane/cmd/agentfield-server/main_test.go similarity index 81% rename from control-plane/cmd/haxen-server/main_test.go rename to control-plane/cmd/agentfield-server/main_test.go index 3f1133f2..5a200177 100644 --- a/control-plane/cmd/haxen-server/main_test.go +++ b/control-plane/cmd/agentfield-server/main_test.go @@ -10,16 +10,16 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/server" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/server" "github.com/spf13/cobra" "github.com/spf13/viper" ) func TestLoadConfig_DefaultsApplied(t *testing.T) { - 
t.Setenv("HAXEN_PORT", "") - t.Setenv("HAXEN_CONFIG_FILE", "") + t.Setenv("AGENTFIELD_PORT", "") + t.Setenv("AGENTFIELD_CONFIG_FILE", "") viper.Reset() cfg, err := loadConfig("/dev/null") @@ -27,8 +27,8 @@ func TestLoadConfig_DefaultsApplied(t *testing.T) { t.Fatalf("loadConfig returned error: %v", err) } - if cfg.Haxen.Port != 8080 { - t.Errorf("expected default haxen port 8080, got %d", cfg.Haxen.Port) + if cfg.AgentField.Port != 8080 { + t.Errorf("expected default agentfield port 8080, got %d", cfg.AgentField.Port) } if cfg.Storage.Mode != "local" { t.Errorf("expected default storage mode local, got %s", cfg.Storage.Mode) @@ -51,8 +51,8 @@ func TestLoadConfig_ConfigFileValues(t *testing.T) { viper.Reset() dir := t.TempDir() - file := filepath.Join(dir, "haxen.yaml") - content := []byte(`haxen: + file := filepath.Join(dir, "agentfield.yaml") + content := []byte(`agentfield: port: 9231 storage: mode: local @@ -74,8 +74,8 @@ features: t.Fatalf("loadConfig returned error: %v", err) } - if cfg.Haxen.Port != 9231 { - t.Errorf("expected port 9231, got %d", cfg.Haxen.Port) + if cfg.AgentField.Port != 9231 { + t.Errorf("expected port 9231, got %d", cfg.AgentField.Port) } if cfg.UI.Enabled { t.Error("expected UI disabled from config") @@ -115,7 +115,7 @@ func TestBuildUI_RunsInstallAndBuild(t *testing.T) { SourcePath: dir, DistPath: filepath.Join(dir, "dist"), }, - Haxen: config.HaxenConfig{Port: 8081}, + AgentField: config.AgentFieldConfig{Port: 8081}, } var mu sync.Mutex @@ -132,7 +132,7 @@ func TestBuildUI_RunsInstallAndBuild(t *testing.T) { t.Errorf("unexpected command dir %s", dir) } // Ensure environment includes proxy - expectedPrefix := fmt.Sprintf("VITE_API_PROXY_TARGET=http://localhost:%d", cfg.Haxen.Port) + expectedPrefix := fmt.Sprintf("VITE_API_PROXY_TARGET=http://localhost:%d", cfg.AgentField.Port) found := false for _, envVar := range env { if envVar == expectedPrefix { @@ -184,8 +184,8 @@ func TestBuildUI_CommandError(t *testing.T) { func 
TestRunServer_AppliesFlagOverrides(t *testing.T) { cfg := &config.Config{ - Haxen: config.HaxenConfig{Port: 4000}, - UI: config.UIConfig{Enabled: true, Mode: "embedded"}, + AgentField: config.AgentFieldConfig{Port: 4000}, + UI: config.UIConfig{Enabled: true, Mode: "embedded"}, Features: config.FeatureConfig{DID: config.DIDConfig{ VCRequirements: config.VCRequirements{ RequireVCForExecution: true, @@ -194,21 +194,21 @@ func TestRunServer_AppliesFlagOverrides(t *testing.T) { } loadOrig := loadConfigFunc - newOrig := newHaxenServerFunc + newOrig := newAgentFieldServerFunc buildOrig := buildUIFunc openOrig := openBrowserFunc sleepOrig := sleepFunc waitOrig := waitForShutdownFunc - startOrig := startHaxenServerFunc + startOrig := startAgentFieldServerFunc defer func() { loadConfigFunc = loadOrig - newHaxenServerFunc = newOrig + newAgentFieldServerFunc = newOrig buildUIFunc = buildOrig openBrowserFunc = openOrig sleepFunc = sleepOrig waitForShutdownFunc = waitOrig - startHaxenServerFunc = startOrig + startAgentFieldServerFunc = startOrig }() loadConfigFunc = func(path string) (*config.Config, error) { @@ -219,9 +219,9 @@ func TestRunServer_AppliesFlagOverrides(t *testing.T) { } var gotCfg *config.Config - newHaxenServerFunc = func(c *config.Config) (*server.HaxenServer, error) { + newAgentFieldServerFunc = func(c *config.Config) (*server.AgentFieldServer, error) { gotCfg = c - return &server.HaxenServer{}, nil + return &server.AgentFieldServer{}, nil } buildUIFunc = func(*config.Config) error { return nil } @@ -230,7 +230,7 @@ func TestRunServer_AppliesFlagOverrides(t *testing.T) { waitForShutdownFunc = func() {} started := make(chan struct{}) - startHaxenServerFunc = func(*server.HaxenServer) error { + startAgentFieldServerFunc = func(*server.AgentFieldServer) error { close(started) return nil } @@ -253,17 +253,17 @@ func TestRunServer_AppliesFlagOverrides(t *testing.T) { t.Fatalf("failed to set no-vc-execution: %v", err) } - t.Setenv("HAXEN_PORT", "12345") + 
t.Setenv("AGENTFIELD_PORT", "12345") runServer(cmd, nil) <-started if gotCfg == nil { - t.Fatal("expected haxen server creation to be invoked") + t.Fatal("expected af server creation to be invoked") } - if gotCfg.Haxen.Port != 12345 { - t.Fatalf("expected env override port 12345, got %d", gotCfg.Haxen.Port) + if gotCfg.AgentField.Port != 12345 { + t.Fatalf("expected env override port 12345, got %d", gotCfg.AgentField.Port) } if gotCfg.UI.Enabled { t.Fatal("backend-only flag should disable UI") @@ -275,7 +275,7 @@ func TestRunServer_AppliesFlagOverrides(t *testing.T) { func TestRunServer_OpensBrowserForDevUI(t *testing.T) { cfg := &config.Config{ - Haxen: config.HaxenConfig{Port: 8800}, + AgentField: config.AgentFieldConfig{Port: 8800}, UI: config.UIConfig{ Enabled: true, Mode: "dev", @@ -285,27 +285,27 @@ func TestRunServer_OpensBrowserForDevUI(t *testing.T) { } loadOrig := loadConfigFunc - newOrig := newHaxenServerFunc + newOrig := newAgentFieldServerFunc openOrig := openBrowserFunc sleepOrig := sleepFunc waitOrig := waitForShutdownFunc - startOrig := startHaxenServerFunc + startOrig := startAgentFieldServerFunc defer func() { loadConfigFunc = loadOrig - newHaxenServerFunc = newOrig + newAgentFieldServerFunc = newOrig openBrowserFunc = openOrig sleepFunc = sleepOrig waitForShutdownFunc = waitOrig - startHaxenServerFunc = startOrig + startAgentFieldServerFunc = startOrig }() loadConfigFunc = func(string) (*config.Config, error) { return cfg, nil } - newHaxenServerFunc = func(*config.Config) (*server.HaxenServer, error) { return &server.HaxenServer{}, nil } + newAgentFieldServerFunc = func(*config.Config) (*server.AgentFieldServer, error) { return &server.AgentFieldServer{}, nil } sleepFunc = func(time.Duration) {} waitForShutdownFunc = func() {} started := make(chan struct{}) - startHaxenServerFunc = func(*server.HaxenServer) error { + startAgentFieldServerFunc = func(*server.AgentFieldServer) error { close(started) return nil } diff --git 
a/control-plane/config/haxen.yaml b/control-plane/config/agentfield.yaml similarity index 99% rename from control-plane/config/haxen.yaml rename to control-plane/config/agentfield.yaml index 2a8c976c..78b747ed 100644 --- a/control-plane/config/haxen.yaml +++ b/control-plane/config/agentfield.yaml @@ -1,4 +1,4 @@ -haxen: +agentfield: port: 8080 mode: "local" max_concurrent_requests: 1024 diff --git a/control-plane/config/docker-perf.yaml b/control-plane/config/docker-perf.yaml index a56fac8a..38a60cf7 100644 --- a/control-plane/config/docker-perf.yaml +++ b/control-plane/config/docker-perf.yaml @@ -1,4 +1,4 @@ -haxen: +agentfield: port: 8080 mode: "local" max_concurrent_requests: 1024 @@ -78,13 +78,13 @@ features: hash_sensitive_data: true keystore: type: "local" - path: "/var/haxen/data/keys" + path: "/var/agentfield/data/keys" encryption: "AES-256-GCM" backup_enabled: true backup_interval: "24h" data_directories: - haxen_home: "/var/haxen" + agentfield_home: "/var/agentfield" database_dir: "data" keys_dir: "data/keys" did_registries_dir: "data/did_registries" diff --git a/control-plane/go.mod b/control-plane/go.mod index a51a464e..282255d0 100644 --- a/control-plane/go.mod +++ b/control-plane/go.mod @@ -1,4 +1,4 @@ -module github.com/your-org/haxen/control-plane +module github.com/Agent-Field/agentfield/control-plane go 1.24.0 @@ -6,6 +6,8 @@ toolchain go1.24.2 require ( github.com/boltdb/bolt v1.3.1 + github.com/charmbracelet/bubbletea v1.3.10 + github.com/charmbracelet/lipgloss v1.1.0 github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.8.0 github.com/gin-contrib/cors v1.7.5 @@ -37,9 +39,7 @@ require ( github.com/bytedance/sonic v1.13.2 // indirect github.com/bytedance/sonic/loader v0.2.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/charmbracelet/bubbletea v1.3.10 // indirect github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect - github.com/charmbracelet/lipgloss v1.1.0 // indirect 
github.com/charmbracelet/x/ansi v0.10.1 // indirect github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect github.com/charmbracelet/x/term v0.2.1 // indirect diff --git a/control-plane/go.sum b/control-plane/go.sum index 5c909896..f6885564 100644 --- a/control-plane/go.sum +++ b/control-plane/go.sum @@ -202,6 +202,8 @@ golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20231108232855-2478ac86f678 h1:mchzmB1XO2pMaKFRqk/+MV3mgGG96aqaPXaMifQU47w= +golang.org/x/exp v0.0.0-20231108232855-2478ac86f678/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= @@ -212,8 +214,6 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= diff --git a/control-plane/internal/application/container.go b/control-plane/internal/application/container.go index 46061db0..d594e7de 100644 --- 
a/control-plane/internal/application/container.go +++ b/control-plane/internal/application/container.go @@ -1,23 +1,25 @@ package application import ( - "github.com/your-org/haxen/control-plane/internal/cli/framework" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/core/services" - "github.com/your-org/haxen/control-plane/internal/infrastructure/process" - "github.com/your-org/haxen/control-plane/internal/infrastructure/storage" - didServices "github.com/your-org/haxen/control-plane/internal/services" - storageInterface "github.com/your-org/haxen/control-plane/internal/storage" "crypto/sha256" "encoding/hex" "path/filepath" + + "github.com/Agent-Field/agentfield/control-plane/internal/cli/framework" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/core/services" + "github.com/Agent-Field/agentfield/control-plane/internal/infrastructure/process" + "github.com/Agent-Field/agentfield/control-plane/internal/infrastructure/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + didServices "github.com/Agent-Field/agentfield/control-plane/internal/services" + storageInterface "github.com/Agent-Field/agentfield/control-plane/internal/storage" ) // CreateServiceContainer creates and wires up all services for the CLI commands -func CreateServiceContainer(cfg *config.Config, haxenHome string) *framework.ServiceContainer { +func CreateServiceContainer(cfg *config.Config, agentfieldHome string) *framework.ServiceContainer { // Create infrastructure components fileSystem := storage.NewFileSystemAdapter() - registryPath := filepath.Join(haxenHome, "installed.json") + registryPath := filepath.Join(agentfieldHome, "installed.json") registryStorage := storage.NewLocalRegistryStorage(fileSystem, registryPath) processManager := process.NewProcessManager() portManager := process.NewPortManager() @@ -32,8 +34,8 @@ func 
CreateServiceContainer(cfg *config.Config, haxenHome string) *framework.Ser } // Create services - packageService := services.NewPackageService(registryStorage, fileSystem, haxenHome) - agentService := services.NewAgentService(processManager, portManager, registryStorage, nil, haxenHome) // nil agentClient for now + packageService := services.NewPackageService(registryStorage, fileSystem, agentfieldHome) + agentService := services.NewAgentService(processManager, portManager, registryStorage, nil, agentfieldHome) // nil agentClient for now devService := services.NewDevService(processManager, portManager, fileSystem) // Create DID services if enabled @@ -69,18 +71,24 @@ func CreateServiceContainer(cfg *config.Config, haxenHome string) *framework.Ser if keystoreService != nil && didRegistry != nil { didService = didServices.NewDIDService(&cfg.Features.DID, keystoreService, didRegistry) - // Generate haxen server ID based on haxen home directory - // This ensures each haxen instance has a unique ID while being deterministic - haxenServerID := generateHaxenServerID(haxenHome) - didService.Initialize(haxenServerID) - - // Create VC service with database storage (required) - if storageProvider != nil { - vcService = didServices.NewVCService(&cfg.Features.DID, didService, storageProvider) - } - - if vcService != nil { - vcService.Initialize() + // Generate af server ID based on agentfield home directory + // This ensures each agentfield instance has a unique ID while being deterministic + agentfieldServerID := generateAgentFieldServerID(agentfieldHome) + if err := didService.Initialize(agentfieldServerID); err != nil { + logger.Logger.Warn().Err(err).Msg("failed to initialize DID service") + didService = nil + } else { + // Create VC service with database storage (required) + if storageProvider != nil { + vcService = didServices.NewVCService(&cfg.Features.DID, didService, storageProvider) + } + + if vcService != nil { + if err := vcService.Initialize(); err != nil { + 
logger.Logger.Warn().Err(err).Msg("failed to initialize VC service") + vcService = nil + } + } } } } @@ -98,28 +106,28 @@ func CreateServiceContainer(cfg *config.Config, haxenHome string) *framework.Ser } // CreateServiceContainerWithDefaults creates a service container with default configuration -func CreateServiceContainerWithDefaults(haxenHome string) *framework.ServiceContainer { +func CreateServiceContainerWithDefaults(agentfieldHome string) *framework.ServiceContainer { // Use default config for now cfg := &config.Config{} // This will be enhanced when config is properly structured - return CreateServiceContainer(cfg, haxenHome) + return CreateServiceContainer(cfg, agentfieldHome) } -// generateHaxenServerID creates a deterministic haxen server ID based on the haxen home directory. -// This ensures each haxen instance has a unique ID while being deterministic for the same installation. -func generateHaxenServerID(haxenHome string) string { - // Use the absolute path of haxen home to generate a deterministic ID - absPath, err := filepath.Abs(haxenHome) +// generateAgentFieldServerID creates a deterministic af server ID based on the agentfield home directory. +// This ensures each agentfield instance has a unique ID while being deterministic for the same installation. 
+func generateAgentFieldServerID(agentfieldHome string) string { + // Use the absolute path of agentfield home to generate a deterministic ID + absPath, err := filepath.Abs(agentfieldHome) if err != nil { // Fallback to the original path if absolute path fails - absPath = haxenHome + absPath = agentfieldHome } - // Create a hash of the haxen home path to generate a unique but deterministic ID + // Create a hash of the agentfield home path to generate a unique but deterministic ID hash := sha256.Sum256([]byte(absPath)) - // Use first 16 characters of the hex hash as the haxen server ID + // Use first 16 characters of the hex hash as the af server ID // This provides uniqueness while keeping the ID manageable - haxenServerID := hex.EncodeToString(hash[:])[:16] + agentfieldServerID := hex.EncodeToString(hash[:])[:16] - return haxenServerID + return agentfieldServerID } diff --git a/control-plane/internal/application/container_test.go b/control-plane/internal/application/container_test.go index d47b46d7..d6d8ea62 100644 --- a/control-plane/internal/application/container_test.go +++ b/control-plane/internal/application/container_test.go @@ -6,17 +6,17 @@ import ( "strings" "testing" - "github.com/your-org/haxen/control-plane/internal/config" - storagecfg "github.com/your-org/haxen/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + storagecfg "github.com/Agent-Field/agentfield/control-plane/internal/storage" ) func TestCreateServiceContainerWithoutDID(t *testing.T) { t.Parallel() - haxenHome := t.TempDir() + agentfieldHome := t.TempDir() cfg := &config.Config{} - container := CreateServiceContainer(cfg, haxenHome) + container := CreateServiceContainer(cfg, agentfieldHome) if container.PackageService == nil || container.AgentService == nil || container.DevService == nil { t.Fatalf("expected core services to be initialised") @@ -29,13 +29,13 @@ func TestCreateServiceContainerWithoutDID(t *testing.T) { func 
TestCreateServiceContainerDIDWithoutStorageFallback(t *testing.T) { t.Parallel() - haxenHome := t.TempDir() + agentfieldHome := t.TempDir() cfg := &config.Config{} cfg.Features.DID.Enabled = true - cfg.Features.DID.Keystore.Path = filepath.Join(haxenHome, "keys") + cfg.Features.DID.Keystore.Path = filepath.Join(agentfieldHome, "keys") cfg.Storage.Mode = "invalid" - container := CreateServiceContainer(cfg, haxenHome) + container := CreateServiceContainer(cfg, agentfieldHome) if container.DIDService != nil || container.VCService != nil { t.Fatalf("expected DID services to remain nil when storage initialisation fails") @@ -45,15 +45,15 @@ func TestCreateServiceContainerDIDWithoutStorageFallback(t *testing.T) { func TestCreateServiceContainerWithLocalDID(t *testing.T) { t.Parallel() - haxenHome := t.TempDir() + agentfieldHome := t.TempDir() cfg := &config.Config{} cfg.Storage.Mode = "local" cfg.Storage.Local = storagecfg.LocalStorageConfig{ - DatabasePath: filepath.Join(haxenHome, "haxen.db"), - KVStorePath: filepath.Join(haxenHome, "haxen.bolt"), + DatabasePath: filepath.Join(agentfieldHome, "agentfield.db"), + KVStorePath: filepath.Join(agentfieldHome, "agentfield.bolt"), } cfg.Features.DID.Enabled = true - cfg.Features.DID.Keystore.Path = filepath.Join(haxenHome, "keys") + cfg.Features.DID.Keystore.Path = filepath.Join(agentfieldHome, "keys") ctx := context.Background() probe := storagecfg.NewLocalStorage(storagecfg.LocalStorageConfig{}) @@ -71,7 +71,7 @@ func TestCreateServiceContainerWithLocalDID(t *testing.T) { t.Fatalf("failed to close probe storage: %v", err) } - container := CreateServiceContainer(cfg, haxenHome) + container := CreateServiceContainer(cfg, agentfieldHome) if container.DIDService == nil { t.Fatalf("expected DID service to be initialised when configuration is valid") diff --git a/control-plane/internal/cli/add.go b/control-plane/internal/cli/add.go index 4ea6741c..d508bfb3 100644 --- a/control-plane/internal/cli/add.go +++ 
b/control-plane/internal/cli/add.go @@ -7,8 +7,8 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/mcp" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/mcp" "github.com/spf13/cobra" ) @@ -43,28 +43,28 @@ func NewAddCommand() *cobra.Command { cmd := &cobra.Command{ Use: "add [alias]", - Short: "Add dependencies to your Haxen agent project", - Long: `Add dependencies to your Haxen agent project. + Short: "Add dependencies to your AgentField agent project", + Long: `Add dependencies to your AgentField agent project. Supports adding MCP servers and regular agent packages with advanced configuration options. Examples: # Remote MCP servers (URL-based) - haxen add --mcp --url https://github.com/modelcontextprotocol/server-github - haxen add --mcp --url https://github.com/ferrislucas/iterm-mcp github-tools + af add --mcp --url https://github.com/modelcontextprotocol/server-github + af add --mcp --url https://github.com/ferrislucas/iterm-mcp github-tools # Local MCP servers with custom commands - haxen add --mcp my-server --run "node server.js --port {{port}}" \ + af add --mcp my-server --run "node server.js --port {{port}}" \ --setup "npm install" --setup "npm run build" - + # Python MCP server with environment variables - haxen add --mcp python-server --run "python server.py --port {{port}}" \ + af add --mcp python-server --run "python server.py --port {{port}}" \ --setup "pip install -r requirements.txt" \ --env "PYTHONPATH={{server_dir}}" \ --working-dir "./src" # Advanced configuration with health checks - haxen add --mcp enterprise-server \ + af add --mcp enterprise-server \ --url https://github.com/company/mcp-server \ --run "node dist/server.js --port {{port}} --config {{config_file}}" \ --setup "npm install" --setup "npm run build" \ @@ -74,8 +74,8 @@ Examples: --tags "enterprise" --tags 
"production" # Regular agent packages (future) - haxen add github.com/haxen-helpers/email-utils - haxen add github.com/openai/prompt-templates + af add github.com/agentfield-helpers/email-utils + af add github.com/openai/prompt-templates Template Variables: {{port}} - Dynamically assigned port number @@ -88,7 +88,7 @@ Template Variables: RunE: func(cmd *cobra.Command, args []string) error { opts.Source = args[0] // If --alias flag is not used, and a second positional arg is present, use it as alias. - if cmd.Flags().Changed("alias") == false && len(args) > 1 { + if !cmd.Flags().Changed("alias") && len(args) > 1 { opts.Alias = args[1] } // verbose flag is typically a persistent flag from the root command. @@ -121,7 +121,7 @@ func runAddCommandWithOptions(opts *MCPAddOptions, verbose bool) error { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } @@ -138,10 +138,10 @@ func runAddCommandWithOptions(opts *MCPAddOptions, verbose bool) error { return fmt.Errorf("only MCP server dependencies are currently supported. Use --mcp flag") } -func validateHaxenProject(projectDir string) error { - haxenYAMLPath := filepath.Join(projectDir, "haxen.yaml") - if _, err := os.Stat(haxenYAMLPath); os.IsNotExist(err) { - return fmt.Errorf("not a Haxen project directory (haxen.yaml not found)") +func validateAgentFieldProject(projectDir string) error { + agentfieldYAMLPath := filepath.Join(projectDir, "agentfield.yaml") + if _, err := os.Stat(agentfieldYAMLPath); os.IsNotExist(err) { + return fmt.Errorf("not a AgentField project directory (agentfield.yaml not found)") } return nil } @@ -159,19 +159,18 @@ type MCPAddCommand struct { // It performs initial processing and validation. 
func NewMCPAddCommand(projectDir string, opts *MCPAddOptions, verboseFlag bool) (*MCPAddCommand, error) { // Load application configuration - appCfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + appCfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - // Fallback for safety, though haxen.yaml should exist due to validateHaxenProject - appCfg, err = config.LoadConfig("haxen.yaml") + // Fallback for safety, though agentfield.yaml should exist due to validateAgentFieldProject + appCfg, err = config.LoadConfig("agentfield.yaml") if err != nil { - return nil, fmt.Errorf("failed to load haxen configuration: %w. Ensure haxen.yaml exists", err) + return nil, fmt.Errorf("failed to load af configuration: %w. Ensure agentfield.yaml exists", err) } } // Determine final alias - finalAlias := opts.Alias - if finalAlias == "" { - finalAlias = deriveAliasLocally(opts.Source) // Using local helper for now + if opts.Alias == "" { + opts.Alias = deriveAliasLocally(opts.Source) // Using local helper for now } // Construct MCPServerConfig (this will be part of the MCPAddCommand or its options) @@ -188,7 +187,6 @@ func NewMCPAddCommand(projectDir string, opts *MCPAddOptions, verboseFlag bool) // return nil, fmt.Errorf("MCP configuration validation failed:\n%s", validationErrs.Error()) // } - return &MCPAddCommand{ ProjectDir: projectDir, Opts: opts, @@ -209,7 +207,7 @@ func (cmd *MCPAddCommand) Execute() error { if finalAlias == "" { finalAlias = deriveAliasLocally(cmd.Opts.Source) } - + // This check should use the refined alias if !cmd.Opts.Force && finalAlias != "" { if mcpExists(cmd.ProjectDir, finalAlias) { @@ -252,16 +250,16 @@ func (cmd *MCPAddCommand) Execute() error { // TODO: Issue 4 - Re-enable validation with new simplified architecture // Temporarily disabled validator to avoid compilation errors /* - // Integrate mcp.ConfigValidator (from Task 1.3) - validator := mcp.NewBasicConfigValidator() - validationErrs 
:= validator.Validate(mcpServerCfg) - if len(validationErrs) > 0 { - return fmt.Errorf("MCP configuration validation failed:\n%s", validationErrs.Error()) - } + // Integrate mcp.ConfigValidator (from Task 1.3) + validator := mcp.NewBasicConfigValidator() + validationErrs := validator.Validate(mcpServerCfg) + if len(validationErrs) > 0 { + return fmt.Errorf("MCP configuration validation failed:\n%s", validationErrs.Error()) + } */ fmt.Printf(" %s Adding MCP server...\n", Blue("β†’")) - + // Use the new simplified Add method if err := cmd.Manager.Add(mcpServerCfg); err != nil { fmt.Printf(" %s %s\n", Red(StatusError), err.Error()) @@ -304,8 +302,8 @@ func (cmd *MCPAddCommand) Execute() error { fmt.Printf(" %s Capabilities discovery and skill generation handled by manager\n", Gray(StatusInfo)) fmt.Printf("\n%s %s\n", Blue("β†’"), Bold("Next steps:")) - fmt.Printf(" %s Start the MCP server: %s\n", Gray("1."), Cyan(fmt.Sprintf("haxen mcp start %s", mcpServerCfg.Alias))) - fmt.Printf(" %s Check status: %s\n", Gray("2."), Cyan("haxen mcp status")) + fmt.Printf(" %s Start the MCP server: %s\n", Gray("1."), Cyan(fmt.Sprintf("af mcp start %s", mcpServerCfg.Alias))) + fmt.Printf(" %s Check status: %s\n", Gray("2."), Cyan("af mcp status")) fmt.Printf(" %s Use MCP tools as regular skills: %s\n", Gray("3."), Cyan(fmt.Sprintf("await app.call(\"%s_\", ...)", mcpServerCfg.Alias))) fmt.Printf(" %s Generated skill file: %s\n", Gray("4."), Gray(fmt.Sprintf("skills/mcp_%s.py", mcpServerCfg.Alias))) @@ -316,7 +314,7 @@ func (cmd *MCPAddCommand) Execute() error { // func addMCPServer(projectDir string, opts *MCPAddOptions, verbose bool) error { // fmt.Printf("Adding MCP server: %s\n", Bold(opts.Source)) // -// appCfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) +// appCfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) // The orphaned code block that started with "if err != nil {" and was a remnant // of the original addMCPServer function 
body has been removed by this replacement. // The logic is now consolidated within MCPAddCommand.Execute(). @@ -330,14 +328,20 @@ func mcpExists(projectDir, alias string) bool { // deriveAliasLocally is a placeholder for a more robust alias derivation. // Ideally, this logic resides in the mcp package or is more comprehensive. func deriveAliasLocally(source string) string { - if strings.Contains(source, "@modelcontextprotocol/server-github") { return "github" } - if strings.Contains(source, "@modelcontextprotocol/server-memory") { return "memory" } - if strings.Contains(source, "@modelcontextprotocol/server-filesystem") { return "filesystem" } - + if strings.Contains(source, "@modelcontextprotocol/server-github") { + return "github" + } + if strings.Contains(source, "@modelcontextprotocol/server-memory") { + return "memory" + } + if strings.Contains(source, "@modelcontextprotocol/server-filesystem") { + return "filesystem" + } + // Basic derivation from source string (e.g., github:owner/repo -> repo) parts := strings.Split(source, "/") namePart := parts[len(parts)-1] - + nameParts := strings.SplitN(namePart, "@", 2) // remove version if any namePart = nameParts[0] @@ -345,10 +349,10 @@ func deriveAliasLocally(source string) string { if len(nameParts) > 1 { namePart = nameParts[1] } - + // Sanitize for use as a directory name (simple version) namePart = strings.ReplaceAll(namePart, ".", "_") - + if namePart == "" { return "mcp_server" // Default fallback } diff --git a/control-plane/internal/cli/commands/dev.go b/control-plane/internal/cli/commands/dev.go index f1419238..2726379e 100644 --- a/control-plane/internal/cli/commands/dev.go +++ b/control-plane/internal/cli/commands/dev.go @@ -3,9 +3,9 @@ package commands import ( "fmt" + "github.com/Agent-Field/agentfield/control-plane/internal/cli/framework" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" "github.com/spf13/cobra" - "github.com/your-org/haxen/control-plane/internal/cli/framework" 
- "github.com/your-org/haxen/control-plane/internal/core/domain" ) type DevCommand struct { @@ -25,7 +25,7 @@ func (cmd *DevCommand) GetName() string { } func (cmd *DevCommand) GetDescription() string { - return "Run a Haxen agent package in development mode" + return "Run a AgentField agent package in development mode" } func (cmd *DevCommand) BuildCobraCommand() *cobra.Command { @@ -36,19 +36,19 @@ func (cmd *DevCommand) BuildCobraCommand() *cobra.Command { cobraCmd := &cobra.Command{ Use: "dev [path]", Short: cmd.GetDescription(), - Long: `Run a Haxen agent package in development mode from the current directory or specified path. + Long: `Run a AgentField agent package in development mode from the current directory or specified path. This command is designed for local development and testing. It will: -- Look for haxen.yaml in the current directory (or specified path) +- Look for agentfield.yaml in the current directory (or specified path) - Start the agent without requiring installation - Provide verbose logging for development - Optionally watch for file changes and auto-restart Examples: - haxen dev # Run package in current directory - haxen dev ./my-agent # Run package in specified directory - haxen dev --port 8005 # Use specific port - haxen dev --watch # Watch for changes and auto-restart`, + af dev # Run package in current directory + af dev ./my-agent # Run package in specified directory + af dev --port 8005 # Use specific port + af dev --watch # Watch for changes and auto-restart`, Args: cobra.MaximumNArgs(1), RunE: func(cobraCmd *cobra.Command, args []string) error { return cmd.execute(args, port, watch, verbose) @@ -71,7 +71,7 @@ func (cmd *DevCommand) execute(args []string, port int, watch, verbose bool) err packagePath = "." 
} - cmd.output.PrintHeader("πŸš€ Haxen Development Mode") + cmd.output.PrintHeader("πŸš€ AgentField Development Mode") cmd.output.PrintInfo(fmt.Sprintf("Package path: %s", packagePath)) // Create dev options diff --git a/control-plane/internal/cli/commands/install.go b/control-plane/internal/cli/commands/install.go index 7e351be5..50fe2cf4 100644 --- a/control-plane/internal/cli/commands/install.go +++ b/control-plane/internal/cli/commands/install.go @@ -2,9 +2,10 @@ package commands import ( "fmt" + + "github.com/Agent-Field/agentfield/control-plane/internal/cli/framework" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" "github.com/spf13/cobra" - "github.com/your-org/haxen/control-plane/internal/cli/framework" - "github.com/your-org/haxen/control-plane/internal/core/domain" ) // InstallCommand implements the install command using the new framework @@ -28,28 +29,28 @@ func (cmd *InstallCommand) GetName() string { // GetDescription returns the command description func (cmd *InstallCommand) GetDescription() string { - return "Install a Haxen agent node package for local use" + return "Install a AgentField agent node package for local use" } // BuildCobraCommand builds the Cobra command func (cmd *InstallCommand) BuildCobraCommand() *cobra.Command { var force bool var verbose bool - + cobraCmd := &cobra.Command{ Use: "install ", Short: cmd.GetDescription(), - Long: `Install a Haxen agent node package for local use. + Long: `Install a AgentField agent node package for local use. 
The package can be: - A local directory path - A GitHub repository URL -- A package name from the Haxen registry +- A package name from the AgentField registry Examples: - haxen install ./my-agent - haxen install https://github.com/user/agent-repo - haxen install agent-name`, + agentfield install ./my-agent + agentfield install https://github.com/user/agent-repo + agentfield install agent-name`, Args: cobra.ExactArgs(1), RunE: func(cobraCmd *cobra.Command, args []string) error { // Update output formatter with verbose setting @@ -57,40 +58,40 @@ Examples: return cmd.execute(args[0], force, verbose) }, } - + cobraCmd.Flags().BoolVarP(&force, "force", "f", false, "Force reinstall if package exists") cobraCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output") - + return cobraCmd } // execute performs the actual installation func (cmd *InstallCommand) execute(packagePath string, force, verbose bool) error { - cmd.output.PrintHeader("Installing Haxen Package") + cmd.output.PrintHeader("Installing AgentField Package") cmd.output.PrintInfo(fmt.Sprintf("Package: %s", packagePath)) - + if verbose { cmd.output.PrintVerbose("Using new framework-based install command") } - + // Create install options options := domain.InstallOptions{ Force: force, Verbose: verbose, } - + // Show progress cmd.output.PrintProgress("Starting installation...") - + // Use the package service to install err := cmd.Services.PackageService.InstallPackage(packagePath, options) if err != nil { cmd.output.PrintError(fmt.Sprintf("Installation failed: %v", err)) return err } - + cmd.output.PrintSuccess("Package installed successfully") - + if verbose { // Show installed packages cmd.output.PrintVerbose("Listing installed packages...") @@ -101,6 +102,6 @@ func (cmd *InstallCommand) execute(packagePath string, force, verbose bool) erro cmd.output.PrintInfo(fmt.Sprintf("Total installed packages: %d", len(packages))) } } - + return nil } diff --git a/control-plane/internal/cli/commands/run.go 
b/control-plane/internal/cli/commands/run.go index 8fa13418..ad5fe7a1 100644 --- a/control-plane/internal/cli/commands/run.go +++ b/control-plane/internal/cli/commands/run.go @@ -2,9 +2,10 @@ package commands import ( "fmt" + + "github.com/Agent-Field/agentfield/control-plane/internal/cli/framework" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" "github.com/spf13/cobra" - "github.com/your-org/haxen/control-plane/internal/cli/framework" - "github.com/your-org/haxen/control-plane/internal/core/domain" ) // RunCommand implements the run command using the new framework @@ -28,7 +29,7 @@ func (cmd *RunCommand) GetName() string { // GetDescription returns the command description func (cmd *RunCommand) GetDescription() string { - return "Run an installed Haxen agent node package" + return "Run an installed AgentField agent node package" } // BuildCobraCommand builds the Cobra command @@ -36,19 +37,19 @@ func (cmd *RunCommand) BuildCobraCommand() *cobra.Command { var port int var detach bool var verbose bool - + cobraCmd := &cobra.Command{ Use: "run ", Short: cmd.GetDescription(), - Long: `Start an installed Haxen agent node package in the background. + Long: `Start an installed AgentField agent node package in the background. The agent node will be assigned an available port and registered with -the Haxen server if available. +the AgentField server if available. 
Examples: - haxen run email-helper - haxen run data-analyzer --port 8005 - haxen run my-agent --detach=false`, + af run email-helper + af run data-analyzer --port 8005 + af run my-agent --detach=false`, Args: cobra.ExactArgs(1), RunE: func(cobraCmd *cobra.Command, args []string) error { // Update output formatter with verbose setting @@ -56,19 +57,19 @@ Examples: return cmd.execute(args[0], port, detach, verbose) }, } - + cobraCmd.Flags().IntVarP(&port, "port", "p", 0, "Specific port to use (auto-assigned if not specified)") cobraCmd.Flags().BoolVarP(&detach, "detach", "d", true, "Run in background (default: true)") cobraCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output") - + return cobraCmd } // execute performs the actual agent execution func (cmd *RunCommand) execute(agentName string, port int, detach, verbose bool) error { - cmd.output.PrintHeader("Running Haxen Agent") + cmd.output.PrintHeader("Running AgentField Agent") cmd.output.PrintInfo(fmt.Sprintf("Agent: %s", agentName)) - + if verbose { cmd.output.PrintVerbose("Using new framework-based run command") if port > 0 { @@ -76,36 +77,36 @@ func (cmd *RunCommand) execute(agentName string, port int, detach, verbose bool) } cmd.output.PrintVerbose(fmt.Sprintf("Detach mode: %t", detach)) } - + // Create run options options := domain.RunOptions{ Port: port, Detach: detach, } - + // Show progress cmd.output.PrintProgress("Starting agent...") - + // Use the agent service to run the agent runningAgent, err := cmd.Services.AgentService.RunAgent(agentName, options) if err != nil { cmd.output.PrintError(fmt.Sprintf("Failed to run agent: %v", err)) return err } - + // Display success information cmd.output.PrintSuccess(fmt.Sprintf("Agent '%s' started successfully", agentName)) cmd.output.PrintInfo(fmt.Sprintf("PID: %d", runningAgent.PID)) cmd.output.PrintInfo(fmt.Sprintf("Port: %d", runningAgent.Port)) - + if runningAgent.LogFile != "" { cmd.output.PrintInfo(fmt.Sprintf("Logs: %s", 
runningAgent.LogFile)) } - + if verbose { cmd.output.PrintVerbose(fmt.Sprintf("Status: %s", runningAgent.Status)) cmd.output.PrintVerbose(fmt.Sprintf("Started at: %s", runningAgent.StartedAt.Format("2006-01-02 15:04:05"))) - + // Show running agents cmd.output.PrintVerbose("Listing all running agents...") agents, err := cmd.Services.AgentService.ListRunningAgents() @@ -115,12 +116,12 @@ func (cmd *RunCommand) execute(agentName string, port int, detach, verbose bool) cmd.output.PrintInfo(fmt.Sprintf("Total running agents: %d", len(agents))) } } - + if detach { cmd.output.PrintInfo("Agent is running in the background") - cmd.output.PrintInfo("Use 'haxen stop " + agentName + "' to stop the agent") - cmd.output.PrintInfo("Use 'haxen logs " + agentName + "' to view logs") + cmd.output.PrintInfo("Use 'af stop " + agentName + "' to stop the agent") + cmd.output.PrintInfo("Use 'af logs " + agentName + "' to view logs") } - + return nil } diff --git a/control-plane/internal/cli/config.go b/control-plane/internal/cli/config.go index c5d67160..446c0a65 100644 --- a/control-plane/internal/cli/config.go +++ b/control-plane/internal/cli/config.go @@ -13,31 +13,31 @@ import ( "golang.org/x/term" "gopkg.in/yaml.v3" - "github.com/your-org/haxen/control-plane/internal/packages" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" ) var ( - configList bool - configSet string - configUnset string + configList bool + configSet string + configUnset string ) // NewConfigCommand creates the config command func NewConfigCommand() *cobra.Command { cmd := &cobra.Command{ Use: "config ", - Short: "Configure environment variables for an installed Haxen agent package", - Long: `Configure environment variables for an installed Haxen agent package. + Short: "Configure environment variables for an installed AgentField agent package", + Long: `Configure environment variables for an installed AgentField agent package. 
This command allows you to set up required and optional environment variables for installed packages. It will prompt for each variable and create a .env file in the package directory that will be loaded when the agent runs. Examples: - haxen config my-agent # Interactive configuration - haxen config my-agent --list # List current configuration - haxen config my-agent --set KEY=VALUE # Set specific variable - haxen config my-agent --unset KEY # Remove variable`, + af config my-agent # Interactive configuration + af config my-agent --list # List current configuration + af config my-agent --set KEY=VALUE # Set specific variable + af config my-agent --unset KEY # Remove variable`, Args: cobra.ExactArgs(1), Run: runConfigCommand, } @@ -51,10 +51,10 @@ Examples: func runConfigCommand(cmd *cobra.Command, args []string) { packageName := args[0] - haxenHome := getHaxenHomeDir() + agentfieldHome := getAgentFieldHomeDir() configManager := &PackageConfigManager{ - HaxenHome: haxenHome, + AgentFieldHome: agentfieldHome, } if configList { @@ -97,7 +97,7 @@ func runConfigCommand(cmd *cobra.Command, args []string) { // PackageConfigManager handles environment configuration for packages type PackageConfigManager struct { - HaxenHome string + AgentFieldHome string } // InteractiveConfig runs interactive configuration for a package @@ -139,7 +139,7 @@ func (pcm *PackageConfigManager) InteractiveConfig(packageName string) error { if currentValue == "" { currentValue = envVar.Default } - + value, err := pcm.promptForVariable(envVar, currentValue) if err != nil { return err @@ -159,7 +159,7 @@ func (pcm *PackageConfigManager) InteractiveConfig(packageName string) error { } fmt.Printf("βœ… Environment configuration saved to: %s/.env\n", packagePath) - fmt.Printf("πŸ’‘ Run 'haxen run %s' to start the agent with these settings\n", packageName) + fmt.Printf("πŸ’‘ Run 'af run %s' to start the agent with these settings\n", packageName) return nil } @@ -361,7 +361,7 @@ func (pcm 
*PackageConfigManager) UnsetVariable(packageName, key string) error { // loadPackageMetadata loads package metadata and returns the package path func (pcm *PackageConfigManager) loadPackageMetadata(packageName string) (*packages.PackageMetadata, string, error) { // Load registry to get package path - registryPath := filepath.Join(pcm.HaxenHome, "installed.yaml") + registryPath := filepath.Join(pcm.AgentFieldHome, "installed.yaml") registry := &packages.InstallationRegistry{ Installed: make(map[string]packages.InstalledPackage), } @@ -378,7 +378,7 @@ func (pcm *PackageConfigManager) loadPackageMetadata(packageName string) (*packa } // Load package metadata - metadataPath := filepath.Join(installedPackage.Path, "haxen-package.yaml") + metadataPath := filepath.Join(installedPackage.Path, "agentfield-package.yaml") data, err := os.ReadFile(metadataPath) if err != nil { return nil, "", fmt.Errorf("failed to read package metadata: %w", err) @@ -395,7 +395,7 @@ func (pcm *PackageConfigManager) loadPackageMetadata(packageName string) (*packa // loadEnvFile loads environment variables from .env file func (pcm *PackageConfigManager) loadEnvFile(packagePath string) (map[string]string, error) { envPath := filepath.Join(packagePath, ".env") - + data, err := os.ReadFile(envPath) if err != nil { return nil, err @@ -403,24 +403,24 @@ func (pcm *PackageConfigManager) loadEnvFile(packagePath string) (map[string]str envVars := make(map[string]string) lines := strings.Split(string(data), "\n") - + for _, line := range lines { line = strings.TrimSpace(line) if line == "" || strings.HasPrefix(line, "#") { continue } - + parts := strings.SplitN(line, "=", 2) if len(parts) == 2 { key := strings.TrimSpace(parts[0]) value := strings.TrimSpace(parts[1]) - + // Remove quotes if present if (strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"")) || - (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { + (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { 
value = value[1 : len(value)-1] } - + envVars[key] = value } } @@ -431,12 +431,12 @@ func (pcm *PackageConfigManager) loadEnvFile(packagePath string) (map[string]str // saveEnvFile saves environment variables to .env file func (pcm *PackageConfigManager) saveEnvFile(packagePath string, envVars map[string]string) error { envPath := filepath.Join(packagePath, ".env") - + var lines []string - lines = append(lines, "# Environment variables for Haxen agent") - lines = append(lines, "# Generated by 'haxen config' command") + lines = append(lines, "# Environment variables for AgentField agent") + lines = append(lines, "# Generated by 'af config' command") lines = append(lines, "") - + for key, value := range envVars { // Quote values that contain spaces or special characters if strings.ContainsAny(value, " \t\n\r\"'\\$") { @@ -444,7 +444,7 @@ func (pcm *PackageConfigManager) saveEnvFile(packagePath string, envVars map[str } lines = append(lines, fmt.Sprintf("%s=%s", key, value)) } - + content := strings.Join(lines, "\n") + "\n" return os.WriteFile(envPath, []byte(content), 0600) // Restrictive permissions for secrets } diff --git a/control-plane/internal/cli/framework/command.go b/control-plane/internal/cli/framework/command.go index 63901878..83eb6ab1 100644 --- a/control-plane/internal/cli/framework/command.go +++ b/control-plane/internal/cli/framework/command.go @@ -1,10 +1,10 @@ package framework import ( + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" "github.com/spf13/cobra" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/internal/storage" ) // Command represents a CLI command that can be built into a Cobra command @@ -16,14 +16,14 @@ type Command interface { // 
ServiceContainer holds all the services that commands might need type ServiceContainer struct { - PackageService interfaces.PackageService - AgentService interfaces.AgentService - DevService interfaces.DevService - DIDService *services.DIDService - VCService *services.VCService - KeystoreService *services.KeystoreService - DIDRegistry *services.DIDRegistry - StorageProvider storage.StorageProvider + PackageService interfaces.PackageService + AgentService interfaces.AgentService + DevService interfaces.DevService + DIDService *services.DIDService + VCService *services.VCService + KeystoreService *services.KeystoreService + DIDRegistry *services.DIDRegistry + StorageProvider storage.StorageProvider } // BaseCommand provides common functionality for all commands diff --git a/control-plane/internal/cli/framework/output.go b/control-plane/internal/cli/framework/output.go index 979e5190..5eae319e 100644 --- a/control-plane/internal/cli/framework/output.go +++ b/control-plane/internal/cli/framework/output.go @@ -35,7 +35,7 @@ func (o *OutputFormatter) PrintWarning(message string) { fmt.Printf("⚠️ %s\n", color.YellowString(message)) } -// PrintHeader prints a header message with haxen emoji and bold text +// PrintHeader prints a header message with agentfield emoji and bold text func (o *OutputFormatter) PrintHeader(message string) { fmt.Printf("\n%s %s\n", color.CyanString("🧠"), color.New(color.Bold).Sprint(message)) } diff --git a/control-plane/internal/cli/init.go b/control-plane/internal/cli/init.go index 8790c168..73187c14 100644 --- a/control-plane/internal/cli/init.go +++ b/control-plane/internal/cli/init.go @@ -15,7 +15,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - "github.com/your-org/haxen/control-plane/internal/templates" + "github.com/Agent-Field/agentfield/control-plane/internal/templates" ) var ( @@ -39,7 +39,7 @@ var ( Foreground(lipgloss.Color("196")). Bold(true) - successStyle = lipgloss.NewStyle(). + successStyle = lipgloss.NewStyle(). 
//nolint:unused // Reserved for future use Foreground(lipgloss.Color("42")). Bold(true) ) @@ -56,7 +56,7 @@ type initModel struct { textInput string err error done bool - nonInteractive bool + nonInteractive bool //nolint:unused // Reserved for future use } func (m initModel) Init() tea.Cmd { @@ -153,7 +153,7 @@ func (m initModel) View() string { var s strings.Builder // Title - s.WriteString(titleStyle.Render("🎯 Creating Haxen Agent") + "\n\n") + s.WriteString(titleStyle.Render("🎯 Creating AgentField Agent") + "\n\n") switch m.step { case 0: // Project name @@ -200,21 +200,21 @@ func NewInitCommand() *cobra.Command { cmd := &cobra.Command{ Use: "init [project-name]", - Short: "Initialize a new Haxen agent project", - Long: `Initialize a new Haxen agent project with a predefined + Short: "Initialize a new AgentField agent project", + Long: `Initialize a new AgentField agent project with a predefined directory structure and essential files. This command sets up a new project, including: - Language-specific project structure (Python or Go) - Basic agent implementation with example reasoner - README.md and .gitignore files -- Configuration for connecting to the Haxen control plane +- Configuration for connecting to the AgentField control plane Example: - haxen init # Interactive mode - haxen init my-new-agent # With project name - haxen init my-agent --language python - haxen init my-agent -l go --author "John Doe" --email "john@example.com"`, + af init # Interactive mode + af init my-new-agent # With project name + af init my-agent --language python + af init my-agent -l go --author "John Doe" --email "john@example.com"`, Args: cobra.MaximumNArgs(1), Run: func(cmd *cobra.Command, args []string) { var projectName string @@ -373,7 +373,7 @@ Example: fmt.Println(" 2. go mod download") } - fmt.Println(" 3. haxen server # Start Haxen server") + fmt.Println(" 3. af server # Start AgentField server") if language == "python" { fmt.Println(" 4. 
python main.py # Start your agent") @@ -402,7 +402,7 @@ Example: fmt.Println(" 4. Restart your agent") fmt.Println() - printInfo("Learn more: https://docs.haxen.ai") + printInfo("Learn more: https://docs.agentfield.ai") fmt.Println() printSuccess("Happy building! πŸŽ‰") }, @@ -413,9 +413,15 @@ Example: cmd.Flags().StringVarP(&authorEmail, "email", "e", "", "Author email for the project") cmd.Flags().BoolVar(&nonInteractive, "non-interactive", false, "Run in non-interactive mode (use defaults)") - viper.BindPFlag("language", cmd.Flags().Lookup("language")) - viper.BindPFlag("author.name", cmd.Flags().Lookup("author")) - viper.BindPFlag("author.email", cmd.Flags().Lookup("email")) + if err := viper.BindPFlag("language", cmd.Flags().Lookup("language")); err != nil { + printError("failed to bind language flag: %v", err) + } + if err := viper.BindPFlag("author.name", cmd.Flags().Lookup("author")); err != nil { + printError("failed to bind author flag: %v", err) + } + if err := viper.BindPFlag("author.email", cmd.Flags().Lookup("email")); err != nil { + printError("failed to bind email flag: %v", err) + } return cmd } diff --git a/control-plane/internal/cli/list.go b/control-plane/internal/cli/list.go index f1d89e51..6b09c9b7 100644 --- a/control-plane/internal/cli/list.go +++ b/control-plane/internal/cli/list.go @@ -1,26 +1,27 @@ package cli import ( + "errors" "fmt" "os" "path/filepath" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/your-org/haxen/control-plane/internal/packages" ) // NewListCommand creates the list command func NewListCommand() *cobra.Command { cmd := &cobra.Command{ Use: "list", - Short: "List installed Haxen agent node packages", - Long: `Display all installed Haxen agent node packages with their status. + Short: "List installed AgentField agent node packages", + Long: `Display all installed AgentField agent node packages with their status. 
Shows package name, version, status (running/stopped), and port if running. Examples: - haxen list`, + af list`, Run: runListCommand, } @@ -28,8 +29,8 @@ Examples: } func runListCommand(cmd *cobra.Command, args []string) { - haxenHome := getHaxenHomeDir() - registryPath := filepath.Join(haxenHome, "installed.yaml") + agentfieldHome := getAgentFieldHomeDir() + registryPath := filepath.Join(agentfieldHome, "installed.yaml") // Load registry registry := &packages.InstallationRegistry{ @@ -37,12 +38,18 @@ func runListCommand(cmd *cobra.Command, args []string) { } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + cmd.PrintErrf("failed to parse registry: %v\n", err) + return + } + } else if !errors.Is(err, os.ErrNotExist) { + cmd.PrintErrf("failed to read registry: %v\n", err) + return } if len(registry.Installed) == 0 { fmt.Println("πŸ“¦ No agent node packages installed") - fmt.Println("πŸ’‘ Install packages with: haxen install ") + fmt.Println("πŸ’‘ Install packages with: agentfield install ") return } @@ -69,7 +76,7 @@ func runListCommand(cmd *cobra.Command, args []string) { } fmt.Println("πŸ’‘ Commands:") - fmt.Println(" haxen run - Start an agent node") - fmt.Println(" haxen stop - Stop a running agent node") - fmt.Println(" haxen logs - View agent node logs") + fmt.Println(" af run - Start an agent node") + fmt.Println(" af stop - Stop a running agent node") + fmt.Println(" af logs - View agent node logs") } diff --git a/control-plane/internal/cli/logs.go b/control-plane/internal/cli/logs.go index 4c55dcc0..d5b05d19 100644 --- a/control-plane/internal/cli/logs.go +++ b/control-plane/internal/cli/logs.go @@ -1,15 +1,16 @@ package cli import ( + "errors" "fmt" "os" "os/exec" // Added missing import "path/filepath" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" 
"github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/packages" ) var ( @@ -21,14 +22,14 @@ var ( func NewLogsCommand() *cobra.Command { cmd := &cobra.Command{ Use: "logs ", - Short: "View logs for a Haxen agent node", - Long: `Display logs for an installed Haxen agent node package. + Short: "View logs for a AgentField agent node", + Long: `Display logs for an installed AgentField agent node package. Shows the most recent log entries from the agent node's log file. Examples: - haxen logs email-helper - haxen logs data-analyzer --follow`, + af logs email-helper + af logs data-analyzer --follow`, Args: cobra.ExactArgs(1), Run: runLogsCommand, } @@ -43,9 +44,9 @@ func runLogsCommand(cmd *cobra.Command, args []string) { agentNodeName := args[0] logViewer := &LogViewer{ - HaxenHome: getHaxenHomeDir(), - Follow: logsFollow, - Tail: logsTail, + AgentFieldHome: getAgentFieldHomeDir(), + Follow: logsFollow, + Tail: logsTail, } if err := logViewer.ViewLogs(agentNodeName); err != nil { @@ -56,21 +57,25 @@ func runLogsCommand(cmd *cobra.Command, args []string) { // LogViewer handles viewing agent node logs type LogViewer struct { - HaxenHome string - Follow bool - Tail int + AgentFieldHome string + Follow bool + Tail int } // ViewLogs displays logs for an agent node func (lv *LogViewer) ViewLogs(agentNodeName string) error { // Load registry to get log file path - registryPath := filepath.Join(lv.HaxenHome, "installed.yaml") + registryPath := filepath.Join(lv.AgentFieldHome, "installed.yaml") registry := &packages.InstallationRegistry{ Installed: make(map[string]packages.InstalledPackage), } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return fmt.Errorf("failed to parse registry: %w", err) + } + } else if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to read 
registry: %w", err) } agentNode, exists := registry.Installed[agentNodeName] diff --git a/control-plane/internal/cli/mcp.go b/control-plane/internal/cli/mcp.go index 4c4881ee..a82cc60e 100644 --- a/control-plane/internal/cli/mcp.go +++ b/control-plane/internal/cli/mcp.go @@ -6,8 +6,8 @@ import ( "path/filepath" "strings" - "github.com/your-org/haxen/control-plane/internal/config" // Ensured this import is correct - "github.com/your-org/haxen/control-plane/internal/mcp" + "github.com/Agent-Field/agentfield/control-plane/internal/config" // Ensured this import is correct + "github.com/Agent-Field/agentfield/control-plane/internal/mcp" "github.com/spf13/cobra" ) @@ -16,8 +16,8 @@ import ( func NewMCPCommand() *cobra.Command { cmd := &cobra.Command{ Use: "mcp", - Short: "Manage MCP servers in your Haxen agent project", - Long: `Manage Model Context Protocol (MCP) servers in your Haxen agent project. + Short: "Manage MCP servers in your AgentField agent project", + Long: `Manage Model Context Protocol (MCP) servers in your AgentField agent project. 
MCP servers provide external tools and resources that can be integrated into your agent.`, } @@ -54,15 +54,15 @@ func runMCPStatusCommand(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = config.LoadConfig("agentfield.yaml") // Fallback if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } manager := mcp.NewMCPManager(cfg, projectDir, verbose) @@ -72,10 +72,10 @@ func runMCPStatusCommand(cmd *cobra.Command, args []string) error { } PrintHeader("MCP Server Status") - + if len(servers) == 0 { PrintInfo("No MCP servers installed") - fmt.Printf("\n%s %s\n", Blue("β†’"), "Add an MCP server: haxen add --mcp @modelcontextprotocol/server-github") + fmt.Printf("\n%s %s\n", Blue("β†’"), "Add an MCP server: af add --mcp @modelcontextprotocol/server-github") return nil } @@ -112,12 +112,12 @@ func runMCPStatusCommand(cmd *cobra.Command, args []string) error { } fmt.Printf(" %s %s\n", Gray("Version:"), server.Version) fmt.Printf(" %s %s", Gray("Status:"), statusText) - + if server.Status == mcp.StatusRunning && server.PID > 0 { fmt.Printf(" (PID: %d, Port: %d)", server.PID, server.Port) } fmt.Printf("\n") - + if server.StartedAt != nil { fmt.Printf(" %s %s\n", Gray("Started:"), server.StartedAt.Format("2006-01-02 15:04:05")) } @@ -142,33 +142,33 @@ func NewMCPStartCommand() *cobra.Command { func runMCPStartCommand(cmd *cobra.Command, args []string) error { alias := args[0] - + projectDir, err := os.Getwd() if err != nil { return fmt.Errorf("failed to get 
current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = config.LoadConfig("agentfield.yaml") // Fallback if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } manager := mcp.NewMCPManager(cfg, projectDir, verbose) - + PrintInfo(fmt.Sprintf("Starting MCP server: %s", alias)) - + _, err = manager.Start(alias) if err != nil { PrintError(fmt.Sprintf("Failed to start MCP server: %v", err)) return err } - + PrintSuccess(fmt.Sprintf("MCP server '%s' started successfully", alias)) return nil } @@ -188,32 +188,32 @@ func NewMCPStopCommand() *cobra.Command { func runMCPStopCommand(cmd *cobra.Command, args []string) error { alias := args[0] - + projectDir, err := os.Getwd() if err != nil { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = config.LoadConfig("agentfield.yaml") // Fallback if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } manager := mcp.NewMCPManager(cfg, projectDir, verbose) - + PrintInfo(fmt.Sprintf("Stopping MCP server: %s", alias)) - + if err := manager.Stop(alias); err != nil { PrintError(fmt.Sprintf("Failed to stop MCP server: %v", err)) 
return err } - + PrintSuccess(fmt.Sprintf("MCP server '%s' stopped successfully", alias)) return nil } @@ -233,14 +233,14 @@ func NewMCPRestartCommand() *cobra.Command { func runMCPRestartCommand(cmd *cobra.Command, args []string) error { alias := args[0] - + PrintInfo(fmt.Sprintf("Restarting MCP server: %s", alias)) - + // Stop then start if err := runMCPStopCommand(cmd, args); err != nil { return err } - + return runMCPStartCommand(cmd, args) } @@ -248,7 +248,7 @@ func runMCPRestartCommand(cmd *cobra.Command, args []string) error { func NewMCPLogsCommand() *cobra.Command { var followFlag bool var tailLines int - + cmd := &cobra.Command{ Use: "logs ", Short: "Show logs for an MCP server", @@ -267,18 +267,18 @@ func NewMCPLogsCommand() *cobra.Command { func runMCPLogsCommand(cmd *cobra.Command, args []string, follow bool, tail int) error { alias := args[0] - + projectDir, err := os.Getwd() if err != nil { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } logFile := filepath.Join(projectDir, "packages", "mcp", alias, fmt.Sprintf("%s.log", alias)) - + if _, err := os.Stat(logFile); os.IsNotExist(err) { PrintError(fmt.Sprintf("Log file not found for MCP server '%s'", alias)) return nil @@ -286,14 +286,14 @@ func runMCPLogsCommand(cmd *cobra.Command, args []string, follow bool, tail int) PrintInfo(fmt.Sprintf("Showing logs for MCP server: %s", alias)) fmt.Printf("%s %s\n\n", Gray("Log file:"), logFile) - + // For now, just show that we would display logs // In a full implementation, we would use tail command or read the file fmt.Printf("%s Logs would be displayed here (last %d lines)\n", Gray("β†’"), tail) if follow { fmt.Printf("%s Following logs... 
(Press Ctrl+C to stop)\n", Gray("β†’")) } - + return nil } @@ -317,7 +317,7 @@ func NewMCPSkillsGenerateCommand() *cobra.Command { cmd := &cobra.Command{ Use: "generate [alias]", Short: "Generate skill files for MCP servers", - Long: `Generate Python skill files that wrap MCP tools as Haxen skills.`, + Long: `Generate Python skill files that wrap MCP tools as AgentField skills.`, Args: cobra.MaximumNArgs(1), RunE: runMCPSkillsGenerateCommand, } @@ -333,24 +333,24 @@ func runMCPSkillsGenerateCommand(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } verboseFlag, _ := cmd.Flags().GetBool("verbose") generator := mcp.NewSkillGenerator(projectDir, verboseFlag) - + if len(args) == 1 { // Generate skills for specific server alias := args[0] PrintInfo(fmt.Sprintf("Generating skills for MCP server: %s", alias)) - + result, err := generator.GenerateSkillsForServer(alias) if err != nil { PrintError(fmt.Sprintf("Failed to generate skills: %v", err)) return err } - + if result.Generated { PrintSuccess(fmt.Sprintf("Skills generated for '%s'", alias)) fmt.Printf(" %s Generated file: %s (%d tools)\n", Gray("β†’"), Gray(fmt.Sprintf("skills/mcp_%s.py", alias)), result.ToolCount) @@ -361,15 +361,15 @@ func runMCPSkillsGenerateCommand(cmd *cobra.Command, args []string) error { } else { // Generate skills for all servers PrintInfo("Generating skills for all MCP servers...") - + if err := generator.GenerateSkillsForAllServers(); err != nil { PrintError(fmt.Sprintf("Failed to generate skills: %v", err)) return err } - + PrintSuccess("All skills processed successfully") } - + return nil } @@ -391,7 +391,7 @@ func runMCPSkillsListCommand(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { 
+ if err := validateAgentFieldProject(projectDir); err != nil { return err } @@ -407,7 +407,7 @@ func runMCPSkillsListCommand(cmd *cobra.Command, args []string) error { } PrintHeader("Auto-Generated MCP Skills") - + skillCount := 0 for _, entry := range entries { if entry.IsDir() || !strings.HasPrefix(entry.Name(), "mcp_") || !strings.HasSuffix(entry.Name(), ".py") { @@ -418,23 +418,23 @@ func runMCPSkillsListCommand(cmd *cobra.Command, args []string) error { alias := strings.TrimSuffix(strings.TrimPrefix(entry.Name(), "mcp_"), ".py") fmt.Printf("%s %s\n", Green("βœ“"), Bold(alias)) fmt.Printf(" %s %s\n", Gray("File:"), entry.Name()) - + // Try to get server info - if cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")); err == nil { + if cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")); err == nil { discovery := mcp.NewCapabilityDiscovery(cfg, projectDir) if capability, err := discovery.GetServerCapability(alias); err == nil { fmt.Printf(" %s %d tools available\n", Gray("Tools:"), len(capability.Tools)) } } } - + if skillCount == 0 { PrintInfo("No auto-generated MCP skills found") - fmt.Printf("\n%s %s\n", Blue("β†’"), "Generate skills: haxen mcp skills generate") + fmt.Printf("\n%s %s\n", Blue("β†’"), "Generate skills: af mcp skills generate") } else { fmt.Printf("\n%s %d skill files found\n", Gray("Total:"), skillCount) } - + return nil } @@ -456,40 +456,40 @@ func runMCPSkillsRefreshCommand(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = config.LoadConfig("agentfield.yaml") // Fallback if err 
!= nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } manager := mcp.NewMCPManager(cfg, projectDir, verbose) - + PrintInfo("Refreshing capabilities and regenerating skills...") - + // Get all servers and refresh their capabilities servers, err := manager.Status() if err != nil { PrintError(fmt.Sprintf("Failed to get server list: %v", err)) return err } - + if len(servers) == 0 { PrintInfo("No MCP servers found") return nil } - + // For each server, discover capabilities for _, server := range servers { if _, err := manager.DiscoverCapabilities(server.Alias); err != nil { PrintWarning(fmt.Sprintf("Failed to refresh capabilities for %s: %v", server.Alias, err)) } } - + PrintSuccess("All skills refreshed successfully") return nil } @@ -497,7 +497,7 @@ func runMCPSkillsRefreshCommand(cmd *cobra.Command, args []string) error { // NewMCPRemoveCommand creates the mcp remove command func NewMCPRemoveCommand() *cobra.Command { var forceFlag bool - + cmd := &cobra.Command{ Use: "remove ", Short: "Remove an MCP server", @@ -515,36 +515,36 @@ func NewMCPRemoveCommand() *cobra.Command { func runMCPRemoveCommand(cmd *cobra.Command, args []string, force bool) error { alias := args[0] - + projectDir, err := os.Getwd() if err != nil { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = config.LoadConfig("agentfield.yaml") // Fallback if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } manager := mcp.NewMCPManager(cfg, 
projectDir, verbose) - + PrintInfo(fmt.Sprintf("Removing MCP server: %s", alias)) - + if err := manager.Remove(alias); err != nil { if !force && strings.Contains(err.Error(), "is running") { - PrintError(fmt.Sprintf("MCP server is running. Stop it first or use --force")) + PrintError("MCP server is running. Stop it first or use --force") return err } PrintError(fmt.Sprintf("Failed to remove MCP server: %v", err)) return err } - + PrintSuccess(fmt.Sprintf("MCP server '%s' removed successfully", alias)) return nil } @@ -552,7 +552,7 @@ func runMCPRemoveCommand(cmd *cobra.Command, args []string, force bool) error { // NewMCPDiscoverCommand creates the mcp discover command func NewMCPDiscoverCommand() *cobra.Command { var refreshFlag bool - + cmd := &cobra.Command{ Use: "discover [alias]", Short: "Discover MCP server capabilities", @@ -574,61 +574,61 @@ func runMCPDiscoverCommand(cmd *cobra.Command, args []string, refresh bool) erro return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = config.LoadConfig("agentfield.yaml") // Fallback if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } - + discovery := mcp.NewCapabilityDiscovery(cfg, projectDir) - + if len(args) == 1 { // Discover capabilities for specific server alias := args[0] PrintInfo(fmt.Sprintf("Discovering capabilities for MCP server: %s", alias)) - + capability, err := discovery.GetServerCapability(alias) if err != nil { PrintError(fmt.Sprintf("Failed to discover capabilities: %v", err)) return err } - + 
displayServerCapability(*capability) } else { // Discover capabilities for all servers PrintInfo("Discovering capabilities for all MCP servers...") - + if refresh { if err := discovery.RefreshCapabilities(); err != nil { PrintError(fmt.Sprintf("Failed to refresh capabilities: %v", err)) return err } } - + capabilities, err := discovery.DiscoverCapabilities() if err != nil { PrintError(fmt.Sprintf("Failed to discover capabilities: %v", err)) return err } - + if len(capabilities) == 0 { PrintInfo("No MCP servers found") return nil } - + PrintHeader("MCP Server Capabilities") for _, capability := range capabilities { displayServerCapability(capability) fmt.Println() } } - + return nil } @@ -638,7 +638,7 @@ func displayServerCapability(capability mcp.MCPCapability) { fmt.Printf(" %s %s\n", Gray("Version:"), capability.Version) fmt.Printf(" %s %s\n", Gray("Transport:"), capability.Transport) fmt.Printf(" %s %s\n", Gray("Endpoint:"), capability.Endpoint) - + if len(capability.Tools) > 0 { fmt.Printf(" %s %s (%d)\n", Gray("Tools:"), Green("βœ“"), len(capability.Tools)) for _, tool := range capability.Tools { @@ -647,7 +647,7 @@ func displayServerCapability(capability mcp.MCPCapability) { } else { fmt.Printf(" %s %s\n", Gray("Tools:"), Red("None")) } - + if len(capability.Resources) > 0 { fmt.Printf(" %s %s (%d)\n", Gray("Resources:"), Green("βœ“"), len(capability.Resources)) for _, resource := range capability.Resources { @@ -677,15 +677,15 @@ func runMCPMigrateCommand(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to get current directory: %w", err) } - if err := validateHaxenProject(projectDir); err != nil { + if err := validateAgentFieldProject(projectDir); err != nil { return err } - cfg, err := config.LoadConfig(filepath.Join(projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(projectDir, "agentfield.yaml")) if err != nil { - cfg, err = config.LoadConfig("haxen.yaml") // Fallback + cfg, err = 
config.LoadConfig("agentfield.yaml") // Fallback if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } @@ -695,22 +695,22 @@ func runMCPMigrateCommand(cmd *cobra.Command, args []string) error { // Migrate specific server alias := args[0] PrintInfo(fmt.Sprintf("Migrating MCP server: %s", alias)) - + if err := migrateSingleServer(discovery, projectDir, alias); err != nil { PrintError(fmt.Sprintf("Failed to migrate %s: %v", alias, err)) return err } - + PrintSuccess(fmt.Sprintf("Successfully migrated %s", alias)) } else { // Migrate all servers PrintInfo("Migrating all MCP servers...") - + if err := migrateAllServers(discovery, projectDir); err != nil { PrintError(fmt.Sprintf("Migration failed: %v", err)) return err } - + PrintSuccess("All servers migrated successfully") } @@ -719,32 +719,32 @@ func runMCPMigrateCommand(cmd *cobra.Command, args []string) error { func migrateSingleServer(discovery *mcp.CapabilityDiscovery, projectDir, alias string) error { serverDir := filepath.Join(projectDir, "packages", "mcp", alias) - + // Check if server directory exists if _, err := os.Stat(serverDir); os.IsNotExist(err) { return fmt.Errorf("MCP server '%s' not found", alias) } - + // Check if already migrated configPath := filepath.Join(serverDir, "config.json") if _, err := os.Stat(configPath); err == nil { PrintInfo(fmt.Sprintf("Server '%s' already uses config.json format", alias)) return nil } - + // Check if old format exists oldPath := filepath.Join(serverDir, "mcp.json") if _, err := os.Stat(oldPath); os.IsNotExist(err) { return fmt.Errorf("no mcp.json found for server '%s'", alias) } - + // Perform migration using the discovery's migration function // We need to access the migration function, so let's trigger it by calling discoverServerCapability _, err := discovery.GetServerCapability(alias) if err != nil { return fmt.Errorf("migration failed: %w", err) } - + return nil } @@ 
-754,18 +754,18 @@ func migrateAllServers(discovery *mcp.CapabilityDiscovery, projectDir string) er PrintInfo("No MCP servers found") return nil } - + entries, err := os.ReadDir(mcpDir) if err != nil { return fmt.Errorf("failed to read MCP directory: %w", err) } - + migratedCount := 0 for _, entry := range entries { if !entry.IsDir() { continue } - + alias := entry.Name() if err := migrateSingleServer(discovery, projectDir, alias); err != nil { PrintWarning(fmt.Sprintf("Failed to migrate %s: %v", alias, err)) @@ -774,12 +774,12 @@ func migrateAllServers(discovery *mcp.CapabilityDiscovery, projectDir string) er PrintInfo(fmt.Sprintf("Migrated: %s", alias)) } } - + if migratedCount == 0 { PrintInfo("No servers needed migration") } else { PrintInfo(fmt.Sprintf("Migrated %d servers", migratedCount)) } - + return nil } diff --git a/control-plane/internal/cli/root.go b/control-plane/internal/cli/root.go index c0845d00..bbbc4899 100644 --- a/control-plane/internal/cli/root.go +++ b/control-plane/internal/cli/root.go @@ -4,10 +4,10 @@ import ( "fmt" "os" - "github.com/your-org/haxen/control-plane/internal/application" - "github.com/your-org/haxen/control-plane/internal/cli/commands" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/application" + "github.com/Agent-Field/agentfield/control-plane/internal/cli/commands" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -24,12 +24,13 @@ var ( postgresURLFlag string ) -// NewRootCommand creates and returns the root Cobra command for the Haxen CLI. +// NewRootCommand creates and returns the root Cobra command for the AgentField CLI. 
func NewRootCommand(runServerFunc func(cmd *cobra.Command, args []string)) *cobra.Command { RootCmd := &cobra.Command{ - Use: "haxen", - Short: "Haxen AI Agent Platform", - Long: `Haxen is a comprehensive AI agent platform for building, managing, and deploying AI agent capabilities.`, + Use: "af", + Aliases: []string{"agentfield"}, + Short: "AgentField AI Agent Platform", + Long: `AgentField is a comprehensive AI agent platform for building, managing, and deploying AI agent capabilities.`, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { // Initialize logging based on verbose flag logger.InitLogger(verbose) @@ -41,14 +42,14 @@ func NewRootCommand(runServerFunc func(cmd *cobra.Command, args []string)) *cobr // Default to server mode when no subcommand is provided (backward compatibility) Run: runServerFunc, } - RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "Path to configuration file (e.g., config/haxen.yaml)") + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "Path to configuration file (e.g., config/agentfield.yaml)") RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging") // Flags for the server command (moved from main.go) RootCmd.PersistentFlags().BoolVar(&openBrowserFlag, "open", true, "Open browser to UI (if UI is enabled)") RootCmd.PersistentFlags().BoolVar(&uiDevFlag, "ui-dev", false, "Run with UI development server (proxies to backend)") RootCmd.PersistentFlags().BoolVar(&backendOnlyFlag, "backend-only", false, "Run only backend APIs, UI served separately") - RootCmd.PersistentFlags().IntVar(&portFlag, "port", 0, "Port for the haxen server (overrides config if set)") + RootCmd.PersistentFlags().IntVar(&portFlag, "port", 0, "Port for the af server (overrides config if set)") RootCmd.PersistentFlags().BoolVar(&noVCExecution, "no-vc-execution", false, "Disable generating verifiable credentials for executions") RootCmd.PersistentFlags().StringVar(&storageModeFlag, "storage-mode", 
"", "Override the storage backend (local or postgres)") RootCmd.PersistentFlags().StringVar(&postgresURLFlag, "postgres-url", "", "PostgreSQL connection URL or DSN (implies --storage-mode=postgres)") @@ -60,7 +61,7 @@ func NewRootCommand(runServerFunc func(cmd *cobra.Command, args []string)) *cobr // Create service container for framework commands cfg := &config.Config{} // Use default config for now - services := application.CreateServiceContainer(cfg, getHaxenHomeDir()) + services := application.CreateServiceContainer(cfg, getAgentFieldHomeDir()) // Add framework-based commands (migrated commands) installCommand := commands.NewInstallCommand(services) @@ -85,8 +86,8 @@ func NewRootCommand(runServerFunc func(cmd *cobra.Command, args []string)) *cobr // Add the server command serverCmd := &cobra.Command{ Use: "server", - Short: "Run the Haxen AI Agent Platform server", - Long: `Starts the Haxen AI Agent Platform server, providing API endpoints and UI.`, + Short: "Run the AgentField AI Agent Platform server", + Long: `Starts the AgentField AI Agent Platform server, providing API endpoints and UI.`, Run: runServerFunc, } RootCmd.AddCommand(serverCmd) @@ -103,7 +104,7 @@ func initConfig() { // Search config in current directory and "config" directory viper.AddConfigPath(".") viper.AddConfigPath("./config") - viper.SetConfigName("haxen") + viper.SetConfigName("agentfield") viper.SetConfigType("yaml") } diff --git a/control-plane/internal/cli/root_test.go b/control-plane/internal/cli/root_test.go index 09b8506c..32a9c1ee 100644 --- a/control-plane/internal/cli/root_test.go +++ b/control-plane/internal/cli/root_test.go @@ -65,8 +65,8 @@ func TestRootCommandHonorsConfigFlag(t *testing.T) { resetCLIStateForTest() configDir := t.TempDir() - configPath := filepath.Join(configDir, "haxen.yaml") - require.NoError(t, os.WriteFile(configPath, []byte("haxen:\n port: 7000\n"), 0o644)) + configPath := filepath.Join(configDir, "agentfield.yaml") + require.NoError(t, 
os.WriteFile(configPath, []byte("agentfield:\n port: 7000\n"), 0o644)) var received string cmd := NewRootCommand(func(cmd *cobra.Command, args []string) { diff --git a/control-plane/internal/cli/stop.go b/control-plane/internal/cli/stop.go index 72ac6b46..57ee700f 100644 --- a/control-plane/internal/cli/stop.go +++ b/control-plane/internal/cli/stop.go @@ -10,24 +10,24 @@ import ( "syscall" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/your-org/haxen/control-plane/internal/packages" ) // NewStopCommand creates the stop command func NewStopCommand() *cobra.Command { cmd := &cobra.Command{ Use: "stop ", - Short: "Stop a running Haxen agent node", - Long: `Stop a running Haxen agent node package. + Short: "Stop a running AgentField agent node", + Long: `Stop a running AgentField agent node package. The agent node process will be terminated gracefully and its status will be updated in the registry. Examples: - haxen stop email-helper - haxen stop data-analyzer`, + af stop email-helper + af stop data-analyzer`, Args: cobra.ExactArgs(1), Run: runStopCommand, } @@ -39,7 +39,7 @@ func runStopCommand(cmd *cobra.Command, args []string) { agentNodeName := args[0] stopper := &AgentNodeStopper{ - HaxenHome: getHaxenHomeDir(), + AgentFieldHome: getAgentFieldHomeDir(), } if err := stopper.StopAgentNode(agentNodeName); err != nil { @@ -50,7 +50,7 @@ func runStopCommand(cmd *cobra.Command, args []string) { // AgentNodeStopper handles stopping agent nodes type AgentNodeStopper struct { - HaxenHome string + AgentFieldHome string } // StopAgentNode stops a running agent node @@ -87,24 +87,24 @@ func (as *AgentNodeStopper) StopAgentNode(agentNodeName string) error { httpShutdownSuccess := false if agentNode.Runtime.Port != nil { fmt.Printf("πŸ›‘ Attempting graceful HTTP shutdown for agent %s on port %d\n", agentNodeName, *agentNode.Runtime.Port) - + // Construct agent base URL baseURL := 
fmt.Sprintf("http://localhost:%d", *agentNode.Runtime.Port) shutdownURL := fmt.Sprintf("%s/shutdown", baseURL) - + // Create shutdown request requestBody := map[string]interface{}{ "graceful": true, "timeout_seconds": 30, } - + bodyBytes, err := json.Marshal(requestBody) if err == nil { req, err := http.NewRequest("POST", shutdownURL, bytes.NewReader(bodyBytes)) if err == nil { req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Haxen-CLI/1.0") - + req.Header.Set("User-Agent", "AgentField-CLI/1.0") + client := &http.Client{Timeout: 10 * time.Second} resp, err := client.Do(req) if err == nil { @@ -112,7 +112,7 @@ func (as *AgentNodeStopper) StopAgentNode(agentNodeName string) error { if resp.StatusCode == 200 { fmt.Printf("βœ… HTTP shutdown request accepted for agent %s\n", agentNodeName) httpShutdownSuccess = true - + // Wait a moment for graceful shutdown time.Sleep(3 * time.Second) } else { @@ -128,7 +128,7 @@ func (as *AgentNodeStopper) StopAgentNode(agentNodeName string) error { // If HTTP shutdown failed or not available, fall back to process signals if !httpShutdownSuccess { fmt.Printf("πŸ”„ Falling back to process signal shutdown for agent %s\n", agentNodeName) - + // Send SIGTERM for graceful shutdown if err := process.Signal(os.Interrupt); err != nil { // If graceful shutdown fails, force kill @@ -138,7 +138,7 @@ func (as *AgentNodeStopper) StopAgentNode(agentNodeName string) error { } else { // Wait for graceful shutdown, then check if still running time.Sleep(3 * time.Second) - + // Check if process is still running if err := process.Signal(syscall.Signal(0)); err == nil { // Process still running, force kill @@ -168,7 +168,7 @@ func (as *AgentNodeStopper) StopAgentNode(agentNodeName string) error { // loadRegistry loads the installation registry func (as *AgentNodeStopper) loadRegistry() (*packages.InstallationRegistry, error) { - registryPath := filepath.Join(as.HaxenHome, "installed.yaml") + registryPath := 
filepath.Join(as.AgentFieldHome, "installed.yaml") registry := &packages.InstallationRegistry{ Installed: make(map[string]packages.InstalledPackage), @@ -185,7 +185,7 @@ func (as *AgentNodeStopper) loadRegistry() (*packages.InstallationRegistry, erro // saveRegistry saves the installation registry func (as *AgentNodeStopper) saveRegistry(registry *packages.InstallationRegistry) error { - registryPath := filepath.Join(as.HaxenHome, "installed.yaml") + registryPath := filepath.Join(as.AgentFieldHome, "installed.yaml") data, err := yaml.Marshal(registry) if err != nil { diff --git a/control-plane/internal/cli/uninstall.go b/control-plane/internal/cli/uninstall.go index 78dc1900..16d2ab26 100644 --- a/control-plane/internal/cli/uninstall.go +++ b/control-plane/internal/cli/uninstall.go @@ -4,8 +4,8 @@ import ( "fmt" "os" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" "github.com/spf13/cobra" - "github.com/your-org/haxen/control-plane/internal/packages" ) var ( @@ -26,8 +26,8 @@ This command will: - Clean up any associated logs Examples: - haxen uninstall my-agent - haxen uninstall sentiment-analyzer --force`, + agentfield uninstall my-agent + agentfield uninstall sentiment-analyzer --force`, Args: cobra.ExactArgs(1), Run: runUninstallCommand, } @@ -42,8 +42,8 @@ func runUninstallCommand(cmd *cobra.Command, args []string) { // Create uninstaller uninstaller := &packages.PackageUninstaller{ - HaxenHome: getHaxenHomeDir(), - Force: uninstallForce, + AgentFieldHome: getAgentFieldHomeDir(), + Force: uninstallForce, } // Uninstall package diff --git a/control-plane/internal/cli/utils.go b/control-plane/internal/cli/utils.go index ded5cddc..dd3c65fe 100644 --- a/control-plane/internal/cli/utils.go +++ b/control-plane/internal/cli/utils.go @@ -10,32 +10,32 @@ import ( "github.com/fatih/color" ) -// getHaxenHomeDir returns the Haxen home directory (~/.haxen) and ensures it exists -func getHaxenHomeDir() string { +// getAgentFieldHomeDir returns the 
AgentField home directory (~/.agentfield) and ensures it exists +func getAgentFieldHomeDir() string { homeDir, err := os.UserHomeDir() if err != nil { PrintError(fmt.Sprintf("Failed to get user home directory: %v", err)) os.Exit(1) } - haxenHome := filepath.Join(homeDir, ".haxen") + agentfieldHome := filepath.Join(homeDir, ".agentfield") - // Ensure .haxen directory exists - if err := os.MkdirAll(haxenHome, 0755); err != nil { - PrintError(fmt.Sprintf("Failed to create .haxen directory: %v", err)) + // Ensure .agentfield directory exists + if err := os.MkdirAll(agentfieldHome, 0755); err != nil { + PrintError(fmt.Sprintf("Failed to create .agentfield directory: %v", err)) os.Exit(1) } // Ensure subdirectories exist subdirs := []string{"packages", "logs", "config"} for _, subdir := range subdirs { - if err := os.MkdirAll(filepath.Join(haxenHome, subdir), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(agentfieldHome, subdir), 0755); err != nil { PrintError(fmt.Sprintf("Failed to create %s directory: %v", subdir, err)) os.Exit(1) } } - return haxenHome + return agentfieldHome } // Professional CLI status symbols diff --git a/control-plane/internal/cli/vc.go b/control-plane/internal/cli/vc.go index 38a58af6..14e32c2b 100644 --- a/control-plane/internal/cli/vc.go +++ b/control-plane/internal/cli/vc.go @@ -5,14 +5,13 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "strings" "time" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/spf13/cobra" - "github.com/your-org/haxen/control-plane/pkg/types" ) // NewVCCommand creates the vc command with subcommands @@ -20,7 +19,7 @@ func NewVCCommand() *cobra.Command { vcCmd := &cobra.Command{ Use: "vc", Short: "Verifiable Credential operations", - Long: `Commands for working with Haxen Verifiable Credentials (VCs)`, + Long: `Commands for working with AgentField Verifiable Credentials (VCs)`, } vcCmd.AddCommand(NewVCVerifyCommand()) @@ -36,8 +35,8 @@ func 
NewVCVerifyCommand() *cobra.Command { verifyCmd := &cobra.Command{ Use: "verify ", - Short: "Verify a Haxen Verifiable Credential", - Long: `Verify the cryptographic signature and integrity of a Haxen Verifiable Credential. + Short: "Verify a AgentField Verifiable Credential", + Long: `Verify the cryptographic signature and integrity of a AgentField Verifiable Credential. This command supports offline verification with bundled DIDs and online verification with web resolution.`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -69,25 +68,25 @@ type VerifyOptions struct { // DIDResolutionInfo represents DID resolution information type DIDResolutionInfo struct { - DID string `json:"did"` - Method string `json:"method"` - PublicKeyJWK map[string]interface{} `json:"public_key_jwk"` - WebURL string `json:"web_url,omitempty"` - CachedAt string `json:"cached_at,omitempty"` - ResolvedFrom string `json:"resolved_from"` + DID string `json:"did"` + Method string `json:"method"` + PublicKeyJWK map[string]interface{} `json:"public_key_jwk"` + WebURL string `json:"web_url,omitempty"` + CachedAt string `json:"cached_at,omitempty"` + ResolvedFrom string `json:"resolved_from"` } // EnhancedVCChain represents a VC chain with DID resolution bundle type EnhancedVCChain struct { - WorkflowID string `json:"workflow_id"` - GeneratedAt string `json:"generated_at"` - TotalExecutions int `json:"total_executions"` - CompletedExecutions int `json:"completed_executions"` - WorkflowStatus string `json:"workflow_status"` - ExecutionVCs []types.ExecutionVC `json:"execution_vcs"` - WorkflowVC types.WorkflowVC `json:"workflow_vc"` - DIDResolutionBundle map[string]DIDResolutionInfo `json:"did_resolution_bundle,omitempty"` - VerificationMetadata VerificationMetadata `json:"verification_metadata,omitempty"` + WorkflowID string `json:"workflow_id"` + GeneratedAt string `json:"generated_at"` + TotalExecutions int `json:"total_executions"` + CompletedExecutions int 
`json:"completed_executions"` + WorkflowStatus string `json:"workflow_status"` + ExecutionVCs []types.ExecutionVC `json:"execution_vcs"` + WorkflowVC types.WorkflowVC `json:"workflow_vc"` + DIDResolutionBundle map[string]DIDResolutionInfo `json:"did_resolution_bundle,omitempty"` + VerificationMetadata VerificationMetadata `json:"verification_metadata,omitempty"` } // VerificationMetadata contains metadata about the verification process @@ -100,18 +99,18 @@ type VerificationMetadata struct { // VCVerificationResult represents the comprehensive verification result type VCVerificationResult struct { - Valid bool `json:"valid"` - Type string `json:"type"` - WorkflowID string `json:"workflow_id,omitempty"` - SignatureValid bool `json:"signature_valid"` - FormatValid bool `json:"format_valid"` - Message string `json:"message"` - Error string `json:"error,omitempty"` - VerifiedAt string `json:"verified_at"` - ComponentResults []ComponentVerification `json:"component_results,omitempty"` - DIDResolutions []DIDResolutionResult `json:"did_resolutions,omitempty"` - VerificationSteps []VerificationStep `json:"verification_steps,omitempty"` - Summary VerificationSummary `json:"summary"` + Valid bool `json:"valid"` + Type string `json:"type"` + WorkflowID string `json:"workflow_id,omitempty"` + SignatureValid bool `json:"signature_valid"` + FormatValid bool `json:"format_valid"` + Message string `json:"message"` + Error string `json:"error,omitempty"` + VerifiedAt string `json:"verified_at"` + ComponentResults []ComponentVerification `json:"component_results,omitempty"` + DIDResolutions []DIDResolutionResult `json:"did_resolutions,omitempty"` + VerificationSteps []VerificationStep `json:"verification_steps,omitempty"` + Summary VerificationSummary `json:"summary"` } // ComponentVerification represents verification result for a single component @@ -149,12 +148,12 @@ type VerificationStep struct { // VerificationSummary provides a high-level summary type VerificationSummary struct 
{ - TotalComponents int `json:"total_components"` - ValidComponents int `json:"valid_components"` - TotalDIDs int `json:"total_dids"` - ResolvedDIDs int `json:"resolved_dids"` - TotalSignatures int `json:"total_signatures"` - ValidSignatures int `json:"valid_signatures"` + TotalComponents int `json:"total_components"` + ValidComponents int `json:"valid_components"` + TotalDIDs int `json:"total_dids"` + ResolvedDIDs int `json:"resolved_dids"` + TotalSignatures int `json:"total_signatures"` + ValidSignatures int `json:"valid_signatures"` } func verifyVC(vcFilePath string, options VerifyOptions) error { @@ -167,7 +166,7 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { // Step 1: Read and parse VC file step1 := VerificationStep{Step: 1, Description: "Reading VC file"} - vcData, err := ioutil.ReadFile(vcFilePath) + vcData, err := os.ReadFile(vcFilePath) if err != nil { step1.Success = false step1.Error = fmt.Sprintf("Failed to read VC file: %v", err) @@ -203,7 +202,7 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { enhancedChain = convertLegacyChain(workflowChain) } else { step2.Success = false - step2.Error = "Invalid VC format: not a recognized Haxen VC structure" + step2.Error = "Invalid VC format: not a recognized AgentField VC structure" result.VerificationSteps = append(result.VerificationSteps, step2) result.Valid = false result.FormatValid = false @@ -224,14 +223,14 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { step4 := VerificationStep{Step: 4, Description: "Resolving DIDs"} didResolutions := make(map[string]DIDResolutionInfo) resolvedCount := 0 - + for _, did := range uniqueDIDs { resolution, err := resolveDID(did, enhancedChain.DIDResolutionBundle, options) didResult := DIDResolutionResult{ DID: did, Method: getDIDMethod(did), } - + if err != nil { didResult.Success = false didResult.Error = err.Error() @@ -246,7 +245,7 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { } 
result.DIDResolutions = append(result.DIDResolutions, didResult) } - + step4.Success = resolvedCount > 0 step4.Details = fmt.Sprintf("Resolved %d/%d DIDs", resolvedCount, len(uniqueDIDs)) if resolvedCount == 0 { @@ -256,18 +255,18 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { // Step 5: Enhanced comprehensive verification step5 := VerificationStep{Step: 5, Description: "Performing comprehensive verification"} - + // Use the enhanced verifier for comprehensive checks enhancedVerifier := NewEnhancedVCVerifier(didResolutions, options.Verbose) comprehensiveResult := enhancedVerifier.VerifyEnhancedVCChain(enhancedChain) - + // Convert comprehensive result to legacy format for compatibility validSignatures := 0 totalSignatures := len(enhancedChain.ExecutionVCs) if enhancedChain.WorkflowVC.VCDocument != nil { totalSignatures++ } - + for _, compResult := range comprehensiveResult.ComponentResults { if compResult.SignatureValid { validSignatures++ @@ -285,7 +284,7 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { } result.ComponentResults = append(result.ComponentResults, legacyResult) } - + step5.Success = comprehensiveResult.Valid step5.Details = fmt.Sprintf("Comprehensive verification completed - Score: %.1f/100", comprehensiveResult.OverallScore) if !comprehensiveResult.Valid { @@ -308,7 +307,7 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { // Final result based on comprehensive verification result.SignatureValid = comprehensiveResult.SecurityAnalysis.SecurityScore > 80.0 result.Valid = comprehensiveResult.Valid - + if result.Valid { result.Message = fmt.Sprintf("Workflow VC chain verified successfully (Score: %.1f/100)", comprehensiveResult.OverallScore) } else { @@ -322,8 +321,8 @@ func verifyVC(vcFilePath string, options VerifyOptions) error { result.Summary = VerificationSummary{ TotalComponents: len(enhancedChain.ExecutionVCs), ValidComponents: len(result.ComponentResults), - TotalDIDs: len(uniqueDIDs), - 
ResolvedDIDs: resolvedCount, + TotalDIDs: len(uniqueDIDs), + ResolvedDIDs: resolvedCount, TotalSignatures: totalSignatures, ValidSignatures: validSignatures, } @@ -402,7 +401,7 @@ func resolveWebDID(did string) (DIDResolutionInfo, error) { } url := fmt.Sprintf("https://%s%s", domain, path) - + resp, err := http.Get(url) if err != nil { return DIDResolutionInfo{}, fmt.Errorf("failed to fetch DID document: %v", err) @@ -438,14 +437,14 @@ func resolveFromWeb(did, resolver string) (DIDResolutionInfo, error) { if strings.HasPrefix(did, "did:key:") { return resolveKeyDID(did) } - + // For other methods, would need a universal resolver return DIDResolutionInfo{}, fmt.Errorf("web resolution not supported for DID method: %s", getDIDMethod(did)) } func resolveFromCustom(did, resolver string) (DIDResolutionInfo, error) { url := fmt.Sprintf("%s/%s", strings.TrimSuffix(resolver, "/"), did) - + resp, err := http.Get(url) if err != nil { return DIDResolutionInfo{}, fmt.Errorf("failed to resolve DID: %v", err) @@ -500,6 +499,7 @@ func extractPublicKeyFromDIDDoc(didDoc map[string]interface{}) (map[string]inter return publicKeyJwk, nil } +//nolint:unused // Reserved for future signature verification func verifyVCSignature(vcDoc types.VCDocument, resolution DIDResolutionInfo) (bool, error) { // Create canonical representation for verification vcCopy := vcDoc @@ -533,6 +533,7 @@ func verifyVCSignature(vcDoc types.VCDocument, resolution DIDResolutionInfo) (bo return ed25519.Verify(publicKey, canonicalBytes, signatureBytes), nil } +//nolint:unused // Reserved for future signature verification func verifyWorkflowVCSignature(vcDoc types.WorkflowVCDocument, resolution DIDResolutionInfo) (bool, error) { // Create canonical representation for verification vcCopy := vcDoc @@ -618,7 +619,7 @@ func outputPretty(result VCVerificationResult, verbose bool) error { status = "βœ… VALID" } - fmt.Printf("Haxen VC Verification: %s\n", status) + fmt.Printf("AgentField VC Verification: %s\n", status) 
fmt.Printf("Type: %s\n", result.Type) if result.WorkflowID != "" { diff --git a/control-plane/internal/cli/vc_verification_enhanced.go b/control-plane/internal/cli/vc_verification_enhanced.go index 1a59d497..e0206ff6 100644 --- a/control-plane/internal/cli/vc_verification_enhanced.go +++ b/control-plane/internal/cli/vc_verification_enhanced.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // EnhancedVCVerifier provides comprehensive VC verification with all integrity checks @@ -83,11 +83,11 @@ type SecurityAnalysis struct { // ComplianceChecks represents compliance and audit verification results type ComplianceChecks struct { - W3CCompliance bool `json:"w3c_compliance"` - HaxenStandardCompliance bool `json:"haxen_standard_compliance"` - AuditTrailIntegrity bool `json:"audit_trail_integrity"` - DataIntegrityChecks bool `json:"data_integrity_checks"` - Issues []VerificationIssue `json:"issues"` + W3CCompliance bool `json:"w3c_compliance"` + AgentFieldStandardCompliance bool `json:"agentfield_standard_compliance"` + AuditTrailIntegrity bool `json:"audit_trail_integrity"` + DataIntegrityChecks bool `json:"data_integrity_checks"` + Issues []VerificationIssue `json:"issues"` } // VerifyEnhancedVCChain performs comprehensive verification of a VC chain @@ -198,7 +198,7 @@ func (v *EnhancedVCVerifier) verifyExecutionVCComprehensive(execVC types.Executi return result } - // CRITICAL CHECK 7: Status consistency (with Haxen system status mapping) + // CRITICAL CHECK 7: Status consistency (with AgentField system status mapping) if !v.isStatusConsistent(execVC.Status, vcDoc.CredentialSubject.Execution.Status) { result.Valid = false result.Error = fmt.Sprintf("Status mismatch: metadata=%s, vc_document=%s", execVC.Status, vcDoc.CredentialSubject.Execution.Status) @@ -387,11 +387,11 @@ func (v *EnhancedVCVerifier) performSecurityAnalysis(chain EnhancedVCChain) Secu // 
performComplianceChecks performs compliance verification func (v *EnhancedVCVerifier) performComplianceChecks(chain EnhancedVCChain) ComplianceChecks { result := ComplianceChecks{ - W3CCompliance: true, - HaxenStandardCompliance: true, - AuditTrailIntegrity: true, - DataIntegrityChecks: true, - Issues: []VerificationIssue{}, + W3CCompliance: true, + AgentFieldStandardCompliance: true, + AuditTrailIntegrity: true, + DataIntegrityChecks: true, + Issues: []VerificationIssue{}, } // Check W3C compliance for each VC @@ -581,7 +581,7 @@ func (v *EnhancedVCVerifier) calculateOverallScore(result *ComprehensiveVerifica return score } -// isStatusConsistent checks if status values are consistent, accounting for Haxen system status mapping +// isStatusConsistent checks if status values are consistent, accounting for AgentField system status mapping func (v *EnhancedVCVerifier) isStatusConsistent(metadataStatus, vcDocStatus string) bool { return types.NormalizeExecutionStatus(metadataStatus) == types.NormalizeExecutionStatus(vcDocStatus) } diff --git a/control-plane/internal/config/config.go b/control-plane/internal/config/config.go index e0abe858..d21701be 100644 --- a/control-plane/internal/config/config.go +++ b/control-plane/internal/config/config.go @@ -1,18 +1,19 @@ package config import ( - "fmt" // Added for fmt.Errorf - "gopkg.in/yaml.v3" // Added for yaml.Unmarshal - "os" // Added for os.Stat, os.ReadFile - "path/filepath" // Added for filepath.Join + "fmt" // Added for fmt.Errorf + "os" // Added for os.Stat, os.ReadFile + "path/filepath" // Added for filepath.Join "time" - "github.com/your-org/haxen/control-plane/internal/storage" + "gopkg.in/yaml.v3" // Added for yaml.Unmarshal + + "github.com/Agent-Field/agentfield/control-plane/internal/storage" ) -// Config holds the entire configuration for the Haxen server. +// Config holds the entire configuration for the AgentField server. 
type Config struct { - Haxen HaxenConfig `yaml:"haxen" mapstructure:"haxen"` + AgentField AgentFieldConfig `yaml:"agentfield" mapstructure:"agentfield"` Agents AgentsConfig `yaml:"agents" mapstructure:"agents"` Features FeatureConfig `yaml:"features" mapstructure:"features"` Storage StorageConfig `yaml:"storage" mapstructure:"storage"` // Added storage config @@ -31,8 +32,8 @@ type UIConfig struct { BackendURL string `yaml:"backend_url" mapstructure:"backend_url"` // URL of the backend if UI is separate } -// HaxenConfig holds the core Haxen server configuration. -type HaxenConfig struct { +// AgentFieldConfig holds the core AgentField server configuration. +type AgentFieldConfig struct { Port int `yaml:"port"` DatabaseURL string `yaml:"database_url"` MaxConcurrentRequests int `yaml:"max_concurrent_requests"` @@ -165,21 +166,21 @@ type CORSConfig struct { // with the implementation in the storage package. type StorageConfig = storage.StorageConfig -// DataDirectoriesConfig holds configuration for Haxen data directory paths +// DataDirectoriesConfig holds configuration for AgentField data directory paths type DataDirectoriesConfig struct { - HaxenHome string `yaml:"haxen_home" mapstructure:"haxen_home"` // Can be overridden by HAXEN_HOME env var - DatabaseDir string `yaml:"database_dir" mapstructure:"database_dir"` // Relative to haxen_home - KeysDir string `yaml:"keys_dir" mapstructure:"keys_dir"` // Relative to haxen_home - DIDRegistriesDir string `yaml:"did_registries_dir" mapstructure:"did_registries_dir"` // Relative to haxen_home - VCsDir string `yaml:"vcs_dir" mapstructure:"vcs_dir"` // Relative to haxen_home - AgentsDir string `yaml:"agents_dir" mapstructure:"agents_dir"` // Relative to haxen_home - LogsDir string `yaml:"logs_dir" mapstructure:"logs_dir"` // Relative to haxen_home - ConfigDir string `yaml:"config_dir" mapstructure:"config_dir"` // Relative to haxen_home - TempDir string `yaml:"temp_dir" mapstructure:"temp_dir"` // Relative to haxen_home + 
AgentFieldHome string `yaml:"agentfield_home" mapstructure:"agentfield_home"` // Can be overridden by AGENTFIELD_HOME env var + DatabaseDir string `yaml:"database_dir" mapstructure:"database_dir"` // Relative to agentfield_home + KeysDir string `yaml:"keys_dir" mapstructure:"keys_dir"` // Relative to agentfield_home + DIDRegistriesDir string `yaml:"did_registries_dir" mapstructure:"did_registries_dir"` // Relative to agentfield_home + VCsDir string `yaml:"vcs_dir" mapstructure:"vcs_dir"` // Relative to agentfield_home + AgentsDir string `yaml:"agents_dir" mapstructure:"agents_dir"` // Relative to agentfield_home + LogsDir string `yaml:"logs_dir" mapstructure:"logs_dir"` // Relative to agentfield_home + ConfigDir string `yaml:"config_dir" mapstructure:"config_dir"` // Relative to agentfield_home + TempDir string `yaml:"temp_dir" mapstructure:"temp_dir"` // Relative to agentfield_home } -// DefaultConfigPath is the default path for the haxen configuration file. -const DefaultConfigPath = "haxen.yaml" // Or "./haxen.yaml", "config/haxen.yaml" depending on convention +// DefaultConfigPath is the default path for the af configuration file. +const DefaultConfigPath = "agentfield.yaml" // Or "./agentfield.yaml", "config/agentfield.yaml" depending on convention // LoadConfig reads the configuration from the given path or default paths. func LoadConfig(configPath string) (*Config, error) { @@ -193,7 +194,7 @@ func LoadConfig(configPath string) (*Config, error) { // This part might need more sophisticated logic depending on project structure // For now, let's assume configPath is either absolute or relative to CWD. 
// If not found, try a common "config/" subdirectory - altPath := filepath.Join("config", "haxen.yaml") + altPath := filepath.Join("config", "agentfield.yaml") if _, err2 := os.Stat(altPath); err2 == nil { configPath = altPath } else { diff --git a/control-plane/internal/config/package_config.go b/control-plane/internal/config/package_config.go index cc275d82..5dfdcf37 100644 --- a/control-plane/internal/config/package_config.go +++ b/control-plane/internal/config/package_config.go @@ -11,26 +11,26 @@ import ( "gopkg.in/yaml.v3" ) -// ConfigurationField represents a single configuration field in haxen-package.yaml +// ConfigurationField represents a single configuration field in agentfield-package.yaml type ConfigurationField struct { - Name string `yaml:"name" json:"name"` - Description string `yaml:"description" json:"description"` - Type string `yaml:"type" json:"type"` // "string", "secret", "integer", "float", "boolean", "select" - Default string `yaml:"default" json:"default,omitempty"` - Validation string `yaml:"validation" json:"validation,omitempty"` // regex pattern - Options []string `yaml:"options" json:"options,omitempty"` // for select type - Min *int `yaml:"min" json:"min,omitempty"` // for integer/float - Max *int `yaml:"max" json:"max,omitempty"` // for integer/float + Name string `yaml:"name" json:"name"` + Description string `yaml:"description" json:"description"` + Type string `yaml:"type" json:"type"` // "string", "secret", "integer", "float", "boolean", "select" + Default string `yaml:"default" json:"default,omitempty"` + Validation string `yaml:"validation" json:"validation,omitempty"` // regex pattern + Options []string `yaml:"options" json:"options,omitempty"` // for select type + Min *int `yaml:"min" json:"min,omitempty"` // for integer/float + Max *int `yaml:"max" json:"max,omitempty"` // for integer/float } -// ConfigurationSchema represents the configuration schema from haxen-package.yaml +// ConfigurationSchema represents the 
configuration schema from agentfield-package.yaml type ConfigurationSchema struct { Required []ConfigurationField `yaml:"required" json:"required"` Optional []ConfigurationField `yaml:"optional" json:"optional"` } -// HaxenPackageConfig represents the structure of haxen-package.yaml -type HaxenPackageConfig struct { +// AgentFieldPackageConfig represents the structure of agentfield-package.yaml +type AgentFieldPackageConfig struct { Name string `yaml:"name"` Version string `yaml:"version"` Description string `yaml:"description"` @@ -72,28 +72,28 @@ type RuntimeConfig struct { } type MetadataConfig struct { - CreatedAt string `yaml:"created_at"` - SDKVersion string `yaml:"sdk_version"` - Language string `yaml:"language"` - Platform string `yaml:"platform"` + CreatedAt string `yaml:"created_at"` + SDKVersion string `yaml:"sdk_version"` + Language string `yaml:"language"` + Platform string `yaml:"platform"` } -// LoadHaxenPackageConfig loads and parses a haxen-package.yaml file -func LoadHaxenPackageConfig(packagePath string) (*HaxenPackageConfig, error) { - configPath := filepath.Join(packagePath, "haxen-package.yaml") - +// LoadAgentFieldPackageConfig loads and parses a agentfield-package.yaml file +func LoadAgentFieldPackageConfig(packagePath string) (*AgentFieldPackageConfig, error) { + configPath := filepath.Join(packagePath, "agentfield-package.yaml") + if _, err := os.Stat(configPath); os.IsNotExist(err) { - return nil, fmt.Errorf("haxen-package.yaml not found at %s", configPath) + return nil, fmt.Errorf("agentfield-package.yaml not found at %s", configPath) } data, err := os.ReadFile(configPath) if err != nil { - return nil, fmt.Errorf("failed to read haxen-package.yaml: %w", err) + return nil, fmt.Errorf("failed to read agentfield-package.yaml: %w", err) } - var config HaxenPackageConfig + var config AgentFieldPackageConfig if err := yaml.Unmarshal(data, &config); err != nil { - return nil, fmt.Errorf("failed to parse haxen-package.yaml: %w", err) + return 
nil, fmt.Errorf("failed to parse agentfield-package.yaml: %w", err) } // Validate the configuration schema @@ -254,19 +254,19 @@ func ValidateConfiguration(schema *ConfigurationSchema, config map[string]string // GetConfigurationWithDefaults returns configuration with default values applied func GetConfigurationWithDefaults(schema *ConfigurationSchema, config map[string]string) map[string]string { result := make(map[string]string) - + // Copy provided values for k, v := range config { result[k] = v } - + // Apply defaults for missing optional fields for _, field := range schema.Optional { if _, exists := result[field.Name]; !exists && field.Default != "" { result[field.Name] = field.Default } } - + return result } diff --git a/control-plane/internal/config/package_config_test.go b/control-plane/internal/config/package_config_test.go index e427369b..ca6b40ef 100644 --- a/control-plane/internal/config/package_config_test.go +++ b/control-plane/internal/config/package_config_test.go @@ -6,11 +6,11 @@ import ( "testing" ) -func TestLoadHaxenPackageConfig(t *testing.T) { +func TestLoadAgentFieldPackageConfig(t *testing.T) { // Create a temporary test directory tempDir := t.TempDir() - - // Create a test haxen-package.yaml + + // Create a test agentfield-package.yaml testConfig := ` name: "test-agent" version: "1.0.0" @@ -24,7 +24,7 @@ agent_node: default_port: 8001 dependencies: - python: ["haxen-sdk>=1.0.0"] + python: ["agentfield>=1.0.0"] capabilities: reasoners: @@ -73,17 +73,17 @@ metadata: created_at: "2025-06-20" sdk_version: "1.0.0" language: "python" - platform: "haxen-agent-node" + platform: "agentfield-agent-node" ` - configPath := filepath.Join(tempDir, "haxen-package.yaml") + configPath := filepath.Join(tempDir, "agentfield-package.yaml") err := os.WriteFile(configPath, []byte(testConfig), 0644) if err != nil { t.Fatalf("Failed to write test config: %v", err) } // Test loading the configuration - config, err := LoadHaxenPackageConfig(tempDir) + config, err 
:= LoadAgentFieldPackageConfig(tempDir) if err != nil { t.Fatalf("Failed to load config: %v", err) } @@ -240,8 +240,8 @@ func TestGetConfigurationWithDefaults(t *testing.T) { } config := map[string]string{ - "API_KEY": "sk-abc123", - "MAX_TOKENS": "3000", // Override default + "API_KEY": "sk-abc123", + "MAX_TOKENS": "3000", // Override default } result := GetConfigurationWithDefaults(schema, config) diff --git a/control-plane/internal/core/domain/mcp_health.go b/control-plane/internal/core/domain/mcp_health.go index c3ea3a99..1585187c 100644 --- a/control-plane/internal/core/domain/mcp_health.go +++ b/control-plane/internal/core/domain/mcp_health.go @@ -7,12 +7,12 @@ import ( // MCPSummaryForUI represents MCP health summary optimized for UI display type MCPSummaryForUI struct { - TotalServers int `json:"total_servers"` - RunningServers int `json:"running_servers"` - TotalTools int `json:"total_tools"` - OverallHealth float64 `json:"overall_health"` - HasIssues bool `json:"has_issues"` - + TotalServers int `json:"total_servers"` + RunningServers int `json:"running_servers"` + TotalTools int `json:"total_tools"` + OverallHealth float64 `json:"overall_health"` + HasIssues bool `json:"has_issues"` + // User mode: simplified capability status CapabilitiesAvailable bool `json:"capabilities_available"` ServiceStatus string `json:"service_status"` // "ready", "degraded", "unavailable" @@ -21,14 +21,14 @@ type MCPSummaryForUI struct { // AgentNodeDetailsForUI represents detailed agent node information including MCP data type AgentNodeDetailsForUI struct { // Embed the base agent node data - ID string `json:"id"` - TeamID string `json:"team_id"` - BaseURL string `json:"base_url"` - Version string `json:"version"` - HealthStatus string `json:"health_status"` - LastHeartbeat time.Time `json:"last_heartbeat"` - RegisteredAt time.Time `json:"registered_at"` - + ID string `json:"id"` + TeamID string `json:"team_id"` + BaseURL string `json:"base_url"` + Version string 
`json:"version"` + HealthStatus string `json:"health_status"` + LastHeartbeat time.Time `json:"last_heartbeat"` + RegisteredAt time.Time `json:"registered_at"` + // MCP-specific data (only in developer mode) MCPServers []MCPServerHealthForUI `json:"mcp_servers,omitempty"` MCPSummary *MCPSummaryForUI `json:"mcp_summary,omitempty"` @@ -46,7 +46,7 @@ type MCPServerHealthForUI struct { ProcessID int `json:"process_id,omitempty"` SuccessRate float64 `json:"success_rate,omitempty"` AvgResponseTime int `json:"avg_response_time_ms,omitempty"` - + // UI-specific fields StatusIcon string `json:"status_icon"` // Icon name for UI StatusColor string `json:"status_color"` // Color code for UI @@ -82,10 +82,10 @@ type MCPServerHealthData struct { // MCPSummaryData represents raw MCP summary data type MCPSummaryData struct { - TotalServers int `json:"total_servers"` - RunningServers int `json:"running_servers"` - TotalTools int `json:"total_tools"` - OverallHealth float64 `json:"overall_health"` + TotalServers int `json:"total_servers"` + RunningServers int `json:"running_servers"` + TotalTools int `json:"total_tools"` + OverallHealth float64 `json:"overall_health"` } // MCPHealthMode represents the mode for MCP health data display @@ -124,11 +124,11 @@ func TransformMCPHealthForMode(data *MCPHealthResponseData, mode MCPHealthMode) // Create summary summary := &MCPSummaryForUI{ - TotalServers: data.Summary.TotalServers, + TotalServers: data.Summary.TotalServers, RunningServers: data.Summary.RunningServers, - TotalTools: data.Summary.TotalTools, - OverallHealth: data.Summary.OverallHealth, - HasIssues: data.Summary.RunningServers < data.Summary.TotalServers || data.Summary.OverallHealth < 0.8, + TotalTools: data.Summary.TotalTools, + OverallHealth: data.Summary.OverallHealth, + HasIssues: data.Summary.RunningServers < data.Summary.TotalServers || data.Summary.OverallHealth < 0.8, } // Set user-mode specific fields @@ -206,7 +206,7 @@ func formatUptime(startedAt *time.Time) string { 
if startedAt == nil { return "N/A" } - + duration := time.Since(*startedAt) if duration < time.Minute { return "< 1m" @@ -219,4 +219,4 @@ func formatUptime(startedAt *time.Time) string { hours := int(duration.Hours()) % 24 return fmt.Sprintf("%dd %dh", days, hours) } -} \ No newline at end of file +} diff --git a/control-plane/internal/core/domain/models.go b/control-plane/internal/core/domain/models.go index 195423d3..96bbefd6 100644 --- a/control-plane/internal/core/domain/models.go +++ b/control-plane/internal/core/domain/models.go @@ -1,127 +1,127 @@ -// haxen/internal/core/domain/models.go +// agentfield/internal/core/domain/models.go package domain import "time" // AgentNode represents a running agent instance type AgentNode struct { - ID string `json:"id"` - Name string `json:"name"` - Port int `json:"port"` - PID int `json:"pid"` - Status string `json:"status"` - LifecycleStatus string `json:"lifecycle_status"` - StartedAt time.Time `json:"started_at"` - Environment map[string]string `json:"environment"` - LogFile string `json:"log_file"` + ID string `json:"id"` + Name string `json:"name"` + Port int `json:"port"` + PID int `json:"pid"` + Status string `json:"status"` + LifecycleStatus string `json:"lifecycle_status"` + StartedAt time.Time `json:"started_at"` + Environment map[string]string `json:"environment"` + LogFile string `json:"log_file"` } // PackageMetadata represents package information type PackageMetadata struct { - Name string `json:"name"` - Version string `json:"version"` - Description string `json:"description"` - Author string `json:"author"` - Path string `json:"path"` + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + Author string `json:"author"` + Path string `json:"path"` } // InstallationSpec represents package installation configuration type InstallationSpec struct { - Source string `json:"source"` - Destination string `json:"destination"` - Force bool `json:"force"` - 
Environment map[string]string `json:"environment"` + Source string `json:"source"` + Destination string `json:"destination"` + Force bool `json:"force"` + Environment map[string]string `json:"environment"` } // ProcessSpec represents process execution configuration type ProcessSpec struct { - Command string `json:"command"` - Args []string `json:"args"` - WorkingDir string `json:"working_dir"` - Environment map[string]string `json:"environment"` - LogFile string `json:"log_file"` + Command string `json:"command"` + Args []string `json:"args"` + WorkingDir string `json:"working_dir"` + Environment map[string]string `json:"environment"` + LogFile string `json:"log_file"` } // InstallationRegistry tracks installed packages type InstallationRegistry struct { - Installed map[string]InstalledPackage `json:"installed"` + Installed map[string]InstalledPackage `json:"installed"` } // InstalledPackage represents an installed package type InstalledPackage struct { - Name string `json:"name"` - Version string `json:"version"` - Path string `json:"path"` - Environment map[string]string `json:"environment"` - InstalledAt time.Time `json:"installed_at"` + Name string `json:"name"` + Version string `json:"version"` + Path string `json:"path"` + Environment map[string]string `json:"environment"` + InstalledAt time.Time `json:"installed_at"` } -// HaxenConfig represents the Haxen configuration -type HaxenConfig struct { - HomeDir string `json:"home_dir"` - Environment map[string]string `json:"environment"` - MCP MCPConfig `json:"mcp"` +// AgentFieldConfig represents the AgentField configuration +type AgentFieldConfig struct { + HomeDir string `json:"home_dir"` + Environment map[string]string `json:"environment"` + MCP MCPConfig `json:"mcp"` } // MCPConfig contains MCP server configuration type MCPConfig struct { - Servers []MCPServer `json:"servers"` + Servers []MCPServer `json:"servers"` } // MCPServer represents an MCP server configuration type MCPServer struct { - Name string 
`json:"name"` - URL string `json:"url"` - Enabled bool `json:"enabled"` + Name string `json:"name"` + URL string `json:"url"` + Enabled bool `json:"enabled"` } // InstallOptions represents options for package installation type InstallOptions struct { - Force bool `json:"force"` - Verbose bool `json:"verbose"` + Force bool `json:"force"` + Verbose bool `json:"verbose"` } // RunOptions represents options for running an agent type RunOptions struct { - Port int `json:"port"` - Detach bool `json:"detach"` + Port int `json:"port"` + Detach bool `json:"detach"` } // RunningAgent represents a currently running agent instance type RunningAgent struct { - Name string `json:"name"` - PID int `json:"pid"` - Port int `json:"port"` - Status string `json:"status"` - StartedAt time.Time `json:"started_at"` - LogFile string `json:"log_file"` + Name string `json:"name"` + PID int `json:"pid"` + Port int `json:"port"` + Status string `json:"status"` + StartedAt time.Time `json:"started_at"` + LogFile string `json:"log_file"` } // AgentStatus represents the status of an agent type AgentStatus struct { - Name string `json:"name"` - IsRunning bool `json:"is_running"` - PID int `json:"pid"` - Port int `json:"port"` - Uptime string `json:"uptime"` - LastSeen time.Time `json:"last_seen"` + Name string `json:"name"` + IsRunning bool `json:"is_running"` + PID int `json:"pid"` + Port int `json:"port"` + Uptime string `json:"uptime"` + LastSeen time.Time `json:"last_seen"` } // DevOptions represents options for development mode type DevOptions struct { - Port int `json:"port"` - AutoReload bool `json:"auto_reload"` - Verbose bool `json:"verbose"` - WatchFiles bool `json:"watch_files"` + Port int `json:"port"` + AutoReload bool `json:"auto_reload"` + Verbose bool `json:"verbose"` + WatchFiles bool `json:"watch_files"` } // DevStatus represents the status of development mode type DevStatus struct { - Path string `json:"path"` - IsRunning bool `json:"is_running"` - PID int `json:"pid"` - Port 
int `json:"port"` - StartedAt time.Time `json:"started_at"` - AutoReload bool `json:"auto_reload"` - WatchedFiles []string `json:"watched_files"` + Path string `json:"path"` + IsRunning bool `json:"is_running"` + PID int `json:"pid"` + Port int `json:"port"` + StartedAt time.Time `json:"started_at"` + AutoReload bool `json:"auto_reload"` + WatchedFiles []string `json:"watched_files"` } diff --git a/control-plane/internal/core/interfaces/agent_client.go b/control-plane/internal/core/interfaces/agent_client.go index 46d8a9ff..95b6837a 100644 --- a/control-plane/internal/core/interfaces/agent_client.go +++ b/control-plane/internal/core/interfaces/agent_client.go @@ -12,16 +12,16 @@ import ( type AgentClient interface { // GetMCPHealth retrieves MCP health information from an agent node GetMCPHealth(ctx context.Context, nodeID string) (*MCPHealthResponse, error) - + // RestartMCPServer restarts a specific MCP server on an agent node RestartMCPServer(ctx context.Context, nodeID, alias string) error - + // GetMCPTools retrieves the list of tools from a specific MCP server GetMCPTools(ctx context.Context, nodeID, alias string) (*MCPToolsResponse, error) - + // ShutdownAgent requests graceful shutdown of an agent node via HTTP ShutdownAgent(ctx context.Context, nodeID string, graceful bool, timeoutSeconds int) (*AgentShutdownResponse, error) - + // GetAgentStatus retrieves detailed status information from an agent node GetAgentStatus(ctx context.Context, nodeID string) (*AgentStatusResponse, error) } @@ -42,18 +42,18 @@ func (ft *FlexibleTime) UnmarshalJSON(data []byte) error { if string(data) == "null" { return nil } - + // Remove quotes from JSON string timeStr := strings.Trim(string(data), `"`) - + // Try parsing with different formats formats := []string{ - time.RFC3339Nano, // "2006-01-02T15:04:05.999999999Z07:00" - time.RFC3339, // "2006-01-02T15:04:05Z07:00" + time.RFC3339Nano, // "2006-01-02T15:04:05.999999999Z07:00" + time.RFC3339, // "2006-01-02T15:04:05Z07:00" 
"2006-01-02T15:04:05.999999", // Without timezone (microseconds) - "2006-01-02T15:04:05", // Without timezone (seconds) + "2006-01-02T15:04:05", // Without timezone (seconds) } - + for _, format := range formats { if t, err := time.Parse(format, timeStr); err == nil { // If no timezone was provided, assume UTC @@ -64,7 +64,7 @@ func (ft *FlexibleTime) UnmarshalJSON(data []byte) error { return nil } } - + return fmt.Errorf("unable to parse time: %s", timeStr) } @@ -79,7 +79,7 @@ func (ft FlexibleTime) MarshalJSON() ([]byte, error) { // MCPServerHealth represents the health status of a single MCP server type MCPServerHealth struct { Alias string `json:"alias"` - Status string `json:"status"` // "running", "stopped", "error", "starting" + Status string `json:"status"` // "running", "stopped", "error", "starting" ToolCount int `json:"tool_count"` StartedAt *FlexibleTime `json:"started_at"` LastHealthCheck *FlexibleTime `json:"last_health_check"` @@ -92,10 +92,10 @@ type MCPServerHealth struct { // MCPSummary represents aggregated MCP health metrics type MCPSummary struct { - TotalServers int `json:"total_servers"` - RunningServers int `json:"running_servers"` - TotalTools int `json:"total_tools"` - OverallHealth float64 `json:"overall_health"` // 0.0 to 1.0 + TotalServers int `json:"total_servers"` + RunningServers int `json:"running_servers"` + TotalTools int `json:"total_tools"` + OverallHealth float64 `json:"overall_health"` // 0.0 to 1.0 } // MCPToolsResponse represents the tools available from an MCP server @@ -118,23 +118,23 @@ type MCPRestartResponse struct { // AgentShutdownResponse represents the response from requesting agent shutdown type AgentShutdownResponse struct { - Status string `json:"status"` // "shutting_down", "error" - Graceful bool `json:"graceful"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - EstimatedShutdownTime string `json:"estimated_shutdown_time,omitempty"` - Message string `json:"message"` + Status string `json:"status"` // 
"shutting_down", "error" + Graceful bool `json:"graceful"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + EstimatedShutdownTime string `json:"estimated_shutdown_time,omitempty"` + Message string `json:"message"` } // AgentStatusResponse represents detailed status information from an agent type AgentStatusResponse struct { - Status string `json:"status"` // "running", "stopping", "error" - Uptime string `json:"uptime"` // Human-readable uptime - UptimeSeconds int `json:"uptime_seconds"` // Uptime in seconds - PID int `json:"pid"` // Process ID - Version string `json:"version"` // Agent version - NodeID string `json:"node_id"` // Agent node ID - LastActivity string `json:"last_activity"` // ISO timestamp - Resources map[string]interface{} `json:"resources"` // Resource usage info + Status string `json:"status"` // "running", "stopping", "error" + Uptime string `json:"uptime"` // Human-readable uptime + UptimeSeconds int `json:"uptime_seconds"` // Uptime in seconds + PID int `json:"pid"` // Process ID + Version string `json:"version"` // Agent version + NodeID string `json:"node_id"` // Agent node ID + LastActivity string `json:"last_activity"` // ISO timestamp + Resources map[string]interface{} `json:"resources"` // Resource usage info MCPServers map[string]interface{} `json:"mcp_servers,omitempty"` // MCP server info - Message string `json:"message,omitempty"` // Additional status message -} \ No newline at end of file + Message string `json:"message,omitempty"` // Additional status message +} diff --git a/control-plane/internal/core/interfaces/services.go b/control-plane/internal/core/interfaces/services.go index c1fc82d7..37057412 100644 --- a/control-plane/internal/core/interfaces/services.go +++ b/control-plane/internal/core/interfaces/services.go @@ -1,7 +1,7 @@ package interfaces import ( - "github.com/your-org/haxen/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" ) // PackageService 
defines the contract for package management operations. diff --git a/control-plane/internal/core/interfaces/storage.go b/control-plane/internal/core/interfaces/storage.go index b70fc251..2a0f6746 100644 --- a/control-plane/internal/core/interfaces/storage.go +++ b/control-plane/internal/core/interfaces/storage.go @@ -1,24 +1,24 @@ -// haxen/internal/core/interfaces/storage.go +// agentfield/internal/core/interfaces/storage.go package interfaces -import "github.com/your-org/haxen/control-plane/internal/core/domain" +import "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" type FileSystemAdapter interface { - ReadFile(path string) ([]byte, error) - WriteFile(path string, data []byte) error - Exists(path string) bool - CreateDirectory(path string) error - ListDirectory(path string) ([]string, error) + ReadFile(path string) ([]byte, error) + WriteFile(path string, data []byte) error + Exists(path string) bool + CreateDirectory(path string) error + ListDirectory(path string) ([]string, error) } type RegistryStorage interface { - LoadRegistry() (*domain.InstallationRegistry, error) - SaveRegistry(registry *domain.InstallationRegistry) error - GetPackage(name string) (*domain.InstalledPackage, error) - SavePackage(name string, pkg *domain.InstalledPackage) error + LoadRegistry() (*domain.InstallationRegistry, error) + SaveRegistry(registry *domain.InstallationRegistry) error + GetPackage(name string) (*domain.InstalledPackage, error) + SavePackage(name string, pkg *domain.InstalledPackage) error } type ConfigStorage interface { - LoadHaxenConfig(path string) (*domain.HaxenConfig, error) - SaveHaxenConfig(path string, config *domain.HaxenConfig) error + LoadAgentFieldConfig(path string) (*domain.AgentFieldConfig, error) + SaveAgentFieldConfig(path string, config *domain.AgentFieldConfig) error } diff --git a/control-plane/internal/core/services/agent_service.go b/control-plane/internal/core/services/agent_service.go index 0f520433..97d74e33 100644 --- 
a/control-plane/internal/core/services/agent_service.go +++ b/control-plane/internal/core/services/agent_service.go @@ -1,9 +1,10 @@ -// haxen/internal/core/services/agent_service.go +// agentfield/internal/core/services/agent_service.go package services import ( "context" "encoding/json" + "errors" "fmt" "net/http" "os" @@ -14,9 +15,9 @@ import ( "syscall" "time" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/packages" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" "gopkg.in/yaml.v3" ) @@ -26,7 +27,7 @@ type DefaultAgentService struct { portManager interfaces.PortManager registryStorage interfaces.RegistryStorage agentClient interfaces.AgentClient - haxenHome string + agentfieldHome string } // NewAgentService creates a new agent service instance @@ -35,14 +36,14 @@ func NewAgentService( portManager interfaces.PortManager, registryStorage interfaces.RegistryStorage, agentClient interfaces.AgentClient, - haxenHome string, + agentfieldHome string, ) interfaces.AgentService { return &DefaultAgentService{ processManager: processManager, portManager: portManager, registryStorage: registryStorage, agentClient: agentClient, - haxenHome: haxenHome, + agentfieldHome: agentfieldHome, } } @@ -103,11 +104,13 @@ func (as *DefaultAgentService) RunAgent(name string, options domain.RunOptions) // 5. 
Wait for agent node to be ready if err := as.waitForAgentNode(port, 10*time.Second); err != nil { // Kill the process if it failed to start properly - as.processManager.Stop(pid) + if stopErr := as.processManager.Stop(pid); stopErr != nil { + return nil, fmt.Errorf("agent node failed to start: %w (additionally failed to stop process: %v)", err, stopErr) + } return nil, fmt.Errorf("agent node failed to start: %w", err) } - fmt.Printf("🧠 Agent node registered with Haxen Server\n") + fmt.Printf("🧠 Agent node registered with AgentField Server\n") // 6. Update registry with runtime info if err := as.updateRuntimeInfo(name, port, pid); err != nil { @@ -120,8 +123,8 @@ func (as *DefaultAgentService) RunAgent(name string, options domain.RunOptions) } fmt.Printf("\nπŸ’‘ Agent node running in background (PID: %d)\n", pid) - fmt.Printf("πŸ’‘ View logs: haxen logs %s\n", name) - fmt.Printf("πŸ’‘ Stop agent node: haxen stop %s\n", name) + fmt.Printf("πŸ’‘ View logs: af logs %s\n", name) + fmt.Printf("πŸ’‘ Stop agent node: af stop %s\n", name) // Convert to domain model and return runningAgent := as.convertToRunningAgent(agentNode) @@ -179,19 +182,19 @@ func (as *DefaultAgentService) StopAgent(name string) error { httpShutdownSuccess := false if as.agentClient != nil { fmt.Printf("πŸ›‘ Attempting graceful HTTP shutdown for agent %s\n", name) - + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - + // Construct node ID from agent name (assuming they match) nodeID := name - + // Try graceful shutdown with 30-second timeout shutdownResp, err := as.agentClient.ShutdownAgent(ctx, nodeID, true, 30) if err == nil && shutdownResp != nil && shutdownResp.Status == "shutting_down" { fmt.Printf("βœ… HTTP shutdown request accepted for agent %s\n", name) httpShutdownSuccess = true - + // Wait a moment for the agent to shut down gracefully time.Sleep(2 * time.Second) } else { @@ -202,7 +205,7 @@ func (as *DefaultAgentService) StopAgent(name string) error 
{ // If HTTP shutdown failed or not available, fall back to process signals if !httpShutdownSuccess { fmt.Printf("πŸ”„ Falling back to process signal shutdown for agent %s\n", name) - + if pkg.Runtime.PID == nil { return fmt.Errorf("no PID found for agent %s", name) } @@ -229,7 +232,7 @@ func (as *DefaultAgentService) StopAgent(name string) error { if err := process.Kill(); err != nil { // Handle "process already finished" gracefully if strings.Contains(err.Error(), "process already finished") || - strings.Contains(err.Error(), "no such process") { + strings.Contains(err.Error(), "no such process") { fmt.Printf("Process %d for agent %s already finished - updating registry\n", *pkg.Runtime.PID, name) } else { return fmt.Errorf("failed to kill process: %w", err) @@ -238,7 +241,7 @@ func (as *DefaultAgentService) StopAgent(name string) error { } else { // Wait a moment for graceful shutdown, then force kill if needed time.Sleep(3 * time.Second) - + // Check if process is still running if err := process.Signal(syscall.Signal(0)); err == nil { // Process still running, force kill @@ -322,7 +325,7 @@ func (as *DefaultAgentService) GetAgentStatus(name string) (*domain.AgentStatus, // Returns (actuallyRunning, wasReconciled) func (as *DefaultAgentService) reconcileProcessState(pkg *packages.InstalledPackage, name string) (bool, bool) { registryRunning := pkg.Status == "running" - + // If registry says not running, trust it (no process to check) if !registryRunning { return false, false @@ -387,7 +390,7 @@ func (as *DefaultAgentService) ListRunningAgents() ([]domain.RunningAgent, error // loadRegistryDirect loads the registry using direct file system access // TODO: Eventually replace with registryStorage interface usage func (as *DefaultAgentService) loadRegistryDirect() (*packages.InstallationRegistry, error) { - registryPath := filepath.Join(as.haxenHome, "installed.yaml") + registryPath := filepath.Join(as.agentfieldHome, "installed.yaml") registry := 
&packages.InstallationRegistry{ Installed: make(map[string]packages.InstalledPackage), @@ -405,7 +408,7 @@ func (as *DefaultAgentService) loadRegistryDirect() (*packages.InstallationRegis // saveRegistryDirect saves the registry using direct file system access // TODO: Eventually replace with registryStorage interface usage func (as *DefaultAgentService) saveRegistryDirect(registry *packages.InstallationRegistry) error { - registryPath := filepath.Join(as.haxenHome, "installed.yaml") + registryPath := filepath.Join(as.agentfieldHome, "installed.yaml") data, err := yaml.Marshal(registry) if err != nil { @@ -446,7 +449,7 @@ func (as *DefaultAgentService) buildProcessConfig(agentNode packages.InstalledPa // Prepare environment variables env := os.Environ() env = append(env, fmt.Sprintf("PORT=%d", port)) - env = append(env, "HAXEN_SERVER_URL=http://localhost:8080") + env = append(env, "AGENTFIELD_SERVER_URL=http://localhost:8080") // Load environment variables from package .env file if envVars, err := as.loadPackageEnvFile(agentNode.Path); err == nil { @@ -459,52 +462,52 @@ func (as *DefaultAgentService) buildProcessConfig(agentNode packages.InstalledPa // Determine Python path - use virtual environment if available var pythonPath string venvPath := filepath.Join(agentNode.Path, "venv") - + // Check if virtual environment exists (Unix/Linux/macOS) if _, err := os.Stat(filepath.Join(venvPath, "bin", "python")); err == nil { pythonPath = filepath.Join(venvPath, "bin", "python") fmt.Printf("🐍 Using virtual environment: %s\n", venvPath) - + // Complete virtual environment activation for Unix/Linux/macOS venvBinPath := filepath.Join(venvPath, "bin") - + // Set VIRTUAL_ENV first (required for proper activation) env = append(env, fmt.Sprintf("VIRTUAL_ENV=%s", venvPath)) - + // Prepend virtual environment bin to PATH (critical for package resolution) currentPath := os.Getenv("PATH") env = append(env, fmt.Sprintf("PATH=%s:%s", venvBinPath, currentPath)) - + // Unset PYTHONHOME 
to avoid conflicts with virtual environment env = append(env, "PYTHONHOME=") - + // Set PYTHONPATH to ensure proper module resolution env = append(env, fmt.Sprintf("PYTHONPATH=%s", filepath.Join(venvPath, "lib"))) - + fmt.Printf("βœ… Virtual environment fully activated with PATH=%s\n", venvBinPath) - + } else if _, err := os.Stat(filepath.Join(venvPath, "Scripts", "python.exe")); err == nil { pythonPath = filepath.Join(venvPath, "Scripts", "python.exe") // Windows fmt.Printf("🐍 Using virtual environment: %s\n", venvPath) - + // Complete virtual environment activation for Windows venvScriptsPath := filepath.Join(venvPath, "Scripts") - + // Set VIRTUAL_ENV first (required for proper activation) env = append(env, fmt.Sprintf("VIRTUAL_ENV=%s", venvPath)) - + // Prepend virtual environment Scripts to PATH (critical for package resolution) currentPath := os.Getenv("PATH") env = append(env, fmt.Sprintf("PATH=%s;%s", venvScriptsPath, currentPath)) - + // Unset PYTHONHOME to avoid conflicts with virtual environment env = append(env, "PYTHONHOME=") - + // Set PYTHONPATH to ensure proper module resolution env = append(env, fmt.Sprintf("PYTHONPATH=%s", filepath.Join(venvPath, "Lib", "site-packages"))) - + fmt.Printf("βœ… Virtual environment fully activated with PATH=%s\n", venvScriptsPath) - + } else { // Try to find python3 or python if pythonPath = as.findPythonExecutable(); pythonPath == "" { @@ -544,12 +547,16 @@ func (as *DefaultAgentService) waitForAgentNode(port int, timeout time.Duration) // updateRuntimeInfo updates the registry with runtime information func (as *DefaultAgentService) updateRuntimeInfo(agentNodeName string, port, pid int) error { - registryPath := filepath.Join(as.haxenHome, "installed.yaml") + registryPath := filepath.Join(as.agentfieldHome, "installed.yaml") // Load registry registry := &packages.InstallationRegistry{} if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); 
err != nil { + return fmt.Errorf("failed to parse registry: %w", err) + } + } else if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to read registry: %w", err) } // Update runtime info @@ -645,7 +652,7 @@ func (as *DefaultAgentService) findAgentInRegistry(registry *packages.Installati for registryName, agentNode := range registry.Installed { normalizedRegistryName := strings.ReplaceAll(registryName, "-", "") normalizedInputName := strings.ReplaceAll(name, "-", "") - + if normalizedRegistryName == normalizedInputName { return agentNode, registryName, true } @@ -658,7 +665,7 @@ func (as *DefaultAgentService) findAgentInRegistry(registry *packages.Installati // loadPackageEnvFile loads environment variables from package .env file func (as *DefaultAgentService) loadPackageEnvFile(packagePath string) (map[string]string, error) { envPath := filepath.Join(packagePath, ".env") - + data, err := os.ReadFile(envPath) if err != nil { return nil, err @@ -666,24 +673,24 @@ func (as *DefaultAgentService) loadPackageEnvFile(packagePath string) (map[strin envVars := make(map[string]string) lines := strings.Split(string(data), "\n") - + for _, line := range lines { line = strings.TrimSpace(line) if line == "" || strings.HasPrefix(line, "#") { continue } - + parts := strings.SplitN(line, "=", 2) if len(parts) == 2 { key := strings.TrimSpace(parts[0]) value := strings.TrimSpace(parts[1]) - + // Remove quotes if present if (strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"")) || - (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { + (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { value = value[1 : len(value)-1] } - + envVars[key] = value } } @@ -695,17 +702,17 @@ func (as *DefaultAgentService) loadPackageEnvFile(packagePath string) (map[strin func (as *DefaultAgentService) findPythonExecutable() string { // Try common Python executable names in order of preference candidates := []string{"python3", "python", "python3.11", 
"python3.10", "python3.9", "python3.8"} - + for _, candidate := range candidates { if _, err := os.Stat(candidate); err == nil { return candidate } - + // Also try to find in PATH if path, err := exec.LookPath(candidate); err == nil { return path } } - + return "" // Not found } diff --git a/control-plane/internal/core/services/dev_service.go b/control-plane/internal/core/services/dev_service.go index 97f07a25..02326747 100644 --- a/control-plane/internal/core/services/dev_service.go +++ b/control-plane/internal/core/services/dev_service.go @@ -15,8 +15,8 @@ import ( "syscall" "time" - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" ) type DefaultDevService struct { @@ -44,10 +44,10 @@ func (ds *DefaultDevService) RunInDevMode(path string, options domain.DevOptions return fmt.Errorf("failed to resolve path: %w", err) } - // Check if haxen.yaml exists - haxenYamlPath := filepath.Join(absPath, "haxen.yaml") - if !ds.fileSystem.Exists(haxenYamlPath) { - return fmt.Errorf("no haxen.yaml found in %s", absPath) + // Check if agentfield.yaml exists + agentfieldYamlPath := filepath.Join(absPath, "agentfield.yaml") + if !ds.fileSystem.Exists(agentfieldYamlPath) { + return fmt.Errorf("no agentfield.yaml found in %s", absPath) } return ds.runDev(absPath, options) @@ -152,7 +152,9 @@ func (ds *DefaultDevService) runDev(packagePath string, options domain.DevOption return nil } -// getFreePort finds an available port in the range 8001-8999 +// getFreePort finds an available port in the range 8001-8999. 
+// +//nolint:unused // retained for future dev-service enhancements func (ds *DefaultDevService) getFreePort() (int, error) { for port := 8001; port <= 8999; port++ { if ds.isPortAvailable(port) { @@ -162,7 +164,9 @@ func (ds *DefaultDevService) getFreePort() (int, error) { return 0, fmt.Errorf("no free port available in range 8001-8999") } -// isPortAvailable checks if a port is available +// isPortAvailable checks if a port is available. +// +//nolint:unused // retained for future dev-service enhancements func (ds *DefaultDevService) isPortAvailable(port int) bool { conn, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) if err != nil { @@ -180,8 +184,8 @@ func (ds *DefaultDevService) startDevProcess(packagePath string, port int, optio if port > 0 { env = append(env, fmt.Sprintf("PORT=%d", port)) } - env = append(env, "HAXEN_SERVER_URL=http://localhost:8080") - env = append(env, "HAXEN_DEV_MODE=true") + env = append(env, "AGENTFIELD_SERVER_URL=http://localhost:8080") + env = append(env, "AGENTFIELD_DEV_MODE=true") // Load environment variables from package .env file if envVars, err := ds.loadDevEnvFile(packagePath); err == nil { @@ -236,54 +240,56 @@ func (ds *DefaultDevService) startDevProcess(packagePath string, port int, optio func (ds *DefaultDevService) discoverAgentPort(timeout time.Duration) (int, error) { client := &http.Client{Timeout: 2 * time.Second} deadline := time.Now().Add(timeout) - + fmt.Printf("πŸ” Discovering agent port...\n") - + checkCount := 0 - + for time.Now().Before(deadline) { checkCount++ - + // Try ports in range 8001-8999 for port := 8001; port <= 8999; port++ { resp, err := client.Get(fmt.Sprintf("http://localhost:%d/health", port)) - + if err == nil && resp.StatusCode == 200 { resp.Body.Close() fmt.Printf("βœ… Discovered agent on port %d after %d checks\n", port, checkCount) return port, nil } - + if resp != nil { resp.Body.Close() } } - + // Log progress every 20 checks to avoid spam if checkCount%20 == 0 { fmt.Printf("πŸ”„ Port 
discovery attempt %d...\n", checkCount) } - + time.Sleep(500 * time.Millisecond) } return 0, fmt.Errorf("could not discover agent port within %v after %d attempts", timeout, checkCount) } -// waitForAgent waits for the agent to become ready in dev mode +// waitForAgent waits for the agent to become ready in dev mode. +// +//nolint:unused // retained for future dev-service enhancements func (ds *DefaultDevService) waitForAgent(port int, timeout time.Duration) error { client := &http.Client{Timeout: 2 * time.Second} deadline := time.Now().Add(timeout) - + fmt.Printf("πŸ” Waiting for agent to become ready on port %d...\n", port) - + lastError := "" checkCount := 0 - + for time.Now().Before(deadline) { checkCount++ resp, err := client.Get(fmt.Sprintf("http://localhost:%d/health", port)) - + if err == nil { if resp.StatusCode == 200 { resp.Body.Close() @@ -304,7 +310,7 @@ func (ds *DefaultDevService) waitForAgent(port int, timeout time.Duration) error } lastError = err.Error() } - + time.Sleep(500 * time.Millisecond) } @@ -409,4 +415,3 @@ func (ds *DefaultDevService) loadDevEnvFile(packagePath string) (map[string]stri return envVars, nil } - diff --git a/control-plane/internal/core/services/dev_service_windows.go b/control-plane/internal/core/services/dev_service_windows.go index 21cf4aaf..2090c01c 100644 --- a/control-plane/internal/core/services/dev_service_windows.go +++ b/control-plane/internal/core/services/dev_service_windows.go @@ -2,11 +2,33 @@ package services +import ( + "fmt" -// DevService is a stub for Windows builds. -type DevService struct{} + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" +) -// NewDevService returns a stub DevService on Windows. 
-func NewDevService(processManager interface{}, portManager interface{}, fileSystem interface{}) *DevService { - return &DevService{} -} \ No newline at end of file +// DefaultDevService is a stub for Windows builds. +type DefaultDevService struct{} + +// NewDevService returns a stub implementation for Windows builds. +func NewDevService( + processManager interfaces.ProcessManager, + portManager interfaces.PortManager, + fileSystem interfaces.FileSystemAdapter, +) interfaces.DevService { + return &DefaultDevService{} +} + +func (ds *DefaultDevService) RunInDevMode(path string, options domain.DevOptions) error { + return fmt.Errorf("development mode is not supported on Windows yet") +} + +func (ds *DefaultDevService) StopDevMode(path string) error { + return fmt.Errorf("development mode is not supported on Windows yet") +} + +func (ds *DefaultDevService) GetDevStatus(path string) (*domain.DevStatus, error) { + return nil, fmt.Errorf("development mode is not supported on Windows yet") +} diff --git a/control-plane/internal/core/services/package_service.go b/control-plane/internal/core/services/package_service.go index af12015a..f4de9b40 100644 --- a/control-plane/internal/core/services/package_service.go +++ b/control-plane/internal/core/services/package_service.go @@ -1,4 +1,4 @@ -// haxen/internal/core/services/package_service.go +// agentfield/internal/core/services/package_service.go package services import ( @@ -10,9 +10,9 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/packages" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/packages" "github.com/fatih/color" "gopkg.in/yaml.v3" ) @@ -21,19 +21,19 @@ import ( type DefaultPackageService 
struct { registryStorage interfaces.RegistryStorage fileSystem interfaces.FileSystemAdapter - haxenHome string + agentfieldHome string } // NewPackageService creates a new package service instance func NewPackageService( registryStorage interfaces.RegistryStorage, fileSystem interfaces.FileSystemAdapter, - haxenHome string, + agentfieldHome string, ) interfaces.PackageService { return &DefaultPackageService{ registryStorage: registryStorage, fileSystem: fileSystem, - haxenHome: haxenHome, + agentfieldHome: agentfieldHome, } } @@ -42,12 +42,12 @@ func (ps *DefaultPackageService) InstallPackage(source string, options domain.In // Check if it's a Git URL (GitHub, GitLab, Bitbucket, etc.) if packages.IsGitURL(source) { installer := &packages.GitInstaller{ - HaxenHome: ps.haxenHome, - Verbose: options.Verbose, + AgentFieldHome: ps.agentfieldHome, + Verbose: options.Verbose, } return installer.InstallFromGit(source, options.Force) } - + // Handle local package installation return ps.installLocalPackage(source, options.Force, options.Verbose) } @@ -77,7 +77,7 @@ func (ps *DefaultPackageService) installLocalPackage(sourcePath string, force bo } // 3. Copy package to global location - destPath := filepath.Join(ps.haxenHome, "packages", metadata.Name) + destPath := filepath.Join(ps.agentfieldHome, "packages", metadata.Name) spinner = ps.newSpinner("Setting up environment") spinner.Start() if err := ps.copyPackage(sourcePath, destPath); err != nil { @@ -102,11 +102,11 @@ func (ps *DefaultPackageService) installLocalPackage(sourcePath string, force bo fmt.Printf("%s Installed %s v%s\n", ps.green(ps.statusSuccess()), ps.bold(metadata.Name), ps.gray(metadata.Version)) fmt.Printf(" %s %s\n", ps.gray("Location:"), destPath) - + // 6. 
Check for required environment variables and provide guidance ps.checkEnvironmentVariables(metadata) - - fmt.Printf("\n%s %s\n", ps.blue("β†’"), ps.bold(fmt.Sprintf("Run: haxen run %s", metadata.Name))) + + fmt.Printf("\n%s %s\n", ps.blue("β†’"), ps.bold(fmt.Sprintf("Run: af run %s", metadata.Name))) return nil } @@ -188,7 +188,7 @@ func (ps *DefaultPackageService) stopAgentNode(agentNode *packages.InstalledPack // saveRegistry saves the installation registry func (ps *DefaultPackageService) saveRegistry(registry *packages.InstallationRegistry) error { - registryPath := filepath.Join(ps.haxenHome, "installed.yaml") + registryPath := filepath.Join(ps.agentfieldHome, "installed.yaml") data, err := yaml.Marshal(registry) if err != nil { @@ -239,7 +239,7 @@ func (ps *DefaultPackageService) GetPackageInfo(name string) (*domain.InstalledP // loadRegistryDirect loads the registry using direct file system access // TODO: Eventually replace with registryStorage interface usage func (ps *DefaultPackageService) loadRegistryDirect() (*packages.InstallationRegistry, error) { - registryPath := filepath.Join(ps.haxenHome, "installed.yaml") + registryPath := filepath.Join(ps.agentfieldHome, "installed.yaml") registry := &packages.InstallationRegistry{ Installed: make(map[string]packages.InstalledPackage), @@ -307,15 +307,19 @@ type Spinner struct { } // Color helper methods -func (ps *DefaultPackageService) green(text string) string { return green(text) } -func (ps *DefaultPackageService) red(text string) string { return red(text) } -func (ps *DefaultPackageService) yellow(text string) string { return yellow(text) } -func (ps *DefaultPackageService) blue(text string) string { return blue(text) } -func (ps *DefaultPackageService) cyan(text string) string { return cyan(text) } -func (ps *DefaultPackageService) gray(text string) string { return gray(text) } -func (ps *DefaultPackageService) bold(text string) string { return bold(text) } +func (ps *DefaultPackageService) green(text 
string) string { return green(text) } + +//nolint:unused // retained for console color helpers +func (ps *DefaultPackageService) red(text string) string { return red(text) } +func (ps *DefaultPackageService) yellow(text string) string { return yellow(text) } +func (ps *DefaultPackageService) blue(text string) string { return blue(text) } +func (ps *DefaultPackageService) cyan(text string) string { return cyan(text) } +func (ps *DefaultPackageService) gray(text string) string { return gray(text) } +func (ps *DefaultPackageService) bold(text string) string { return bold(text) } func (ps *DefaultPackageService) statusSuccess() string { return statusSuccess } -func (ps *DefaultPackageService) statusError() string { return statusError } + +//nolint:unused // retained for console status helpers +func (ps *DefaultPackageService) statusError() string { return statusError } // newSpinner creates a new spinner with the given message func (ps *DefaultPackageService) newSpinner(message string) *Spinner { @@ -373,10 +377,10 @@ func (s *Spinner) Error(message string) { // validatePackage checks if the package has required files func (ps *DefaultPackageService) validatePackage(sourcePath string) error { - // Check if haxen-package.yaml exists - packageYamlPath := filepath.Join(sourcePath, "haxen-package.yaml") + // Check if agentfield-package.yaml exists + packageYamlPath := filepath.Join(sourcePath, "agentfield-package.yaml") if _, err := os.Stat(packageYamlPath); os.IsNotExist(err) { - return fmt.Errorf("haxen-package.yaml not found in %s", sourcePath) + return fmt.Errorf("agentfield-package.yaml not found in %s", sourcePath) } // Check if main.py exists @@ -388,26 +392,26 @@ func (ps *DefaultPackageService) validatePackage(sourcePath string) error { return nil } -// parsePackageMetadata parses the haxen-package.yaml file +// parsePackageMetadata parses the agentfield-package.yaml file func (ps *DefaultPackageService) parsePackageMetadata(sourcePath string) 
(*packages.PackageMetadata, error) { - packageYamlPath := filepath.Join(sourcePath, "haxen-package.yaml") + packageYamlPath := filepath.Join(sourcePath, "agentfield-package.yaml") data, err := os.ReadFile(packageYamlPath) if err != nil { - return nil, fmt.Errorf("failed to read haxen-package.yaml: %w", err) + return nil, fmt.Errorf("failed to read agentfield-package.yaml: %w", err) } var metadata packages.PackageMetadata if err := yaml.Unmarshal(data, &metadata); err != nil { - return nil, fmt.Errorf("failed to parse haxen-package.yaml: %w", err) + return nil, fmt.Errorf("failed to parse agentfield-package.yaml: %w", err) } // Validate required fields if metadata.Name == "" { - return nil, fmt.Errorf("package name is required in haxen-package.yaml") + return nil, fmt.Errorf("package name is required in agentfield-package.yaml") } if metadata.Version == "" { - return nil, fmt.Errorf("package version is required in haxen-package.yaml") + return nil, fmt.Errorf("package version is required in agentfield-package.yaml") } if metadata.Main == "" { metadata.Main = "main.py" // Default @@ -418,13 +422,15 @@ func (ps *DefaultPackageService) parsePackageMetadata(sourcePath string) (*packa // isPackageInstalled checks if a package is already installed func (ps *DefaultPackageService) isPackageInstalled(packageName string) bool { - registryPath := filepath.Join(ps.haxenHome, "installed.yaml") + registryPath := filepath.Join(ps.agentfieldHome, "installed.yaml") registry := &packages.InstallationRegistry{ Installed: make(map[string]packages.InstalledPackage), } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return false + } } _, exists := registry.Installed[packageName] @@ -490,7 +496,7 @@ func (ps *DefaultPackageService) installDependencies(packagePath string, metadat if len(metadata.Dependencies.Python) > 0 || ps.hasRequirementsFile(packagePath) { // Create virtual environment 
venvPath := filepath.Join(packagePath, "venv") - + cmd := exec.Command("python3", "-m", "venv", venvPath) if _, err := cmd.CombinedOutput(); err != nil { // Try with python if python3 fails @@ -508,11 +514,9 @@ func (ps *DefaultPackageService) installDependencies(packagePath string, metadat pipPath = filepath.Join(venvPath, "Scripts", "pip.exe") // Windows } - // Upgrade pip first + // Upgrade pip first (ignore failures) cmd = exec.Command(pipPath, "install", "--upgrade", "pip") - if _, err := cmd.CombinedOutput(); err != nil { - // Ignore pip upgrade failures - } + _, _ = cmd.CombinedOutput() // Install from requirements.txt if it exists requirementsPath := filepath.Join(packagePath, "requirements.txt") @@ -524,7 +528,7 @@ func (ps *DefaultPackageService) installDependencies(packagePath string, metadat } } - // Install dependencies from haxen-package.yaml + // Install dependencies from agentfield-package.yaml if len(metadata.Dependencies.Python) > 0 { for _, dep := range metadata.Dependencies.Python { cmd = exec.Command(pipPath, "install", dep) @@ -553,7 +557,7 @@ func (ps *DefaultPackageService) hasRequirementsFile(packagePath string) bool { // updateRegistry updates the installation registry with the new package func (ps *DefaultPackageService) updateRegistry(metadata *packages.PackageMetadata, sourcePath, destPath string) error { - registryPath := filepath.Join(ps.haxenHome, "installed.yaml") + registryPath := filepath.Join(ps.agentfieldHome, "installed.yaml") // Load existing registry or create new one registry := &packages.InstallationRegistry{ @@ -561,7 +565,9 @@ func (ps *DefaultPackageService) updateRegistry(metadata *packages.PackageMetada } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return fmt.Errorf("failed to parse registry: %w", err) + } } // Add/update package entry @@ -578,7 +584,7 @@ func (ps *DefaultPackageService) updateRegistry(metadata 
*packages.PackageMetada Port: nil, PID: nil, StartedAt: nil, - LogFile: filepath.Join(ps.haxenHome, "logs", metadata.Name+".log"), + LogFile: filepath.Join(ps.agentfieldHome, "logs", metadata.Name+".log"), }, } @@ -617,7 +623,7 @@ func (ps *DefaultPackageService) checkEnvironmentVariables(metadata *packages.Pa if len(missingRequired) > 0 { fmt.Printf("\n%s %s\n", ps.yellow("⚠"), ps.bold("Missing required environment variables:")) for _, envVar := range missingRequired { - fmt.Printf(" %s\n", ps.cyan(fmt.Sprintf("haxen config %s --set %s=your-value-here", metadata.Name, envVar.Name))) + fmt.Printf(" %s\n", ps.cyan(fmt.Sprintf("af config %s --set %s=your-value-here", metadata.Name, envVar.Name))) } } diff --git a/control-plane/internal/embedded/ui.go b/control-plane/internal/embedded/ui.go index 4a93b9b7..d92c7bf2 100644 --- a/control-plane/internal/embedded/ui.go +++ b/control-plane/internal/embedded/ui.go @@ -14,6 +14,7 @@ import ( ) // UIFiles embeds the entire web/client/dist directory into the binary +// //go:embed web/client/dist var UIFiles embed.FS diff --git a/control-plane/internal/encryption/encryption.go b/control-plane/internal/encryption/encryption.go index c56538e0..572f6871 100644 --- a/control-plane/internal/encryption/encryption.go +++ b/control-plane/internal/encryption/encryption.go @@ -100,7 +100,7 @@ func (es *EncryptionService) Decrypt(ciphertext string) (string, error) { // EncryptConfigurationValues encrypts sensitive values in a configuration map func (es *EncryptionService) EncryptConfigurationValues(config map[string]interface{}, secretFields []string) (map[string]interface{}, error) { result := make(map[string]interface{}) - + // Copy all values for key, value := range config { result[key] = value @@ -125,7 +125,7 @@ func (es *EncryptionService) EncryptConfigurationValues(config map[string]interf // DecryptConfigurationValues decrypts sensitive values in a configuration map func (es *EncryptionService) DecryptConfigurationValues(config 
map[string]interface{}, secretFields []string) (map[string]interface{}, error) { result := make(map[string]interface{}) - + // Copy all values for key, value := range config { result[key] = value diff --git a/control-plane/internal/events/execution_events.go b/control-plane/internal/events/execution_events.go index 8f11a104..c2ff374f 100644 --- a/control-plane/internal/events/execution_events.go +++ b/control-plane/internal/events/execution_events.go @@ -5,8 +5,8 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // ExecutionEventType represents the type of execution event diff --git a/control-plane/internal/events/node_events.go b/control-plane/internal/events/node_events.go index 07634026..ed580704 100644 --- a/control-plane/internal/events/node_events.go +++ b/control-plane/internal/events/node_events.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" ) // NodeEventType represents the type of node event @@ -250,12 +250,9 @@ func StartNodeHeartbeat(interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() - for { - select { - case <-ticker.C: - if GlobalNodeEventBus.GetSubscriberCount() > 0 { - PublishNodeHeartbeat() - } + for range ticker.C { + if GlobalNodeEventBus.GetSubscriberCount() > 0 { + PublishNodeHeartbeat() } } }() diff --git a/control-plane/internal/events/reasoner_events.go b/control-plane/internal/events/reasoner_events.go index 83c7d706..54bdadf5 100644 --- a/control-plane/internal/events/reasoner_events.go +++ b/control-plane/internal/events/reasoner_events.go @@ -10,22 +10,22 @@ import ( type ReasonerEventType string const ( - ReasonerOnline ReasonerEventType = "reasoner_online" - 
ReasonerOffline ReasonerEventType = "reasoner_offline" - ReasonerUpdated ReasonerEventType = "reasoner_updated" - NodeStatusChanged ReasonerEventType = "node_status_changed" - ReasonersRefresh ReasonerEventType = "reasoners_refresh" + ReasonerOnline ReasonerEventType = "reasoner_online" + ReasonerOffline ReasonerEventType = "reasoner_offline" + ReasonerUpdated ReasonerEventType = "reasoner_updated" + NodeStatusChanged ReasonerEventType = "node_status_changed" + ReasonersRefresh ReasonerEventType = "reasoners_refresh" Heartbeat ReasonerEventType = "heartbeat" ) // ReasonerEvent represents a reasoner state change event type ReasonerEvent struct { - Type ReasonerEventType `json:"type"` - ReasonerID string `json:"reasoner_id,omitempty"` - NodeID string `json:"node_id,omitempty"` - Status string `json:"status,omitempty"` - Timestamp time.Time `json:"timestamp"` - Data interface{} `json:"data,omitempty"` + Type ReasonerEventType `json:"type"` + ReasonerID string `json:"reasoner_id,omitempty"` + NodeID string `json:"node_id,omitempty"` + Status string `json:"status,omitempty"` + Timestamp time.Time `json:"timestamp"` + Data interface{} `json:"data,omitempty"` } // ReasonerEventBus manages reasoner event broadcasting @@ -48,7 +48,7 @@ func (bus *ReasonerEventBus) Subscribe(subscriberID string) chan ReasonerEvent { ch := make(chan ReasonerEvent, 100) // Buffer to prevent blocking bus.subscribers[subscriberID] = ch - + return ch } @@ -68,7 +68,6 @@ func (bus *ReasonerEventBus) Publish(event ReasonerEvent) { bus.mutex.RLock() defer bus.mutex.RUnlock() - for _, ch := range bus.subscribers { select { case ch <- event: @@ -110,8 +109,7 @@ func PublishReasonerOnline(reasonerID, nodeID string, data interface{}) { Timestamp: time.Now(), Data: data, } - - + GlobalReasonerEventBus.Publish(event) } @@ -125,8 +123,7 @@ func PublishReasonerOffline(reasonerID, nodeID string, data interface{}) { Timestamp: time.Now(), Data: data, } - - + GlobalReasonerEventBus.Publish(event) } @@ -152,8 
+149,7 @@ func PublishNodeStatusChanged(nodeID, status string, data interface{}) { Timestamp: time.Now(), Data: data, } - - + GlobalReasonerEventBus.Publish(event) } @@ -181,13 +177,10 @@ func StartHeartbeat(interval time.Duration) { go func() { ticker := time.NewTicker(interval) defer ticker.Stop() - - for { - select { - case <-ticker.C: - if GlobalReasonerEventBus.GetSubscriberCount() > 0 { - PublishHeartbeat() - } + + for range ticker.C { + if GlobalReasonerEventBus.GetSubscriberCount() > 0 { + PublishHeartbeat() } } }() diff --git a/control-plane/internal/handlers/did_handlers.go b/control-plane/internal/handlers/did_handlers.go index efc0e4c4..46122b19 100644 --- a/control-plane/internal/handlers/did_handlers.go +++ b/control-plane/internal/handlers/did_handlers.go @@ -7,8 +7,8 @@ import ( "github.com/gin-gonic/gin" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // DIDService defines the DID operations required by handlers. 
@@ -417,10 +417,10 @@ func (h *DIDHandlers) GetDIDDocument(c *gin.Context) { }, "service": []map[string]interface{}{ { - "id": did + "#haxen-service", - "type": "HaxenAgentService", - "serviceEndpoint": "https://haxen.example.com/api/v1", - "description": "Haxen Agent Platform Service", + "id": did + "#agentfield-service", + "type": "AgentFieldAgentService", + "serviceEndpoint": "https://agentfield.example.com/api/v1", + "description": "AgentField Agent Platform Service", }, }, } diff --git a/control-plane/internal/handlers/did_handlers_test.go b/control-plane/internal/handlers/did_handlers_test.go index 8bdf59f5..ef11a3fa 100644 --- a/control-plane/internal/handlers/did_handlers_test.go +++ b/control-plane/internal/handlers/did_handlers_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" diff --git a/control-plane/internal/handlers/execute.go b/control-plane/internal/handlers/execute.go index 339a394e..715d8d54 100644 --- a/control-plane/internal/handlers/execute.go +++ b/control-plane/internal/handlers/execute.go @@ -16,10 +16,10 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/internal/utils" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/utils" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -109,11 +109,7 @@ type asyncExecutionJob struct { } type asyncWorkerPool struct { - queue chan asyncExecutionJob - metrics struct { - submitted uint64 - rejected uint64 - } + queue chan asyncExecutionJob } type 
completionJob struct { @@ -883,12 +879,12 @@ func (p *asyncWorkerPool) submit(job asyncExecutionJob) bool { func getAsyncWorkerPool() *asyncWorkerPool { asyncPoolOnce.Do(func() { - workerCount := resolveIntFromEnv("HAXEN_EXEC_ASYNC_WORKERS", runtime.NumCPU()) + workerCount := resolveIntFromEnv("AGENTFIELD_EXEC_ASYNC_WORKERS", runtime.NumCPU()) if workerCount <= 0 { workerCount = runtime.NumCPU() } - queueCapacity := resolveIntFromEnv("HAXEN_EXEC_ASYNC_QUEUE_CAPACITY", 1024) + queueCapacity := resolveIntFromEnv("AGENTFIELD_EXEC_ASYNC_QUEUE_CAPACITY", 1024) if queueCapacity <= 0 { queueCapacity = 1024 } @@ -916,7 +912,7 @@ func resolveIntFromEnv(key string, fallback int) int { func ensureCompletionWorker() { completionOnce.Do(func() { - size := resolveIntFromEnv("HAXEN_EXEC_COMPLETION_QUEUE", 2048) + size := resolveIntFromEnv("AGENTFIELD_EXEC_COMPLETION_QUEUE", 2048) if size <= 0 { size = 2048 } diff --git a/control-plane/internal/handlers/execute_handler_test.go b/control-plane/internal/handlers/execute_handler_test.go index 7906d544..0f61b2d7 100644 --- a/control-plane/internal/handlers/execute_handler_test.go +++ b/control-plane/internal/handlers/execute_handler_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" diff --git a/control-plane/internal/handlers/execute_test.go b/control-plane/internal/handlers/execute_test.go index c9ddec08..0c47e5b1 100644 --- a/control-plane/internal/handlers/execute_test.go +++ b/control-plane/internal/handlers/execute_test.go @@ -4,7 +4,6 @@ package handlers import ( - "github.com/your-org/haxen/control-plane/pkg/types" "bytes" "context" "encoding/json" @@ -13,6 +12,8 @@ import ( "testing" "time" + 
"github.com/Agent-Field/agentfield/control-plane/pkg/types" + "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -224,16 +225,16 @@ func (m *MockStorageProvider) GetDID(ctx context.Context, did string) (*types.DI func (m *MockStorageProvider) ListDIDs(ctx context.Context) ([]*types.DIDRegistryEntry, error) { return nil, nil } -func (m *MockStorageProvider) StoreHaxenServerDID(ctx context.Context, haxenServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error { +func (m *MockStorageProvider) StoreAgentFieldServerDID(ctx context.Context, agentfieldServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error { return nil } -func (m *MockStorageProvider) GetHaxenServerDID(ctx context.Context, haxenServerID string) (*types.HaxenServerDIDInfo, error) { +func (m *MockStorageProvider) GetAgentFieldServerDID(ctx context.Context, agentfieldServerID string) (*types.AgentFieldServerDIDInfo, error) { return nil, nil } -func (m *MockStorageProvider) ListHaxenServerDIDs(ctx context.Context) ([]*types.HaxenServerDIDInfo, error) { +func (m *MockStorageProvider) ListAgentFieldServerDIDs(ctx context.Context) ([]*types.AgentFieldServerDIDInfo, error) { return nil, nil } -func (m *MockStorageProvider) StoreAgentDID(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int) error { +func (m *MockStorageProvider) StoreAgentDID(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int) error { return nil } func (m *MockStorageProvider) GetAgentDID(ctx context.Context, agentID string) (*types.AgentDIDInfo, error) { @@ -251,7 +252,7 @@ func (m *MockStorageProvider) GetComponentDID(ctx context.Context, componentID s func (m *MockStorageProvider) ListComponentDIDs(ctx context.Context, agentDID string) ([]*types.ComponentDIDInfo, error) { return nil, nil } -func (m *MockStorageProvider) 
StoreAgentDIDWithComponents(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int, components []interface{}) error { +func (m *MockStorageProvider) StoreAgentDIDWithComponents(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int, components []interface{}) error { return nil } func (m *MockStorageProvider) StoreExecutionVC(ctx context.Context, vcID, executionID, workflowID, sessionID, issuerDID, targetDID, callerDID, inputHash, outputHash, status string, vcDocument []byte, signature string, storageURI string, documentSizeBytes int64) error { diff --git a/control-plane/internal/handlers/execution_cleanup.go b/control-plane/internal/handlers/execution_cleanup.go index 3b079127..acd7f41f 100644 --- a/control-plane/internal/handlers/execution_cleanup.go +++ b/control-plane/internal/handlers/execution_cleanup.go @@ -1,12 +1,13 @@ package handlers import ( - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" "context" "sync" "time" + + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" ) // ExecutionCleanupService manages the background cleanup of old executions diff --git a/control-plane/internal/handlers/execution_notes.go b/control-plane/internal/handlers/execution_notes.go index 823e28bf..88ee4b36 100644 --- a/control-plane/internal/handlers/execution_notes.go +++ b/control-plane/internal/handlers/execution_notes.go @@ -7,8 +7,8 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" 
"github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/execution_notes_test.go b/control-plane/internal/handlers/execution_notes_test.go index 859e4d7c..e362d8c2 100644 --- a/control-plane/internal/handlers/execution_notes_test.go +++ b/control-plane/internal/handlers/execution_notes_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" diff --git a/control-plane/internal/handlers/memory.go b/control-plane/internal/handlers/memory.go index dde2b8d6..78bdb57b 100644 --- a/control-plane/internal/handlers/memory.go +++ b/control-plane/internal/handlers/memory.go @@ -6,8 +6,8 @@ import ( "net/http" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/memory_events.go b/control-plane/internal/handlers/memory_events.go index 6d25ed6e..f201561c 100644 --- a/control-plane/internal/handlers/memory_events.go +++ b/control-plane/internal/handlers/memory_events.go @@ -1,8 +1,6 @@ package handlers import ( - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" "encoding/json" "net/http" "path/filepath" @@ -10,6 +8,10 @@ import ( "strings" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + "github.com/gin-gonic/gin" "github.com/gorilla/websocket" ) @@ -50,7 +52,9 @@ func (h *MemoryEventsHandler) WebSocketHandler(c *gin.Context) { // Subscribe to memory changes eventChan, err := 
h.storage.SubscribeToMemoryChanges(ctx, scope, scopeID) if err != nil { - conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, "failed to subscribe to events")) + if writeErr := conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseInternalServerErr, "failed to subscribe to events")); writeErr != nil { + logger.Logger.Warn().Err(writeErr).Msg("failed to send websocket close message") + } return } @@ -58,7 +62,9 @@ func (h *MemoryEventsHandler) WebSocketHandler(c *gin.Context) { go func() { for { if _, _, err := conn.NextReader(); err != nil { - conn.Close() + if closeErr := conn.Close(); closeErr != nil { + logger.Logger.Debug().Err(closeErr).Msg("websocket close returned error") + } break } } diff --git a/control-plane/internal/handlers/memory_handler_test.go b/control-plane/internal/handlers/memory_handler_test.go index 30a69ea2..12b8440d 100644 --- a/control-plane/internal/handlers/memory_handler_test.go +++ b/control-plane/internal/handlers/memory_handler_test.go @@ -10,7 +10,7 @@ import ( "sync" "testing" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" diff --git a/control-plane/internal/handlers/nodes.go b/control-plane/internal/handlers/nodes.go index 0b1c70bb..cfeea141 100644 --- a/control-plane/internal/handlers/nodes.go +++ b/control-plane/internal/handlers/nodes.go @@ -12,10 +12,10 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/services" // Import services package - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/services" // Import services package + 
"github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/go-playground/validator/v10" @@ -1139,7 +1139,7 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS if err != nil { logger.Logger.Error().Err(err).Msgf("❌ Failed to call discovery endpoint: %s", discoveryURL) c.JSON(http.StatusBadGateway, gin.H{ - "error": "Failed to discover serverless agent", + "error": "Failed to discover serverless agent", "details": fmt.Sprintf("Could not reach discovery endpoint: %v", err), }) return @@ -1149,7 +1149,7 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS if resp.StatusCode != http.StatusOK { logger.Logger.Error().Msgf("❌ Discovery endpoint returned status %d", resp.StatusCode) c.JSON(http.StatusBadGateway, gin.H{ - "error": "Discovery endpoint failed", + "error": "Discovery endpoint failed", "details": fmt.Sprintf("Discovery endpoint returned status %d", resp.StatusCode), }) return @@ -1157,20 +1157,20 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS // Parse discovery response var discoveryData struct { - NodeID string `json:"node_id"` - Version string `json:"version"` - Reasoners []struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - InputSchema map[string]interface{} `json:"input_schema"` + NodeID string `json:"node_id"` + Version string `json:"version"` + Reasoners []struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + InputSchema map[string]interface{} `json:"input_schema"` OutputSchema map[string]interface{} `json:"output_schema"` } `json:"reasoners"` Skills []struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - InputSchema map[string]interface{} `json:"input_schema"` + ID string `json:"id"` + Name string 
`json:"name"` + Description string `json:"description"` + InputSchema map[string]interface{} `json:"input_schema"` OutputSchema map[string]interface{} `json:"output_schema"` } `json:"skills"` } @@ -1178,7 +1178,7 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS if err := json.NewDecoder(resp.Body).Decode(&discoveryData); err != nil { logger.Logger.Error().Err(err).Msg("❌ Failed to parse discovery response") c.JSON(http.StatusBadGateway, gin.H{ - "error": "Invalid discovery response", + "error": "Invalid discovery response", "details": fmt.Sprintf("Could not parse discovery data: %v", err), }) return @@ -1187,7 +1187,7 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS // Validate required fields if discoveryData.NodeID == "" { c.JSON(http.StatusBadRequest, gin.H{ - "error": "Invalid discovery response", + "error": "Invalid discovery response", "details": "node_id is missing from discovery response", }) return @@ -1221,21 +1221,21 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS executionURL := strings.TrimSuffix(req.InvocationURL, "/") + "/execute" newNode := types.AgentNode{ - ID: discoveryData.NodeID, - TeamID: "default", // Default team for serverless agents - BaseURL: req.InvocationURL, - Version: discoveryData.Version, - DeploymentType: "serverless", - InvocationURL: &executionURL, - Reasoners: reasoners, - Skills: skills, - RegisteredAt: time.Now().UTC(), - LastHeartbeat: time.Now().UTC(), - HealthStatus: types.HealthStatusUnknown, // Serverless agents don't have persistent health - LifecycleStatus: types.AgentStatusReady, // Serverless agents are always ready + ID: discoveryData.NodeID, + TeamID: "default", // Default team for serverless agents + BaseURL: req.InvocationURL, + Version: discoveryData.Version, + DeploymentType: "serverless", + InvocationURL: &executionURL, + Reasoners: reasoners, + Skills: skills, + RegisteredAt: time.Now().UTC(), + 
LastHeartbeat: time.Now().UTC(), + HealthStatus: types.HealthStatusUnknown, // Serverless agents don't have persistent health + LifecycleStatus: types.AgentStatusReady, // Serverless agents are always ready Metadata: types.AgentMetadata{ Custom: map[string]interface{}{ - "serverless": true, + "serverless": true, "discovery_url": discoveryURL, }, }, @@ -1251,7 +1251,7 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS if err := storageProvider.RegisterAgent(ctx, &newNode); err != nil { logger.Logger.Error().Err(err).Msg("❌ Failed to register serverless agent") c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to register serverless agent", + "error": "Failed to register serverless agent", "details": err.Error(), }) return @@ -1285,12 +1285,12 @@ func RegisterServerlessAgentHandler(storageProvider storage.StorageProvider, uiS "success": true, "message": "Serverless agent registered successfully", "node": gin.H{ - "id": newNode.ID, - "version": newNode.Version, + "id": newNode.ID, + "version": newNode.Version, "deployment_type": newNode.DeploymentType, - "invocation_url": newNode.InvocationURL, + "invocation_url": newNode.InvocationURL, "reasoners_count": len(newNode.Reasoners), - "skills_count": len(newNode.Skills), + "skills_count": len(newNode.Skills), }, }) } diff --git a/control-plane/internal/handlers/nodes_rest.go b/control-plane/internal/handlers/nodes_rest.go index 5905e14c..d8d42626 100644 --- a/control-plane/internal/handlers/nodes_rest.go +++ b/control-plane/internal/handlers/nodes_rest.go @@ -6,10 +6,10 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + 
"github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/reasoners.go b/control-plane/internal/handlers/reasoners.go index a53296ac..19362d31 100644 --- a/control-plane/internal/handlers/reasoners.go +++ b/control-plane/internal/handlers/reasoners.go @@ -2,6 +2,7 @@ package handlers import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -9,10 +10,10 @@ import ( "strings" "time" // Added for time.Now() - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/internal/utils" // Added for ID generation - "github.com/your-org/haxen/control-plane/pkg/types" // Added for new types + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/utils" // Added for ID generation + "github.com/Agent-Field/agentfield/control-plane/pkg/types" // Added for new types "github.com/gin-gonic/gin" ) @@ -23,6 +24,15 @@ type ExecuteReasonerRequest struct { Context map[string]interface{} `json:"context,omitempty"` } +func persistWorkflowExecution(ctx context.Context, storageProvider storage.StorageProvider, execution *types.WorkflowExecution) { + if err := storageProvider.StoreWorkflowExecution(ctx, execution); err != nil { + logger.Logger.Error(). + Err(err). + Str("execution_id", execution.ExecutionID). 
+ Msg("failed to persist workflow execution state") + } +} + // ExecuteReasonerResponse represents the response from executing a reasoner type ExecuteReasonerResponse struct { Result interface{} `json:"result"` @@ -37,8 +47,8 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler ctx := c.Request.Context() startTime := time.Now() - // Generate Haxen Request ID - haxenRequestID := utils.GenerateHaxenRequestID() + // Generate AgentField Request ID + agentfieldRequestID := utils.GenerateAgentFieldRequestID() // Extract headers workflowID := c.GetHeader("X-Workflow-ID") @@ -120,15 +130,15 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler // Create workflow execution record workflowExecution := &types.WorkflowExecution{ - WorkflowID: workflowID, - ExecutionID: executionID, - HaxenRequestID: haxenRequestID, - AgentNodeID: nodeID, - ReasonerID: reasonerName, - Status: "running", - StartedAt: startTime, - CreatedAt: startTime, - UpdatedAt: startTime, + WorkflowID: workflowID, + ExecutionID: executionID, + AgentFieldRequestID: agentfieldRequestID, + AgentNodeID: nodeID, + ReasonerID: reasonerName, + Status: "running", + StartedAt: startTime, + CreatedAt: startTime, + UpdatedAt: startTime, } // Set optional fields @@ -181,7 +191,7 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler duration := endTime.Sub(startTime).Milliseconds() workflowExecution.DurationMS = &duration workflowExecution.UpdatedAt = endTime - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create agent request"}) return } @@ -189,7 +199,7 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler agentReq.Header.Set("Content-Type", "application/json") agentReq.Header.Set("X-Workflow-ID", workflowID) agentReq.Header.Set("X-Execution-ID", 
executionID) - agentReq.Header.Set("X-Haxen-Request-ID", haxenRequestID) + agentReq.Header.Set("X-AgentField-Request-ID", agentfieldRequestID) if parentWorkflowID != "" { agentReq.Header.Set("X-Parent-Workflow-ID", parentWorkflowID) } @@ -235,7 +245,7 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler workflowExecution.UpdatedAt = endTime // Store execution record - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusServiceUnavailable, gin.H{ "error": fmt.Sprintf("failed to call agent node: %v", err), @@ -258,7 +268,7 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler workflowExecution.UpdatedAt = endTime // Store execution record - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to read agent response"}) return @@ -278,7 +288,7 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler workflowExecution.UpdatedAt = endTime // Store execution record - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to parse agent response"}) return @@ -295,15 +305,13 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler workflowExecution.UpdatedAt = endTime // Store execution record - if err := storageProvider.StoreWorkflowExecution(ctx, workflowExecution); err != nil { - // Log error but don't fail the request - logger.Logger.Error().Err(err).Msg("Failed to store workflow execution") - } + // Store execution record + persistWorkflowExecution(ctx, storageProvider, workflowExecution) // Set response headers c.Header("X-Workflow-ID", workflowID) c.Header("X-Execution-ID", executionID) 
- c.Header("X-Haxen-Request-ID", haxenRequestID) + c.Header("X-AgentField-Request-ID", agentfieldRequestID) c.Header("X-Agent-Node-ID", nodeID) c.Header("X-Duration-MS", fmt.Sprintf("%d", duration)) @@ -317,14 +325,14 @@ func ExecuteReasonerHandler(storageProvider storage.StorageProvider) gin.Handler } } -// ExecuteSkillHandler handles execution of skills via Haxen server +// ExecuteSkillHandler handles execution of skills via AgentField server func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFunc { return func(c *gin.Context) { ctx := c.Request.Context() startTime := time.Now() - // Generate Haxen Request ID - haxenRequestID := utils.GenerateHaxenRequestID() + // Generate AgentField Request ID + agentfieldRequestID := utils.GenerateAgentFieldRequestID() // Extract headers workflowID := c.GetHeader("X-Workflow-ID") @@ -406,15 +414,15 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun // Create workflow execution record workflowExecution := &types.WorkflowExecution{ - WorkflowID: workflowID, - ExecutionID: executionID, - HaxenRequestID: haxenRequestID, - AgentNodeID: nodeID, - ReasonerID: skillName, // For skills, ReasonerID will store skillName - Status: "running", - StartedAt: startTime, - CreatedAt: startTime, - UpdatedAt: startTime, + WorkflowID: workflowID, + ExecutionID: executionID, + AgentFieldRequestID: agentfieldRequestID, + AgentNodeID: nodeID, + ReasonerID: skillName, // For skills, ReasonerID will store skillName + Status: "running", + StartedAt: startTime, + CreatedAt: startTime, + UpdatedAt: startTime, } // Set optional fields @@ -467,7 +475,7 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun duration := endTime.Sub(startTime).Milliseconds() workflowExecution.DurationMS = &duration workflowExecution.UpdatedAt = endTime - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) 
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create agent request"}) return } @@ -475,7 +483,7 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun agentReq.Header.Set("Content-Type", "application/json") agentReq.Header.Set("X-Workflow-ID", workflowID) agentReq.Header.Set("X-Execution-ID", executionID) - agentReq.Header.Set("X-Haxen-Request-ID", haxenRequestID) + agentReq.Header.Set("X-AgentField-Request-ID", agentfieldRequestID) if parentWorkflowID != "" { agentReq.Header.Set("X-Parent-Workflow-ID", parentWorkflowID) } @@ -521,7 +529,7 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun workflowExecution.UpdatedAt = endTime // Store execution record - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusServiceUnavailable, gin.H{ "error": fmt.Sprintf("failed to call agent node: %v", err), @@ -544,7 +552,7 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun workflowExecution.UpdatedAt = endTime // Store execution record - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to read agent response"}) return @@ -564,7 +572,7 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun workflowExecution.UpdatedAt = endTime // Store execution record - storageProvider.StoreWorkflowExecution(ctx, workflowExecution) + persistWorkflowExecution(ctx, storageProvider, workflowExecution) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to parse agent response"}) return @@ -581,15 +589,12 @@ func ExecuteSkillHandler(storageProvider storage.StorageProvider) gin.HandlerFun workflowExecution.UpdatedAt = endTime // Store execution record - if err := storageProvider.StoreWorkflowExecution(ctx, 
workflowExecution); err != nil { - // Log error but don't fail the request - logger.Logger.Error().Err(err).Msg("Failed to store workflow execution") - } + persistWorkflowExecution(ctx, storageProvider, workflowExecution) // Set response headers c.Header("X-Workflow-ID", workflowID) c.Header("X-Execution-ID", executionID) - c.Header("X-Haxen-Request-ID", haxenRequestID) + c.Header("X-AgentField-Request-ID", agentfieldRequestID) c.Header("X-Agent-Node-ID", nodeID) c.Header("X-Duration-MS", fmt.Sprintf("%d", duration)) diff --git a/control-plane/internal/handlers/test_helpers_test.go b/control-plane/internal/handlers/test_helpers_test.go index 80bc9d15..978410b4 100644 --- a/control-plane/internal/handlers/test_helpers_test.go +++ b/control-plane/internal/handlers/test_helpers_test.go @@ -4,10 +4,9 @@ import ( "context" "fmt" "sync" - "time" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) type testExecutionStorage struct { @@ -278,11 +277,3 @@ func (s *testExecutionStorage) QueryExecutionRecords(ctx context.Context, filter } return results, nil } - -func ptrTime(t time.Time) *time.Time { - return &t -} - -func ptrInt64(v int64) *int64 { - return &v -} diff --git a/control-plane/internal/handlers/ui/config.go b/control-plane/internal/handlers/ui/config.go index 7a195160..1700cfdf 100644 --- a/control-plane/internal/handlers/ui/config.go +++ b/control-plane/internal/handlers/ui/config.go @@ -4,8 +4,8 @@ import ( "encoding/json" "net/http" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -157,7 +157,7 @@ func (h *ConfigHandler) SetConfigHandler(c 
*gin.Context) { if !validationResult.Valid { c.JSON(http.StatusBadRequest, map[string]interface{}{ - "error": "configuration validation failed", + "error": "configuration validation failed", "validation_errors": validationResult.Errors, }) return @@ -171,7 +171,7 @@ func (h *ConfigHandler) SetConfigHandler(c *gin.Context) { // Check if configuration already exists existingConfig, err := h.storage.GetAgentConfiguration(ctx, agentID, packageID) - + if err != nil { // Configuration doesn't exist, create new one newConfig := &types.AgentConfiguration{ diff --git a/control-plane/internal/handlers/ui/config_test.go b/control-plane/internal/handlers/ui/config_test.go index 3ed9425f..35632f00 100644 --- a/control-plane/internal/handlers/ui/config_test.go +++ b/control-plane/internal/handlers/ui/config_test.go @@ -13,9 +13,9 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" @@ -454,31 +454,31 @@ func (m *MockStorageProvider) ListDIDs(ctx context.Context) ([]*types.DIDRegistr return args.Get(0).([]*types.DIDRegistryEntry), args.Error(1) } -// Haxen Server DID operations -func (m *MockStorageProvider) StoreHaxenServerDID(ctx context.Context, haxenServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error { - args := m.Called(ctx, haxenServerID, rootDID, masterSeed, createdAt, lastKeyRotation) +// AgentField Server DID operations +func (m *MockStorageProvider) StoreAgentFieldServerDID(ctx context.Context, agentfieldServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error { + args := m.Called(ctx, 
agentfieldServerID, rootDID, masterSeed, createdAt, lastKeyRotation) return args.Error(0) } -func (m *MockStorageProvider) GetHaxenServerDID(ctx context.Context, haxenServerID string) (*types.HaxenServerDIDInfo, error) { - args := m.Called(ctx, haxenServerID) +func (m *MockStorageProvider) GetAgentFieldServerDID(ctx context.Context, agentfieldServerID string) (*types.AgentFieldServerDIDInfo, error) { + args := m.Called(ctx, agentfieldServerID) if args.Get(0) == nil { return nil, args.Error(1) } - return args.Get(0).(*types.HaxenServerDIDInfo), args.Error(1) + return args.Get(0).(*types.AgentFieldServerDIDInfo), args.Error(1) } -func (m *MockStorageProvider) ListHaxenServerDIDs(ctx context.Context) ([]*types.HaxenServerDIDInfo, error) { +func (m *MockStorageProvider) ListAgentFieldServerDIDs(ctx context.Context) ([]*types.AgentFieldServerDIDInfo, error) { args := m.Called(ctx) if args.Get(0) == nil { return nil, args.Error(1) } - return args.Get(0).([]*types.HaxenServerDIDInfo), args.Error(1) + return args.Get(0).([]*types.AgentFieldServerDIDInfo), args.Error(1) } // Agent DID operations -func (m *MockStorageProvider) StoreAgentDID(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int) error { - args := m.Called(ctx, agentID, agentDID, haxenServerDID, publicKeyJWK, derivationIndex) +func (m *MockStorageProvider) StoreAgentDID(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int) error { + args := m.Called(ctx, agentID, agentDID, agentfieldServerDID, publicKeyJWK, derivationIndex) return args.Error(0) } diff --git a/control-plane/internal/handlers/ui/dashboard.go b/control-plane/internal/handlers/ui/dashboard.go index 8d295655..7ac4116e 100644 --- a/control-plane/internal/handlers/ui/dashboard.go +++ b/control-plane/internal/handlers/ui/dashboard.go @@ -7,10 +7,10 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - 
"github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/ui/did.go b/control-plane/internal/handlers/ui/did.go index 898d637e..b47d1f37 100644 --- a/control-plane/internal/handlers/ui/did.go +++ b/control-plane/internal/handlers/ui/did.go @@ -4,11 +4,13 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" + "strings" "time" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -50,21 +52,21 @@ func (h *DIDHandler) GetNodeDIDHandler(c *gin.Context) { return } - // Get haxen server ID dynamically - haxenServerID, err := h.didService.GetHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := h.didService.GetAgentFieldServerID() if err != nil { c.JSON(http.StatusOK, gin.H{ "has_did": false, "did_status": "inactive", "reasoner_count": 0, "skill_count": 0, - "error": fmt.Sprintf("Failed to get haxen server ID: %v", err), + "error": fmt.Sprintf("Failed to get af server ID: %v", err), }) return } - // Get DID registry for the haxen server (not the node) - registry, err := h.didService.GetRegistry(haxenServerID) + // Get DID registry for the af server (not the node) + registry, err := h.didService.GetRegistry(agentfieldServerID) if err != 
nil || registry == nil { c.JSON(http.StatusOK, gin.H{ "has_did": false, @@ -96,15 +98,15 @@ func (h *DIDHandler) GetNodeDIDHandler(c *gin.Context) { } c.JSON(http.StatusOK, gin.H{ - "did": agentInfo.DID, - "agent_node_id": nodeID, - "haxen_server_id": registry.HaxenServerID, - "public_key_jwk": agentInfo.PublicKeyJWK, - "derivation_path": agentInfo.DerivationPath, - "reasoners": agentInfo.Reasoners, - "skills": agentInfo.Skills, - "status": status, - "registered_at": agentInfo.RegisteredAt.Format(time.RFC3339), + "did": agentInfo.DID, + "agent_node_id": nodeID, + "agentfield_server_id": registry.AgentFieldServerID, + "public_key_jwk": agentInfo.PublicKeyJWK, + "derivation_path": agentInfo.DerivationPath, + "reasoners": agentInfo.Reasoners, + "skills": agentInfo.Skills, + "status": status, + "registered_at": agentInfo.RegisteredAt.Format(time.RFC3339), }) } @@ -252,9 +254,9 @@ func (h *DIDHandler) GetExecutionVCStatusHandler(c *gin.Context) { } } else if executionVC.StorageURI != "" { vcDocumentForResponse = map[string]interface{}{ - "storage_uri": executionVC.StorageURI, + "storage_uri": executionVC.StorageURI, "document_size_bytes": executionVC.DocumentSize, - "note": "VC document stored via external URI", + "note": "VC document stored via external URI", } documentStatus = "external" } else { @@ -303,7 +305,7 @@ func (h *DIDHandler) GetExecutionVCHandler(c *gin.Context) { return } - if executionVC.VCDocument == nil || len(executionVC.VCDocument) == 0 { + if len(executionVC.VCDocument) == 0 { if executionVC.StorageURI == "" { fmt.Printf("DEBUG: VC document is empty for execution_id: %s\n", executionID) c.JSON(http.StatusNotFound, gin.H{"error": "VC document not found or empty"}) @@ -500,18 +502,17 @@ func (h *DIDHandler) ExportVCsHandler(c *gin.Context) { // Parse query parameters for filtering filters := &types.VCFilters{} + filters.Limit = 100 // default if limit := c.Query("limit"); limit != "" { - if limitInt, err := fmt.Sscanf(limit, "%d", &filters.Limit); err 
== nil && limitInt == 1 { - // limit was successfully parsed - } else { - filters.Limit = 100 // default + if parsedLimit, err := strconv.Atoi(limit); err == nil { + filters.Limit = parsedLimit } - } else { - filters.Limit = 100 // default } if offset := c.Query("offset"); offset != "" { - fmt.Sscanf(offset, "%d", &filters.Offset) + if parsedOffset, err := strconv.Atoi(offset); err == nil { + filters.Offset = parsedOffset + } } if status := c.Query("status"); status != "" { @@ -591,7 +592,7 @@ func (h *DIDHandler) GetDIDResolutionBundleHandler(c *gin.Context) { "component_dids": []interface{}{}, "resolution_metadata": gin.H{ "resolved_at": time.Now().Format(time.RFC3339), - "resolver": "haxen-server", + "resolver": "agentfield-server", "status": "inactive", }, }) @@ -633,7 +634,7 @@ func (h *DIDHandler) GetDIDResolutionBundleHandler(c *gin.Context) { { "id": did + "#agent-service", "type": "AgentService", - "serviceEndpoint": fmt.Sprintf("https://haxen-server/agents/%s", agentDID.AgentNodeID), + "serviceEndpoint": fmt.Sprintf("https://agentfield-server/agents/%s", agentDID.AgentNodeID), }, }, } @@ -651,7 +652,7 @@ func (h *DIDHandler) GetDIDResolutionBundleHandler(c *gin.Context) { serviceEndpoints = append(serviceEndpoints, gin.H{ "id": did + "#agent-service", "type": "AgentService", - "serviceEndpoint": fmt.Sprintf("https://haxen-server/agents/%s", agentDID.AgentNodeID), + "serviceEndpoint": fmt.Sprintf("https://agentfield-server/agents/%s", agentDID.AgentNodeID), }) // Add component DIDs (reasoners and skills) @@ -708,7 +709,7 @@ func (h *DIDHandler) GetDIDResolutionBundleHandler(c *gin.Context) { { "id": did + "#component-service", "type": fmt.Sprintf("%sService", componentDID.ComponentType), - "serviceEndpoint": fmt.Sprintf("https://haxen-server/components/%s", componentDID.ComponentID), + "serviceEndpoint": fmt.Sprintf("https://agentfield-server/components/%s", componentDID.ComponentID), }, }, } @@ -717,7 +718,7 @@ func (h *DIDHandler) 
GetDIDResolutionBundleHandler(c *gin.Context) { serviceEndpoints = append(serviceEndpoints, gin.H{ "id": did + "#component-service", "type": fmt.Sprintf("%sService", componentDID.ComponentType), - "serviceEndpoint": fmt.Sprintf("https://haxen-server/components/%s", componentDID.ComponentID), + "serviceEndpoint": fmt.Sprintf("https://agentfield-server/components/%s", componentDID.ComponentID), }) break @@ -753,9 +754,9 @@ func (h *DIDHandler) GetDIDResolutionBundleHandler(c *gin.Context) { // Build resolution metadata resolutionMetadata := gin.H{ "resolved_at": time.Now().Format(time.RFC3339), - "resolver": "haxen-server", + "resolver": "agentfield-server", "status": resolutionStatus, - "method": "haxen", + "method": "agentfield", } if resolutionStatus == "resolved" { @@ -801,8 +802,8 @@ func (h *DIDHandler) DownloadDIDResolutionBundleHandler(c *gin.Context) { "did": did, "resolution_metadata": gin.H{ "resolved_at": time.Now().Format(time.RFC3339), - "resolver": "haxen-server", - "method": "haxen", + "resolver": "agentfield-server", + "method": "agentfield", }, "bundle_type": "did_resolution", "generated_at": time.Now().Format(time.RFC3339), @@ -860,38 +861,23 @@ func getDIDRole(did string, vc types.ExecutionVC) string { // sanitizeDIDForFilename sanitizes a DID string to be safe for use in filenames func sanitizeDIDForFilename(did string) string { - // Replace characters that are not safe for filenames - result := did - result = fmt.Sprintf("%s", result) // Ensure it's a string - - // Replace common DID characters that might cause issues in filenames - replacements := map[string]string{ - ":": "_", - "/": "_", - "\\": "_", - "?": "_", - "*": "_", - "<": "_", - ">": "_", - "|": "_", - "\"": "_", - " ": "_", - } - - for old, new := range replacements { - result = fmt.Sprintf("%s", result) - // Simple string replacement - for i := 0; i < len(result); i++ { - if i < len(result) && string(result[i]) == old { - result = result[:i] + new + result[i+1:] - } - } - } - - // 
Limit length to avoid filesystem issues - if len(result) > 100 { - result = result[:100] - } - - return result + replacer := strings.NewReplacer( + ":", "_", + "/", "_", + "\\", "_", + "?", "_", + "*", "_", + "<", "_", + ">", "_", + "|", "_", + "\"", "_", + " ", "_", + ) + + sanitized := replacer.Replace(did) + if len(sanitized) > 100 { + sanitized = sanitized[:100] + } + + return sanitized } diff --git a/control-plane/internal/handlers/ui/env.go b/control-plane/internal/handlers/ui/env.go index 558297a6..7890708e 100644 --- a/control-plane/internal/handlers/ui/env.go +++ b/control-plane/internal/handlers/ui/env.go @@ -8,17 +8,17 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" "github.com/gin-gonic/gin" ) // EnvHandler provides handlers for .env file management operations. type EnvHandler struct { - storage storage.StorageProvider - agentService interfaces.AgentService - haxenHome string + storage storage.StorageProvider + agentService interfaces.AgentService + agentfieldHome string } // DELETE /api/ui/v1/agents/:agentId/env/:key @@ -120,10 +120,10 @@ func (h *EnvHandler) DeleteEnvVarHandler(c *gin.Context) { } c.JSON(http.StatusOK, gin.H{ - "message": "variable deleted from .env file", - "agent_id": agentID, + "message": "variable deleted from .env file", + "agent_id": agentID, "package_id": packageID, - "key": key, + "key": key, }) } @@ -224,8 +224,8 @@ func (h *EnvHandler) PatchEnvHandler(c *gin.Context) { } c.JSON(http.StatusOK, gin.H{ - "message": ".env file patched successfully", - "agent_id": agentID, + "message": ".env file patched successfully", + "agent_id": agentID, "package_id": packageID, }) } @@ -241,11 +241,11 @@ type EnvResponse struct { } // NewEnvHandler creates a new EnvHandler. 
-func NewEnvHandler(storage storage.StorageProvider, agentService interfaces.AgentService, haxenHome string) *EnvHandler { +func NewEnvHandler(storage storage.StorageProvider, agentService interfaces.AgentService, agentfieldHome string) *EnvHandler { return &EnvHandler{ - storage: storage, - agentService: agentService, - haxenHome: haxenHome, + storage: storage, + agentService: agentService, + agentfieldHome: agentfieldHome, } } @@ -419,8 +419,8 @@ func (h *EnvHandler) PutEnvHandler(c *gin.Context) { } c.JSON(http.StatusOK, gin.H{ - "message": ".env file updated successfully", - "agent_id": agentID, + "message": ".env file updated successfully", + "agent_id": agentID, "package_id": packageID, }) } diff --git a/control-plane/internal/handlers/ui/execution_timeline.go b/control-plane/internal/handlers/ui/execution_timeline.go index d9e598c8..cd99fe2e 100644 --- a/control-plane/internal/handlers/ui/execution_timeline.go +++ b/control-plane/internal/handlers/ui/execution_timeline.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/ui/executions.go b/control-plane/internal/handlers/ui/executions.go index f452ef87..e809f683 100644 --- a/control-plane/internal/handlers/ui/executions.go +++ b/control-plane/internal/handlers/ui/executions.go @@ -11,10 +11,10 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + 
"github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -32,6 +32,15 @@ type ExecutionHandler struct { webhooks services.WebhookDispatcher } +func writeSSE(c *gin.Context, payload []byte) bool { + if _, err := c.Writer.WriteString("data: " + string(payload) + "\n\n"); err != nil { + logger.Logger.Warn().Err(err).Msg("failed to write SSE payload") + return false + } + c.Writer.Flush() + return true +} + // NewExecutionHandler creates a new ExecutionHandler. func NewExecutionHandler(store storage.StorageProvider, payloadStore services.PayloadStore, webhooks services.WebhookDispatcher) *ExecutionHandler { return &ExecutionHandler{ @@ -69,8 +78,9 @@ func (h *ExecutionHandler) StreamWorkflowNodeNotesHandler(c *gin.Context) { "timestamp": time.Now().Format(time.RFC3339), } if payload, err := json.Marshal(initialEvent); err == nil { - c.Writer.WriteString("data: " + string(payload) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, payload) { + return + } } ctx := c.Request.Context() @@ -87,16 +97,18 @@ func (h *ExecutionHandler) StreamWorkflowNodeNotesHandler(c *gin.Context) { "timestamp": time.Now().Format(time.RFC3339), } if payload, err := json.Marshal(heartbeat); err == nil { - c.Writer.WriteString("data: " + string(payload) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, payload) { + return + } } case event, ok := <-eventChan: if !ok { return } if payload, err := json.Marshal(event); err == nil { - c.Writer.WriteString("data: " + string(payload) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, payload) { + return + } } } } @@ -142,36 +154,36 @@ type ExecutionStatsResponse struct { // ExecutionDetailsResponse represents detailed execution information. 
type ExecutionDetailsResponse struct { - ID int64 `json:"id"` - ExecutionID string `json:"execution_id"` - WorkflowID string `json:"workflow_id"` - HaxenRequestID *string `json:"haxen_request_id,omitempty"` - SessionID *string `json:"session_id,omitempty"` - ActorID *string `json:"actor_id,omitempty"` - AgentNodeID string `json:"agent_node_id"` - ParentWorkflowID *string `json:"parent_workflow_id,omitempty"` - RootWorkflowID *string `json:"root_workflow_id,omitempty"` - WorkflowDepth *int `json:"workflow_depth,omitempty"` - ReasonerID string `json:"reasoner_id"` - InputData interface{} `json:"input_data"` - OutputData interface{} `json:"output_data"` - InputSize int `json:"input_size"` - OutputSize int `json:"output_size"` - WorkflowName *string `json:"workflow_name,omitempty"` - WorkflowTags []string `json:"workflow_tags"` - Status string `json:"status"` - StartedAt *string `json:"started_at,omitempty"` - CompletedAt *string `json:"completed_at,omitempty"` - DurationMS *int `json:"duration_ms,omitempty"` - ErrorMessage *string `json:"error_message,omitempty"` - RetryCount int `json:"retry_count"` - CreatedAt string `json:"created_at"` - UpdatedAt *string `json:"updated_at,omitempty"` - Notes []types.ExecutionNote `json:"notes"` - NotesCount int `json:"notes_count"` - LatestNote *types.ExecutionNote `json:"latest_note,omitempty"` - WebhookRegistered bool `json:"webhook_registered"` - WebhookEvents []*types.ExecutionWebhookEvent `json:"webhook_events,omitempty"` + ID int64 `json:"id"` + ExecutionID string `json:"execution_id"` + WorkflowID string `json:"workflow_id"` + AgentFieldRequestID *string `json:"agentfield_request_id,omitempty"` + SessionID *string `json:"session_id,omitempty"` + ActorID *string `json:"actor_id,omitempty"` + AgentNodeID string `json:"agent_node_id"` + ParentWorkflowID *string `json:"parent_workflow_id,omitempty"` + RootWorkflowID *string `json:"root_workflow_id,omitempty"` + WorkflowDepth *int `json:"workflow_depth,omitempty"` + ReasonerID 
string `json:"reasoner_id"` + InputData interface{} `json:"input_data"` + OutputData interface{} `json:"output_data"` + InputSize int `json:"input_size"` + OutputSize int `json:"output_size"` + WorkflowName *string `json:"workflow_name,omitempty"` + WorkflowTags []string `json:"workflow_tags"` + Status string `json:"status"` + StartedAt *string `json:"started_at,omitempty"` + CompletedAt *string `json:"completed_at,omitempty"` + DurationMS *int `json:"duration_ms,omitempty"` + ErrorMessage *string `json:"error_message,omitempty"` + RetryCount int `json:"retry_count"` + CreatedAt string `json:"created_at"` + UpdatedAt *string `json:"updated_at,omitempty"` + Notes []types.ExecutionNote `json:"notes"` + NotesCount int `json:"notes_count"` + LatestNote *types.ExecutionNote `json:"latest_note,omitempty"` + WebhookRegistered bool `json:"webhook_registered"` + WebhookEvents []*types.ExecutionWebhookEvent `json:"webhook_events,omitempty"` } type EnhancedExecution struct { @@ -619,16 +631,18 @@ func (h *ExecutionHandler) StreamExecutionEventsHandler(c *gin.Context) { "timestamp": time.Now().Format(time.RFC3339), } if payload, err := json.Marshal(heartbeat); err == nil { - c.Writer.WriteString("data: " + string(payload) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, payload) { + return + } } case event, ok := <-eventChan: if !ok { return } if payload, err := json.Marshal(event); err == nil { - c.Writer.WriteString("data: " + string(payload) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, payload) { + return + } } } } @@ -688,36 +702,36 @@ func (h *ExecutionHandler) toExecutionDetails(ctx context.Context, exec *types.E webhookEvents := exec.WebhookEvents return ExecutionDetailsResponse{ - ID: 0, - ExecutionID: exec.ExecutionID, - WorkflowID: exec.RunID, - HaxenRequestID: nil, - SessionID: exec.SessionID, - ActorID: exec.ActorID, - AgentNodeID: exec.AgentNodeID, - ParentWorkflowID: exec.ParentExecutionID, - RootWorkflowID: nil, - WorkflowDepth: nil, - ReasonerID: 
exec.ReasonerID, - InputData: inputData, - OutputData: outputData, - InputSize: inputSize, - OutputSize: outputSize, - WorkflowName: nil, - WorkflowTags: nil, - Status: types.NormalizeExecutionStatus(exec.Status), - StartedAt: startedAt, - CompletedAt: completedAt, - DurationMS: durationPtr, - ErrorMessage: exec.ErrorMessage, - RetryCount: 0, - CreatedAt: exec.StartedAt.Format(time.RFC3339), - UpdatedAt: &updated, - Notes: nil, - NotesCount: 0, - LatestNote: nil, - WebhookRegistered: webhookRegistered, - WebhookEvents: webhookEvents, + ID: 0, + ExecutionID: exec.ExecutionID, + WorkflowID: exec.RunID, + AgentFieldRequestID: nil, + SessionID: exec.SessionID, + ActorID: exec.ActorID, + AgentNodeID: exec.AgentNodeID, + ParentWorkflowID: exec.ParentExecutionID, + RootWorkflowID: nil, + WorkflowDepth: nil, + ReasonerID: exec.ReasonerID, + InputData: inputData, + OutputData: outputData, + InputSize: inputSize, + OutputSize: outputSize, + WorkflowName: nil, + WorkflowTags: nil, + Status: types.NormalizeExecutionStatus(exec.Status), + StartedAt: startedAt, + CompletedAt: completedAt, + DurationMS: durationPtr, + ErrorMessage: exec.ErrorMessage, + RetryCount: 0, + CreatedAt: exec.StartedAt.Format(time.RFC3339), + UpdatedAt: &updated, + Notes: nil, + NotesCount: 0, + LatestNote: nil, + WebhookRegistered: webhookRegistered, + WebhookEvents: webhookEvents, } } @@ -904,10 +918,6 @@ func (h *ExecutionHandler) groupExecutionSummaries(summaries []ExecutionSummary, return grouped } -func pointerString(value string) *string { - return &value -} - func formatRelativeTimeString(now, started time.Time) string { if started.IsZero() { return "" diff --git a/control-plane/internal/handlers/ui/executions_helpers_test.go b/control-plane/internal/handlers/ui/executions_helpers_test.go index ddb058f4..38b36c0a 100644 --- a/control-plane/internal/handlers/ui/executions_helpers_test.go +++ b/control-plane/internal/handlers/ui/executions_helpers_test.go @@ -8,8 +8,8 @@ import ( "io" "testing" + 
"github.com/Agent-Field/agentfield/control-plane/internal/services" "github.com/stretchr/testify/require" - "github.com/your-org/haxen/control-plane/internal/services" ) type testPayloadStore struct { diff --git a/control-plane/internal/handlers/ui/executions_test.go b/control-plane/internal/handlers/ui/executions_test.go index a49c9a2b..89aa8b63 100644 --- a/control-plane/internal/handlers/ui/executions_test.go +++ b/control-plane/internal/handlers/ui/executions_test.go @@ -14,8 +14,8 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/services" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" diff --git a/control-plane/internal/handlers/ui/lifecycle.go b/control-plane/internal/handlers/ui/lifecycle.go index 93076fe7..0a3e9f19 100644 --- a/control-plane/internal/handlers/ui/lifecycle.go +++ b/control-plane/internal/handlers/ui/lifecycle.go @@ -6,10 +6,10 @@ import ( "strconv" "strings" - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -34,7 +34,7 @@ func (h *LifecycleHandler) getAgentBaseURL(ctx context.Context, agentID string, if registeredAgent, err := h.storage.GetAgent(ctx, agentID); err == nil && registeredAgent != nil && registeredAgent.BaseURL != "" { return registeredAgent.BaseURL } - + // Fallback to localhost construction for 
locally running agents return "http://localhost:" + strconv.Itoa(port) } @@ -51,7 +51,7 @@ func (h *LifecycleHandler) buildEndpoints(ctx context.Context, agentID string, p // StartAgentRequest represents the request body for starting an agent type StartAgentRequest struct { - Port *int `json:"port,omitempty"` + Port *int `json:"port,omitempty"` Detach *bool `json:"detach,omitempty"` } @@ -180,11 +180,11 @@ func (h *LifecycleHandler) GetAgentStatusHandler(c *gin.Context) { if err != nil { // Agent not installed, return basic status response := map[string]interface{}{ - "agent_id": agentID, - "name": agentPackage.Name, - "is_running": false, - "status": "not_installed", - "message": "agent package found but not installed", + "agent_id": agentID, + "name": agentPackage.Name, + "is_running": false, + "status": "not_installed", + "message": "agent package found but not installed", } c.JSON(http.StatusOK, response) return @@ -318,14 +318,14 @@ func (h *LifecycleHandler) ReconcileAgentHandler(c *gin.Context) { // Return reconciled status response := map[string]interface{}{ - "agent_id": agentID, - "status": "reconciled", - "is_running": status.IsRunning, - "pid": status.PID, - "port": status.Port, - "last_seen": status.LastSeen, - "uptime": status.Uptime, - "message": "agent state reconciled with actual process state", + "agent_id": agentID, + "status": "reconciled", + "is_running": status.IsRunning, + "pid": status.PID, + "port": status.Port, + "last_seen": status.LastSeen, + "uptime": status.Uptime, + "message": "agent state reconciled with actual process state", } c.JSON(http.StatusOK, response) diff --git a/control-plane/internal/handlers/ui/mcp.go b/control-plane/internal/handlers/ui/mcp.go index fe160bac..b7eec9d1 100644 --- a/control-plane/internal/handlers/ui/mcp.go +++ b/control-plane/internal/handlers/ui/mcp.go @@ -4,9 +4,9 @@ import ( "net/http" "time" - "github.com/your-org/haxen/control-plane/internal/core/domain" - 
"github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/services" "github.com/gin-gonic/gin" ) @@ -205,7 +205,7 @@ func (h *MCPHandler) GetMCPStatusHandler(c *gin.Context) { totalMCPServers += summary.MCPSummary.TotalServers runningMCPServers += summary.MCPSummary.RunningServers totalTools += summary.MCPSummary.TotalTools - + // Calculate weighted average health if summary.MCPSummary.TotalServers > 0 { overallHealth = (overallHealth + summary.MCPSummary.OverallHealth) / 2 @@ -270,7 +270,7 @@ func (h *MCPHandler) GetMCPEventsHandler(c *gin.Context) { "timestamp": time.Now().Format(time.RFC3339), "message": "Connected to MCP events stream", } - + // Write SSE formatted data c.SSEvent("mcp-event", initialEvent) w.Flush() @@ -316,6 +316,6 @@ func (h *MCPHandler) GetMCPMetricsHandler(c *gin.Context) { "node_id": nodeID, "timestamp": time.Now(), } - + c.JSON(http.StatusOK, response) } diff --git a/control-plane/internal/handlers/ui/nodes.go b/control-plane/internal/handlers/ui/nodes.go index 725820d5..933d1f67 100644 --- a/control-plane/internal/handlers/ui/nodes.go +++ b/control-plane/internal/handlers/ui/nodes.go @@ -6,9 +6,9 @@ import ( "net/http" "time" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/services" "github.com/gin-gonic/gin" ) @@ -67,7 +67,7 @@ func (h *NodesHandler) StreamNodeEventsHandler(c *gin.Context) { // Generate unique subscriber ID subscriberID 
:= fmt.Sprintf("node_sse_%d_%s", time.Now().UnixNano(), c.ClientIP()) - + // Subscribe to node events using the dedicated event bus eventChan := events.GlobalNodeEventBus.Subscribe(subscriberID) defer events.GlobalNodeEventBus.Unsubscribe(subscriberID) @@ -78,15 +78,16 @@ func (h *NodesHandler) StreamNodeEventsHandler(c *gin.Context) { "message": "Node events stream connected", "timestamp": time.Now().Format(time.RFC3339), } - + if eventJSON, err := json.Marshal(initialEvent); err == nil { - c.Writer.WriteString("data: " + string(eventJSON) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, eventJSON) { + return + } } // Set up context for handling client disconnection ctx := c.Request.Context() - + // Send periodic heartbeat to keep connection alive heartbeatTicker := time.NewTicker(30 * time.Second) defer heartbeatTicker.Stop() @@ -103,13 +104,14 @@ func (h *NodesHandler) StreamNodeEventsHandler(c *gin.Context) { logger.Logger.Error().Err(err).Msg("❌ Error marshalling node event") continue } - + // Send event to client using SSE format - c.Writer.WriteString("data: " + string(eventData) + "\n\n") - c.Writer.Flush() - + if !writeSSE(c, eventData) { + return + } + logger.Logger.Debug().Msgf("πŸ“‘ Sent node event to client %s: %s", subscriberID, event.Type) - + case <-heartbeatTicker.C: // Send heartbeat to keep connection alive heartbeatEvent := map[string]interface{}{ @@ -117,10 +119,11 @@ func (h *NodesHandler) StreamNodeEventsHandler(c *gin.Context) { "timestamp": time.Now().Format(time.RFC3339), } if heartbeatJSON, err := json.Marshal(heartbeatEvent); err == nil { - c.Writer.WriteString("data: " + string(heartbeatJSON) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, heartbeatJSON) { + return + } } - + case <-ctx.Done(): // Client disconnected logger.Logger.Debug().Msgf("πŸ”Œ Node SSE client disconnected: %s", subscriberID) diff --git a/control-plane/internal/handlers/ui/packages.go b/control-plane/internal/handlers/ui/packages.go index ef6a3d53..e77931b9 100644 --- 
a/control-plane/internal/handlers/ui/packages.go +++ b/control-plane/internal/handlers/ui/packages.go @@ -6,8 +6,8 @@ import ( "net/http" "strings" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -30,30 +30,30 @@ type PackageListResponse struct { // PackageInfo represents package information in the list type PackageInfo struct { - ID string `json:"id"` - Name string `json:"name"` - Version string `json:"version"` - Status string `json:"status"` - InstallPath string `json:"install_path"` - ConfigurationRequired bool `json:"configuration_required"` - ConfigurationComplete bool `json:"configuration_complete"` - RunningNodeID string `json:"running_node_id,omitempty"` - LastStarted string `json:"last_started,omitempty"` - ProcessID int `json:"process_id,omitempty"` - Port int `json:"port,omitempty"` - Description string `json:"description"` - Author string `json:"author"` + ID string `json:"id"` + Name string `json:"name"` + Version string `json:"version"` + Status string `json:"status"` + InstallPath string `json:"install_path"` + ConfigurationRequired bool `json:"configuration_required"` + ConfigurationComplete bool `json:"configuration_complete"` + RunningNodeID string `json:"running_node_id,omitempty"` + LastStarted string `json:"last_started,omitempty"` + ProcessID int `json:"process_id,omitempty"` + Port int `json:"port,omitempty"` + Description string `json:"description"` + Author string `json:"author"` } // PackageDetailsResponse represents detailed package information type PackageDetailsResponse struct { - ID string `json:"id"` - Name string `json:"name"` - Version string `json:"version"` - Description string `json:"description"` - Author string `json:"author"` - InstallPath string `json:"install_path"` - Status string `json:"status"` 
+ ID string `json:"id"` + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + Author string `json:"author"` + InstallPath string `json:"install_path"` + Status string `json:"status"` Configuration PackageConfiguration `json:"configuration"` Capabilities *PackageCapabilities `json:"capabilities,omitempty"` Runtime *PackageRuntime `json:"runtime,omitempty"` @@ -69,17 +69,17 @@ type PackageConfiguration struct { // PackageCapabilities represents package capabilities type PackageCapabilities struct { - Reasoners []ReasonerDefinition `json:"reasoners"` - Skills []SkillDefinition `json:"skills"` - MCPServers []MCPServerDefinition `json:"mcp_servers"` + Reasoners []ReasonerDefinition `json:"reasoners"` + Skills []SkillDefinition `json:"skills"` + MCPServers []MCPServerDefinition `json:"mcp_servers"` } // ReasonerDefinition represents a reasoner definition type ReasonerDefinition struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - InputSchema map[string]interface{} `json:"input_schema"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + InputSchema map[string]interface{} `json:"input_schema"` OutputSchema map[string]interface{} `json:"output_schema,omitempty"` } @@ -141,7 +141,7 @@ func (h *PackageHandler) ListPackagesHandler(c *gin.Context) { // Check configuration status configRequired := len(pkg.ConfigurationSchema) > 0 configComplete := false - + if configRequired { // Check if configuration exists and is complete config, err := h.storage.GetAgentConfiguration(ctx, pkg.ID, pkg.ID) @@ -256,14 +256,14 @@ func (h *PackageHandler) GetPackageDetailsHandler(c *gin.Context) { func (h *PackageHandler) determinePackageStatus(ctx context.Context, pkg *types.AgentPackage) string { // Check if configuration is required configRequired := len(pkg.ConfigurationSchema) > 0 - + if configRequired { // Check configuration status config, err 
:= h.storage.GetAgentConfiguration(ctx, pkg.ID, pkg.ID) if err != nil { return "not_configured" } - + switch config.Status { case types.ConfigurationStatusDraft: return "configured" @@ -275,7 +275,7 @@ func (h *PackageHandler) determinePackageStatus(ctx context.Context, pkg *types. return "not_configured" } } - + // No configuration required // TODO: Check if agent is running // For now, return "configured" until lifecycle management is implemented @@ -285,11 +285,11 @@ func (h *PackageHandler) determinePackageStatus(ctx context.Context, pkg *types. // matchesSearch checks if a package matches the search query func (h *PackageHandler) matchesSearch(pkg *types.AgentPackage, search string) bool { search = strings.ToLower(search) - + return strings.Contains(strings.ToLower(pkg.Name), search) || - strings.Contains(strings.ToLower(h.safeStringValue(pkg.Description)), search) || - strings.Contains(strings.ToLower(h.safeStringValue(pkg.Author)), search) || - strings.Contains(strings.ToLower(pkg.ID), search) + strings.Contains(strings.ToLower(h.safeStringValue(pkg.Description)), search) || + strings.Contains(strings.ToLower(h.safeStringValue(pkg.Author)), search) || + strings.Contains(strings.ToLower(pkg.ID), search) } // safeStringValue safely converts a *string to string, returning empty string if nil diff --git a/control-plane/internal/handlers/ui/packages_test.go b/control-plane/internal/handlers/ui/packages_test.go index 954c15dd..2b290333 100644 --- a/control-plane/internal/handlers/ui/packages_test.go +++ b/control-plane/internal/handlers/ui/packages_test.go @@ -9,7 +9,7 @@ import ( "net/http/httptest" "testing" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" @@ -48,21 +48,21 @@ func TestListPackagesHandler(t *testing.T) { author := "Test Author" packages := []*types.AgentPackage{ { - ID: "test-package-1", - Name: "Test Package 1", - 
Version: "1.0.0", - Description: &description, - Author: &author, - InstallPath: "/path/to/package1", + ID: "test-package-1", + Name: "Test Package 1", + Version: "1.0.0", + Description: &description, + Author: &author, + InstallPath: "/path/to/package1", ConfigurationSchema: json.RawMessage(`{"required": {"api_key": {"type": "secret"}}}`), }, { - ID: "test-package-2", - Name: "Test Package 2", - Version: "2.0.0", - Description: nil, - Author: nil, - InstallPath: "/path/to/package2", + ID: "test-package-2", + Name: "Test Package 2", + Version: "2.0.0", + Description: nil, + Author: nil, + InstallPath: "/path/to/package2", ConfigurationSchema: json.RawMessage(`{}`), }, } diff --git a/control-plane/internal/handlers/ui/reasoners.go b/control-plane/internal/handlers/ui/reasoners.go index 70a77eb2..76ace351 100644 --- a/control-plane/internal/handlers/ui/reasoners.go +++ b/control-plane/internal/handlers/ui/reasoners.go @@ -8,9 +8,9 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) @@ -28,26 +28,26 @@ func NewReasonersHandler(storageProvider storage.StorageProvider) *ReasonersHand // ReasonerWithNode represents a reasoner with its associated node information. 
type ReasonerWithNode struct { // Reasoner identification - ReasonerID string `json:"reasoner_id"` // Format: "node_id.reasoner_id" - Name string `json:"name"` // Human-readable name - Description string `json:"description"` // Reasoner description - + ReasonerID string `json:"reasoner_id"` // Format: "node_id.reasoner_id" + Name string `json:"name"` // Human-readable name + Description string `json:"description"` // Reasoner description + // Node context - NodeID string `json:"node_id"` - NodeStatus types.HealthStatus `json:"node_status"` - NodeVersion string `json:"node_version"` - + NodeID string `json:"node_id"` + NodeStatus types.HealthStatus `json:"node_status"` + NodeVersion string `json:"node_version"` + // Reasoner details - InputSchema interface{} `json:"input_schema"` - OutputSchema interface{} `json:"output_schema"` + InputSchema interface{} `json:"input_schema"` + OutputSchema interface{} `json:"output_schema"` MemoryConfig types.MemoryConfig `json:"memory_config"` - + // Performance metrics (placeholder for future implementation) - AvgResponseTime *int `json:"avg_response_time_ms,omitempty"` - SuccessRate *float64 `json:"success_rate,omitempty"` - TotalRuns *int `json:"total_runs,omitempty"` + AvgResponseTime *int `json:"avg_response_time_ms,omitempty"` + SuccessRate *float64 `json:"success_rate,omitempty"` + TotalRuns *int `json:"total_runs,omitempty"` LastExecuted *time.Time `json:"last_executed,omitempty"` - + // Timestamps LastUpdated time.Time `json:"last_updated"` } @@ -64,23 +64,23 @@ type ReasonersResponse struct { // GetAllReasonersHandler handles requests for all reasoners across all nodes. 
func (h *ReasonersHandler) GetAllReasonersHandler(c *gin.Context) { // Parse query parameters - statusFilter := c.Query("status") // "online", "offline", "all" (default: "all") - searchTerm := c.Query("search") // Search in reasoner names/descriptions - limitStr := c.Query("limit") // Pagination limit - offsetStr := c.Query("offset") // Pagination offset - + statusFilter := c.Query("status") // "online", "offline", "all" (default: "all") + searchTerm := c.Query("search") // Search in reasoner names/descriptions + limitStr := c.Query("limit") // Pagination limit + offsetStr := c.Query("offset") // Pagination offset + // Set defaults if statusFilter == "" { statusFilter = "all" } - + limit := 50 // Default limit if limitStr != "" { if parsedLimit, err := strconv.Atoi(limitStr); err == nil && parsedLimit > 0 { limit = parsedLimit } } - + offset := 0 // Default offset if offsetStr != "" { if parsedOffset, err := strconv.Atoi(offsetStr); err == nil && parsedOffset >= 0 { @@ -94,7 +94,7 @@ func (h *ReasonersHandler) GetAllReasonersHandler(c *gin.Context) { activeStatus := types.HealthStatusActive filters.HealthStatus = &activeStatus } - + ctx := c.Request.Context() nodes, err := h.storage.ListAgents(ctx, filters) if err != nil { @@ -116,21 +116,21 @@ func (h *ReasonersHandler) GetAllReasonersHandler(c *gin.Context) { } for _, node := range nodes { - fmt.Printf(" Processing node %s with %d reasoners (status: %s)\n", + fmt.Printf(" Processing node %s with %d reasoners (status: %s)\n", node.ID, len(node.Reasoners), node.HealthStatus) - + for _, reasoner := range node.Reasoners { // Create full reasoner ID fullReasonerID := fmt.Sprintf("%s.%s", node.ID, reasoner.ID) - + // Extract name from reasoner ID (use ID as name for now) name := reasoner.ID description := fmt.Sprintf("Reasoner %s from node %s", reasoner.ID, node.ID) - + // DIAGNOSTIC LOG: Track reasoner status determination fmt.Printf("πŸ” REASONER_STATUS_DEBUG: Reasoner %s - NodeHealth: %s, NodeLifecycle: %s, 
LastHeartbeat: %s\n", fullReasonerID, node.HealthStatus, node.LifecycleStatus, node.LastHeartbeat.Format(time.RFC3339)) - + reasonerWithNode := ReasonerWithNode{ ReasonerID: fullReasonerID, Name: name, @@ -143,24 +143,24 @@ func (h *ReasonersHandler) GetAllReasonersHandler(c *gin.Context) { MemoryConfig: reasoner.MemoryConfig, LastUpdated: node.LastHeartbeat, } - + // Apply search filter if searchTerm != "" { searchLower := strings.ToLower(searchTerm) if !strings.Contains(strings.ToLower(name), searchLower) && - !strings.Contains(strings.ToLower(description), searchLower) && - !strings.Contains(strings.ToLower(reasoner.ID), searchLower) { + !strings.Contains(strings.ToLower(description), searchLower) && + !strings.Contains(strings.ToLower(reasoner.ID), searchLower) { continue } } - + // Count by status if node.HealthStatus == types.HealthStatusActive { onlineCount++ } else { offlineCount++ } - + allReasoners = append(allReasoners, reasonerWithNode) } } @@ -187,14 +187,14 @@ func (h *ReasonersHandler) GetAllReasonersHandler(c *gin.Context) { total := len(filteredReasoners) start := offset end := offset + limit - + if start > total { start = total } if end > total { end = total } - + paginatedReasoners := filteredReasoners[start:end] fmt.Printf("πŸ“‹ Returning %d reasoners (total: %d, online: %d, offline: %d) from %d nodes\n", @@ -272,11 +272,11 @@ func (h *ReasonersHandler) GetReasonerDetailsHandler(c *gin.Context) { // PerformanceMetrics represents performance data for a reasoner type PerformanceMetrics struct { - AvgResponseTimeMs int `json:"avg_response_time_ms"` - SuccessRate float64 `json:"success_rate"` - TotalExecutions int `json:"total_executions"` - ExecutionsLast24h int `json:"executions_last_24h"` - RecentExecutions []RecentExecution `json:"recent_executions"` + AvgResponseTimeMs int `json:"avg_response_time_ms"` + SuccessRate float64 `json:"success_rate"` + TotalExecutions int `json:"total_executions"` + ExecutionsLast24h int `json:"executions_last_24h"` 
+ RecentExecutions []RecentExecution `json:"recent_executions"` } // RecentExecution represents a recent execution for metrics @@ -340,7 +340,7 @@ func (h *ReasonersHandler) GetPerformanceMetricsHandler(c *gin.Context) { return } - fmt.Printf("πŸ“Š Retrieved performance metrics for reasoner %s: %d executions, %.2f%% success rate\n", + fmt.Printf("πŸ“Š Retrieved performance metrics for reasoner %s: %d executions, %.2f%% success rate\n", reasonerID, metrics.TotalExecutions, metrics.SuccessRate*100) c.JSON(http.StatusOK, metrics) @@ -357,12 +357,12 @@ func (h *ReasonersHandler) GetExecutionHistoryHandler(c *gin.Context) { // Parse pagination parameters pageStr := c.DefaultQuery("page", "1") limitStr := c.DefaultQuery("limit", "20") - + page, err := strconv.Atoi(pageStr) if err != nil || page < 1 { page = 1 } - + limit, err := strconv.Atoi(limitStr) if err != nil || limit < 1 || limit > 100 { limit = 20 @@ -384,7 +384,7 @@ func (h *ReasonersHandler) GetExecutionHistoryHandler(c *gin.Context) { return } - fmt.Printf("πŸ“‹ Retrieved execution history for reasoner %s: %d executions (page %d, limit %d)\n", + fmt.Printf("πŸ“‹ Retrieved execution history for reasoner %s: %d executions (page %d, limit %d)\n", reasonerID, len(history.Executions), page, limit) c.JSON(http.StatusOK, history) @@ -477,7 +477,7 @@ func (h *ReasonersHandler) StreamReasonerEventsHandler(c *gin.Context) { // Generate unique subscriber ID subscriberID := fmt.Sprintf("reasoner_sse_%d_%s", time.Now().UnixNano(), c.ClientIP()) - + // Subscribe to reasoner events eventChan := events.GlobalReasonerEventBus.Subscribe(subscriberID) defer events.GlobalReasonerEventBus.Unsubscribe(subscriberID) @@ -488,15 +488,16 @@ func (h *ReasonersHandler) StreamReasonerEventsHandler(c *gin.Context) { "message": "Reasoner events stream connected", "timestamp": time.Now().Format(time.RFC3339), } - + if eventJSON, err := json.Marshal(initialEvent); err == nil { - c.Writer.WriteString("data: " + string(eventJSON) + "\n\n") - 
c.Writer.Flush() + if !writeSSE(c, eventJSON) { + return + } } // Set up context for handling client disconnection ctx := c.Request.Context() - + // Send periodic heartbeat to keep connection alive heartbeatTicker := time.NewTicker(30 * time.Second) defer heartbeatTicker.Stop() @@ -516,8 +517,9 @@ func (h *ReasonersHandler) StreamReasonerEventsHandler(c *gin.Context) { "timestamp": time.Now().Format(time.RFC3339), } if heartbeatJSON, err := json.Marshal(heartbeat); err == nil { - c.Writer.WriteString("data: " + string(heartbeatJSON) + "\n\n") - c.Writer.Flush() + if !writeSSE(c, heartbeatJSON) { + return + } } case event, ok := <-eventChan: if !ok { @@ -525,11 +527,12 @@ func (h *ReasonersHandler) StreamReasonerEventsHandler(c *gin.Context) { fmt.Printf("πŸ“‘ Reasoner SSE channel closed for: %s\n", subscriberID) return } - + // Convert event to JSON and send if eventJSON, err := event.ToJSON(); err == nil { - c.Writer.WriteString("data: " + eventJSON + "\n\n") - c.Writer.Flush() + if !writeSSE(c, []byte(eventJSON)) { + return + } fmt.Printf("πŸ“€ Sent reasoner event %s to client %s\n", event.Type, subscriberID) } } diff --git a/control-plane/internal/handlers/ui/recent_activity.go b/control-plane/internal/handlers/ui/recent_activity.go index f91cedc6..c408c860 100644 --- a/control-plane/internal/handlers/ui/recent_activity.go +++ b/control-plane/internal/handlers/ui/recent_activity.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/ui/workflow_runs.go b/control-plane/internal/handlers/ui/workflow_runs.go index 85151ef4..75ae3251 
100644 --- a/control-plane/internal/handlers/ui/workflow_runs.go +++ b/control-plane/internal/handlers/ui/workflow_runs.go @@ -7,9 +7,9 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/handlers" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/handlers" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/utils.go b/control-plane/internal/handlers/utils.go index 2798b2aa..86d443cc 100644 --- a/control-plane/internal/handlers/utils.go +++ b/control-plane/internal/handlers/utils.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/your-org/haxen/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" ) // marshalDataWithLogging marshals data to JSON with proper error handling and logging @@ -13,17 +13,17 @@ func marshalDataWithLogging(data interface{}, fieldName string) ([]byte, error) logger.Logger.Debug().Msgf("πŸ” MARSHAL_DEBUG: %s is nil, returning null", fieldName) return []byte("null"), nil } - + // Log the type and content of data being marshaled logger.Logger.Debug().Msgf("πŸ” MARSHAL_DEBUG: Marshaling %s (type: %T)", fieldName, data) - + // Attempt to marshal with detailed error reporting jsonData, err := json.Marshal(data) if err != nil { logger.Logger.Error().Err(err).Msgf("❌ MARSHAL_ERROR: Failed to marshal %s (type: %T): %v", fieldName, data, data) return nil, fmt.Errorf("failed to marshal %s: %w", fieldName, err) } - + logger.Logger.Debug().Msgf("βœ… MARSHAL_SUCCESS: Successfully marshaled %s (%d bytes): %s", fieldName, len(jsonData), string(jsonData)) return jsonData, nil } diff --git a/control-plane/internal/handlers/workflow_cleanup.go b/control-plane/internal/handlers/workflow_cleanup.go 
index 060165ea..d9fec12c 100644 --- a/control-plane/internal/handlers/workflow_cleanup.go +++ b/control-plane/internal/handlers/workflow_cleanup.go @@ -4,7 +4,7 @@ import ( "net/http" "strings" - "github.com/your-org/haxen/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/handlers/workflow_cleanup_test.go b/control-plane/internal/handlers/workflow_cleanup_test.go index f46f6630..5f3a9b0c 100644 --- a/control-plane/internal/handlers/workflow_cleanup_test.go +++ b/control-plane/internal/handlers/workflow_cleanup_test.go @@ -9,7 +9,7 @@ import ( "net/http/httptest" "testing" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" "github.com/stretchr/testify/assert" diff --git a/control-plane/internal/handlers/workflow_dag.go b/control-plane/internal/handlers/workflow_dag.go index 7b154c1b..be324894 100644 --- a/control-plane/internal/handlers/workflow_dag.go +++ b/control-plane/internal/handlers/workflow_dag.go @@ -8,8 +8,8 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/gin-gonic/gin" ) diff --git a/control-plane/internal/infrastructure/communication/agent_client.go b/control-plane/internal/infrastructure/communication/agent_client.go index 68b4340b..8a2019e6 100644 --- a/control-plane/internal/infrastructure/communication/agent_client.go +++ b/control-plane/internal/infrastructure/communication/agent_client.go @@ -10,8 +10,8 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/storage" + 
"github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" ) // HTTPAgentClient implements the AgentClient interface using HTTP communication @@ -19,7 +19,7 @@ type HTTPAgentClient struct { httpClient *http.Client storage storage.StorageProvider timeout time.Duration - + // Cache for MCP health data (30-second TTL) cache map[string]*CachedMCPHealth cacheMutex sync.RWMutex @@ -72,7 +72,7 @@ func (c *HTTPAgentClient) GetMCPHealth(ctx context.Context, nodeID string) (*int // Set headers req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Haxen-Server/1.0") + req.Header.Set("User-Agent", "AgentField-Server/1.0") // Make the request resp, err := c.httpClient.Do(req) @@ -87,10 +87,10 @@ func (c *HTTPAgentClient) GetMCPHealth(ctx context.Context, nodeID string) (*int return &interfaces.MCPHealthResponse{ Servers: []interfaces.MCPServerHealth{}, Summary: interfaces.MCPSummary{ - TotalServers: 0, + TotalServers: 0, RunningServers: 0, - TotalTools: 0, - OverallHealth: 1.0, // Consider healthy if no MCP servers + TotalTools: 0, + OverallHealth: 1.0, // Consider healthy if no MCP servers }, }, nil } @@ -130,7 +130,7 @@ func (c *HTTPAgentClient) RestartMCPServer(ctx context.Context, nodeID, alias st // Set headers req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Haxen-Server/1.0") + req.Header.Set("User-Agent", "AgentField-Server/1.0") // Make the request resp, err := c.httpClient.Do(req) @@ -183,7 +183,7 @@ func (c *HTTPAgentClient) GetMCPTools(ctx context.Context, nodeID, alias string) // Set headers req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Haxen-Server/1.0") + req.Header.Set("User-Agent", "AgentField-Server/1.0") // Make the request resp, err := c.httpClient.Do(req) @@ -241,7 +241,7 @@ func (c *HTTPAgentClient) ShutdownAgent(ctx context.Context, nodeID string, grac // Set headers 
req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Haxen-Server/1.0") + req.Header.Set("User-Agent", "AgentField-Server/1.0") // Make the request resp, err := c.httpClient.Do(req) @@ -275,7 +275,7 @@ func (c *HTTPAgentClient) GetAgentStatus(ctx context.Context, nodeID string) (*i if err != nil { return nil, fmt.Errorf("failed to get agent node %s: %w", nodeID, err) } - + // Check for nil agent (can happen when database returns no error but also no rows) if agent == nil { return nil, fmt.Errorf("agent node %s not found in storage", nodeID) @@ -287,11 +287,11 @@ func (c *HTTPAgentClient) GetAgentStatus(ctx context.Context, nodeID string) (*i // Implement retry logic (1 retry for transient network failures) maxRetries := 1 var lastErr error - + for attempt := 0; attempt <= maxRetries; attempt++ { // Create timeout context for each attempt (2-3 seconds) timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Second) - + // Create HTTP request with timeout context req, err := http.NewRequestWithContext(timeoutCtx, "GET", statusURL, nil) if err != nil { @@ -301,12 +301,12 @@ func (c *HTTPAgentClient) GetAgentStatus(ctx context.Context, nodeID string) (*i // Set headers req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Haxen-Server/1.0") + req.Header.Set("User-Agent", "AgentField-Server/1.0") // Make the request resp, err := c.httpClient.Do(req) cancel() // Always cancel the timeout context - + if err != nil { lastErr = err // Check if this is a transient network error that might benefit from retry @@ -337,7 +337,7 @@ func (c *HTTPAgentClient) GetAgentStatus(ctx context.Context, nodeID string) (*i return &statusResponse, nil } - + // All retries exhausted return nil, fmt.Errorf("failed after %d retries, last error: %w", maxRetries+1, lastErr) } @@ -401,8 +401,8 @@ func (c *HTTPAgentClient) GetCacheStats() map[string]interface{} { for nodeID, cached := range c.cache { entry := map[string]interface{}{ - 
"node_id": nodeID, - "timestamp": cached.Timestamp, + "node_id": nodeID, + "timestamp": cached.Timestamp, "age_seconds": time.Since(cached.Timestamp).Seconds(), } stats["entries"] = append(stats["entries"].([]map[string]interface{}), entry) @@ -430,7 +430,7 @@ func isRetryableError(err error) bool { if err == nil { return false } - + errStr := err.Error() // Common transient errors that might benefit from retry transientErrors := []string{ @@ -440,12 +440,12 @@ func isRetryableError(err error) bool { "temporary failure", "network is unreachable", } - + for _, transient := range transientErrors { if strings.Contains(strings.ToLower(errStr), transient) { return true } } - + return false -} \ No newline at end of file +} diff --git a/control-plane/internal/infrastructure/process/manager.go b/control-plane/internal/infrastructure/process/manager.go index 6213a366..b2e9618e 100644 --- a/control-plane/internal/infrastructure/process/manager.go +++ b/control-plane/internal/infrastructure/process/manager.go @@ -7,7 +7,7 @@ import ( "path/filepath" "syscall" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" ) // DefaultProcessManager provides a default implementation for managing system processes. 
@@ -101,11 +101,8 @@ func (pm *DefaultProcessManager) Stop(pid int) error { } // Wait for the process to actually terminate - _, waitErr := cmd.Process.Wait() - if waitErr != nil { - // Process might have already exited, which is fine - // We'll still clean up our tracking - } + // Ignore errors as process might have already exited + _, _ = cmd.Process.Wait() // Clean up tracking delete(pm.runningProcesses, pid) @@ -127,18 +124,16 @@ func (pm *DefaultProcessManager) Status(pid int) (interfaces.ProcessInfo, error) } // Determine status - status := "unknown" + status := "stopped" if cmd.Process != nil { // Check if process is still running by sending signal 0 if err := cmd.Process.Signal(syscall.Signal(0)); err == nil { status = "running" } else { - status = "stopped" // Clean up if process is no longer running delete(pm.runningProcesses, pid) } } else { - status = "stopped" delete(pm.runningProcesses, pid) } diff --git a/control-plane/internal/infrastructure/process/port_manager.go b/control-plane/internal/infrastructure/process/port_manager.go index efbc0c58..230fc556 100644 --- a/control-plane/internal/infrastructure/process/port_manager.go +++ b/control-plane/internal/infrastructure/process/port_manager.go @@ -1,11 +1,11 @@ -// haxen/internal/infrastructure/process/port_manager.go +// agentfield/internal/infrastructure/process/port_manager.go package process import ( "fmt" "net" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" ) // DefaultPortManager provides a default implementation for managing network ports. 
diff --git a/control-plane/internal/infrastructure/storage/config.go b/control-plane/internal/infrastructure/storage/config.go index 1d71cee1..78318e8e 100644 --- a/control-plane/internal/infrastructure/storage/config.go +++ b/control-plane/internal/infrastructure/storage/config.go @@ -1,55 +1,56 @@ -// haxen/internal/infrastructure/storage/config.go +// agentfield/internal/infrastructure/storage/config.go package storage import ( - "os" - "path/filepath" - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "gopkg.in/yaml.v3" + "os" + "path/filepath" + + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "gopkg.in/yaml.v3" ) type LocalConfigStorage struct { - fs interfaces.FileSystemAdapter + fs interfaces.FileSystemAdapter } func NewLocalConfigStorage(fs interfaces.FileSystemAdapter) interfaces.ConfigStorage { - return &LocalConfigStorage{fs: fs} + return &LocalConfigStorage{fs: fs} } -func (s *LocalConfigStorage) LoadHaxenConfig(path string) (*domain.HaxenConfig, error) { - if !s.fs.Exists(path) { - return &domain.HaxenConfig{ - HomeDir: filepath.Dir(path), - Environment: make(map[string]string), - MCP: domain.MCPConfig{ - Servers: []domain.MCPServer{}, - }, - }, nil - } - - data, err := s.fs.ReadFile(path) - if err != nil { - return nil, err - } - - var config domain.HaxenConfig - if err := yaml.Unmarshal(data, &config); err != nil { - return nil, err - } - - return &config, nil +func (s *LocalConfigStorage) LoadAgentFieldConfig(path string) (*domain.AgentFieldConfig, error) { + if !s.fs.Exists(path) { + return &domain.AgentFieldConfig{ + HomeDir: filepath.Dir(path), + Environment: make(map[string]string), + MCP: domain.MCPConfig{ + Servers: []domain.MCPServer{}, + }, + }, nil + } + + data, err := s.fs.ReadFile(path) + if err != nil { + return nil, err + } + + var config 
domain.AgentFieldConfig + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + + return &config, nil } -func (s *LocalConfigStorage) SaveHaxenConfig(path string, config *domain.HaxenConfig) error { - data, err := yaml.Marshal(config) - if err != nil { - return err - } +func (s *LocalConfigStorage) SaveAgentFieldConfig(path string, config *domain.AgentFieldConfig) error { + data, err := yaml.Marshal(config) + if err != nil { + return err + } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } - return s.fs.WriteFile(path, data) + return s.fs.WriteFile(path, data) } diff --git a/control-plane/internal/infrastructure/storage/filesystem.go b/control-plane/internal/infrastructure/storage/filesystem.go index 9c4cf9d3..adea2b22 100644 --- a/control-plane/internal/infrastructure/storage/filesystem.go +++ b/control-plane/internal/infrastructure/storage/filesystem.go @@ -1,43 +1,44 @@ -// haxen/internal/infrastructure/storage/filesystem.go +// agentfield/internal/infrastructure/storage/filesystem.go package storage import ( - "os" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" + "os" + + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" ) type DefaultFileSystemAdapter struct{} func NewFileSystemAdapter() interfaces.FileSystemAdapter { - return &DefaultFileSystemAdapter{} + return &DefaultFileSystemAdapter{} } func (fs *DefaultFileSystemAdapter) ReadFile(path string) ([]byte, error) { - return os.ReadFile(path) + return os.ReadFile(path) } func (fs *DefaultFileSystemAdapter) WriteFile(path string, data []byte) error { - return os.WriteFile(path, data, 0644) + return os.WriteFile(path, data, 0644) } func (fs *DefaultFileSystemAdapter) Exists(path string) bool { - _, err := os.Stat(path) - return err == nil + _, err := os.Stat(path) + return err == nil } func (fs *DefaultFileSystemAdapter) 
CreateDirectory(path string) error { - return os.MkdirAll(path, 0755) + return os.MkdirAll(path, 0755) } func (fs *DefaultFileSystemAdapter) ListDirectory(path string) ([]string, error) { - entries, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - var names []string - for _, entry := range entries { - names = append(names, entry.Name()) - } - return names, nil + entries, err := os.ReadDir(path) + if err != nil { + return nil, err + } + + var names []string + for _, entry := range entries { + names = append(names, entry.Name()) + } + return names, nil } diff --git a/control-plane/internal/infrastructure/storage/registry.go b/control-plane/internal/infrastructure/storage/registry.go index 0fc8ef88..98776753 100644 --- a/control-plane/internal/infrastructure/storage/registry.go +++ b/control-plane/internal/infrastructure/storage/registry.go @@ -1,79 +1,80 @@ -// haxen/internal/infrastructure/storage/registry.go +// agentfield/internal/infrastructure/storage/registry.go package storage import ( - "encoding/json" - "os" - "path/filepath" - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" + "encoding/json" + "os" + "path/filepath" + + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" ) type LocalRegistryStorage struct { - fs interfaces.FileSystemAdapter - storePath string + fs interfaces.FileSystemAdapter + storePath string } func NewLocalRegistryStorage(fs interfaces.FileSystemAdapter, path string) interfaces.RegistryStorage { - return &LocalRegistryStorage{ - fs: fs, - storePath: path, - } + return &LocalRegistryStorage{ + fs: fs, + storePath: path, + } } func (s *LocalRegistryStorage) LoadRegistry() (*domain.InstallationRegistry, error) { - if !s.fs.Exists(s.storePath) { - return &domain.InstallationRegistry{ - Installed: make(map[string]domain.InstalledPackage), - }, nil 
- } - - data, err := s.fs.ReadFile(s.storePath) - if err != nil { - return nil, err - } - - var registry domain.InstallationRegistry - if err := json.Unmarshal(data, ®istry); err != nil { - return nil, err - } - - return ®istry, nil + if !s.fs.Exists(s.storePath) { + return &domain.InstallationRegistry{ + Installed: make(map[string]domain.InstalledPackage), + }, nil + } + + data, err := s.fs.ReadFile(s.storePath) + if err != nil { + return nil, err + } + + var registry domain.InstallationRegistry + if err := json.Unmarshal(data, ®istry); err != nil { + return nil, err + } + + return ®istry, nil } func (s *LocalRegistryStorage) SaveRegistry(registry *domain.InstallationRegistry) error { - data, err := json.MarshalIndent(registry, "", " ") - if err != nil { - return err - } + data, err := json.MarshalIndent(registry, "", " ") + if err != nil { + return err + } - if err := os.MkdirAll(filepath.Dir(s.storePath), 0755); err != nil { - return err - } + if err := os.MkdirAll(filepath.Dir(s.storePath), 0755); err != nil { + return err + } - return s.fs.WriteFile(s.storePath, data) + return s.fs.WriteFile(s.storePath, data) } func (s *LocalRegistryStorage) GetPackage(name string) (*domain.InstalledPackage, error) { - registry, err := s.LoadRegistry() - if err != nil { - return nil, err - } + registry, err := s.LoadRegistry() + if err != nil { + return nil, err + } - pkg, exists := registry.Installed[name] - if !exists { - return nil, os.ErrNotExist - } + pkg, exists := registry.Installed[name] + if !exists { + return nil, os.ErrNotExist + } - return &pkg, nil + return &pkg, nil } func (s *LocalRegistryStorage) SavePackage(name string, pkg *domain.InstalledPackage) error { - registry, err := s.LoadRegistry() - if err != nil { - return err - } + registry, err := s.LoadRegistry() + if err != nil { + return err + } - registry.Installed[name] = *pkg - return s.SaveRegistry(registry) + registry.Installed[name] = *pkg + return s.SaveRegistry(registry) } diff --git 
a/control-plane/internal/logger/helpers.go b/control-plane/internal/logger/helpers.go index f85805bd..a5769c3d 100644 --- a/control-plane/internal/logger/helpers.go +++ b/control-plane/internal/logger/helpers.go @@ -28,4 +28,4 @@ func Errorf(format string, args ...interface{}) { // Successf logs a success-level message (info with a checkmark). func Successf(format string, args ...interface{}) { Logger.Info().Msg("βœ… " + fmt.Sprintf(format, args...)) -} \ No newline at end of file +} diff --git a/control-plane/internal/logger/logger.go b/control-plane/internal/logger/logger.go index c2704a8b..6054e209 100644 --- a/control-plane/internal/logger/logger.go +++ b/control-plane/internal/logger/logger.go @@ -1,4 +1,4 @@ -// Package logger provides a global zerolog logger for the Haxen CLI. +// Package logger provides a global zerolog logger for the AgentField CLI. package logger import ( @@ -19,4 +19,4 @@ func InitLogger(verbose bool) { level = zerolog.DebugLevel } Logger = zerolog.New(os.Stderr).With().Timestamp().Logger().Level(level) -} \ No newline at end of file +} diff --git a/control-plane/internal/mcp/capability_discovery.go b/control-plane/internal/mcp/capability_discovery.go index 6b1c28db..eec9b793 100644 --- a/control-plane/internal/mcp/capability_discovery.go +++ b/control-plane/internal/mcp/capability_discovery.go @@ -4,6 +4,7 @@ import ( "bufio" "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -13,21 +14,20 @@ import ( "syscall" "time" - "github.com/your-org/haxen/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/config" ) // MCPCapability represents a discovered MCP server capability type MCPCapability struct { - ServerAlias string `json:"server_alias"` - ServerName string `json:"server_name"` - Version string `json:"version"` - Tools []MCPTool `json:"tools"` + ServerAlias string `json:"server_alias"` + ServerName string `json:"server_name"` + Version string `json:"version"` + Tools []MCPTool `json:"tools"` 
Resources []MCPResource `json:"resources"` - Endpoint string `json:"endpoint"` - Transport string `json:"transport"` + Endpoint string `json:"endpoint"` + Transport string `json:"transport"` } - // CapabilityDiscovery handles MCP server capability discovery type CapabilityDiscovery struct { projectPath string @@ -81,49 +81,49 @@ func (cd *CapabilityDiscovery) DiscoverCapabilities() ([]MCPCapability, error) { func (cd *CapabilityDiscovery) migrateOldFormat(serverDir string) error { oldPath := filepath.Join(serverDir, "mcp.json") newPath := filepath.Join(serverDir, "config.json") - + // Check if new format already exists if _, err := os.Stat(newPath); err == nil { return nil // Already migrated } - + // Read old format oldData, err := os.ReadFile(oldPath) if err != nil { return fmt.Errorf("failed to read mcp.json: %w", err) } - + var oldFormat map[string]interface{} if err := json.Unmarshal(oldData, &oldFormat); err != nil { return fmt.Errorf("failed to parse mcp.json: %w", err) } - + // Convert to new format newConfig := MCPServerConfig{} - + if alias, ok := oldFormat["alias"].(string); ok { newConfig.Alias = alias } - + if startCmd, ok := oldFormat["start_command"].(string); ok { newConfig.RunCmd = startCmd } - + if source, ok := oldFormat["source"].(string); ok { // If source looks like a URL, use it as URL, otherwise as run command if strings.HasPrefix(source, "http") { newConfig.URL = source } } - + if version, ok := oldFormat["version"].(string); ok { newConfig.Version = version } - + if healthCheck, ok := oldFormat["health_check"].(string); ok { newConfig.HealthCheck = healthCheck } - + // Convert environment variables if env, ok := oldFormat["env"].(map[string]interface{}); ok { newConfig.Env = make(map[string]string) @@ -133,12 +133,12 @@ func (cd *CapabilityDiscovery) migrateOldFormat(serverDir string) error { } } } - + // Convert port if present if port, ok := oldFormat["port"].(float64); ok { newConfig.Port = int(port) } - + // Convert install commands to 
setup commands if installCmds, ok := oldFormat["install_commands"].([]interface{}); ok { for _, cmd := range installCmds { @@ -147,22 +147,22 @@ func (cd *CapabilityDiscovery) migrateOldFormat(serverDir string) error { } } } - + // Save in new format newData, err := json.MarshalIndent(newConfig, "", " ") if err != nil { return fmt.Errorf("failed to marshal new config: %w", err) } - + if err := os.WriteFile(newPath, newData, 0644); err != nil { return fmt.Errorf("failed to write config.json: %w", err) } - + // Remove old file if err := os.Remove(oldPath); err != nil { fmt.Printf("Warning: failed to remove old mcp.json: %v\n", err) } - + fmt.Printf("Migrated %s from mcp.json to config.json format\n", filepath.Base(serverDir)) return nil } @@ -170,7 +170,7 @@ func (cd *CapabilityDiscovery) migrateOldFormat(serverDir string) error { // discoverServerCapability discovers capabilities for a specific MCP server func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MCPCapability, error) { serverDir := filepath.Join(cd.projectPath, "packages", "mcp", serverAlias) - + // Try migration first if config.json doesn't exist metadataPath := filepath.Join(serverDir, "config.json") if _, err := os.Stat(metadataPath); os.IsNotExist(err) { @@ -178,11 +178,11 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC return nil, fmt.Errorf("failed to migrate old format: %w", err) } } - + // Read config.json metadata file metadataBytes, err := os.ReadFile(metadataPath) if err != nil { - return nil, fmt.Errorf("failed to read config.json (try running: haxen mcp migrate %s): %w", serverAlias, err) + return nil, fmt.Errorf("failed to read config.json (try running: af mcp migrate %s): %w", serverAlias, err) } var metadata MCPServerConfig @@ -190,7 +190,6 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC return nil, fmt.Errorf("failed to parse metadata: %w", err) } - // Initialize capability structure 
capability := &MCPCapability{ ServerAlias: serverAlias, @@ -214,7 +213,7 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC capabilitiesPath := filepath.Join(serverDir, "capabilities.json") if capabilitiesBytes, err := os.ReadFile(capabilitiesPath); err == nil { var cachedCapabilities struct { - Tools []struct { + Tools []struct { Name string `json:"name"` Description string `json:"description"` InputSchema map[string]interface{} `json:"inputSchema"` @@ -226,7 +225,7 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC MimeType string `json:"mimeType,omitempty"` } `json:"resources"` } - + if err := json.Unmarshal(capabilitiesBytes, &cachedCapabilities); err == nil { // Convert cached tools to MCPTool format capability.Tools = make([]MCPTool, len(cachedCapabilities.Tools)) @@ -237,7 +236,7 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC InputSchema: tool.InputSchema, } } - + // Convert cached resources to MCPResource format capability.Resources = make([]MCPResource, len(cachedCapabilities.Resources)) for i, resource := range cachedCapabilities.Resources { @@ -248,7 +247,7 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC MimeType: resource.MimeType, } } - + // If we have cached capabilities, return them if len(capability.Tools) > 0 || len(capability.Resources) > 0 { return capability, nil @@ -261,13 +260,13 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC if err != nil { // Create a structured error for better error reporting discoveryErr := CapabilityDiscoveryError(serverAlias, "live capability discovery failed", err) - + // Log the detailed error information fmt.Printf("Warning: Failed to discover live capabilities for %s: %v\n", serverAlias, discoveryErr.Error()) fmt.Printf("Detailed error: %s\n", discoveryErr.DetailedError()) fmt.Printf("Suggestion: %s\n", discoveryErr.GetSuggestion()) 
fmt.Printf("Falling back to static analysis for %s...\n", serverAlias) - + // Try static analysis as fallback staticTools, staticResources, staticErr := cd.discoverFromStaticAnalysis(filepath.Join(cd.projectPath, "packages", "mcp", serverAlias), metadata) if staticErr != nil { @@ -276,7 +275,7 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC fmt.Printf("Detailed error: %s\n", staticAnalysisErr.DetailedError()) return capability, nil } - + // Use static analysis results liveTools = staticTools liveResources = staticResources @@ -294,7 +293,7 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC } // Update config file with detected transport type - if capability != nil && capability.Transport != "" { + if capability.Transport != "" { if err := cd.updateConfigWithTransport(serverAlias, capability.Transport); err != nil { fmt.Printf("Warning: failed to update transport in config for %s: %v\n", serverAlias, err) } @@ -306,21 +305,21 @@ func (cd *CapabilityDiscovery) discoverServerCapability(serverAlias string) (*MC // CacheCapabilities saves discovered capabilities to cache func (cd *CapabilityDiscovery) CacheCapabilities(serverAlias string, tools []MCPTool, resources []MCPResource) error { serverDir := filepath.Join(cd.projectPath, "packages", "mcp", serverAlias) - + // Create a structure that matches our expected output format type CachedTool struct { Name string `json:"name"` Description string `json:"description"` InputSchema map[string]interface{} `json:"inputSchema"` } - + type CachedResource struct { URI string `json:"uri"` Name string `json:"name"` Description string `json:"description"` MimeType string `json:"mimeType,omitempty"` } - + // Convert tools to cached format cachedTools := make([]CachedTool, len(tools)) for i, tool := range tools { @@ -330,18 +329,13 @@ func (cd *CapabilityDiscovery) CacheCapabilities(serverAlias string, tools []MCP InputSchema: tool.InputSchema, } } - + // Convert 
resources to cached format cachedResources := make([]CachedResource, len(resources)) for i, resource := range resources { - cachedResources[i] = CachedResource{ - URI: resource.URI, - Name: resource.Name, - Description: resource.Description, - MimeType: resource.MimeType, - } + cachedResources[i] = CachedResource(resource) } - + capabilities := struct { Tools []CachedTool `json:"tools"` Resources []CachedResource `json:"resources"` @@ -388,7 +382,7 @@ func (cd *CapabilityDiscovery) RefreshCapabilities() error { // discoverLiveCapabilities attempts to discover capabilities by running the MCP server func (cd *CapabilityDiscovery) discoverLiveCapabilities(serverAlias string, metadata MCPServerConfig) ([]MCPTool, []MCPResource, error) { fmt.Printf("Attempting live capability discovery for %s...\n", serverAlias) - + if metadata.URL != "" { // Remote MCP server - discover from URL return cd.discoverFromURL(metadata.URL) @@ -396,7 +390,7 @@ func (cd *CapabilityDiscovery) discoverLiveCapabilities(serverAlias string, meta // Local MCP server - start temporarily and discover return cd.discoverFromLocalProcess(serverAlias, metadata) } - + // No URL or run command - fall back to static analysis fmt.Printf("No URL or run command for %s, using static analysis\n", serverAlias) return cd.discoverFromStaticAnalysis(filepath.Join(cd.projectPath, "packages", "mcp", serverAlias), metadata) @@ -405,41 +399,41 @@ func (cd *CapabilityDiscovery) discoverLiveCapabilities(serverAlias string, meta // discoverFromURL discovers capabilities from a remote MCP server URL func (cd *CapabilityDiscovery) discoverFromURL(url string) ([]MCPTool, []MCPResource, error) { fmt.Printf("Discovering capabilities from URL: %s\n", url) - + // TODO: Implement actual HTTP/WebSocket connection to MCP server // For now, return empty capabilities // This would involve: // 1. Connect to the MCP server at the URL // 2. Send MCP protocol messages to list tools and resources // 3. 
Parse the responses - + return []MCPTool{}, []MCPResource{}, fmt.Errorf("URL-based discovery not yet implemented") } // discoverFromLocalProcess discovers capabilities by temporarily starting a local MCP server func (cd *CapabilityDiscovery) discoverFromLocalProcess(serverAlias string, metadata MCPServerConfig) ([]MCPTool, []MCPResource, error) { fmt.Printf("Discovering capabilities from local process for %s\n", serverAlias) - + // Try stdio discovery first (most common for local MCP servers) tools, resources, err := cd.tryStdioDiscovery(serverAlias, metadata) if err == nil { fmt.Printf("Stdio discovery successful for %s: found %d tools, %d resources\n", serverAlias, len(tools), len(resources)) return tools, resources, nil } - + fmt.Printf("Stdio discovery failed for %s: %v\n", serverAlias, err) fmt.Printf("Trying HTTP discovery as fallback for %s...\n", serverAlias) - + // Fallback to HTTP discovery tools, resources, err = cd.tryHTTPDiscovery(serverAlias, metadata) if err == nil { fmt.Printf("HTTP discovery successful for %s: found %d tools, %d resources\n", serverAlias, len(tools), len(resources)) return tools, resources, nil } - + fmt.Printf("Both stdio and HTTP discovery failed for %s: %v\n", serverAlias, err) fmt.Printf("Falling back to static analysis for %s...\n", serverAlias) - + // Fall back to static analysis return cd.discoverFromStaticAnalysis(filepath.Join(cd.projectPath, "packages", "mcp", serverAlias), metadata) } @@ -447,21 +441,21 @@ func (cd *CapabilityDiscovery) discoverFromLocalProcess(serverAlias string, meta // tryStdioDiscovery attempts to discover capabilities using stdio transport with timeout func (cd *CapabilityDiscovery) tryStdioDiscovery(serverAlias string, metadata MCPServerConfig) ([]MCPTool, []MCPResource, error) { fmt.Printf("Attempting stdio discovery for %s\n", serverAlias) - + // Create context with timeout for entire discovery operation (60 seconds) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer 
cancel() - + // Create template processor template := NewTemplateProcessor(cd.projectPath, false) // Non-verbose for discovery - + // Process template variables in the run command vars := template.CreateTemplateVars(metadata, 0) // Port 0 for stdio-based servers processedCmd, err := template.ProcessCommand(metadata.RunCmd, vars) if err != nil { return nil, nil, fmt.Errorf("failed to process run command template: %w", err) } - + // Set working directory workingDir := vars.ServerDir if metadata.WorkingDir != "" { @@ -471,43 +465,43 @@ func (cd *CapabilityDiscovery) tryStdioDiscovery(serverAlias string, metadata MC } workingDir = processedWorkingDir } - + // Create command with context timeout cmd := exec.CommandContext(ctx, "sh", "-c", processedCmd) cmd.Dir = workingDir - + // Set environment variables if len(metadata.Env) > 0 { processedEnv, err := template.ProcessEnvironment(metadata.Env, vars) if err != nil { return nil, nil, fmt.Errorf("failed to process environment variables: %w", err) } - + cmd.Env = append(cmd.Env, os.Environ()...) 
for key, value := range processedEnv { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) } } - + // Create pipes before starting the process stdin, err := cmd.StdinPipe() if err != nil { return nil, nil, fmt.Errorf("failed to create stdin pipe: %w", err) } - + stdout, err := cmd.StdoutPipe() if err != nil { stdin.Close() return nil, nil, fmt.Errorf("failed to create stdout pipe: %w", err) } - + stderr, err := cmd.StderrPipe() if err != nil { stdin.Close() stdout.Close() return nil, nil, fmt.Errorf("failed to create stderr pipe: %w", err) } - + // Start the process if err := cmd.Start(); err != nil { stdin.Close() @@ -515,30 +509,38 @@ func (cd *CapabilityDiscovery) tryStdioDiscovery(serverAlias string, metadata MC stderr.Close() return nil, nil, fmt.Errorf("failed to start MCP server: %w", err) } - + // Ensure proper cleanup of all resources defer func() { stdin.Close() stdout.Close() stderr.Close() - + if cmd.Process != nil { + forceKill := func(reason string) { + if killErr := cmd.Process.Kill(); killErr != nil && !errors.Is(killErr, os.ErrProcessDone) { + fmt.Printf("⚠️ Failed to force kill MCP server (%s): %v\n", reason, killErr) + } + } + // Try graceful shutdown first if err := cmd.Process.Signal(syscall.SIGTERM); err != nil { // Force kill if graceful shutdown fails - cmd.Process.Kill() + forceKill("sigterm") } else { // Wait briefly for graceful shutdown time.Sleep(1 * time.Second) // Check if still running and force kill if needed if cmd.ProcessState == nil || !cmd.ProcessState.Exited() { - cmd.Process.Kill() + forceKill("graceful timeout") } } - cmd.Wait() // Wait for cleanup + if waitErr := cmd.Wait(); waitErr != nil && !errors.Is(waitErr, os.ErrProcessDone) { + fmt.Printf("⚠️ MCP server wait returned error: %v\n", waitErr) + } } }() - + // Monitor stderr for debugging in a separate goroutine var stderrOutput strings.Builder go func() { @@ -549,7 +551,7 @@ func (cd *CapabilityDiscovery) tryStdioDiscovery(serverAlias string, metadata MC 
fmt.Printf("MCP stderr [%s]: %s\n", serverAlias, line) } }() - + // Wait a moment for the server to start select { case <-time.After(2 * time.Second): @@ -557,7 +559,7 @@ func (cd *CapabilityDiscovery) tryStdioDiscovery(serverAlias string, metadata MC case <-ctx.Done(): return nil, nil, fmt.Errorf("timeout waiting for server to start: %w", ctx.Err()) } - + // Perform discovery with pipes using timeout-aware implementation tools, resources, err := cd.performDiscoveryWithPipes(ctx, stdin, stdout, serverAlias) if err != nil { @@ -568,7 +570,7 @@ func (cd *CapabilityDiscovery) tryStdioDiscovery(serverAlias string, metadata MC } return nil, nil, fmt.Errorf("stdio discovery failed: %w", err) } - + return tools, resources, nil } @@ -577,15 +579,15 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st // Create JSON encoder/decoder for line-buffered communication encoder := json.NewEncoder(stdin) scanner := bufio.NewScanner(stdout) - + // Helper function to read JSON response with timeout readJSONResponse := func(timeout time.Duration) (*MCPResponse, error) { responseCtx, responseCancel := context.WithTimeout(ctx, timeout) defer responseCancel() - + responseChan := make(chan *MCPResponse, 1) errorChan := make(chan error, 1) - + go func() { if scanner.Scan() { var response MCPResponse @@ -602,7 +604,7 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st } } }() - + select { case response := <-responseChan: return response, nil @@ -612,7 +614,7 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st return nil, fmt.Errorf("timeout waiting for response: %w", responseCtx.Err()) } } - + // Step 1: Send initialize request (15 second timeout) fmt.Printf("Sending initialize request to %s...\n", serverAlias) initRequest := MCPRequest{ @@ -627,39 +629,39 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st }, }, ClientInfo: ClientInfo{ - Name: "haxen-mcp-client", + 
Name: "agentfield-mcp-client", Version: "1.0.0", }, }, } - + if err := encoder.Encode(initRequest); err != nil { return nil, nil, fmt.Errorf("failed to send initialize request: %w", err) } - + // Read initialize response initResponse, err := readJSONResponse(15 * time.Second) if err != nil { return nil, nil, fmt.Errorf("failed to read initialize response: %w", err) } - + if initResponse.Error != nil { return nil, nil, fmt.Errorf("initialize failed: %s", initResponse.Error.Message) } - + fmt.Printf("Initialize successful for %s, sending initialized notification...\n", serverAlias) - + // Step 2: Send initialized notification (must be a notification, not a request - no ID field) initializedNotification := MCPNotification{ JSONRPC: "2.0", Method: "notifications/initialized", Params: map[string]interface{}{}, } - + if err := encoder.Encode(initializedNotification); err != nil { return nil, nil, fmt.Errorf("failed to send initialized notification: %w", err) } - + // Step 3: Request tools list (10 second timeout) fmt.Printf("Requesting tools list from %s...\n", serverAlias) toolsRequest := MCPRequest{ @@ -667,29 +669,29 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st ID: 2, Method: "tools/list", } - + if err := encoder.Encode(toolsRequest); err != nil { return nil, nil, fmt.Errorf("failed to send tools/list request: %w", err) } - + // Read tools response toolsResponse, err := readJSONResponse(10 * time.Second) if err != nil { return nil, nil, fmt.Errorf("failed to read tools response: %w", err) } - + if toolsResponse.Error != nil { return nil, nil, fmt.Errorf("tools/list failed: %s", toolsResponse.Error.Message) } - + // Parse tools from response tools, err := cd.parseToolsResponse(toolsResponse.Result) if err != nil { return nil, nil, fmt.Errorf("failed to parse tools response: %w", err) } - + fmt.Printf("Successfully discovered %d tools from %s\n", len(tools), serverAlias) - + // Step 4: Request resources list (5 second timeout) 
fmt.Printf("Requesting resources list from %s...\n", serverAlias) resourcesRequest := MCPRequest{ @@ -697,11 +699,11 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st ID: 3, Method: "resources/list", } - + if err := encoder.Encode(resourcesRequest); err != nil { return nil, nil, fmt.Errorf("failed to send resources/list request: %w", err) } - + // Read resources response resourcesResponse, err := readJSONResponse(5 * time.Second) if err != nil { @@ -709,22 +711,22 @@ func (cd *CapabilityDiscovery) performDiscoveryWithPipes(ctx context.Context, st fmt.Printf("Resources not supported or failed to read response from %s: %v\n", serverAlias, err) return tools, []MCPResource{}, nil } - + if resourcesResponse.Error != nil { // Resources might not be supported, that's okay fmt.Printf("Resources not supported by %s: %s\n", serverAlias, resourcesResponse.Error.Message) return tools, []MCPResource{}, nil } - + // Parse resources from response resources, err := cd.parseResourcesResponse(resourcesResponse.Result) if err != nil { fmt.Printf("Failed to parse resources response from %s: %v\n", serverAlias, err) return tools, []MCPResource{}, nil } - + fmt.Printf("Successfully discovered %d resources from %s\n", len(resources), serverAlias) - + return tools, resources, nil } @@ -823,49 +825,49 @@ func (cd *CapabilityDiscovery) parseResourcesResponse(result json.RawMessage) ([ // tryHTTPDiscovery attempts to discover capabilities using HTTP transport func (cd *CapabilityDiscovery) tryHTTPDiscovery(serverAlias string, metadata MCPServerConfig) ([]MCPTool, []MCPResource, error) { fmt.Printf("Attempting HTTP discovery for %s\n", serverAlias) - + // Create a temporary manager and process manager for discovery template := NewTemplateProcessor(cd.projectPath, false) // Non-verbose for discovery processManager := NewProcessManager(cd.projectPath, template, false) - + // Start the MCP server temporarily with port assignment process, err := 
processManager.StartLocalMCP(metadata) if err != nil { return nil, nil, fmt.Errorf("failed to start MCP server for HTTP discovery: %w", err) } - + // Ensure we clean up the process defer func() { if err := processManager.StopProcess(process); err != nil { fmt.Printf("Warning: failed to stop HTTP discovery process for %s: %v\n", serverAlias, err) } }() - + // Wait a moment for the server to fully start time.Sleep(2 * time.Second) - + // Connect and discover capabilities via HTTP tools, resources, err := cd.connectAndDiscover(process.Config.Port) if err != nil { return nil, nil, fmt.Errorf("HTTP discovery failed: %w", err) } - + return tools, resources, nil } // connectAndDiscover connects to a running MCP server and discovers its capabilities func (cd *CapabilityDiscovery) connectAndDiscover(port int) ([]MCPTool, []MCPResource, error) { fmt.Printf("Connecting to MCP server on port %d for capability discovery\n", port) - + // Create MCP protocol client client := NewMCPProtocolClient(false) // Use non-verbose mode for discovery - + // Try to discover capabilities via HTTP tools, resources, err := client.discoverFromHTTP(port) if err != nil { return nil, nil, fmt.Errorf("failed to discover capabilities via HTTP: %w", err) } - + return tools, resources, nil } @@ -877,16 +879,16 @@ func (cd *CapabilityDiscovery) discoverFromStaticAnalysis(serverDir string, meta if metadata.RunCmd != "" { urlOrCmd = metadata.RunCmd } - + if strings.Contains(urlOrCmd, "node") || strings.Contains(urlOrCmd, "npm") || strings.Contains(urlOrCmd, "npx") { return cd.discoverNodeJSCapabilities(serverDir, metadata) } - + // For Python servers if strings.Contains(urlOrCmd, "python") || strings.Contains(urlOrCmd, "pip") || strings.Contains(urlOrCmd, "uvx") { return cd.discoverPythonCapabilities(serverDir, metadata) } - + // Default: try to parse from package.json or other metadata return cd.discoverFromMetadata(serverDir, metadata) } @@ -898,16 +900,16 @@ func (cd *CapabilityDiscovery) 
discoverNodeJSCapabilities(serverDir string, meta if _, err := os.Stat(packageJSONPath); err == nil { return cd.parseNodeJSPackage(packageJSONPath) } - + // Try to find main server file and parse it serverFiles := []string{ // GitHub cloned MCP servers - files are in root directory - filepath.Join(serverDir, "build", "index.js"), // Built TypeScript (most common) - filepath.Join(serverDir, "dist", "index.js"), // Alternative build dir - filepath.Join(serverDir, "src", "index.js"), // Source JS - filepath.Join(serverDir, "src", "index.ts"), // Source TypeScript - filepath.Join(serverDir, "index.js"), // Root JS - filepath.Join(serverDir, "index.ts"), // Root TypeScript + filepath.Join(serverDir, "build", "index.js"), // Built TypeScript (most common) + filepath.Join(serverDir, "dist", "index.js"), // Alternative build dir + filepath.Join(serverDir, "src", "index.js"), // Source JS + filepath.Join(serverDir, "src", "index.ts"), // Source TypeScript + filepath.Join(serverDir, "index.js"), // Root JS + filepath.Join(serverDir, "index.ts"), // Root TypeScript // Legacy server subdirectory support (keep for backward compatibility) filepath.Join(serverDir, "server", "index.js"), filepath.Join(serverDir, "server", "src", "index.js"), @@ -916,7 +918,7 @@ func (cd *CapabilityDiscovery) discoverNodeJSCapabilities(serverDir string, meta filepath.Join(serverDir, "server", "src", "index.ts"), filepath.Join(serverDir, "server", "dist", "index.ts"), } - + for _, serverFile := range serverFiles { if _, err := os.Stat(serverFile); err == nil { fmt.Printf("Debug: Found server file: %s\n", serverFile) @@ -925,7 +927,7 @@ func (cd *CapabilityDiscovery) discoverNodeJSCapabilities(serverDir string, meta fmt.Printf("Debug: Checked but not found: %s\n", serverFile) } } - + return []MCPTool{}, []MCPResource{}, fmt.Errorf("no server files found") } @@ -934,19 +936,19 @@ func (cd *CapabilityDiscovery) discoverPythonCapabilities(serverDir string, meta // For Python servers, try to find and 
parse the main module pythonFiles := []string{ // GitHub cloned Python MCP servers - files are in root directory - filepath.Join(serverDir, "src", "__main__.py"), // Source directory + filepath.Join(serverDir, "src", "__main__.py"), // Source directory filepath.Join(serverDir, "src", "main.py"), filepath.Join(serverDir, "src", "server.py"), - filepath.Join(serverDir, "__main__.py"), // Root directory + filepath.Join(serverDir, "__main__.py"), // Root directory filepath.Join(serverDir, "main.py"), filepath.Join(serverDir, "server.py"), - filepath.Join(serverDir, "app.py"), // Common Python entry + filepath.Join(serverDir, "app.py"), // Common Python entry // Legacy server subdirectory support filepath.Join(serverDir, "server", "__main__.py"), filepath.Join(serverDir, "server", "main.py"), filepath.Join(serverDir, "server", "server.py"), } - + for _, pythonFile := range pythonFiles { if _, err := os.Stat(pythonFile); err == nil { fmt.Printf("Debug: Found Python server file: %s\n", pythonFile) @@ -955,7 +957,7 @@ func (cd *CapabilityDiscovery) discoverPythonCapabilities(serverDir string, meta fmt.Printf("Debug: Checked but not found: %s\n", pythonFile) } } - + return []MCPTool{}, []MCPResource{}, fmt.Errorf("no Python server files found") } @@ -967,13 +969,13 @@ func (cd *CapabilityDiscovery) discoverFromMetadata(serverDir string, metadata M filepath.Join(serverDir, "capabilities.json"), filepath.Join(serverDir, "server", "manifest.json"), } - + for _, manifestFile := range manifestFiles { if _, err := os.Stat(manifestFile); err == nil { return cd.parseManifestFile(manifestFile) } } - + return []MCPTool{}, []MCPResource{}, nil } @@ -983,17 +985,17 @@ func (cd *CapabilityDiscovery) parseNodeJSPackage(packagePath string) ([]MCPTool if err != nil { return nil, nil, err } - + var pkg map[string]interface{} if err := json.Unmarshal(data, &pkg); err != nil { return nil, nil, err } - + // Look for MCP-specific metadata in package.json if mcpData, ok := 
pkg["mcp"].(map[string]interface{}); ok { return cd.parseMCPMetadata(mcpData) } - + return []MCPTool{}, []MCPResource{}, nil } @@ -1003,15 +1005,15 @@ func (cd *CapabilityDiscovery) parseNodeJSServerFile(serverFile string) ([]MCPTo if err != nil { return nil, nil, err } - + content := string(data) tools := []MCPTool{} resources := []MCPResource{} - + fmt.Printf("Debug: Parsing server file: %s\n", serverFile) fmt.Printf("Debug: File contains ListToolsRequestSchema: %v\n", strings.Contains(content, "ListToolsRequestSchema")) fmt.Printf("Debug: File contains 'tools: [': %v\n", strings.Contains(content, "tools: [")) - + // Look for tool definitions in the tools array within ListToolsRequestSchema handler if strings.Contains(content, "ListToolsRequestSchema") { tools = cd.extractToolsFromContent(content) @@ -1020,13 +1022,13 @@ func (cd *CapabilityDiscovery) parseNodeJSServerFile(serverFile string) ([]MCPTo fmt.Printf("Debug: Tool %d: %s - %s\n", i+1, tool.Name, tool.Description) } } - + // Look for resource definitions in the resources array within ListResourcesRequestSchema handler if strings.Contains(content, "ListResourcesRequestSchema") { resources = cd.extractResourcesFromContent(content) fmt.Printf("Debug: Extracted %d resources\n", len(resources)) } - + return tools, resources, nil } @@ -1036,16 +1038,16 @@ func (cd *CapabilityDiscovery) parsePythonServerFile(pythonFile string) ([]MCPTo if err != nil { return nil, nil, err } - + content := string(data) tools := []MCPTool{} resources := []MCPResource{} - + // Simple pattern matching for Python MCP patterns lines := strings.Split(content, "\n") for _, line := range lines { line = strings.TrimSpace(line) - + // Look for @server.call_tool decorators or similar patterns if strings.Contains(line, "@server.call_tool") || strings.Contains(line, "def ") && strings.Contains(line, "_tool") { if toolName := cd.extractToolNameFromPython(line); toolName != "" { @@ -1055,7 +1057,7 @@ func (cd *CapabilityDiscovery) 
parsePythonServerFile(pythonFile string) ([]MCPTo }) } } - + // Look for resource handlers if strings.Contains(line, "@server.list_resources") || strings.Contains(line, "def ") && strings.Contains(line, "_resource") { if resourceName := cd.extractResourceNameFromPython(line); resourceName != "" { @@ -1066,7 +1068,7 @@ func (cd *CapabilityDiscovery) parsePythonServerFile(pythonFile string) ([]MCPTo } } } - + return tools, resources, nil } @@ -1076,16 +1078,16 @@ func (cd *CapabilityDiscovery) parseManifestFile(manifestFile string) ([]MCPTool if err != nil { return nil, nil, err } - + var manifest struct { Tools []MCPTool `json:"tools"` Resources []MCPResource `json:"resources"` } - + if err := json.Unmarshal(data, &manifest); err != nil { return nil, nil, err } - + return manifest.Tools, manifest.Resources, nil } @@ -1093,7 +1095,7 @@ func (cd *CapabilityDiscovery) parseManifestFile(manifestFile string) ([]MCPTool func (cd *CapabilityDiscovery) parseMCPMetadata(mcpData map[string]interface{}) ([]MCPTool, []MCPResource, error) { tools := []MCPTool{} resources := []MCPResource{} - + if toolsData, ok := mcpData["tools"].([]interface{}); ok { for _, toolData := range toolsData { if toolMap, ok := toolData.(map[string]interface{}); ok { @@ -1108,7 +1110,7 @@ func (cd *CapabilityDiscovery) parseMCPMetadata(mcpData map[string]interface{}) } } } - + if resourcesData, ok := mcpData["resources"].([]interface{}); ok { for _, resourceData := range resourcesData { if resourceMap, ok := resourceData.(map[string]interface{}); ok { @@ -1123,11 +1125,14 @@ func (cd *CapabilityDiscovery) parseMCPMetadata(mcpData map[string]interface{}) } } } - + return tools, resources, nil } // Helper functions for extracting names from code patterns +// +//nolint:unused // Reserved for enhanced static analysis +//nolint:unused // reserved for future JS analysis fallback improvements func (cd *CapabilityDiscovery) extractToolNameFromJS(line string) string { // Simple extraction - could be enhanced 
if strings.Contains(line, "CallToolRequestSchema") { @@ -1137,6 +1142,8 @@ func (cd *CapabilityDiscovery) extractToolNameFromJS(line string) string { return "" } +//nolint:unused // Reserved for enhanced static analysis +//nolint:unused // reserved for future JS analysis fallback improvements func (cd *CapabilityDiscovery) extractResourceNameFromJS(line string) string { // Simple extraction - could be enhanced if strings.Contains(line, "ListResourcesRequestSchema") { @@ -1172,18 +1179,18 @@ func (cd *CapabilityDiscovery) extractResourceNameFromPython(line string) string // extractToolsFromContent extracts tool definitions from JavaScript/TypeScript content func (cd *CapabilityDiscovery) extractToolsFromContent(content string) []MCPTool { tools := []MCPTool{} - + // Look for tools array in the ListToolsRequestSchema handler toolsStart := strings.Index(content, "tools: [") if toolsStart == -1 { return tools } - + // Find the end of the tools array remaining := content[toolsStart:] bracketCount := 0 toolsEnd := -1 - + for i, char := range remaining { if char == '[' { bracketCount++ @@ -1195,41 +1202,41 @@ func (cd *CapabilityDiscovery) extractToolsFromContent(content string) []MCPTool } } } - + if toolsEnd == -1 { return tools } - + toolsSection := remaining[:toolsEnd+1] - + // Extract individual tool objects toolObjects := cd.extractObjectsFromArray(toolsSection) - + for _, toolObj := range toolObjects { tool := cd.parseToolObject(toolObj) if tool.Name != "" { tools = append(tools, tool) } } - + return tools } // extractResourcesFromContent extracts resource definitions from JavaScript/TypeScript content func (cd *CapabilityDiscovery) extractResourcesFromContent(content string) []MCPResource { resources := []MCPResource{} - + // Look for resources array in the ListResourcesRequestSchema handler resourcesStart := strings.Index(content, "resources: [") if resourcesStart == -1 { return resources } - + // Find the end of the resources array remaining := 
content[resourcesStart:] bracketCount := 0 resourcesEnd := -1 - + for i, char := range remaining { if char == '[' { bracketCount++ @@ -1241,34 +1248,34 @@ func (cd *CapabilityDiscovery) extractResourcesFromContent(content string) []MCP } } } - + if resourcesEnd == -1 { return resources } - + resourcesSection := remaining[:resourcesEnd+1] - + // Extract individual resource objects resourceObjects := cd.extractObjectsFromArray(resourcesSection) - + for _, resourceObj := range resourceObjects { resource := cd.parseResourceObject(resourceObj) if resource.Name != "" { resources = append(resources, resource) } } - + return resources } // extractObjectsFromArray extracts individual objects from a JavaScript array string func (cd *CapabilityDiscovery) extractObjectsFromArray(arrayContent string) []string { objects := []string{} - + // Simple extraction - look for objects between { and } braceCount := 0 objectStart := -1 - + for i, char := range arrayContent { if char == '{' { if braceCount == 0 { @@ -1283,41 +1290,41 @@ func (cd *CapabilityDiscovery) extractObjectsFromArray(arrayContent string) []st } } } - + return objects } // parseToolObject parses a JavaScript tool object string to extract tool information func (cd *CapabilityDiscovery) parseToolObject(objectStr string) MCPTool { tool := MCPTool{} - + // Extract name if nameMatch := cd.extractStringValue(objectStr, "name"); nameMatch != "" { tool.Name = nameMatch } - + // Extract description if descMatch := cd.extractStringValue(objectStr, "description"); descMatch != "" { tool.Description = descMatch } - + return tool } // parseResourceObject parses a JavaScript resource object string to extract resource information func (cd *CapabilityDiscovery) parseResourceObject(objectStr string) MCPResource { resource := MCPResource{} - + // Extract name if nameMatch := cd.extractStringValue(objectStr, "name"); nameMatch != "" { resource.Name = nameMatch } - + // Extract description if descMatch := 
cd.extractStringValue(objectStr, "description"); descMatch != "" { resource.Description = descMatch } - + return resource } @@ -1330,13 +1337,13 @@ func (cd *CapabilityDiscovery) extractStringValue(objectStr, key string) string key + `: '`, key + `:'`, } - + for _, pattern := range patterns { startIdx := strings.Index(objectStr, pattern) if startIdx != -1 { valueStart := startIdx + len(pattern) quote := objectStr[valueStart-1] - + // Find the closing quote for i := valueStart; i < len(objectStr); i++ { if objectStr[i] == quote && (i == 0 || objectStr[i-1] != '\\') { @@ -1345,38 +1352,38 @@ func (cd *CapabilityDiscovery) extractStringValue(objectStr, key string) string } } } - + return "" } // updateConfigWithTransport updates the server config file with detected transport type func (cd *CapabilityDiscovery) updateConfigWithTransport(serverAlias, transport string) error { configPath := filepath.Join(cd.projectPath, "packages", "mcp", serverAlias, "config.json") - + // Read existing config configData, err := os.ReadFile(configPath) if err != nil { return fmt.Errorf("failed to read config: %w", err) } - + // Parse as raw JSON to preserve any extra fields var configMap map[string]interface{} if err := json.Unmarshal(configData, &configMap); err != nil { return fmt.Errorf("failed to parse config: %w", err) } - + // Update transport field configMap["transport"] = transport - + // Save updated config with proper formatting updatedData, err := json.MarshalIndent(configMap, "", " ") if err != nil { return fmt.Errorf("failed to marshal config: %w", err) } - + if err := os.WriteFile(configPath, updatedData, 0644); err != nil { return fmt.Errorf("failed to write config: %w", err) } - + return nil } diff --git a/control-plane/internal/mcp/errors.go b/control-plane/internal/mcp/errors.go index e65b7294..8404552f 100644 --- a/control-plane/internal/mcp/errors.go +++ b/control-plane/internal/mcp/errors.go @@ -21,41 +21,41 @@ type MCPOperationError struct { type 
MCPOperationErrorType string const ( - OpErrorTypeInstallation MCPOperationErrorType = "installation" - OpErrorTypeBuild MCPOperationErrorType = "build" - OpErrorTypeStartup MCPOperationErrorType = "startup" + OpErrorTypeInstallation MCPOperationErrorType = "installation" + OpErrorTypeBuild MCPOperationErrorType = "build" + OpErrorTypeStartup MCPOperationErrorType = "startup" OpErrorTypeCapabilityDiscovery MCPOperationErrorType = "capability_discovery" - OpErrorTypeValidation MCPOperationErrorType = "validation" - OpErrorTypeConfiguration MCPOperationErrorType = "configuration" - OpErrorTypeTemplate MCPOperationErrorType = "template" - OpErrorTypeProtocol MCPOperationErrorType = "protocol" - OpErrorTypeEnvironment MCPOperationErrorType = "environment" + OpErrorTypeValidation MCPOperationErrorType = "validation" + OpErrorTypeConfiguration MCPOperationErrorType = "configuration" + OpErrorTypeTemplate MCPOperationErrorType = "template" + OpErrorTypeProtocol MCPOperationErrorType = "protocol" + OpErrorTypeEnvironment MCPOperationErrorType = "environment" ) // Error implements the error interface func (e *MCPOperationError) Error() string { var parts []string - + if e.ServerID != "" { parts = append(parts, fmt.Sprintf("server '%s'", e.ServerID)) } - + if e.Operation != "" { parts = append(parts, fmt.Sprintf("operation '%s'", e.Operation)) } - + parts = append(parts, string(e.Type), "failed") - + if e.Message != "" { parts = append(parts, "-", e.Message) } - + result := strings.Join(parts, " ") - + if e.Cause != nil { result += fmt.Sprintf(": %v", e.Cause) } - + return result } @@ -67,26 +67,26 @@ func (e *MCPOperationError) Unwrap() error { // DetailedError returns a detailed error message including stdout/stderr and context func (e *MCPOperationError) DetailedError() string { var details []string - + details = append(details, e.Error()) - + if len(e.Context) > 0 { details = append(details, "\nContext:") for key, value := range e.Context { details = append(details, 
fmt.Sprintf(" %s: %s", key, value)) } } - + if e.Stdout != "" { details = append(details, "\nStdout:") details = append(details, e.Stdout) } - + if e.Stderr != "" { details = append(details, "\nStderr:") details = append(details, e.Stderr) } - + return strings.Join(details, "\n") } @@ -101,7 +101,7 @@ func (e *MCPOperationError) GetSuggestion() string { return "Set NODE_ENV environment variable (e.g., --env NODE_ENV=production)" } return "Check that all required environment variables are set" - + case OpErrorTypeStartup: if strings.Contains(e.Message, "permission denied") { return "Check file permissions and ensure the executable is accessible" @@ -110,7 +110,7 @@ func (e *MCPOperationError) GetSuggestion() string { return "Ensure the required runtime (node, python, etc.) is installed and in PATH" } return "Check server configuration and ensure all dependencies are installed" - + case OpErrorTypeInstallation: if strings.Contains(e.Message, "npm install") { return "Try running 'npm install' manually in the server directory" @@ -119,16 +119,16 @@ func (e *MCPOperationError) GetSuggestion() string { return "Try running 'pip install' manually in the server directory" } return "Check network connectivity and package availability" - + case OpErrorTypeBuild: if strings.Contains(e.Message, "typescript") || strings.Contains(e.Message, "tsc") { return "Ensure TypeScript is installed and tsconfig.json is valid" } return "Check build configuration and ensure all build dependencies are available" - + case OpErrorTypeCapabilityDiscovery: return "Server may require specific environment variables or configuration to start properly" - + default: return "Check the detailed error output above for more information" } @@ -181,7 +181,7 @@ func CommandExecutionError(operation, serverID, command string, cause error, std } else if strings.Contains(operation, "start") { errorType = OpErrorTypeStartup } - + return NewMCPOperationErrorWithCause(errorType, operation, serverID, 
fmt.Sprintf("command failed: %s", command), cause). WithContext("command", command). WithOutput(stdout, stderr) @@ -234,7 +234,7 @@ func (f *ErrorFormatter) Format(err error) string { if !ok { return err.Error() } - + if f.Verbose { result := mcpErr.DetailedError() suggestion := mcpErr.GetSuggestion() @@ -243,7 +243,7 @@ func (f *ErrorFormatter) Format(err error) string { } return result } - + result := mcpErr.Error() suggestion := mcpErr.GetSuggestion() if suggestion != "" { diff --git a/control-plane/internal/mcp/interfaces.go b/control-plane/internal/mcp/interfaces.go index c7d3aa6d..0cb4ea6f 100644 --- a/control-plane/internal/mcp/interfaces.go +++ b/control-plane/internal/mcp/interfaces.go @@ -11,51 +11,51 @@ import ( // MCPServerConfig represents the simplified MCP configuration type MCPServerConfig struct { // Core identification - Alias string `yaml:"alias" json:"alias"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` - + Alias string `yaml:"alias" json:"alias"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + // Connection (mutually exclusive) - URL string `yaml:"url,omitempty" json:"url,omitempty"` // For remote MCPs - RunCmd string `yaml:"run,omitempty" json:"run,omitempty"` // For local MCPs - + URL string `yaml:"url,omitempty" json:"url,omitempty"` // For remote MCPs + RunCmd string `yaml:"run,omitempty" json:"run,omitempty"` // For local MCPs + // Transport type (stdio or http) - Transport string `yaml:"transport,omitempty" json:"transport,omitempty"` - + Transport string `yaml:"transport,omitempty" json:"transport,omitempty"` + // Setup (optional - runs once during add) - SetupCmds []string `yaml:"setup,omitempty" json:"setup,omitempty"` - + SetupCmds []string `yaml:"setup,omitempty" json:"setup,omitempty"` + // Runtime configuration - WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` - Env map[string]string `yaml:"environment,omitempty" 
json:"environment,omitempty"` - Timeout time.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` - + WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` + Env map[string]string `yaml:"environment,omitempty" json:"environment,omitempty"` + Timeout time.Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` + // Health & Monitoring - HealthCheck string `yaml:"health_check,omitempty" json:"health_check,omitempty"` - Port int `yaml:"port,omitempty" json:"port,omitempty"` // Auto-assigned if 0 - + HealthCheck string `yaml:"health_check,omitempty" json:"health_check,omitempty"` + Port int `yaml:"port,omitempty" json:"port,omitempty"` // Auto-assigned if 0 + // Metadata - Version string `yaml:"version,omitempty" json:"version,omitempty"` - Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` - + Version string `yaml:"version,omitempty" json:"version,omitempty"` + Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` + // Installation options - Force bool `yaml:"-" json:"-"` // Force reinstall, not persisted - + Force bool `yaml:"-" json:"-"` // Force reinstall, not persisted + // Internal runtime fields - PID int `yaml:"-" json:"pid,omitempty"` - Status string `yaml:"-" json:"status,omitempty"` - StartedAt *time.Time `yaml:"-" json:"started_at,omitempty"` + PID int `yaml:"-" json:"pid,omitempty"` + Status string `yaml:"-" json:"status,omitempty"` + StartedAt *time.Time `yaml:"-" json:"started_at,omitempty"` } // MCPProcess represents a running MCP server type MCPProcess struct { - Config MCPServerConfig `json:"config"` - Cmd *exec.Cmd `json:"-"` - Stdin io.WriteCloser `json:"-"` - Stdout io.ReadCloser `json:"-"` - Stderr io.ReadCloser `json:"-"` - Context context.Context `json:"-"` - Cancel context.CancelFunc `json:"-"` - LogFile string `json:"log_file"` + Config MCPServerConfig `json:"config"` + Cmd *exec.Cmd `json:"-"` + Stdin io.WriteCloser `json:"-"` + Stdout io.ReadCloser `json:"-"` + Stderr io.ReadCloser 
`json:"-"` + Context context.Context `json:"-"` + Cancel context.CancelFunc `json:"-"` + LogFile string `json:"log_file"` } // MCPServerStatus represents the status of an MCP server diff --git a/control-plane/internal/mcp/manager.go b/control-plane/internal/mcp/manager.go index c1cf479f..d8ee0ee0 100644 --- a/control-plane/internal/mcp/manager.go +++ b/control-plane/internal/mcp/manager.go @@ -8,7 +8,7 @@ import ( "path/filepath" "time" - "github.com/your-org/haxen/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/config" "gopkg.in/yaml.v3" ) @@ -39,11 +39,11 @@ func (m *MCPManager) Add(config MCPServerConfig) error { if config.Alias == "" { return fmt.Errorf("alias is required") } - + if config.URL == "" && config.RunCmd == "" { return fmt.Errorf("either URL or run command is required") } - + if config.URL != "" && config.RunCmd != "" { return fmt.Errorf("URL and run command are mutually exclusive") } @@ -59,7 +59,7 @@ func (m *MCPManager) Add(config MCPServerConfig) error { // Create server directory serverDir := filepath.Join(m.projectDir, "packages", "mcp", config.Alias) - + // Handle force flag - remove existing directory if it exists if config.Force { if _, err := os.Stat(serverDir); err == nil { @@ -71,7 +71,7 @@ func (m *MCPManager) Add(config MCPServerConfig) error { } } } - + if err := os.MkdirAll(serverDir, 0755); err != nil { return fmt.Errorf("failed to create server directory: %w", err) } @@ -89,9 +89,9 @@ func (m *MCPManager) Add(config MCPServerConfig) error { return fmt.Errorf("failed to save configuration: %w", err) } - // Update haxen.yaml - if err := m.updateHaxenYAML(config); err != nil { - return fmt.Errorf("failed to update haxen.yaml: %w", err) + // Update agentfield.yaml + if err := m.updateAgentFieldYAML(config); err != nil { + return fmt.Errorf("failed to update agentfield.yaml: %w", err) } // Attempt to start and discover capabilities @@ -111,7 +111,7 @@ func (m *MCPManager) Add(config 
MCPServerConfig) error { } else if capability != nil && m.verbose { fmt.Printf("Updated config with transport type: %s\n", capability.Transport) } - + // Stop the server after discovery (it will be started again when needed) if err := m.Stop(config.Alias); err != nil { if m.verbose { @@ -170,7 +170,7 @@ func (m *MCPManager) Start(alias string) (*MCPProcess, error) { config.PID = process.Config.PID config.Status = string(StatusRunning) config.StartedAt = &now - + if err := m.saveConfig(filepath.Join(m.projectDir, "packages", "mcp", alias, "config.json"), *config); err != nil { if m.verbose { fmt.Printf("Warning: failed to update configuration: %v\n", err) @@ -208,7 +208,11 @@ func (m *MCPManager) Stop(alias string) error { config.PID = 0 config.Status = string(StatusStopped) config.StartedAt = nil - m.saveConfig(filepath.Join(m.projectDir, "packages", "mcp", alias, "config.json"), *config) + if err := m.saveConfig(filepath.Join(m.projectDir, "packages", "mcp", alias, "config.json"), *config); err != nil { + return fmt.Errorf("failed to persist MCP server config: %w", err) + } + } else if m.verbose { + fmt.Printf("WARN: Unable to load MCP config for %s during stop: %v\n", alias, err) } if m.verbose { @@ -243,9 +247,9 @@ func (m *MCPManager) Remove(alias string) error { return fmt.Errorf("failed to remove server directory: %w", err) } - // Update haxen.yaml - if err := m.removeMCPFromHaxenYAML(alias); err != nil { - return fmt.Errorf("failed to update haxen.yaml: %w", err) + // Update agentfield.yaml + if err := m.removeMCPFromAgentFieldYAML(alias); err != nil { + return fmt.Errorf("failed to update agentfield.yaml: %w", err) } if m.verbose { @@ -323,7 +327,7 @@ func (m *MCPManager) Restart(alias string) error { // Logs returns a reader for the server logs func (m *MCPManager) Logs(alias string, follow bool, lines int) (io.ReadCloser, error) { logFile := filepath.Join(m.projectDir, "packages", "mcp", alias, fmt.Sprintf("%s.log", alias)) - + if _, err := 
os.Stat(logFile); os.IsNotExist(err) { return nil, fmt.Errorf("log file not found for server %s", alias) } @@ -399,20 +403,20 @@ func (m *MCPManager) getServerInfo(alias string) (*MCPServerInfo, error) { return info, nil } -// updateHaxenYAML updates the haxen.yaml file with the new MCP server -func (m *MCPManager) updateHaxenYAML(config MCPServerConfig) error { - haxenYAMLPath := filepath.Join(m.projectDir, "haxen.yaml") +// updateAgentFieldYAML updates the agentfield.yaml file with the new MCP server +func (m *MCPManager) updateAgentFieldYAML(config MCPServerConfig) error { + agentfieldYAMLPath := filepath.Join(m.projectDir, "agentfield.yaml") - // Read existing haxen.yaml - data, err := os.ReadFile(haxenYAMLPath) + // Read existing agentfield.yaml + data, err := os.ReadFile(agentfieldYAMLPath) if err != nil { - return fmt.Errorf("failed to read haxen.yaml: %w", err) + return fmt.Errorf("failed to read agentfield.yaml: %w", err) } // Parse YAML var yamlConfig map[string]interface{} if err := yaml.Unmarshal(data, &yamlConfig); err != nil { - return fmt.Errorf("failed to parse haxen.yaml: %w", err) + return fmt.Errorf("failed to parse agentfield.yaml: %w", err) } // Ensure dependencies section exists @@ -474,30 +478,30 @@ func (m *MCPManager) updateHaxenYAML(config MCPServerConfig) error { // Write back to file updatedData, err := yaml.Marshal(yamlConfig) if err != nil { - return fmt.Errorf("failed to marshal haxen.yaml: %w", err) + return fmt.Errorf("failed to marshal agentfield.yaml: %w", err) } - if err := os.WriteFile(haxenYAMLPath, updatedData, 0644); err != nil { - return fmt.Errorf("failed to write haxen.yaml: %w", err) + if err := os.WriteFile(agentfieldYAMLPath, updatedData, 0644); err != nil { + return fmt.Errorf("failed to write agentfield.yaml: %w", err) } return nil } -// removeMCPFromHaxenYAML removes an MCP server from haxen.yaml -func (m *MCPManager) removeMCPFromHaxenYAML(alias string) error { - haxenYAMLPath := filepath.Join(m.projectDir, 
"haxen.yaml") +// removeMCPFromAgentFieldYAML removes an MCP server from agentfield.yaml +func (m *MCPManager) removeMCPFromAgentFieldYAML(alias string) error { + agentfieldYAMLPath := filepath.Join(m.projectDir, "agentfield.yaml") - // Read existing haxen.yaml - data, err := os.ReadFile(haxenYAMLPath) + // Read existing agentfield.yaml + data, err := os.ReadFile(agentfieldYAMLPath) if err != nil { - return fmt.Errorf("failed to read haxen.yaml: %w", err) + return fmt.Errorf("failed to read agentfield.yaml: %w", err) } // Parse YAML var config map[string]interface{} if err := yaml.Unmarshal(data, &config); err != nil { - return fmt.Errorf("failed to parse haxen.yaml: %w", err) + return fmt.Errorf("failed to parse agentfield.yaml: %w", err) } // Navigate to mcp_servers section @@ -510,28 +514,30 @@ func (m *MCPManager) removeMCPFromHaxenYAML(alias string) error { // Write back to file updatedData, err := yaml.Marshal(config) if err != nil { - return fmt.Errorf("failed to marshal haxen.yaml: %w", err) + return fmt.Errorf("failed to marshal agentfield.yaml: %w", err) } - if err := os.WriteFile(haxenYAMLPath, updatedData, 0644); err != nil { - return fmt.Errorf("failed to write haxen.yaml: %w", err) + if err := os.WriteFile(agentfieldYAMLPath, updatedData, 0644); err != nil { + return fmt.Errorf("failed to write agentfield.yaml: %w", err) } return nil } -// loadMCPConfigsFromYAML loads MCP configurations from haxen.yaml +// loadMCPConfigsFromYAML loads MCP configurations from agentfield.yaml +// +//nolint:unused // Reserved for future YAML config support func (m *MCPManager) loadMCPConfigsFromYAML() (map[string]MCPServerConfig, error) { - haxenYAMLPath := filepath.Join(m.projectDir, "haxen.yaml") + agentfieldYAMLPath := filepath.Join(m.projectDir, "agentfield.yaml") - data, err := os.ReadFile(haxenYAMLPath) + data, err := os.ReadFile(agentfieldYAMLPath) if err != nil { - return nil, fmt.Errorf("failed to read haxen.yaml: %w", err) + return nil, fmt.Errorf("failed to 
read agentfield.yaml: %w", err) } var config map[string]interface{} if err := yaml.Unmarshal(data, &config); err != nil { - return nil, fmt.Errorf("failed to parse haxen.yaml: %w", err) + return nil, fmt.Errorf("failed to parse agentfield.yaml: %w", err) } configs := make(map[string]MCPServerConfig) @@ -635,9 +641,9 @@ func (m *MCPManager) DiscoverCapabilities(alias string) (*MCPManifest, error) { } if m.verbose { - fmt.Printf("Successfully discovered capabilities for %s: %d tools, %d resources\n", + fmt.Printf("Successfully discovered capabilities for %s: %d tools, %d resources\n", alias, len(manifest.Tools), len(manifest.Resources)) - fmt.Printf("Note: MCP skills will be auto-registered by Haxen SDK when agent starts\n") + fmt.Printf("Note: MCP skills will be auto-registered by AgentField SDK when agent starts\n") } return manifest, nil @@ -692,6 +698,8 @@ func (m *MCPManager) discoverFromLocalProcess(config MCPServerConfig) (*MCPManif } // connectAndDiscover connects to an MCP server endpoint and discovers capabilities +// +//nolint:unused // Reserved for future HTTP-based MCP discovery func (m *MCPManager) connectAndDiscover(endpoint string) (*MCPManifest, error) { if m.verbose { fmt.Printf("Connecting to MCP server at: %s\n", endpoint) @@ -715,6 +723,8 @@ func (m *MCPManager) connectAndDiscover(endpoint string) (*MCPManifest, error) { } // parseCapabilityResponse parses a raw capability response into an MCPManifest +// +//nolint:unused // Reserved for future HTTP-based MCP discovery func (m *MCPManager) parseCapabilityResponse(response []byte) (*MCPManifest, error) { var manifest MCPManifest if err := json.Unmarshal(response, &manifest); err != nil { @@ -757,7 +767,7 @@ func (m *MCPManager) GenerateSkills(alias string, manifest *MCPManifest) error { // Use the new SkillGenerator instead of the old template-based approach generator := NewSkillGenerator(m.projectDir, m.verbose) - + result, err := generator.GenerateSkillsForServer(alias) if err != nil { return 
fmt.Errorf("failed to generate skills: %w", err) diff --git a/control-plane/internal/mcp/process.go b/control-plane/internal/mcp/process.go index 538c2d9b..24e2065d 100644 --- a/control-plane/internal/mcp/process.go +++ b/control-plane/internal/mcp/process.go @@ -40,7 +40,7 @@ func (pm *ProcessManager) StartLocalMCP(config MCPServerConfig) (*MCPProcess, er // Create template variables vars := pm.template.CreateTemplateVars(config, port) - + // Process run command processedCmd, err := pm.template.ProcessCommand(config.RunCmd, vars) if err != nil { @@ -77,7 +77,7 @@ func (pm *ProcessManager) StartLocalMCP(config MCPServerConfig) (*MCPProcess, er cancel() return nil, fmt.Errorf("failed to process environment variables: %w", err) } - + for key, value := range processedEnv { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) } @@ -160,7 +160,7 @@ func (pm *ProcessManager) MonitorProcess(process *MCPProcess, onExit func(alias // Wait for process to finish err := process.Cmd.Wait() - + // Update status if err != nil { process.Config.Status = string(StatusError) @@ -187,10 +187,10 @@ func (pm *ProcessManager) ExecuteSetupCommands(config MCPServerConfig) error { } serverDir := filepath.Join(pm.projectDir, "packages", "mcp", config.Alias) - + // Create template variables (port 0 for setup) vars := pm.template.CreateTemplateVars(config, 0) - + // Process setup commands processedCmds, err := pm.template.ProcessCommands(config.SetupCmds, vars) if err != nil { @@ -217,14 +217,14 @@ func (pm *ProcessManager) ExecuteSetupCommands(config MCPServerConfig) error { execCmd := exec.Command("sh", "-c", cmd) execCmd.Dir = workingDir - + // Set environment variables if len(config.Env) > 0 { processedEnv, err := pm.template.ProcessEnvironment(config.Env, vars) if err != nil { return fmt.Errorf("failed to process environment variables: %w", err) } - + for key, value := range processedEnv { execCmd.Env = append(execCmd.Env, fmt.Sprintf("%s=%s", key, value)) } @@ -247,7 +247,7 @@ 
func (pm *ProcessManager) ExecuteSetupCommands(config MCPServerConfig) error { func (pm *ProcessManager) ExecuteRunCommand(config MCPServerConfig, port int) (*exec.Cmd, error) { // Create template variables vars := pm.template.CreateTemplateVars(config, port) - + // Process run command processedCmd, err := pm.template.ProcessCommand(config.RunCmd, vars) if err != nil { @@ -274,7 +274,7 @@ func (pm *ProcessManager) ExecuteRunCommand(config MCPServerConfig, port int) (* if err != nil { return nil, fmt.Errorf("failed to process environment variables: %w", err) } - + for key, value := range processedEnv { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) } @@ -313,7 +313,7 @@ func (pm *ProcessManager) HealthCheck(config MCPServerConfig, port int) error { // Create template variables vars := pm.template.CreateTemplateVars(config, port) - + // Process health check command processedCmd, err := pm.template.ProcessCommand(config.HealthCheck, vars) if err != nil { @@ -324,15 +324,13 @@ func (pm *ProcessManager) HealthCheck(config MCPServerConfig, port int) error { fmt.Printf("Executing health check: %s\n", processedCmd) } - // Execute health check - cmd := exec.Command("sh", "-c", processedCmd) - // Set timeout for health check ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - - cmd = exec.CommandContext(ctx, "sh", "-c", processedCmd) - + + // Execute health check with timeout + cmd := exec.CommandContext(ctx, "sh", "-c", processedCmd) + output, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("health check failed: %s\nOutput: %s", err, string(output)) @@ -402,7 +400,7 @@ func (pm *ProcessManager) IsProcessRunning(process *MCPProcess) bool { // GetProcessLogs returns a reader for the process logs func (pm *ProcessManager) GetProcessLogs(alias string, follow bool, lines int) (io.ReadCloser, error) { logFile := filepath.Join(pm.projectDir, "packages", "mcp", alias, fmt.Sprintf("%s.log", alias)) - + if _, err := 
os.Stat(logFile); os.IsNotExist(err) { return nil, fmt.Errorf("log file not found for server %s", alias) } diff --git a/control-plane/internal/mcp/protocol_client.go b/control-plane/internal/mcp/protocol_client.go index c49f0080..179ba8cd 100644 --- a/control-plane/internal/mcp/protocol_client.go +++ b/control-plane/internal/mcp/protocol_client.go @@ -23,7 +23,6 @@ func NewMCPProtocolClient(verbose bool) *MCPProtocolClient { } } - // DiscoverCapabilitiesFromProcess discovers capabilities from a running MCP process func (client *MCPProtocolClient) DiscoverCapabilitiesFromProcess(process *MCPProcess) ([]MCPTool, []MCPResource, error) { if client.verbose { @@ -164,7 +163,7 @@ func (client *MCPProtocolClient) sendInitialize(stdin io.Writer) error { "resources": map[string]interface{}{}, }, "clientInfo": map[string]interface{}{ - "name": "haxen-mcp-client", + "name": "agentfield-mcp-client", "version": "1.0.0", }, }, @@ -177,7 +176,7 @@ func (client *MCPProtocolClient) sendInitialize(stdin io.Writer) error { func (client *MCPProtocolClient) waitForInitializeResponse(stdout io.Reader) error { scanner := bufio.NewScanner(stdout) scanner.Scan() - + var response MCPResponse if err := json.Unmarshal(scanner.Bytes(), &response); err != nil { return fmt.Errorf("failed to parse initialize response: %w", err) @@ -270,12 +269,7 @@ func (client *MCPProtocolClient) requestResourcesList(stdin io.Writer, stdout io // Convert to our internal format resources := make([]MCPResource, len(resourcesResponse.Resources)) for i, resource := range resourcesResponse.Resources { - resources[i] = MCPResource{ - URI: resource.URI, - Name: resource.Name, - Description: resource.Description, - MimeType: resource.MimeType, - } + resources[i] = MCPResource(resource) } return resources, nil @@ -333,12 +327,7 @@ func (client *MCPProtocolClient) sendHTTPResourcesRequest(httpClient *http.Clien // Convert to our internal format resources := make([]MCPResource, len(resourcesResponse.Resources)) for i, 
resource := range resourcesResponse.Resources { - resources[i] = MCPResource{ - URI: resource.URI, - Name: resource.Name, - Description: resource.Description, - MimeType: resource.MimeType, - } + resources[i] = MCPResource(resource) } return resources, nil @@ -428,7 +417,7 @@ func (client *MCPProtocolClient) StartMCPServerForDiscovery(config MCPServerConf if err != nil { return nil, fmt.Errorf("failed to process environment variables: %w", err) } - + for key, value := range processedEnv { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", key, value)) } diff --git a/control-plane/internal/mcp/skill_generator.go b/control-plane/internal/mcp/skill_generator.go index 2193c253..5584bd03 100644 --- a/control-plane/internal/mcp/skill_generator.go +++ b/control-plane/internal/mcp/skill_generator.go @@ -8,7 +8,7 @@ import ( "text/template" "time" - "github.com/your-org/haxen/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/config" ) // SkillGenerator handles the generation of Python skill files from MCP tools @@ -41,15 +41,15 @@ func (sg *SkillGenerator) GenerateSkillsForServer(serverAlias string) (*SkillGen // Discover server capabilities using the new simplified architecture // Load config for capability discovery - cfg, err := config.LoadConfig(filepath.Join(sg.projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(sg.projectDir, "agentfield.yaml")) if err != nil { // Fallback to current directory - cfg, err = config.LoadConfig("haxen.yaml") + cfg, err = config.LoadConfig("agentfield.yaml") if err != nil { - return nil, fmt.Errorf("failed to load haxen configuration: %w", err) + return nil, fmt.Errorf("failed to load af configuration: %w", err) } } - + discovery := NewCapabilityDiscovery(cfg, sg.projectDir) capability, err := discovery.GetServerCapability(serverAlias) if err != nil { @@ -161,15 +161,15 @@ func getMapKeys(m map[string]interface{}) []string { // GenerateSkillsForAllServers generates skill files for 
all installed MCP servers func (sg *SkillGenerator) GenerateSkillsForAllServers() error { // Load config for capability discovery - cfg, err := config.LoadConfig(filepath.Join(sg.projectDir, "haxen.yaml")) + cfg, err := config.LoadConfig(filepath.Join(sg.projectDir, "agentfield.yaml")) if err != nil { // Fallback to current directory - cfg, err = config.LoadConfig("haxen.yaml") + cfg, err = config.LoadConfig("agentfield.yaml") if err != nil { - return fmt.Errorf("failed to load haxen configuration: %w", err) + return fmt.Errorf("failed to load af configuration: %w", err) } } - + discovery := NewCapabilityDiscovery(cfg, sg.projectDir) capabilities, err := discovery.DiscoverCapabilities() if err != nil { @@ -229,7 +229,7 @@ func (sg *SkillGenerator) generateSkillFileContent(capability *MCPCapability) (s "InputSchema": tool.InputSchema, } } - + templateData := struct { ServerAlias string ServerName string @@ -296,7 +296,7 @@ type SkillParameter struct { func (sg *SkillGenerator) generateSkillFunction(tool MCPTool, serverAlias string) (SkillFunction, error) { // Generate function name: serveralias_toolname functionName := sg.generateFunctionName(serverAlias, tool.Name) - + // Parse input schema to extract parameters parameters, err := sg.parseInputSchema(tool.InputSchema) if err != nil { @@ -322,19 +322,19 @@ func (sg *SkillGenerator) generateFunctionName(serverAlias, toolName string) str normalizedAlias := strings.ReplaceAll(serverAlias, "-", "_") normalizedAlias = strings.ReplaceAll(normalizedAlias, ".", "_") normalizedAlias = strings.ReplaceAll(normalizedAlias, "/", "_") - + // Then normalize the tool name normalizedTool := strings.ReplaceAll(toolName, "-", "_") normalizedTool = strings.ReplaceAll(normalizedTool, ".", "_") normalizedTool = strings.ReplaceAll(normalizedTool, "/", "_") - + name := fmt.Sprintf("%s_%s", normalizedAlias, normalizedTool) - + // Ensure it starts with a letter or underscore if len(name) > 0 && (name[0] >= '0' && name[0] <= '9') { name = "_" 
+ name } - + return name } @@ -447,14 +447,14 @@ func (sg *SkillGenerator) escapeForDocstring(s string) string { // generateDocString generates a Python docstring for the skill function func (sg *SkillGenerator) generateDocString(tool MCPTool, parameters []SkillParameter) string { var docString strings.Builder - + // Escape the description for safe use in docstring escapedDescription := sg.escapeForDocstring(tool.Description) - + docString.WriteString(fmt.Sprintf(`"""%s - + This is an auto-generated skill function that wraps the MCP tool '%s'. - + Args:`, escapedDescription, tool.Name)) for _, param := range parameters { @@ -462,25 +462,25 @@ func (sg *SkillGenerator) generateDocString(tool MCPTool, parameters []SkillPara if !param.Required { required = ", optional" } - + defaultInfo := "" if param.Default != "" { defaultInfo = fmt.Sprintf(", defaults to %s", param.Default) } - + // Escape parameter description escapedParamDesc := sg.escapeForDocstring(param.Description) - + docString.WriteString(fmt.Sprintf(` %s (%s%s): %s%s`, param.Name, param.Type, required, escapedParamDesc, defaultInfo)) } docString.WriteString(` - execution_context (ExecutionContext, optional): Haxen execution context for workflow tracking - + execution_context (ExecutionContext, optional): AgentField execution context for workflow tracking + Returns: Any: The result from the MCP tool execution - + Raises: MCPError: If the MCP server is not available or the tool execution fails """`) @@ -499,13 +499,13 @@ Do not modify this file manually - it will be regenerated when the MCP server is """ from typing import Any, Dict, List, Optional -from haxen_sdk import app -from haxen_sdk.execution_context import ExecutionContext -from haxen_sdk.mcp.client import MCPClient -from haxen_sdk.mcp.exceptions import ( +from agentfield import app +from agentfield.execution_context import ExecutionContext +from agentfield.mcp.client import MCPClient +from agentfield.mcp.exceptions import ( MCPError, 
MCPConnectionError, MCPToolError, MCPTimeoutError ) -from haxen_sdk.agent import Agent +from agentfield.agent import Agent # MCP server configuration MCP_ALIAS = "{{.ServerAlias}}" @@ -528,15 +528,15 @@ async def _get_mcp_client(execution_context: Optional[ExecutionContext] = None) try: # Get client from registry client = MCPClient.get_or_create(MCP_ALIAS) - + # Validate server health is_healthy = await client.validate_server_health() if not is_healthy: raise MCPConnectionError( - f"MCP server '{MCP_ALIAS}' is not healthy. Please check server status with: haxen mcp status {MCP_ALIAS}", + f"MCP server '{MCP_ALIAS}' is not healthy. Please check server status with: af mcp status {MCP_ALIAS}", endpoint=f"mcp://{MCP_ALIAS}" ) - + # Set execution context for workflow tracking if execution_context: client.set_execution_context(execution_context) @@ -545,13 +545,13 @@ async def _get_mcp_client(execution_context: Optional[ExecutionContext] = None) current_agent = Agent.get_current() if hasattr(current_agent, '_current_execution_context') and current_agent._current_execution_context: client.set_execution_context(current_agent._current_execution_context) - + return client - + except ValueError as e: # Handle unregistered alias raise MCPConnectionError( - f"MCP server '{MCP_ALIAS}' is not configured. Please install it with: haxen add --mcp {MCP_ALIAS}", + f"MCP server '{MCP_ALIAS}' is not configured. 
Please install it with: af add --mcp {MCP_ALIAS}", endpoint=f"mcp://{MCP_ALIAS}" ) from e except Exception as e: @@ -565,22 +565,22 @@ async def _get_mcp_client(execution_context: Optional[ExecutionContext] = None) @app.skill(tags=["mcp", "{{$.ServerAlias}}"]) async def {{.Name}}({{range $i, $param := .Parameters}}{{if $i}}, {{end}}{{$param.Name}}: {{$param.Type}}{{if not $param.Required}}{{if $param.Default}} = {{$param.Default}}{{else}} = None{{end}}{{end}}{{end}}{{if .Parameters}}, {{end}}execution_context: Optional[ExecutionContext] = None) -> Any: {{.DocString}} - + try: # Get MCP client with execution context client = await _get_mcp_client(execution_context) - + # Prepare arguments, filtering out None values for optional parameters kwargs = {} {{range .Parameters}} if {{.Name}} is not None: kwargs["{{.Name}}"] = {{.Name}} {{end}} - + # Call the MCP tool result = await client.call_tool("{{.ToolName}}", kwargs) return result - + except MCPConnectionError: # Re-raise connection errors as-is (they have helpful messages) raise diff --git a/control-plane/internal/mcp/stdio_client.go b/control-plane/internal/mcp/stdio_client.go index 324ec9cd..6749079e 100644 --- a/control-plane/internal/mcp/stdio_client.go +++ b/control-plane/internal/mcp/stdio_client.go @@ -4,8 +4,10 @@ import ( "bufio" "context" "encoding/json" + "errors" "fmt" "io" + "os" "os/exec" "time" ) @@ -22,7 +24,6 @@ func NewStdioMCPClient(verbose bool) *StdioMCPClient { } } - // DiscoverCapabilitiesFromProcess discovers capabilities from a stdio-based MCP server process func (c *StdioMCPClient) DiscoverCapabilitiesFromProcess(config MCPServerConfig) ([]MCPTool, []MCPResource, error) { if c.verbose { @@ -35,7 +36,7 @@ func (c *StdioMCPClient) DiscoverCapabilitiesFromProcess(config MCPServerConfig) // Start the MCP server process cmd := exec.CommandContext(ctx, "sh", "-c", config.RunCmd) - + // Set working directory if specified if config.WorkingDir != "" { cmd.Dir = config.WorkingDir @@ -75,7 +76,9 @@ 
func (c *StdioMCPClient) DiscoverCapabilitiesFromProcess(config MCPServerConfig) // Ensure process cleanup defer func() { if cmd.Process != nil { - cmd.Process.Kill() + if killErr := cmd.Process.Kill(); killErr != nil && !errors.Is(killErr, os.ErrProcessDone) { + fmt.Printf("WARN: failed to terminate MCP stdio process: %v\n", killErr) + } } }() @@ -125,7 +128,7 @@ func (c *StdioMCPClient) performDiscovery(stdin io.WriteCloser, stdout io.ReadCl }, }, ClientInfo: ClientInfo{ - Name: "haxen-mcp-client", + Name: "agentfield-mcp-client", Version: "1.0.0", }, }, diff --git a/control-plane/internal/mcp/storage.go b/control-plane/internal/mcp/storage.go index 2276e450..18544e5b 100644 --- a/control-plane/internal/mcp/storage.go +++ b/control-plane/internal/mcp/storage.go @@ -31,7 +31,7 @@ type ConfigStorage interface { } // YAMLConfigStorage implements ConfigStorage using a YAML file. -// It stores all MCP server configurations in a single haxen.yaml file +// It stores all MCP server configurations in a single agentfield.yaml file // under the dependencies.mcp_servers key. type YAMLConfigStorage struct { ProjectDir string @@ -40,30 +40,30 @@ type YAMLConfigStorage struct { } // NewYAMLConfigStorage creates a new YAMLConfigStorage. -// projectDir is the root directory of the haxen project. +// projectDir is the root directory of the agentfield project. func NewYAMLConfigStorage(projectDir string) *YAMLConfigStorage { return &YAMLConfigStorage{ ProjectDir: projectDir, - filePath: filepath.Join(projectDir, "haxen.yaml"), + filePath: filepath.Join(projectDir, "agentfield.yaml"), } } -// haxenYAML represents the structure of the haxen.yaml file. +// agentfieldYAML represents the structure of the agentfield.yaml file. // We only care about the mcp_servers part for this storage. 
-type haxenYAML struct { +type agentfieldYAML struct { Dependencies struct { MCPServers map[string]*MCPServerConfig `yaml:"mcp_servers,omitempty"` } `yaml:"dependencies,omitempty"` - // Other fields in haxen.yaml are preserved but not directly managed here. + // Other fields in agentfield.yaml are preserved but not directly managed here. OtherFields map[string]interface{} `yaml:",inline"` } -func (s *YAMLConfigStorage) loadHaxenYAML() (*haxenYAML, error) { +func (s *YAMLConfigStorage) loadAgentFieldYAML() (*agentfieldYAML, error) { data, err := os.ReadFile(s.filePath) if err != nil { if os.IsNotExist(err) { - // If haxen.yaml doesn't exist, return an empty structure - return &haxenYAML{ + // If agentfield.yaml doesn't exist, return an empty structure + return &agentfieldYAML{ Dependencies: struct { MCPServers map[string]*MCPServerConfig `yaml:"mcp_servers,omitempty"` }{ @@ -72,24 +72,24 @@ func (s *YAMLConfigStorage) loadHaxenYAML() (*haxenYAML, error) { OtherFields: make(map[string]interface{}), }, nil } - return nil, fmt.Errorf("failed to read haxen.yaml: %w", err) + return nil, fmt.Errorf("failed to read agentfield.yaml: %w", err) } - var cfg haxenYAML + var cfg agentfieldYAML // Initialize maps to avoid nil panics if sections are missing cfg.Dependencies.MCPServers = make(map[string]*MCPServerConfig) cfg.OtherFields = make(map[string]interface{}) if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("failed to unmarshal haxen.yaml: %w", err) + return nil, fmt.Errorf("failed to unmarshal agentfield.yaml: %w", err) } return &cfg, nil } -func (s *YAMLConfigStorage) saveHaxenYAML(cfg *haxenYAML) error { +func (s *YAMLConfigStorage) saveAgentFieldYAML(cfg *agentfieldYAML) error { data, err := yaml.Marshal(cfg) if err != nil { - return fmt.Errorf("failed to marshal haxen.yaml: %w", err) + return fmt.Errorf("failed to marshal agentfield.yaml: %w", err) } return os.WriteFile(s.filePath, data, 0644) } @@ -99,7 +99,7 @@ func (s *YAMLConfigStorage) 
LoadMCPServerConfig(alias string) (*MCPServerConfig, s.mu.RLock() defer s.mu.RUnlock() - cfg, err := s.loadHaxenYAML() + cfg, err := s.loadAgentFieldYAML() if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (s *YAMLConfigStorage) SaveMCPServerConfig(alias string, config *MCPServerC s.mu.Lock() defer s.mu.Unlock() - cfg, err := s.loadHaxenYAML() + cfg, err := s.loadAgentFieldYAML() if err != nil { return err } @@ -126,7 +126,7 @@ func (s *YAMLConfigStorage) SaveMCPServerConfig(alias string, config *MCPServerC } cfg.Dependencies.MCPServers[alias] = config - return s.saveHaxenYAML(cfg) + return s.saveAgentFieldYAML(cfg) } // DeleteMCPServerConfig removes the configuration for a specific MCP server. @@ -134,7 +134,7 @@ func (s *YAMLConfigStorage) DeleteMCPServerConfig(alias string) error { s.mu.Lock() defer s.mu.Unlock() - cfg, err := s.loadHaxenYAML() + cfg, err := s.loadAgentFieldYAML() if err != nil { return err } @@ -144,7 +144,7 @@ func (s *YAMLConfigStorage) DeleteMCPServerConfig(alias string) error { } delete(cfg.Dependencies.MCPServers, alias) - return s.saveHaxenYAML(cfg) + return s.saveAgentFieldYAML(cfg) } // LoadAllMCPServerConfigs retrieves all stored MCP server configurations. 
@@ -152,7 +152,7 @@ func (s *YAMLConfigStorage) LoadAllMCPServerConfigs() (map[string]*MCPServerConf s.mu.RLock() defer s.mu.RUnlock() - cfg, err := s.loadHaxenYAML() + cfg, err := s.loadAgentFieldYAML() if err != nil { return nil, err } @@ -169,12 +169,12 @@ func (s *YAMLConfigStorage) UpdateConfig(alias string, updateFn func(currentConf s.mu.Lock() defer s.mu.Unlock() - cfg, err := s.loadHaxenYAML() + cfg, err := s.loadAgentFieldYAML() if err != nil { return err } - currentConfig, _ := cfg.Dependencies.MCPServers[alias] // currentConfig will be nil if not found + currentConfig := cfg.Dependencies.MCPServers[alias] // currentConfig will be nil if not found newConfig, err := updateFn(currentConfig) if err != nil { @@ -195,7 +195,7 @@ func (s *YAMLConfigStorage) UpdateConfig(alias string, updateFn func(currentConf cfg.Dependencies.MCPServers[alias] = newConfig } - return s.saveHaxenYAML(cfg) + return s.saveAgentFieldYAML(cfg) } // ListMCPServerAliases retrieves a list of all configured MCP server aliases. 
@@ -203,7 +203,7 @@ func (s *YAMLConfigStorage) ListMCPServerAliases() ([]string, error) { s.mu.RLock() defer s.mu.RUnlock() - cfg, err := s.loadHaxenYAML() + cfg, err := s.loadAgentFieldYAML() if err != nil { return nil, err } diff --git a/control-plane/internal/mcp/template.go b/control-plane/internal/mcp/template.go index 91d54f29..3f46c953 100644 --- a/control-plane/internal/mcp/template.go +++ b/control-plane/internal/mcp/template.go @@ -16,13 +16,13 @@ type TemplateProcessor struct { // TemplateVars holds all available template variables type TemplateVars struct { - Port int `json:"port"` - DataDir string `json:"data_dir"` - ConfigFile string `json:"config_file"` - LogFile string `json:"log_file"` - ServerDir string `json:"server_dir"` - ProjectDir string `json:"project_dir"` - Alias string `json:"alias"` + Port int `json:"port"` + DataDir string `json:"data_dir"` + ConfigFile string `json:"config_file"` + LogFile string `json:"log_file"` + ServerDir string `json:"server_dir"` + ProjectDir string `json:"project_dir"` + Alias string `json:"alias"` } // NewTemplateProcessor creates a new template processor @@ -42,7 +42,7 @@ func (tp *TemplateProcessor) ProcessCommand(command string, vars TemplateVars) ( } processed := command - + // Replace all template variables processed = strings.ReplaceAll(processed, "{{port}}", strconv.Itoa(vars.Port)) processed = strings.ReplaceAll(processed, "{{data_dir}}", vars.DataDir) @@ -80,7 +80,7 @@ func (tp *TemplateProcessor) ProcessCommands(commands []string, vars TemplateVar // CreateTemplateVars creates template variables for the given configuration and port func (tp *TemplateProcessor) CreateTemplateVars(config MCPServerConfig, port int) TemplateVars { serverDir := filepath.Join(tp.dataDir, config.Alias) - + return TemplateVars{ Port: port, DataDir: tp.dataDir, diff --git a/control-plane/internal/packages/git.go b/control-plane/internal/packages/git.go index c4e50651..3a431447 100644 --- 
a/control-plane/internal/packages/git.go +++ b/control-plane/internal/packages/git.go @@ -2,6 +2,7 @@ package packages import ( "bytes" + "errors" "fmt" "os" "os/exec" @@ -9,7 +10,7 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" "gopkg.in/yaml.v3" ) @@ -22,8 +23,8 @@ type GitPackageInfo struct { // GitInstaller handles Git package installation from any Git repository type GitInstaller struct { - HaxenHome string - Verbose bool + AgentFieldHome string + Verbose bool } // newSpinner creates a new spinner with the given message @@ -49,8 +50,8 @@ func IsGitURL(url string) bool { // isHTTPSGitURL checks if it's an HTTPS URL that might be a Git repo func isHTTPSGitURL(url string) bool { // Check if it's an HTTPS URL that might be a Git repo - return strings.HasPrefix(url, "https://") && - strings.Contains(url, "/") && + return strings.HasPrefix(url, "https://") && + strings.Contains(url, "/") && !strings.HasSuffix(url, "/") } @@ -59,7 +60,7 @@ func ParseGitURL(url string) (*GitPackageInfo, error) { info := &GitPackageInfo{ URL: url, } - + // Handle URLs with @ for branch/tag specification // e.g., https://github.com/owner/repo@branch // But not SSH URLs like git@github.com:owner/repo.git @@ -83,7 +84,7 @@ func ParseGitURL(url string) (*GitPackageInfo, error) { } else { info.CloneURL = url } - + return info, nil } @@ -92,13 +93,13 @@ func checkGitAvailable() error { if _, err := exec.LookPath("git"); err != nil { return fmt.Errorf("git is required but not found in PATH\n\nPlease install Git:\n β€’ macOS: brew install git\n β€’ Ubuntu: sudo apt-get install git\n β€’ Windows: https://git-scm.com/download/win") } - + // Check git version (optional - ensure modern git) cmd := exec.Command("git", "--version") if err := cmd.Run(); err != nil { return fmt.Errorf("git installation appears to be broken") } - + return nil } @@ -124,26 +125,26 @@ func (gi *GitInstaller) 
InstallFromGit(gitURL string, force bool) error { // 1. Clone repository spinner := gi.newSpinner("Cloning repository") spinner.Start() - + tempDir, err := gi.cloneRepository(info) if err != nil { spinner.Error("Failed to clone repository") return fmt.Errorf("failed to clone repository: %w", err) } defer os.RemoveAll(tempDir) // Always clean up - + spinner.Success("Repository cloned") // 2. Find and validate package structure spinner = gi.newSpinner("Validating package structure") spinner.Start() - + packagePath, err := gi.findPackageRoot(tempDir) if err != nil { spinner.Error("Invalid package structure") return fmt.Errorf("invalid package structure: %w", err) } - + spinner.Success("Package structure validated") // 3. Parse metadata to get package name @@ -154,8 +155,8 @@ func (gi *GitInstaller) InstallFromGit(gitURL string, force bool) error { // 4. Use existing installer for the rest installer := &PackageInstaller{ - HaxenHome: gi.HaxenHome, - Verbose: gi.Verbose, + AgentFieldHome: gi.AgentFieldHome, + Verbose: gi.Verbose, } // Check if already installed @@ -164,8 +165,8 @@ func (gi *GitInstaller) InstallFromGit(gitURL string, force bool) error { } // Install using existing flow - destPath := filepath.Join(gi.HaxenHome, "packages", metadata.Name) - + destPath := filepath.Join(gi.AgentFieldHome, "packages", metadata.Name) + spinner = gi.newSpinner("Setting up environment") spinner.Start() if err := installer.copyPackage(packagePath, destPath); err != nil { @@ -193,11 +194,11 @@ func (gi *GitInstaller) InstallFromGit(gitURL string, force bool) error { logger.Logger.Info().Msgf(" %s %s", Gray("Reference:"), info.Ref) } logger.Logger.Info().Msgf(" %s %s", Gray("Location:"), destPath) - + // Check for required environment variables installer.checkEnvironmentVariables(metadata) - - logger.Logger.Info().Msgf("\n%s %s", Blue("β†’"), Bold(fmt.Sprintf("Run: haxen run %s", metadata.Name))) + + logger.Logger.Info().Msgf("\n%s %s", Blue("β†’"), Bold(fmt.Sprintf("Run: af run 
%s", metadata.Name))) return nil } @@ -205,44 +206,44 @@ func (gi *GitInstaller) InstallFromGit(gitURL string, force bool) error { // cloneRepository clones the Git repository with optimizations func (gi *GitInstaller) cloneRepository(info *GitPackageInfo) (string, error) { // Create temporary directory - tempDir, err := os.MkdirTemp("", "haxen-git-install-") + tempDir, err := os.MkdirTemp("", "agentfield-git-install-") if err != nil { return "", fmt.Errorf("failed to create temp directory: %w", err) } // Build git clone command with optimizations args := []string{"clone"} - + // Shallow clone for efficiency (only latest commit) args = append(args, "--depth", "1") - + // Clone specific branch/tag if specified if info.Ref != "" { args = append(args, "--branch", info.Ref) } - + // Add URLs args = append(args, info.CloneURL, tempDir) // Execute git clone cmd := exec.Command("git", args...) - + // Capture both stdout and stderr for better error messages var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr - + if gi.Verbose { logger.Logger.Info().Msgf("Executing: git %s", strings.Join(args, " ")) } - + if err := cmd.Run(); err != nil { // Clean up temp directory on failure os.RemoveAll(tempDir) - + // Provide helpful error messages based on common failure scenarios stderrStr := stderr.String() - + if strings.Contains(stderrStr, "Authentication failed") || strings.Contains(stderrStr, "authentication failed") { return "", fmt.Errorf("authentication failed - please check your credentials\n\nFor private repositories, you can:\n β€’ Use SSH: git@github.com:owner/repo.git\n β€’ Use token: https://token:your_token@github.com/owner/repo\n β€’ Configure Git credentials: git config --global credential.helper") } @@ -255,14 +256,14 @@ func (gi *GitInstaller) cloneRepository(info *GitPackageInfo) (string, error) { if strings.Contains(stderrStr, "Could not resolve host") { return "", fmt.Errorf("could not resolve host - please check your internet connection and 
the repository URL") } - + return "", fmt.Errorf("git clone failed: %w\nError output: %s", err, stderrStr) } return tempDir, nil } -// findPackageRoot finds the root directory containing haxen-package.yaml +// findPackageRoot finds the root directory containing agentfield-package.yaml func (gi *GitInstaller) findPackageRoot(cloneDir string) (string, error) { var packageRoot string @@ -271,7 +272,7 @@ func (gi *GitInstaller) findPackageRoot(cloneDir string) (string, error) { return err } - if info.Name() == "haxen-package.yaml" { + if info.Name() == "agentfield-package.yaml" { packageRoot = filepath.Dir(path) return filepath.SkipDir // Found it, stop walking } @@ -284,7 +285,7 @@ func (gi *GitInstaller) findPackageRoot(cloneDir string) (string, error) { } if packageRoot == "" { - return "", fmt.Errorf("haxen-package.yaml not found in the repository") + return "", fmt.Errorf("agentfield-package.yaml not found in the repository") } // Also check for main.py @@ -296,18 +297,18 @@ func (gi *GitInstaller) findPackageRoot(cloneDir string) (string, error) { return packageRoot, nil } -// parsePackageMetadata parses the haxen-package.yaml file (reuse from installer.go) +// parsePackageMetadata parses the agentfield-package.yaml file (reuse from installer.go) func (gi *GitInstaller) parsePackageMetadata(packagePath string) (*PackageMetadata, error) { installer := &PackageInstaller{ - HaxenHome: gi.HaxenHome, - Verbose: gi.Verbose, + AgentFieldHome: gi.AgentFieldHome, + Verbose: gi.Verbose, } return installer.parsePackageMetadata(packagePath) } // updateRegistryWithGit updates the installation registry with Git source info func (gi *GitInstaller) updateRegistryWithGit(metadata *PackageMetadata, info *GitPackageInfo, sourcePath, destPath string) error { - registryPath := filepath.Join(gi.HaxenHome, "installed.yaml") + registryPath := filepath.Join(gi.AgentFieldHome, "installed.yaml") // Load existing registry or create new one registry := &InstallationRegistry{ @@ -315,7 
+316,11 @@ func (gi *GitInstaller) updateRegistryWithGit(metadata *PackageMetadata, info *G } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return fmt.Errorf("failed to parse registry %s: %w", registryPath, err) + } + } else if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to read registry %s: %w", registryPath, err) } // Determine source type based on URL @@ -348,7 +353,7 @@ func (gi *GitInstaller) updateRegistryWithGit(metadata *PackageMetadata, info *G Port: nil, PID: nil, StartedAt: nil, - LogFile: filepath.Join(gi.HaxenHome, "logs", metadata.Name+".log"), + LogFile: filepath.Join(gi.AgentFieldHome, "logs", metadata.Name+".log"), }, } diff --git a/control-plane/internal/packages/github.go b/control-plane/internal/packages/github.go index 80c5c7d3..cac23dda 100644 --- a/control-plane/internal/packages/github.go +++ b/control-plane/internal/packages/github.go @@ -2,6 +2,7 @@ package packages import ( "archive/zip" + "errors" "fmt" "io" "net/http" @@ -11,7 +12,7 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" "gopkg.in/yaml.v3" ) @@ -26,8 +27,8 @@ type GitHubPackageInfo struct { // GitHubInstaller handles GitHub package installation type GitHubInstaller struct { - HaxenHome string - Verbose bool + AgentFieldHome string + Verbose bool } // newSpinner creates a new spinner with the given message @@ -131,26 +132,26 @@ func (gi *GitHubInstaller) InstallFromGitHub(githubURL string, force bool) error // 1. 
Download archive spinner := gi.newSpinner("Downloading package from GitHub") spinner.Start() - + tempDir, err := gi.downloadAndExtract(info) if err != nil { spinner.Error("Failed to download package") return fmt.Errorf("failed to download package: %w", err) } defer os.RemoveAll(tempDir) // Clean up temp directory - + spinner.Success("Package downloaded and extracted") // 2. Validate package structure spinner = gi.newSpinner("Validating package structure") spinner.Start() - + packagePath, err := gi.findPackageRoot(tempDir) if err != nil { spinner.Error("Invalid package structure") return fmt.Errorf("invalid package structure: %w", err) } - + spinner.Success("Package structure validated") // 3. Parse metadata to get package name @@ -161,8 +162,8 @@ func (gi *GitHubInstaller) InstallFromGitHub(githubURL string, force bool) error // 4. Use existing installer for the rest installer := &PackageInstaller{ - HaxenHome: gi.HaxenHome, - Verbose: gi.Verbose, + AgentFieldHome: gi.AgentFieldHome, + Verbose: gi.Verbose, } // Check if already installed @@ -171,8 +172,8 @@ func (gi *GitHubInstaller) InstallFromGitHub(githubURL string, force bool) error } // Install using existing flow - destPath := filepath.Join(gi.HaxenHome, "packages", metadata.Name) - + destPath := filepath.Join(gi.AgentFieldHome, "packages", metadata.Name) + spinner = gi.newSpinner("Setting up environment") spinner.Start() if err := installer.copyPackage(packagePath, destPath); err != nil { @@ -197,11 +198,11 @@ func (gi *GitHubInstaller) InstallFromGitHub(githubURL string, force bool) error logger.Logger.Info().Msgf("%s Installed %s v%s from GitHub", Green(StatusSuccess), Bold(metadata.Name), Gray(metadata.Version)) logger.Logger.Info().Msgf(" %s %s", Gray("Source:"), fmt.Sprintf("%s/%s@%s", info.Owner, info.Repo, info.Ref)) logger.Logger.Info().Msgf(" %s %s", Gray("Location:"), destPath) - + // Check for required environment variables installer.checkEnvironmentVariables(metadata) - - 
logger.Logger.Info().Msgf("\n%s %s", Blue("β†’"), Bold(fmt.Sprintf("Run: haxen run %s", metadata.Name))) + + logger.Logger.Info().Msgf("\n%s %s", Blue("β†’"), Bold(fmt.Sprintf("Run: af run %s", metadata.Name))) return nil } @@ -209,7 +210,7 @@ func (gi *GitHubInstaller) InstallFromGitHub(githubURL string, force bool) error // downloadAndExtract downloads and extracts the GitHub archive func (gi *GitHubInstaller) downloadAndExtract(info *GitHubPackageInfo) (string, error) { // Create temporary directory - tempDir, err := os.MkdirTemp("", "haxen-github-install-") + tempDir, err := os.MkdirTemp("", "agentfield-github-install-") if err != nil { return "", fmt.Errorf("failed to create temp directory: %w", err) } @@ -269,14 +270,16 @@ func (gi *GitHubInstaller) extractZip(src, dest string) error { for _, file := range reader.File { path := filepath.Join(dest, file.Name) - + // Security check: ensure path is within destination if !strings.HasPrefix(path, filepath.Clean(dest)+string(os.PathSeparator)) { return fmt.Errorf("invalid file path: %s", file.Name) } if file.FileInfo().IsDir() { - os.MkdirAll(path, file.FileInfo().Mode()) + if err := os.MkdirAll(path, file.FileInfo().Mode()); err != nil { + return err + } continue } @@ -308,7 +311,7 @@ func (gi *GitHubInstaller) extractZip(src, dest string) error { return nil } -// findPackageRoot finds the root directory containing haxen-package.yaml +// findPackageRoot finds the root directory containing agentfield-package.yaml func (gi *GitHubInstaller) findPackageRoot(extractDir string) (string, error) { var packageRoot string @@ -317,7 +320,7 @@ func (gi *GitHubInstaller) findPackageRoot(extractDir string) (string, error) { return err } - if info.Name() == "haxen-package.yaml" { + if info.Name() == "agentfield-package.yaml" { packageRoot = filepath.Dir(path) return filepath.SkipDir // Found it, stop walking } @@ -330,7 +333,7 @@ func (gi *GitHubInstaller) findPackageRoot(extractDir string) (string, error) { } if packageRoot == 
"" { - return "", fmt.Errorf("haxen-package.yaml not found in the repository") + return "", fmt.Errorf("agentfield-package.yaml not found in the repository") } // Also check for main.py @@ -342,18 +345,18 @@ func (gi *GitHubInstaller) findPackageRoot(extractDir string) (string, error) { return packageRoot, nil } -// parsePackageMetadata parses the haxen-package.yaml file (reuse from installer.go) +// parsePackageMetadata parses the agentfield-package.yaml file (reuse from installer.go) func (gi *GitHubInstaller) parsePackageMetadata(packagePath string) (*PackageMetadata, error) { installer := &PackageInstaller{ - HaxenHome: gi.HaxenHome, - Verbose: gi.Verbose, + AgentFieldHome: gi.AgentFieldHome, + Verbose: gi.Verbose, } return installer.parsePackageMetadata(packagePath) } // updateRegistryWithGitHub updates the installation registry with GitHub source info func (gi *GitHubInstaller) updateRegistryWithGitHub(metadata *PackageMetadata, info *GitHubPackageInfo, sourcePath, destPath string) error { - registryPath := filepath.Join(gi.HaxenHome, "installed.yaml") + registryPath := filepath.Join(gi.AgentFieldHome, "installed.yaml") // Load existing registry or create new one registry := &InstallationRegistry{ @@ -361,7 +364,11 @@ func (gi *GitHubInstaller) updateRegistryWithGitHub(metadata *PackageMetadata, i } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return fmt.Errorf("failed to parse registry %s: %w", registryPath, err) + } + } else if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to read registry %s: %w", registryPath, err) } // Add/update package entry with GitHub information @@ -378,7 +385,7 @@ func (gi *GitHubInstaller) updateRegistryWithGitHub(metadata *PackageMetadata, i Port: nil, PID: nil, StartedAt: nil, - LogFile: filepath.Join(gi.HaxenHome, "logs", metadata.Name+".log"), + LogFile: filepath.Join(gi.AgentFieldHome, "logs", 
metadata.Name+".log"), }, } diff --git a/control-plane/internal/packages/installer.go b/control-plane/internal/packages/installer.go index 394ffcdf..939e5af3 100644 --- a/control-plane/internal/packages/installer.go +++ b/control-plane/internal/packages/installer.go @@ -17,10 +17,10 @@ import ( type UserEnvironmentVar struct { Name string `yaml:"name"` Description string `yaml:"description"` - Type string `yaml:"type"` // "string", "secret", "integer", "boolean", "float" + Type string `yaml:"type"` // "string", "secret", "integer", "boolean", "float" Default string `yaml:"default"` Optional bool `yaml:"optional"` - Validation string `yaml:"validation"` // regex pattern + Validation string `yaml:"validation"` // regex pattern } // UserEnvironmentConfig represents user-configurable environment variables @@ -29,7 +29,7 @@ type UserEnvironmentConfig struct { Optional []UserEnvironmentVar `yaml:"optional"` } -// PackageMetadata represents the structure of haxen-package.yaml +// PackageMetadata represents the structure of agentfield-package.yaml type PackageMetadata struct { Name string `yaml:"name"` Version string `yaml:"version"` @@ -96,8 +96,8 @@ type RuntimeInfo struct { // PackageInstaller handles package installation type PackageInstaller struct { - HaxenHome string - Verbose bool + AgentFieldHome string + Verbose bool } // Spinner represents a CLI spinner for progress indication @@ -186,7 +186,7 @@ func (s *Spinner) Error(message string) { func (pi *PackageInstaller) InstallPackage(sourcePath string, force bool) error { // Import the CLI utilities // Note: We'll need to import this properly, but for now let's define local functions - + // Get package name first for better messaging metadata, err := pi.parsePackageMetadata(sourcePath) if err != nil { @@ -210,7 +210,7 @@ func (pi *PackageInstaller) InstallPackage(sourcePath string, force bool) error } // 3. 
Copy package to global location - destPath := filepath.Join(pi.HaxenHome, "packages", metadata.Name) + destPath := filepath.Join(pi.AgentFieldHome, "packages", metadata.Name) spinner = pi.newSpinner("Setting up environment") spinner.Start() if err := pi.copyPackage(sourcePath, destPath); err != nil { @@ -235,11 +235,11 @@ func (pi *PackageInstaller) InstallPackage(sourcePath string, force bool) error fmt.Printf("%s Installed %s v%s\n", Green(StatusSuccess), Bold(metadata.Name), Gray(metadata.Version)) fmt.Printf(" %s %s\n", Gray("Location:"), destPath) - + // 6. Check for required environment variables and provide guidance pi.checkEnvironmentVariables(metadata) - - fmt.Printf("\n%s %s\n", Blue("β†’"), Bold(fmt.Sprintf("Run: haxen run %s", metadata.Name))) + + fmt.Printf("\n%s %s\n", Blue("β†’"), Bold(fmt.Sprintf("Run: af run %s", metadata.Name))) return nil } @@ -261,7 +261,7 @@ func (pi *PackageInstaller) checkEnvironmentVariables(metadata *PackageMetadata) if len(missingRequired) > 0 { fmt.Printf("\n%s %s\n", Yellow("⚠"), Bold("Missing required environment variables:")) for _, envVar := range missingRequired { - fmt.Printf(" %s\n", Cyan(fmt.Sprintf("haxen config %s --set %s=your-value-here", metadata.Name, envVar.Name))) + fmt.Printf(" %s\n", Cyan(fmt.Sprintf("af config %s --set %s=your-value-here", metadata.Name, envVar.Name))) } } @@ -281,8 +281,8 @@ func (pi *PackageInstaller) checkEnvironmentVariables(metadata *PackageMetadata) // PackageUninstaller handles package uninstallation type PackageUninstaller struct { - HaxenHome string - Force bool + AgentFieldHome string + Force bool } // UninstallPackage removes an installed package @@ -357,7 +357,7 @@ func (pu *PackageUninstaller) stopAgentNode(agentNode *InstalledPackage) error { // loadRegistry loads the installation registry func (pu *PackageUninstaller) loadRegistry() (*InstallationRegistry, error) { - registryPath := filepath.Join(pu.HaxenHome, "installed.yaml") + registryPath := 
filepath.Join(pu.AgentFieldHome, "installed.yaml") registry := &InstallationRegistry{ Installed: make(map[string]InstalledPackage), @@ -374,7 +374,7 @@ func (pu *PackageUninstaller) loadRegistry() (*InstallationRegistry, error) { // saveRegistry saves the installation registry func (pu *PackageUninstaller) saveRegistry(registry *InstallationRegistry) error { - registryPath := filepath.Join(pu.HaxenHome, "installed.yaml") + registryPath := filepath.Join(pu.AgentFieldHome, "installed.yaml") data, err := yaml.Marshal(registry) if err != nil { @@ -390,10 +390,10 @@ func (pu *PackageUninstaller) saveRegistry(registry *InstallationRegistry) error // validatePackage checks if the package has required files func (pi *PackageInstaller) validatePackage(sourcePath string) error { - // Check if haxen-package.yaml exists - packageYamlPath := filepath.Join(sourcePath, "haxen-package.yaml") + // Check if agentfield-package.yaml exists + packageYamlPath := filepath.Join(sourcePath, "agentfield-package.yaml") if _, err := os.Stat(packageYamlPath); os.IsNotExist(err) { - return fmt.Errorf("haxen-package.yaml not found in %s", sourcePath) + return fmt.Errorf("agentfield-package.yaml not found in %s", sourcePath) } // Check if main.py exists @@ -405,26 +405,26 @@ func (pi *PackageInstaller) validatePackage(sourcePath string) error { return nil } -// parsePackageMetadata parses the haxen-package.yaml file +// parsePackageMetadata parses the agentfield-package.yaml file func (pi *PackageInstaller) parsePackageMetadata(sourcePath string) (*PackageMetadata, error) { - packageYamlPath := filepath.Join(sourcePath, "haxen-package.yaml") + packageYamlPath := filepath.Join(sourcePath, "agentfield-package.yaml") data, err := os.ReadFile(packageYamlPath) if err != nil { - return nil, fmt.Errorf("failed to read haxen-package.yaml: %w", err) + return nil, fmt.Errorf("failed to read agentfield-package.yaml: %w", err) } var metadata PackageMetadata if err := yaml.Unmarshal(data, &metadata); err != 
nil { - return nil, fmt.Errorf("failed to parse haxen-package.yaml: %w", err) + return nil, fmt.Errorf("failed to parse agentfield-package.yaml: %w", err) } // Validate required fields if metadata.Name == "" { - return nil, fmt.Errorf("package name is required in haxen-package.yaml") + return nil, fmt.Errorf("package name is required in agentfield-package.yaml") } if metadata.Version == "" { - return nil, fmt.Errorf("package version is required in haxen-package.yaml") + return nil, fmt.Errorf("package version is required in agentfield-package.yaml") } if metadata.Main == "" { metadata.Main = "main.py" // Default @@ -435,13 +435,15 @@ func (pi *PackageInstaller) parsePackageMetadata(sourcePath string) (*PackageMet // isPackageInstalled checks if a package is already installed func (pi *PackageInstaller) isPackageInstalled(packageName string) bool { - registryPath := filepath.Join(pi.HaxenHome, "installed.yaml") + registryPath := filepath.Join(pi.AgentFieldHome, "installed.yaml") registry := &InstallationRegistry{ Installed: make(map[string]InstalledPackage), } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return false + } } _, exists := registry.Installed[packageName] @@ -507,7 +509,7 @@ func (pi *PackageInstaller) installDependencies(packagePath string, metadata *Pa if len(metadata.Dependencies.Python) > 0 || pi.hasRequirementsFile(packagePath) { // Create virtual environment venvPath := filepath.Join(packagePath, "venv") - + cmd := exec.Command("python3", "-m", "venv", venvPath) if _, err := cmd.CombinedOutput(); err != nil { // Try with python if python3 fails @@ -525,11 +527,9 @@ func (pi *PackageInstaller) installDependencies(packagePath string, metadata *Pa pipPath = filepath.Join(venvPath, "Scripts", "pip.exe") // Windows } - // Upgrade pip first + // Upgrade pip first (ignore failures) cmd = exec.Command(pipPath, "install", "--upgrade", "pip") - if _, err := 
cmd.CombinedOutput(); err != nil { - // Ignore pip upgrade failures - } + _, _ = cmd.CombinedOutput() // Install from requirements.txt if it exists requirementsPath := filepath.Join(packagePath, "requirements.txt") @@ -541,7 +541,7 @@ func (pi *PackageInstaller) installDependencies(packagePath string, metadata *Pa } } - // Install dependencies from haxen-package.yaml + // Install dependencies from agentfield-package.yaml if len(metadata.Dependencies.Python) > 0 { for _, dep := range metadata.Dependencies.Python { cmd = exec.Command(pipPath, "install", dep) @@ -570,7 +570,7 @@ func (pi *PackageInstaller) hasRequirementsFile(packagePath string) bool { // updateRegistry updates the installation registry with the new package func (pi *PackageInstaller) updateRegistry(metadata *PackageMetadata, sourcePath, destPath string) error { - registryPath := filepath.Join(pi.HaxenHome, "installed.yaml") + registryPath := filepath.Join(pi.AgentFieldHome, "installed.yaml") // Load existing registry or create new one registry := &InstallationRegistry{ @@ -578,11 +578,13 @@ func (pi *PackageInstaller) updateRegistry(metadata *PackageMetadata, sourcePath } if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return fmt.Errorf("failed to parse registry: %w", err) + } } // Ensure logs directory exists before setting LogFile path - logsDir := filepath.Join(pi.HaxenHome, "logs") + logsDir := filepath.Join(pi.AgentFieldHome, "logs") if err := os.MkdirAll(logsDir, 0755); err != nil { return fmt.Errorf("failed to create logs directory: %w", err) } @@ -602,7 +604,7 @@ func (pi *PackageInstaller) updateRegistry(metadata *PackageMetadata, sourcePath Port: nil, PID: nil, StartedAt: nil, - LogFile: filepath.Join(pi.HaxenHome, "logs", metadata.Name+".log"), + LogFile: filepath.Join(pi.AgentFieldHome, "logs", metadata.Name+".log"), }, } diff --git a/control-plane/internal/packages/runner.go 
b/control-plane/internal/packages/runner.go index 193a2d94..5698c89f 100644 --- a/control-plane/internal/packages/runner.go +++ b/control-plane/internal/packages/runner.go @@ -2,6 +2,7 @@ package packages import ( "encoding/json" + "errors" "fmt" "net" "net/http" @@ -16,12 +17,11 @@ import ( // AgentNodeRunner handles running agent nodes type AgentNodeRunner struct { - HaxenHome string - Port int - Detach bool + AgentFieldHome string + Port int + Detach bool } - // RunAgentNode starts an installed agent node func (ar *AgentNodeRunner) RunAgentNode(agentNodeName string) error { fmt.Printf("πŸš€ Launching agent node: %s\n", agentNodeName) @@ -63,11 +63,13 @@ func (ar *AgentNodeRunner) RunAgentNode(agentNodeName string) error { // 5. Wait for agent node to be ready if err := ar.waitForAgentNode(port, 10*time.Second); err != nil { - cmd.Process.Kill() + if killErr := cmd.Process.Kill(); killErr != nil && !errors.Is(killErr, os.ErrProcessDone) { + fmt.Printf("⚠️ Failed to kill agent node process: %v\n", killErr) + } return fmt.Errorf("agent node failed to start: %w", err) } - fmt.Printf("🧠 Agent node registered with Haxen Server\n") + fmt.Printf("🧠 Agent node registered with AgentField Server\n") // 6. 
Update registry with runtime info if err := ar.updateRuntimeInfo(agentNodeName, port, cmd.Process.Pid); err != nil { @@ -80,8 +82,8 @@ func (ar *AgentNodeRunner) RunAgentNode(agentNodeName string) error { } fmt.Printf("\nπŸ’‘ Agent node running in background (PID: %d)\n", cmd.Process.Pid) - fmt.Printf("πŸ’‘ View logs: haxen logs %s\n", agentNodeName) - fmt.Printf("πŸ’‘ Stop agent node: haxen stop %s\n", agentNodeName) + fmt.Printf("πŸ’‘ View logs: af logs %s\n", agentNodeName) + fmt.Printf("πŸ’‘ Stop agent node: af stop %s\n", agentNodeName) return nil } @@ -111,7 +113,7 @@ func (ar *AgentNodeRunner) startAgentNodeProcess(agentNode InstalledPackage, por // Prepare environment variables env := os.Environ() env = append(env, fmt.Sprintf("PORT=%d", port)) - env = append(env, "HAXEN_SERVER_URL=http://localhost:8080") + env = append(env, "AGENTFIELD_SERVER_URL=http://localhost:8080") // Load environment variables from package .env file if envVars, err := ar.loadPackageEnvFile(agentNode.Path); err == nil { @@ -124,7 +126,7 @@ func (ar *AgentNodeRunner) startAgentNodeProcess(agentNode InstalledPackage, por // Prepare command - use virtual environment if available var pythonPath string venvPath := filepath.Join(agentNode.Path, "venv") - + // Check if virtual environment exists if _, err := os.Stat(filepath.Join(venvPath, "bin", "python")); err == nil { pythonPath = filepath.Join(venvPath, "bin", "python") @@ -137,7 +139,7 @@ func (ar *AgentNodeRunner) startAgentNodeProcess(agentNode InstalledPackage, por pythonPath = "python" fmt.Printf("⚠️ Virtual environment not found, using system Python\n") } - + cmd := exec.Command(pythonPath, "main.py") cmd.Dir = agentNode.Path cmd.Env = env @@ -243,12 +245,14 @@ func (ar *AgentNodeRunner) displayCapabilities(agentNode InstalledPackage, port // updateRuntimeInfo updates the registry with runtime information func (ar *AgentNodeRunner) updateRuntimeInfo(agentNodeName string, port, pid int) error { - registryPath := 
filepath.Join(ar.HaxenHome, "installed.yaml") + registryPath := filepath.Join(ar.AgentFieldHome, "installed.yaml") // Load registry registry := &InstallationRegistry{} if data, err := os.ReadFile(registryPath); err == nil { - yaml.Unmarshal(data, registry) + if err := yaml.Unmarshal(data, registry); err != nil { + return fmt.Errorf("failed to parse registry: %w", err) + } } // Update runtime info @@ -272,7 +276,7 @@ func (ar *AgentNodeRunner) updateRuntimeInfo(agentNodeName string, port, pid int // loadRegistry loads the installation registry func (ar *AgentNodeRunner) loadRegistry() (*InstallationRegistry, error) { - registryPath := filepath.Join(ar.HaxenHome, "installed.yaml") + registryPath := filepath.Join(ar.AgentFieldHome, "installed.yaml") registry := &InstallationRegistry{ Installed: make(map[string]InstalledPackage), @@ -290,7 +294,7 @@ func (ar *AgentNodeRunner) loadRegistry() (*InstallationRegistry, error) { // loadPackageEnvFile loads environment variables from package .env file func (ar *AgentNodeRunner) loadPackageEnvFile(packagePath string) (map[string]string, error) { envPath := filepath.Join(packagePath, ".env") - + data, err := os.ReadFile(envPath) if err != nil { return nil, err @@ -298,24 +302,24 @@ func (ar *AgentNodeRunner) loadPackageEnvFile(packagePath string) (map[string]st envVars := make(map[string]string) lines := strings.Split(string(data), "\n") - + for _, line := range lines { line = strings.TrimSpace(line) if line == "" || strings.HasPrefix(line, "#") { continue } - + parts := strings.SplitN(line, "=", 2) if len(parts) == 2 { key := strings.TrimSpace(parts[0]) value := strings.TrimSpace(parts[1]) - + // Remove quotes if present if (strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"")) || - (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { + (strings.HasPrefix(value, "'") && strings.HasSuffix(value, "'")) { value = value[1 : len(value)-1] } - + envVars[key] = value } } diff --git 
a/control-plane/internal/server/package_sync.go b/control-plane/internal/server/package_sync.go index f05a577e..34d6e240 100644 --- a/control-plane/internal/server/package_sync.go +++ b/control-plane/internal/server/package_sync.go @@ -1,8 +1,6 @@ package server import ( - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" "context" "encoding/json" "fmt" @@ -11,6 +9,9 @@ import ( "sync" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + "github.com/fsnotify/fsnotify" "gopkg.in/yaml.v3" ) @@ -47,9 +48,9 @@ type InstalledPackage struct { } // SyncPackagesFromRegistry ensures all packages in installed.yaml are present in the database. -func SyncPackagesFromRegistry(haxenHome string, storageProvider packageStorage) error { +func SyncPackagesFromRegistry(agentfieldHome string, storageProvider packageStorage) error { ctx := context.Background() - registryPath := filepath.Join(haxenHome, "installed.yaml") + registryPath := filepath.Join(agentfieldHome, "installed.yaml") data, err := os.ReadFile(registryPath) if err != nil { return nil // No registry, nothing to sync @@ -64,8 +65,8 @@ func SyncPackagesFromRegistry(haxenHome string, storageProvider packageStorage) if err == nil { continue // Already present } - // Load haxen-package.yaml - packageYamlPath := filepath.Join(pkg.Path, "haxen-package.yaml") + // Load agentfield-package.yaml + packageYamlPath := filepath.Join(pkg.Path, "agentfield-package.yaml") packageYamlData, err := os.ReadFile(packageYamlPath) if err != nil { continue // Skip if missing @@ -95,13 +96,13 @@ func SyncPackagesFromRegistry(haxenHome string, storageProvider packageStorage) } // StartPackageRegistryWatcher watches the installed.yaml registry and keeps storage in sync. 
-func StartPackageRegistryWatcher(parentCtx context.Context, haxenHome string, storageProvider packageStorage) (context.CancelFunc, error) { +func StartPackageRegistryWatcher(parentCtx context.Context, agentfieldHome string, storageProvider packageStorage) (context.CancelFunc, error) { watcher, err := fsnotify.NewWatcher() if err != nil { return nil, fmt.Errorf("failed to create registry watcher: %w", err) } - registryDir := haxenHome + registryDir := agentfieldHome if err := watcher.Add(registryDir); err != nil { watcher.Close() return nil, fmt.Errorf("failed to watch registry directory %s: %w", registryDir, err) @@ -122,7 +123,7 @@ func StartPackageRegistryWatcher(parentCtx context.Context, haxenHome string, st go func() { defer watcher.Close() defer close(syncCh) - registryFile := filepath.Join(haxenHome, "installed.yaml") + registryFile := filepath.Join(agentfieldHome, "installed.yaml") for { select { case event, ok := <-watcher.Events: @@ -162,7 +163,7 @@ func StartPackageRegistryWatcher(parentCtx context.Context, haxenHome string, st return } time.Sleep(250 * time.Millisecond) - if err := SyncPackagesFromRegistry(haxenHome, storageProvider); err != nil { + if err := SyncPackagesFromRegistry(agentfieldHome, storageProvider); err != nil { logger.Logger.Error().Err(err).Msg("failed to sync packages from registry") } else { logger.Logger.Debug().Msg("registry sync completed") diff --git a/control-plane/internal/server/package_sync_test.go b/control-plane/internal/server/package_sync_test.go index e446190e..8737e41a 100644 --- a/control-plane/internal/server/package_sync_test.go +++ b/control-plane/internal/server/package_sync_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -14,8 +14,8 @@ import ( func TestSyncPackagesFromRegistryStoresMissingPackages(t *testing.T) { t.Parallel() - haxenHome := t.TempDir() 
- pkgDir := filepath.Join(haxenHome, "example-agent") + agentfieldHome := t.TempDir() + pkgDir := filepath.Join(agentfieldHome, "example-agent") require.NoError(t, os.MkdirAll(pkgDir, 0o755)) installed := `installed: @@ -27,17 +27,17 @@ func TestSyncPackagesFromRegistryStoresMissingPackages(t *testing.T) { source: local status: installed ` - require.NoError(t, os.WriteFile(filepath.Join(haxenHome, "installed.yaml"), []byte(installed), 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(agentfieldHome, "installed.yaml"), []byte(installed), 0o644)) packageYAML := `name: Example Agent version: 1.0.0 schema: type: object ` - require.NoError(t, os.WriteFile(filepath.Join(pkgDir, "haxen-package.yaml"), []byte(packageYAML), 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(pkgDir, "agentfield-package.yaml"), []byte(packageYAML), 0o644)) storage := newStubPackageStorage() - require.NoError(t, SyncPackagesFromRegistry(haxenHome, storage)) + require.NoError(t, SyncPackagesFromRegistry(agentfieldHome, storage)) pkg, ok := storage.packages["example-agent"] require.True(t, ok) @@ -48,20 +48,20 @@ schema: func TestSyncPackagesSkipsExistingEntries(t *testing.T) { t.Parallel() - haxenHome := t.TempDir() + agentfieldHome := t.TempDir() installed := `installed: existing-agent: name: Existing version: 0.1.0 description: already present - path: ` + haxenHome + ` + path: ` + agentfieldHome + ` ` - require.NoError(t, os.WriteFile(filepath.Join(haxenHome, "installed.yaml"), []byte(installed), 0o644)) + require.NoError(t, os.WriteFile(filepath.Join(agentfieldHome, "installed.yaml"), []byte(installed), 0o644)) storage := newStubPackageStorage() storage.packages["existing-agent"] = &types.AgentPackage{ID: "existing-agent", Name: "Existing", InstalledAt: time.Now()} - require.NoError(t, SyncPackagesFromRegistry(haxenHome, storage)) + require.NoError(t, SyncPackagesFromRegistry(agentfieldHome, storage)) require.Len(t, storage.packages, 1) } diff --git 
a/control-plane/internal/server/server.go b/control-plane/internal/server/server.go index 4717b70f..c41a69af 100644 --- a/control-plane/internal/server/server.go +++ b/control-plane/internal/server/server.go @@ -1,22 +1,6 @@ package server import ( - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - coreservices "github.com/your-org/haxen/control-plane/internal/core/services" // Core services - "github.com/your-org/haxen/control-plane/internal/events" // Event system - "github.com/your-org/haxen/control-plane/internal/handlers" // Agent handlers - "github.com/your-org/haxen/control-plane/internal/handlers/ui" // UI handlers - "github.com/your-org/haxen/control-plane/internal/infrastructure/communication" - "github.com/your-org/haxen/control-plane/internal/infrastructure/process" - infrastorage "github.com/your-org/haxen/control-plane/internal/infrastructure/storage" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/services" // Services - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/internal/utils" - "github.com/your-org/haxen/control-plane/pkg/adminpb" - "github.com/your-org/haxen/control-plane/pkg/types" - client "github.com/your-org/haxen/control-plane/web/client" "context" "crypto/sha256" "encoding/hex" @@ -30,6 +14,23 @@ import ( "strings" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + coreservices "github.com/Agent-Field/agentfield/control-plane/internal/core/services" // Core services + "github.com/Agent-Field/agentfield/control-plane/internal/events" // Event system + "github.com/Agent-Field/agentfield/control-plane/internal/handlers" // Agent handlers + "github.com/Agent-Field/agentfield/control-plane/internal/handlers/ui" // UI handlers + 
"github.com/Agent-Field/agentfield/control-plane/internal/infrastructure/communication" + "github.com/Agent-Field/agentfield/control-plane/internal/infrastructure/process" + infrastorage "github.com/Agent-Field/agentfield/control-plane/internal/infrastructure/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/services" // Services + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/utils" + "github.com/Agent-Field/agentfield/control-plane/pkg/adminpb" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + client "github.com/Agent-Field/agentfield/control-plane/web/client" + "github.com/gin-contrib/cors" // CORS middleware "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -38,8 +39,8 @@ import ( "google.golang.org/grpc/status" ) -// HaxenServer represents the core Haxen orchestration service. -type HaxenServer struct { +// AgentFieldServer represents the core AgentField orchestration service. +type AgentFieldServer struct { adminpb.UnimplementedAdminReasonerServiceServer storage storage.StorageProvider cache storage.CacheProvider @@ -59,7 +60,7 @@ type HaxenServer struct { didService *services.DIDService vcService *services.VCService didRegistry *services.DIDRegistry - haxenHome string + agentfieldHome string // Cleanup service cleanupService *handlers.ExecutionCleanupService payloadStore services.PayloadStore @@ -70,16 +71,16 @@ type HaxenServer struct { webhookDispatcher services.WebhookDispatcher } -// NewHaxenServer creates a new instance of the HaxenServer. -func NewHaxenServer(cfg *config.Config) (*HaxenServer, error) { - // Define haxenHome at the very top - haxenHome := os.Getenv("HAXEN_HOME") - if haxenHome == "" { +// NewAgentFieldServer creates a new instance of the AgentFieldServer. 
+func NewAgentFieldServer(cfg *config.Config) (*AgentFieldServer, error) { + // Define agentfieldHome at the very top + agentfieldHome := os.Getenv("AGENTFIELD_HOME") + if agentfieldHome == "" { homeDir, err := os.UserHomeDir() if err != nil { return nil, err } - haxenHome = filepath.Join(homeDir, ".haxen") + agentfieldHome = filepath.Join(homeDir, ".agentfield") } dirs, err := utils.EnsureDataDirectories() @@ -96,20 +97,20 @@ func NewHaxenServer(cfg *config.Config) (*HaxenServer, error) { Router := gin.Default() // Sync installed.yaml to database for package visibility - _ = SyncPackagesFromRegistry(haxenHome, storageProvider) + _ = SyncPackagesFromRegistry(agentfieldHome, storageProvider) // Initialize agent client for communication with agent nodes agentClient := communication.NewHTTPAgentClient(storageProvider, 5*time.Second) // Create infrastructure components for AgentService fileSystem := infrastorage.NewFileSystemAdapter() - registryPath := filepath.Join(haxenHome, "installed.json") + registryPath := filepath.Join(agentfieldHome, "installed.json") registryStorage := infrastorage.NewLocalRegistryStorage(fileSystem, registryPath) processManager := process.NewProcessManager() portManager := process.NewPortManager() // Create AgentService - agentService := coreservices.NewAgentService(processManager, portManager, registryStorage, agentClient, haxenHome) + agentService := coreservices.NewAgentService(processManager, portManager, registryStorage, agentClient, agentfieldHome) // Initialize StatusManager for unified status management statusManagerConfig := services.StatusManagerConfig{ @@ -189,25 +190,25 @@ func NewHaxenServer(cfg *config.Config) (*HaxenServer, error) { return nil, fmt.Errorf("failed to initialize VC service: %w", err) } - // Generate haxen server ID based on haxen home directory - haxenServerID := generateHaxenServerID(haxenHome) + // Generate af server ID based on agentfield home directory + agentfieldServerID := 
generateAgentFieldServerID(agentfieldHome) - // Initialize haxen server DID with dynamic ID - fmt.Printf("🧠 Initializing haxen server DID (ID: %s)...\n", haxenServerID) - if err := didService.Initialize(haxenServerID); err != nil { - return nil, fmt.Errorf("failed to initialize haxen server DID: %w", err) + // Initialize af server DID with dynamic ID + fmt.Printf("🧠 Initializing af server DID (ID: %s)...\n", agentfieldServerID) + if err := didService.Initialize(agentfieldServerID); err != nil { + return nil, fmt.Errorf("failed to initialize af server DID: %w", err) } - // Validate that haxen server DID was successfully created - registry, err := didService.GetRegistry(haxenServerID) + // Validate that af server DID was successfully created + registry, err := didService.GetRegistry(agentfieldServerID) if err != nil { - return nil, fmt.Errorf("failed to validate haxen server DID creation: %w", err) + return nil, fmt.Errorf("failed to validate af server DID creation: %w", err) } if registry == nil || registry.RootDID == "" { - return nil, fmt.Errorf("haxen server DID validation failed: registry or root DID is empty") + return nil, fmt.Errorf("af server DID validation failed: registry or root DID is empty") } - fmt.Printf("βœ… Haxen server DID created successfully: %s\n", registry.RootDID) + fmt.Printf("βœ… AgentField server DID created successfully: %s\n", registry.RootDID) // Backfill existing nodes with DIDs fmt.Println("πŸ”„ Starting DID backfill for existing nodes...") @@ -224,28 +225,28 @@ func NewHaxenServer(cfg *config.Config) (*HaxenServer, error) { payloadStore := services.NewFilePayloadStore(dirs.PayloadsDir) webhookDispatcher := services.NewWebhookDispatcher(storageProvider, services.WebhookDispatcherConfig{ - Timeout: cfg.Haxen.ExecutionQueue.WebhookTimeout, - MaxAttempts: cfg.Haxen.ExecutionQueue.WebhookMaxAttempts, - RetryBackoff: cfg.Haxen.ExecutionQueue.WebhookRetryBackoff, - MaxRetryBackoff: cfg.Haxen.ExecutionQueue.WebhookMaxRetryBackoff, + Timeout: 
cfg.AgentField.ExecutionQueue.WebhookTimeout, + MaxAttempts: cfg.AgentField.ExecutionQueue.WebhookMaxAttempts, + RetryBackoff: cfg.AgentField.ExecutionQueue.WebhookRetryBackoff, + MaxRetryBackoff: cfg.AgentField.ExecutionQueue.WebhookMaxRetryBackoff, }) if err := webhookDispatcher.Start(context.Background()); err != nil { logger.Logger.Warn().Err(err).Msg("failed to start webhook dispatcher") } // Initialize execution cleanup service - cleanupService := handlers.NewExecutionCleanupService(storageProvider, cfg.Haxen.ExecutionCleanup) + cleanupService := handlers.NewExecutionCleanupService(storageProvider, cfg.AgentField.ExecutionCleanup) - adminPort := cfg.Haxen.Port + 100 - if envPort := os.Getenv("HAXEN_ADMIN_GRPC_PORT"); envPort != "" { + adminPort := cfg.AgentField.Port + 100 + if envPort := os.Getenv("AGENTFIELD_ADMIN_GRPC_PORT"); envPort != "" { if parsedPort, parseErr := strconv.Atoi(envPort); parseErr == nil { adminPort = parsedPort } else { - logger.Logger.Warn().Err(parseErr).Str("value", envPort).Msg("invalid HAXEN_ADMIN_GRPC_PORT, using default offset") + logger.Logger.Warn().Err(parseErr).Str("value", envPort).Msg("invalid AGENTFIELD_ADMIN_GRPC_PORT, using default offset") } } - return &HaxenServer{ + return &AgentFieldServer{ storage: storageProvider, cache: cacheProvider, Router: Router, @@ -261,7 +262,7 @@ func NewHaxenServer(cfg *config.Config) (*HaxenServer, error) { didService: didService, vcService: vcService, didRegistry: didRegistry, - haxenHome: haxenHome, + agentfieldHome: agentfieldHome, cleanupService: cleanupService, payloadStore: payloadStore, webhookDispatcher: webhookDispatcher, @@ -270,8 +271,8 @@ func NewHaxenServer(cfg *config.Config) (*HaxenServer, error) { }, nil } -// Start initializes and starts the HaxenServer. -func (s *HaxenServer) Start() error { +// Start initializes and starts the AgentFieldServer. 
+func (s *AgentFieldServer) Start() error { // Setup routes s.setupRoutes() @@ -299,7 +300,7 @@ func (s *HaxenServer) Start() error { events.StartNodeHeartbeat(30 * time.Second) if s.registryWatcherCancel == nil { - cancel, err := StartPackageRegistryWatcher(context.Background(), s.haxenHome, s.storage) + cancel, err := StartPackageRegistryWatcher(context.Background(), s.agentfieldHome, s.storage) if err != nil { logger.Logger.Error().Err(err).Msg("failed to start package registry watcher") } else { @@ -313,10 +314,10 @@ func (s *HaxenServer) Start() error { // TODO: Implement WebSocket, gRPC // Start HTTP server - return s.Router.Run(":" + strconv.Itoa(s.config.Haxen.Port)) + return s.Router.Run(":" + strconv.Itoa(s.config.AgentField.Port)) } -func (s *HaxenServer) startAdminGRPCServer() error { +func (s *AgentFieldServer) startAdminGRPCServer() error { if s.adminGRPCServer != nil { return nil } @@ -341,7 +342,7 @@ func (s *HaxenServer) startAdminGRPCServer() error { } // ListReasoners implements the admin gRPC surface for listing registered reasoners. -func (s *HaxenServer) ListReasoners(ctx context.Context, _ *adminpb.ListReasonersRequest) (*adminpb.ListReasonersResponse, error) { +func (s *AgentFieldServer) ListReasoners(ctx context.Context, _ *adminpb.ListReasonersRequest) (*adminpb.ListReasonersResponse, error) { nodes, err := s.storage.ListAgents(ctx, types.AgentFilters{}) if err != nil { return nil, status.Errorf(codes.Internal, "failed to list agent nodes: %v", err) @@ -368,8 +369,8 @@ func (s *HaxenServer) ListReasoners(ctx context.Context, _ *adminpb.ListReasoner return resp, nil } -// Stop gracefully shuts down the HaxenServer. -func (s *HaxenServer) Stop() error { +// Stop gracefully shuts down the AgentFieldServer. 
+func (s *AgentFieldServer) Stop() error { if s.adminGRPCServer != nil { s.adminGRPCServer.GracefulStop() } @@ -411,7 +412,7 @@ func (s *HaxenServer) Stop() error { } // unregisterAgentFromMonitoring removes an agent from health monitoring -func (s *HaxenServer) unregisterAgentFromMonitoring(c *gin.Context) { +func (s *AgentFieldServer) unregisterAgentFromMonitoring(c *gin.Context) { nodeID := c.Param("node_id") if nodeID == "" { c.JSON(http.StatusBadRequest, gin.H{"error": "node_id is required"}) @@ -430,7 +431,7 @@ func (s *HaxenServer) unregisterAgentFromMonitoring(c *gin.Context) { } // healthCheckHandler provides comprehensive health check for container orchestration -func (s *HaxenServer) healthCheckHandler(c *gin.Context) { +func (s *AgentFieldServer) healthCheckHandler(c *gin.Context) { ctx, cancel := context.WithTimeout(c.Request.Context(), 10*time.Second) defer cancel() @@ -484,7 +485,7 @@ func (s *HaxenServer) healthCheckHandler(c *gin.Context) { } // checkStorageHealth performs storage-specific health checks -func (s *HaxenServer) checkStorageHealth(ctx context.Context) gin.H { +func (s *AgentFieldServer) checkStorageHealth(ctx context.Context) gin.H { if s.storageHealthOverride != nil { return s.storageHealthOverride(ctx) } @@ -507,7 +508,7 @@ func (s *HaxenServer) checkStorageHealth(ctx context.Context) gin.H { } // checkCacheHealth performs cache-specific health checks -func (s *HaxenServer) checkCacheHealth(ctx context.Context) gin.H { +func (s *AgentFieldServer) checkCacheHealth(ctx context.Context) gin.H { if s.cacheHealthOverride != nil { return s.cacheHealthOverride(ctx) } @@ -538,7 +539,13 @@ func (s *HaxenServer) checkCacheHealth(ctx context.Context) gin.H { } // Clean up - s.cache.Delete(testKey) + if err := s.cache.Delete(testKey); err != nil { + return gin.H{ + "status": "unhealthy", + "message": fmt.Sprintf("cache delete operation failed: %v", err), + "response_time": time.Since(startTime).Milliseconds(), + } + } return gin.H{ "status": 
"healthy", @@ -547,7 +554,7 @@ func (s *HaxenServer) checkCacheHealth(ctx context.Context) gin.H { } } -func (s *HaxenServer) setupRoutes() { +func (s *AgentFieldServer) setupRoutes() { // Configure CORS from configuration corsConfig := cors.Config{ AllowOrigins: s.config.API.CORS.AllowedOrigins, @@ -613,7 +620,7 @@ func (s *HaxenServer) setupRoutes() { // Get the executable path and find UI dist relative to it execPath, err := os.Executable() if err != nil { - distPath = filepath.Join("apps", "platform", "haxen", "web", "client", "dist") + distPath = filepath.Join("apps", "platform", "agentfield", "web", "client", "dist") if _, statErr := os.Stat(distPath); os.IsNotExist(statErr) { distPath = filepath.Join("web", "client", "dist") } @@ -622,14 +629,14 @@ func (s *HaxenServer) setupRoutes() { // Look for web/client/dist relative to the executable directory distPath = filepath.Join(execDir, "web", "client", "dist") - // If that doesn't exist, try going up one level (if binary is in apps/platform/haxen/) + // If that doesn't exist, try going up one level (if binary is in apps/platform/agentfield/) if _, err := os.Stat(distPath); os.IsNotExist(err) { - distPath = filepath.Join(filepath.Dir(execDir), "apps", "platform", "haxen", "web", "client", "dist") + distPath = filepath.Join(filepath.Dir(execDir), "apps", "platform", "agentfield", "web", "client", "dist") } // Final fallback to current working directory if _, err := os.Stat(distPath); os.IsNotExist(err) { - altPath := filepath.Join("apps", "platform", "haxen", "web", "client", "dist") + altPath := filepath.Join("apps", "platform", "agentfield", "web", "client", "dist") if _, altErr := os.Stat(altPath); altErr == nil { distPath = altPath } else { @@ -684,7 +691,7 @@ func (s *HaxenServer) setupRoutes() { agents.POST("/:agentId/config", configHandler.SetConfigHandler) // Environment file endpoints - envHandler := ui.NewEnvHandler(s.storage, s.agentService, s.haxenHome) + envHandler := ui.NewEnvHandler(s.storage, 
s.agentService, s.agentfieldHome) agents.GET("/:agentId/env", envHandler.GetEnvHandler) agents.PUT("/:agentId/env", envHandler.PutEnvHandler) agents.PATCH("/:agentId/env", envHandler.PatchEnvHandler) @@ -901,23 +908,23 @@ func (s *HaxenServer) setupRoutes() { // Register service-backed DID routes didHandlers.RegisterRoutes(agentAPI) - // Add haxen server DID endpoint - agentAPI.GET("/did/haxen-server", func(c *gin.Context) { - // Get haxen server ID dynamically - haxenServerID, err := s.didService.GetHaxenServerID() + // Add af server DID endpoint + agentAPI.GET("/did/agentfield-server", func(c *gin.Context) { + // Get af server ID dynamically + agentfieldServerID, err := s.didService.GetAgentFieldServerID() if err != nil { c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to get haxen server ID", - "details": fmt.Sprintf("Haxen server ID error: %v", err), + "error": "Failed to get af server ID", + "details": fmt.Sprintf("AgentField server ID error: %v", err), }) return } - // Get the actual haxen server DID from the registry - registry, err := s.didService.GetRegistry(haxenServerID) + // Get the actual af server DID from the registry + registry, err := s.didService.GetRegistry(agentfieldServerID) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Failed to get haxen server DID", + "error": "Failed to get af server DID", "details": fmt.Sprintf("Registry error: %v", err), }) return @@ -925,24 +932,24 @@ func (s *HaxenServer) setupRoutes() { if registry == nil { c.JSON(http.StatusNotFound, gin.H{ - "error": "Haxen server DID not found", - "details": "No DID registry exists for haxen server 'default'. The DID system may not be properly initialized.", + "error": "AgentField server DID not found", + "details": "No DID registry exists for af server 'default'. 
The DID system may not be properly initialized.", }) return } if registry.RootDID == "" { c.JSON(http.StatusInternalServerError, gin.H{ - "error": "Haxen server DID is empty", + "error": "AgentField server DID is empty", "details": "Registry exists but root DID is empty. The DID system may be corrupted.", }) return } c.JSON(http.StatusOK, gin.H{ - "haxen_server_id": "default", - "haxen_server_did": registry.RootDID, - "message": "Haxen server DID retrieved successfully", + "agentfield_server_id": "default", + "agentfield_server_did": registry.RootDID, + "message": "AgentField server DID retrieved successfully", }) }) } else { @@ -995,7 +1002,7 @@ func (s *HaxenServer) setupRoutes() { // Get the executable path and find UI dist relative to it execPath, err := os.Executable() if err != nil { - distPath = filepath.Join("apps", "platform", "haxen", "web", "client", "dist") + distPath = filepath.Join("apps", "platform", "agentfield", "web", "client", "dist") if _, statErr := os.Stat(distPath); os.IsNotExist(statErr) { distPath = filepath.Join("web", "client", "dist") } @@ -1004,14 +1011,14 @@ func (s *HaxenServer) setupRoutes() { // Look for web/client/dist relative to the executable directory distPath = filepath.Join(execDir, "web", "client", "dist") - // If that doesn't exist, try going up one level (if binary is in apps/platform/haxen/) + // If that doesn't exist, try going up one level (if binary is in apps/platform/agentfield/) if _, err := os.Stat(distPath); os.IsNotExist(err) { - distPath = filepath.Join(filepath.Dir(execDir), "apps", "platform", "haxen", "web", "client", "dist") + distPath = filepath.Join(filepath.Dir(execDir), "apps", "platform", "agentfield", "web", "client", "dist") } // Final fallback to current working directory if _, err := os.Stat(distPath); os.IsNotExist(err) { - altPath := filepath.Join("apps", "platform", "haxen", "web", "client", "dist") + altPath := filepath.Join("apps", "platform", "agentfield", "web", "client", "dist") if _, altErr 
:= os.Stat(altPath); altErr == nil { distPath = altPath } else { @@ -1029,22 +1036,22 @@ func (s *HaxenServer) setupRoutes() { } } -// generateHaxenServerID creates a deterministic haxen server ID based on the haxen home directory. -// This ensures each haxen instance has a unique ID while being deterministic for the same installation. -func generateHaxenServerID(haxenHome string) string { - // Use the absolute path of haxen home to generate a deterministic ID - absPath, err := filepath.Abs(haxenHome) +// generateAgentFieldServerID creates a deterministic af server ID based on the agentfield home directory. +// This ensures each agentfield instance has a unique ID while being deterministic for the same installation. +func generateAgentFieldServerID(agentfieldHome string) string { + // Use the absolute path of agentfield home to generate a deterministic ID + absPath, err := filepath.Abs(agentfieldHome) if err != nil { // Fallback to the original path if absolute path fails - absPath = haxenHome + absPath = agentfieldHome } - // Create a hash of the haxen home path to generate a unique but deterministic ID + // Create a hash of the agentfield home path to generate a unique but deterministic ID hash := sha256.Sum256([]byte(absPath)) - // Use first 16 characters of the hex hash as the haxen server ID + // Use first 16 characters of the hex hash as the af server ID // This provides uniqueness while keeping the ID manageable - haxenServerID := hex.EncodeToString(hash[:])[:16] + agentfieldServerID := hex.EncodeToString(hash[:])[:16] - return haxenServerID + return agentfieldServerID } diff --git a/control-plane/internal/server/server_grpc_test.go b/control-plane/internal/server/server_grpc_test.go index 9ffa7bb6..0cfbd5f5 100644 --- a/control-plane/internal/server/server_grpc_test.go +++ b/control-plane/internal/server/server_grpc_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/storage" - 
"github.com/your-org/haxen/control-plane/pkg/adminpb" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/adminpb" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -23,8 +23,8 @@ func TestListReasonersAggregatesNodes(t *testing.T) { cfg := storage.StorageConfig{ Mode: "local", Local: storage.LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } @@ -37,7 +37,7 @@ func TestListReasonersAggregatesNodes(t *testing.T) { } t.Cleanup(func() { _ = localStore.Close(ctx) }) - srv := &HaxenServer{storage: localStore} + srv := &AgentFieldServer{storage: localStore} schema := json.RawMessage("{}") node := &types.AgentNode{ diff --git a/control-plane/internal/server/server_routes_test.go b/control-plane/internal/server/server_routes_test.go index bf19509e..157cc6d9 100644 --- a/control-plane/internal/server/server_routes_test.go +++ b/control-plane/internal/server/server_routes_test.go @@ -5,8 +5,8 @@ import ( "net/http/httptest" "testing" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/services" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/services" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" @@ -17,7 +17,7 @@ func TestSetupRoutesRegistersMetricsAndUI(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{ + srv := &AgentFieldServer{ Router: gin.New(), config: &config.Config{ UI: config.UIConfig{Enabled: true, Mode: "embedded"}, @@ -45,6 +45,7 @@ func TestSetupRoutesRegistersMetricsAndUI(t *testing.T) { }) } +//nolint:unused // Reserved 
for future test cases type stubHealthMonitor struct { *services.HealthMonitor } @@ -54,7 +55,7 @@ func TestUnregisterAgentFromMonitoringResponses(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{} + srv := &AgentFieldServer{} t.Run("missing node id returns 400", func(t *testing.T) { req, _ := http.NewRequest(http.MethodDelete, "/internal/nodes//monitor", nil) diff --git a/control-plane/internal/server/server_test.go b/control-plane/internal/server/server_test.go index 63c61e1f..d61353b1 100644 --- a/control-plane/internal/server/server_test.go +++ b/control-plane/internal/server/server_test.go @@ -11,14 +11,14 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" "github.com/gin-gonic/gin" "gopkg.in/yaml.v3" ) func TestCheckStorageHealthOverride(t *testing.T) { - srv := &HaxenServer{ + srv := &AgentFieldServer{ storageHealthOverride: func(context.Context) gin.H { return gin.H{"status": "healthy"} }, @@ -31,7 +31,7 @@ func TestCheckStorageHealthOverride(t *testing.T) { } func TestCheckStorageHealthWithoutStorage(t *testing.T) { - srv := &HaxenServer{} + srv := &AgentFieldServer{} result := srv.checkStorageHealth(context.Background()) if status, ok := result["status"].(string); !ok || status != "healthy" { t.Fatalf("expected default healthy status when storage nil, got %+v", result) @@ -39,7 +39,7 @@ func TestCheckStorageHealthWithoutStorage(t *testing.T) { } func TestCheckStorageHealthContextError(t *testing.T) { - srv := &HaxenServer{} + srv := &AgentFieldServer{} ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -108,7 +108,7 @@ func (c *fakeCache) Publish(channel string, message interface{}) error { func TestCheckCacheHealthHealthy(t *testing.T) { cache := newFakeCache() - srv := &HaxenServer{cache: cache} + srv := &AgentFieldServer{cache: cache} result := srv.checkCacheHealth(context.Background()) if status, ok := 
result["status"].(string); !ok || status != "healthy" { @@ -119,7 +119,7 @@ func TestCheckCacheHealthHealthy(t *testing.T) { func TestCheckCacheHealthSetError(t *testing.T) { cache := newFakeCache() cache.setErr = context.DeadlineExceeded - srv := &HaxenServer{cache: cache} + srv := &AgentFieldServer{cache: cache} result := srv.checkCacheHealth(context.Background()) if status, ok := result["status"].(string); !ok || status != "unhealthy" { @@ -130,7 +130,7 @@ func TestCheckCacheHealthSetError(t *testing.T) { func TestCheckCacheHealthGetError(t *testing.T) { cache := newFakeCache() cache.getErr = context.DeadlineExceeded - srv := &HaxenServer{cache: cache} + srv := &AgentFieldServer{cache: cache} result := srv.checkCacheHealth(context.Background()) if status, ok := result["status"].(string); !ok || status != "unhealthy" { @@ -140,7 +140,7 @@ func TestCheckCacheHealthGetError(t *testing.T) { func TestHealthCheckHandlerHealthy(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{ + srv := &AgentFieldServer{ storageHealthOverride: func(context.Context) gin.H { return gin.H{"status": "healthy"} }, cacheHealthOverride: func(context.Context) gin.H { return gin.H{"status": "healthy"} }, } @@ -167,7 +167,7 @@ func TestHealthCheckHandlerHealthy(t *testing.T) { func TestHealthCheckHandlerCacheOptional(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{ + srv := &AgentFieldServer{ storageHealthOverride: func(context.Context) gin.H { return gin.H{"status": "healthy"} }, } @@ -195,7 +195,7 @@ func TestHealthCheckHandlerCacheOptional(t *testing.T) { func TestHealthCheckHandlerUnhealthyStorage(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{ + srv := &AgentFieldServer{ storageHealthOverride: func(context.Context) gin.H { return gin.H{"status": "unhealthy"} }, cacheHealthOverride: func(context.Context) gin.H { return gin.H{"status": "healthy"} }, } @@ -214,7 +214,7 @@ func TestHealthCheckHandlerUnhealthyStorage(t *testing.T) { func 
TestHealthCheckHandlerWithoutStorage(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{} + srv := &AgentFieldServer{} w := httptest.NewRecorder() c, _ := gin.CreateTestContext(w) @@ -228,13 +228,13 @@ func TestHealthCheckHandlerWithoutStorage(t *testing.T) { } } -func TestGenerateHaxenServerIDDeterministic(t *testing.T) { - dir1 := filepath.Join("/tmp", "haxen-test-1") - dir2 := filepath.Join("/tmp", "haxen-test-2") +func TestGenerateAgentFieldServerIDDeterministic(t *testing.T) { + dir1 := filepath.Join("/tmp", "agentfield-test-1") + dir2 := filepath.Join("/tmp", "agentfield-test-2") - id1 := generateHaxenServerID(dir1) - id1Again := generateHaxenServerID(dir1) - id2 := generateHaxenServerID(dir2) + id1 := generateAgentFieldServerID(dir1) + id1Again := generateAgentFieldServerID(dir1) + id2 := generateAgentFieldServerID(dir2) if id1 != id1Again { t.Fatal("expected deterministic ID for same path") @@ -246,7 +246,7 @@ func TestGenerateHaxenServerIDDeterministic(t *testing.T) { func TestUnregisterAgentFromMonitoring_NoNodeID(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{} + srv := &AgentFieldServer{} router := gin.New() router.DELETE("/nodes/:node_id/monitoring", srv.unregisterAgentFromMonitoring) @@ -262,7 +262,7 @@ func TestUnregisterAgentFromMonitoring_NoNodeID(t *testing.T) { func TestUnregisterAgentFromMonitoring_NoMonitor(t *testing.T) { gin.SetMode(gin.TestMode) - srv := &HaxenServer{} + srv := &AgentFieldServer{} router := gin.New() router.DELETE("/nodes/:node_id/monitoring", srv.unregisterAgentFromMonitoring) @@ -279,30 +279,30 @@ func TestUnregisterAgentFromMonitoring_NoMonitor(t *testing.T) { func TestSyncPackagesFromRegistry(t *testing.T) { storage := newStubPackageStorage() - haxenHome := t.TempDir() - pkgDir := filepath.Join(haxenHome, "packages", "mypkg") + agentfieldHome := t.TempDir() + pkgDir := filepath.Join(agentfieldHome, "packages", "mypkg") if err := os.MkdirAll(pkgDir, 0o755); err != nil { t.Fatalf("failed to 
create package dir: %v", err) } packageContent := []byte(`name: Test Package\nversion: 1.0.0`) - if err := os.WriteFile(filepath.Join(pkgDir, "haxen-package.yaml"), packageContent, 0o644); err != nil { - t.Fatalf("failed to write haxen-package.yaml: %v", err) + if err := os.WriteFile(filepath.Join(pkgDir, "agentfield-package.yaml"), packageContent, 0o644); err != nil { + t.Fatalf("failed to write agentfield-package.yaml: %v", err) } installedContent := []byte("installed:\n test-package:\n name: Test Package\n version: \"1.0.0\"\n description: Test description\n path: \"" + pkgDir + "\"\n source: local\n") - if err := os.WriteFile(filepath.Join(haxenHome, "installed.yaml"), installedContent, 0o644); err != nil { + if err := os.WriteFile(filepath.Join(agentfieldHome, "installed.yaml"), installedContent, 0o644); err != nil { t.Fatalf("failed to write installed.yaml: %v", err) } var reg InstallationRegistry - if data, err := os.ReadFile(filepath.Join(haxenHome, "installed.yaml")); err == nil { + if data, err := os.ReadFile(filepath.Join(agentfieldHome, "installed.yaml")); err == nil { _ = yaml.Unmarshal(data, ®) } if len(reg.Installed) == 0 { t.Fatal("expected registry to contain installed package") } - if err := SyncPackagesFromRegistry(haxenHome, storage); err != nil { + if err := SyncPackagesFromRegistry(agentfieldHome, storage); err != nil { t.Fatalf("SyncPackagesFromRegistry returned error: %v", err) } @@ -313,9 +313,9 @@ func TestSyncPackagesFromRegistry(t *testing.T) { func TestSyncPackagesFromRegistryMissingFile(t *testing.T) { storage := newStubPackageStorage() - haxenHome := t.TempDir() + agentfieldHome := t.TempDir() - if err := SyncPackagesFromRegistry(haxenHome, storage); err != nil { + if err := SyncPackagesFromRegistry(agentfieldHome, storage); err != nil { t.Fatalf("expected nil error when registry file missing, got %v", err) } if len(storage.packages) != 0 { diff --git a/control-plane/internal/server/test_helpers_test.go 
b/control-plane/internal/server/test_helpers_test.go index 544262a1..c55ca926 100644 --- a/control-plane/internal/server/test_helpers_test.go +++ b/control-plane/internal/server/test_helpers_test.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) type stubPackageStorage struct { diff --git a/control-plane/internal/services/did_registry.go b/control-plane/internal/services/did_registry.go index cca85ea7..e7414048 100644 --- a/control-plane/internal/services/did_registry.go +++ b/control-plane/internal/services/did_registry.go @@ -5,8 +5,8 @@ import ( "fmt" "sync" - "github.com/your-org/haxen/control-plane/pkg/types" - "github.com/your-org/haxen/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // DIDRegistry manages the storage and retrieval of DID registries using database-only operations. @@ -29,18 +29,18 @@ func (r *DIDRegistry) Initialize() error { if r.storageProvider == nil { return fmt.Errorf("storage provider not available") } - + // Load existing registries from database return r.loadRegistriesFromDatabase() } -// GetRegistry retrieves a DID registry for a haxen server. +// GetRegistry retrieves a DID registry for a af server. // Returns (nil, nil) if registry doesn't exist, (nil, error) for actual errors. 
-func (r *DIDRegistry) GetRegistry(haxenServerID string) (*types.DIDRegistry, error) { +func (r *DIDRegistry) GetRegistry(agentfieldServerID string) (*types.DIDRegistry, error) { r.mu.RLock() defer r.mu.RUnlock() - registry, exists := r.registries[haxenServerID] + registry, exists := r.registries[agentfieldServerID] if !exists { // Return nil, nil for "not found" to distinguish from actual errors return nil, nil @@ -49,19 +49,19 @@ func (r *DIDRegistry) GetRegistry(haxenServerID string) (*types.DIDRegistry, err return registry, nil } -// StoreRegistry stores a DID registry for a haxen server. +// StoreRegistry stores a DID registry for a af server. func (r *DIDRegistry) StoreRegistry(registry *types.DIDRegistry) error { r.mu.Lock() defer r.mu.Unlock() // Store in memory - r.registries[registry.HaxenServerID] = registry + r.registries[registry.AgentFieldServerID] = registry // Persist to database return r.saveRegistryToDatabase(registry) } -// ListRegistries lists all haxen server registries. +// ListRegistries lists all af server registries. func (r *DIDRegistry) ListRegistries() ([]*types.DIDRegistry, error) { r.mu.RLock() defer r.mu.RUnlock() @@ -74,13 +74,13 @@ func (r *DIDRegistry) ListRegistries() ([]*types.DIDRegistry, error) { return registries, nil } -// DeleteRegistry deletes a DID registry for a haxen server. -func (r *DIDRegistry) DeleteRegistry(haxenServerID string) error { +// DeleteRegistry deletes a DID registry for a af server. +func (r *DIDRegistry) DeleteRegistry(agentfieldServerID string) error { r.mu.Lock() defer r.mu.Unlock() // Remove from memory - delete(r.registries, haxenServerID) + delete(r.registries, agentfieldServerID) // TODO: Add database deletion method to storage interface // For now, we'll just remove from memory @@ -88,13 +88,13 @@ func (r *DIDRegistry) DeleteRegistry(haxenServerID string) error { } // UpdateAgentStatus updates the status of an agent DID. 
-func (r *DIDRegistry) UpdateAgentStatus(haxenServerID, agentNodeID string, status types.AgentDIDStatus) error { +func (r *DIDRegistry) UpdateAgentStatus(agentfieldServerID, agentNodeID string, status types.AgentDIDStatus) error { r.mu.Lock() defer r.mu.Unlock() - registry, exists := r.registries[haxenServerID] + registry, exists := r.registries[agentfieldServerID] if !exists { - return fmt.Errorf("registry not found for haxen server: %s", haxenServerID) + return fmt.Errorf("registry not found for af server: %s", agentfieldServerID) } agentInfo, exists := registry.AgentNodes[agentNodeID] @@ -110,13 +110,13 @@ func (r *DIDRegistry) UpdateAgentStatus(haxenServerID, agentNodeID string, statu } // FindDIDByComponent finds a DID by component type and function name. -func (r *DIDRegistry) FindDIDByComponent(haxenServerID, componentType, functionName string) (*types.DIDIdentity, error) { +func (r *DIDRegistry) FindDIDByComponent(agentfieldServerID, componentType, functionName string) (*types.DIDIdentity, error) { r.mu.RLock() defer r.mu.RUnlock() - registry, exists := r.registries[haxenServerID] + registry, exists := r.registries[agentfieldServerID] if !exists { - return nil, fmt.Errorf("registry not found for haxen server: %s", haxenServerID) + return nil, fmt.Errorf("registry not found for af server: %s", agentfieldServerID) } // Search through all agent nodes @@ -162,13 +162,13 @@ func (r *DIDRegistry) FindDIDByComponent(haxenServerID, componentType, functionN } // GetAgentDIDs retrieves all DIDs for a specific agent node. 
-func (r *DIDRegistry) GetAgentDIDs(haxenServerID, agentNodeID string) (*types.DIDIdentityPackage, error) { +func (r *DIDRegistry) GetAgentDIDs(agentfieldServerID, agentNodeID string) (*types.DIDIdentityPackage, error) { r.mu.RLock() defer r.mu.RUnlock() - registry, exists := r.registries[haxenServerID] + registry, exists := r.registries[agentfieldServerID] if !exists { - return nil, fmt.Errorf("registry not found for haxen server: %s", haxenServerID) + return nil, fmt.Errorf("registry not found for af server: %s", agentfieldServerID) } agentInfo, exists := registry.AgentNodes[agentNodeID] @@ -206,9 +206,9 @@ func (r *DIDRegistry) GetAgentDIDs(haxenServerID, agentNodeID string) (*types.DI DerivationPath: agentInfo.DerivationPath, ComponentType: "agent", }, - ReasonerDIDs: reasonerDIDs, - SkillDIDs: skillDIDs, - HaxenServerID: haxenServerID, + ReasonerDIDs: reasonerDIDs, + SkillDIDs: skillDIDs, + AgentFieldServerID: agentfieldServerID, }, nil } @@ -219,45 +219,45 @@ func (r *DIDRegistry) loadRegistriesFromDatabase() error { } ctx := context.Background() - // Load haxen server DID information - haxenServerDIDs, err := r.storageProvider.ListHaxenServerDIDs(ctx) + // Load af server DID information + agentfieldServerDIDs, err := r.storageProvider.ListAgentFieldServerDIDs(ctx) if err != nil { - return fmt.Errorf("failed to list haxen server DIDs: %w", err) + return fmt.Errorf("failed to list af server DIDs: %w", err) } - // Create registries for each haxen server - for _, haxenServerDIDInfo := range haxenServerDIDs { + // Create registries for each af server + for _, agentfieldServerDIDInfo := range agentfieldServerDIDs { registry := &types.DIDRegistry{ - HaxenServerID: haxenServerDIDInfo.HaxenServerID, - RootDID: haxenServerDIDInfo.RootDID, - MasterSeed: haxenServerDIDInfo.MasterSeed, - AgentNodes: make(map[string]types.AgentDIDInfo), - TotalDIDs: 0, - CreatedAt: haxenServerDIDInfo.CreatedAt, - LastKeyRotation: haxenServerDIDInfo.LastKeyRotation, + AgentFieldServerID: 
agentfieldServerDIDInfo.AgentFieldServerID, + RootDID: agentfieldServerDIDInfo.RootDID, + MasterSeed: agentfieldServerDIDInfo.MasterSeed, + AgentNodes: make(map[string]types.AgentDIDInfo), + TotalDIDs: 0, + CreatedAt: agentfieldServerDIDInfo.CreatedAt, + LastKeyRotation: agentfieldServerDIDInfo.LastKeyRotation, } - // Load agent DIDs for this haxen server + // Load agent DIDs for this af server agentDIDs, err := r.storageProvider.ListAgentDIDs(ctx) if err != nil { return fmt.Errorf("failed to list agent DIDs: %w", err) } for _, agentDIDInfo := range agentDIDs { - // Filter agents for this haxen server (assuming we can match by some criteria) - // For now, we'll add all agents to the default haxen server - // TODO: Add haxen server filtering when the storage interface supports it - + // Filter agents for this af server (assuming we can match by some criteria) + // For now, we'll add all agents to the default af server + // TODO: Add af server filtering when the storage interface supports it + agentInfo := types.AgentDIDInfo{ - DID: agentDIDInfo.DID, - AgentNodeID: agentDIDInfo.AgentNodeID, - HaxenServerID: haxenServerDIDInfo.HaxenServerID, - PublicKeyJWK: agentDIDInfo.PublicKeyJWK, - DerivationPath: agentDIDInfo.DerivationPath, - Status: agentDIDInfo.Status, - RegisteredAt: agentDIDInfo.RegisteredAt, - Reasoners: make(map[string]types.ReasonerDIDInfo), - Skills: make(map[string]types.SkillDIDInfo), + DID: agentDIDInfo.DID, + AgentNodeID: agentDIDInfo.AgentNodeID, + AgentFieldServerID: agentfieldServerDIDInfo.AgentFieldServerID, + PublicKeyJWK: agentDIDInfo.PublicKeyJWK, + DerivationPath: agentDIDInfo.DerivationPath, + Status: agentDIDInfo.Status, + RegisteredAt: agentDIDInfo.RegisteredAt, + Reasoners: make(map[string]types.ReasonerDIDInfo), + Skills: make(map[string]types.SkillDIDInfo), } // Load component DIDs for this agent @@ -274,7 +274,7 @@ func (r *DIDRegistry) loadRegistriesFromDatabase() error { FunctionName: componentDID.ComponentName, DerivationPath: 
fmt.Sprintf("m/44'/0'/0'/%d", componentDID.DerivationIndex), Capabilities: []string{}, // TODO: Load from database - ExposureLevel: "private", // TODO: Load from database + ExposureLevel: "private", // TODO: Load from database CreatedAt: componentDID.CreatedAt, } agentInfo.Reasoners[componentDID.ComponentName] = reasonerInfo @@ -285,7 +285,7 @@ func (r *DIDRegistry) loadRegistriesFromDatabase() error { FunctionName: componentDID.ComponentName, DerivationPath: fmt.Sprintf("m/44'/0'/0'/%d", componentDID.DerivationIndex), Tags: []string{}, // TODO: Load from database - ExposureLevel: "private", // TODO: Load from database + ExposureLevel: "private", // TODO: Load from database CreatedAt: componentDID.CreatedAt, } agentInfo.Skills[componentDID.ComponentName] = skillInfo @@ -296,7 +296,7 @@ func (r *DIDRegistry) loadRegistriesFromDatabase() error { registry.TotalDIDs++ } - r.registries[haxenServerDIDInfo.HaxenServerID] = registry + r.registries[agentfieldServerDIDInfo.AgentFieldServerID] = registry } return nil @@ -309,17 +309,17 @@ func (r *DIDRegistry) saveRegistryToDatabase(registry *types.DIDRegistry) error } ctx := context.Background() - // Store haxen server DID information - err := r.storageProvider.StoreHaxenServerDID( + // Store af server DID information + err := r.storageProvider.StoreAgentFieldServerDID( ctx, - registry.HaxenServerID, + registry.AgentFieldServerID, registry.RootDID, registry.MasterSeed, registry.CreatedAt, registry.LastKeyRotation, ) if err != nil { - return fmt.Errorf("failed to store haxen server DID: %w", err) + return fmt.Errorf("failed to store af server DID: %w", err) } // Store each agent DID and its components using transaction-safe method @@ -329,7 +329,7 @@ func (r *DIDRegistry) saveRegistryToDatabase(registry *types.DIDRegistry) error // Prepare component DIDs for batch storage var components []storage.ComponentDIDRequest - + // Add reasoner DIDs for _, reasonerInfo := range agentInfo.Reasoners { reasonerDerivationIndex := 0 // 
TODO: Parse from reasonerInfo.DerivationPath @@ -359,7 +359,7 @@ func (r *DIDRegistry) saveRegistryToDatabase(registry *types.DIDRegistry) error ctx, agentInfo.AgentNodeID, agentInfo.DID, - registry.HaxenServerID, // Use haxen server ID instead of root DID + registry.AgentFieldServerID, // Use af server ID instead of root DID string(agentInfo.PublicKeyJWK), derivationIndex, components, diff --git a/control-plane/internal/services/did_registry_test.go b/control-plane/internal/services/did_registry_test.go index aa3973ca..30f3c3c2 100644 --- a/control-plane/internal/services/did_registry_test.go +++ b/control-plane/internal/services/did_registry_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -21,8 +21,8 @@ func setupTestStorage(t *testing.T) (storage.StorageProvider, context.Context) { cfg := storage.StorageConfig{ Mode: "local", Local: storage.LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } @@ -44,10 +44,10 @@ func setupTestStorage(t *testing.T) (storage.StorageProvider, context.Context) { func TestDIDRegistryInitializeAndLookup(t *testing.T) { provider, ctx := setupTestStorage(t) - haxenID := "haxen-1" + agentfieldID := "agentfield-1" now := time.Now().UTC().Truncate(time.Second) - require.NoError(t, provider.StoreHaxenServerDID(ctx, haxenID, "did:haxen:root", []byte("seed"), now, now)) + require.NoError(t, provider.StoreAgentFieldServerDID(ctx, agentfieldID, "did:agentfield:root", []byte("seed"), now, now)) components := []storage.ComponentDIDRequest{ { @@ -66,34 +66,34 @@ func 
TestDIDRegistryInitializeAndLookup(t *testing.T) { }, } - require.NoError(t, provider.StoreAgentDIDWithComponents(ctx, "agent-1", "did:agent:1", haxenID, "{}", 0, components)) + require.NoError(t, provider.StoreAgentDIDWithComponents(ctx, "agent-1", "did:agent:1", agentfieldID, "{}", 0, components)) registry := NewDIDRegistryWithStorage(provider) require.NoError(t, registry.Initialize()) - loaded, err := registry.GetRegistry(haxenID) + loaded, err := registry.GetRegistry(agentfieldID) require.NoError(t, err) require.NotNil(t, loaded) require.Contains(t, loaded.AgentNodes, "agent-1") // Validate reasoner lookup - reasonerID, err := registry.FindDIDByComponent(haxenID, "reasoner", "reasoner.fn") + reasonerID, err := registry.FindDIDByComponent(agentfieldID, "reasoner", "reasoner.fn") require.NoError(t, err) require.Equal(t, "did:reasoner:1", reasonerID.DID) // Validate skill lookup - skillID, err := registry.FindDIDByComponent(haxenID, "skill", "skill.fn") + skillID, err := registry.FindDIDByComponent(agentfieldID, "skill", "skill.fn") require.NoError(t, err) require.Equal(t, "did:skill:1", skillID.DID) // Update status and ensure it is persisted in-memory - require.NoError(t, registry.UpdateAgentStatus(haxenID, "agent-1", types.AgentDIDStatusActive)) + require.NoError(t, registry.UpdateAgentStatus(agentfieldID, "agent-1", types.AgentDIDStatusActive)) - loadedAfterUpdate, err := registry.GetRegistry(haxenID) + loadedAfterUpdate, err := registry.GetRegistry(agentfieldID) require.NoError(t, err) require.Equal(t, types.AgentDIDStatusActive, loadedAfterUpdate.AgentNodes["agent-1"].Status) - packageResult, err := registry.GetAgentDIDs(haxenID, "agent-1") + packageResult, err := registry.GetAgentDIDs(agentfieldID, "agent-1") require.NoError(t, err) require.Equal(t, "did:agent:1", packageResult.AgentDID.DID) require.Contains(t, packageResult.ReasonerDIDs, "reasoner.fn") diff --git a/control-plane/internal/services/did_service.go 
b/control-plane/internal/services/did_service.go index dbd7a566..66c0ff50 100644 --- a/control-plane/internal/services/did_service.go +++ b/control-plane/internal/services/did_service.go @@ -11,47 +11,47 @@ import ( "hash/fnv" "time" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // DIDService handles DID generation, management, and resolution. type DIDService struct { - config *config.DIDConfig - keystore *KeystoreService - registry *DIDRegistry - haxenServerID string + config *config.DIDConfig + keystore *KeystoreService + registry *DIDRegistry + agentfieldServerID string } // NewDIDService creates a new DID service instance. func NewDIDService(cfg *config.DIDConfig, keystore *KeystoreService, registry *DIDRegistry) *DIDService { return &DIDService{ - config: cfg, - keystore: keystore, - registry: registry, - haxenServerID: "", // Will be set during initialization + config: cfg, + keystore: keystore, + registry: registry, + agentfieldServerID: "", // Will be set during initialization } } -// Initialize initializes the DID service and creates haxen server master seed if needed. -func (s *DIDService) Initialize(haxenServerID string) error { +// Initialize initializes the DID service and creates af server master seed if needed. 
+func (s *DIDService) Initialize(agentfieldServerID string) error { if !s.config.Enabled { return nil } - // Store the haxen server ID for dynamic resolution - s.haxenServerID = haxenServerID + // Store the af server ID for dynamic resolution + s.agentfieldServerID = agentfieldServerID - // Check if haxen server already has a DID registry - registry, err := s.registry.GetRegistry(haxenServerID) + // Check if af server already has a DID registry + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { return fmt.Errorf("failed to check existing registry: %w", err) } if registry == nil { - // Create new haxen server registry + // Create new af server registry masterSeed := make([]byte, 32) if _, err := rand.Read(masterSeed); err != nil { return fmt.Errorf("failed to generate master seed: %w", err) @@ -65,13 +65,13 @@ func (s *DIDService) Initialize(haxenServerID string) error { // Create and store registry registry = &types.DIDRegistry{ - HaxenServerID: haxenServerID, - MasterSeed: masterSeed, - RootDID: rootDID, - AgentNodes: make(map[string]types.AgentDIDInfo), - TotalDIDs: 1, - CreatedAt: time.Now(), - LastKeyRotation: time.Now(), + AgentFieldServerID: agentfieldServerID, + MasterSeed: masterSeed, + RootDID: rootDID, + AgentNodes: make(map[string]types.AgentDIDInfo), + TotalDIDs: 1, + CreatedAt: time.Now(), + LastKeyRotation: time.Now(), } if err := s.registry.StoreRegistry(registry); err != nil { @@ -83,45 +83,45 @@ func (s *DIDService) Initialize(haxenServerID string) error { return nil } -// GetHaxenServerID returns the haxen server ID for this DID service instance. -// This method provides dynamic haxen server ID resolution instead of hardcoded "default". -func (s *DIDService) GetHaxenServerID() (string, error) { - if s.haxenServerID == "" { - return "", fmt.Errorf("haxen server ID not initialized - call Initialize() first") +// GetAgentFieldServerID returns the af server ID for this DID service instance. 
+// This method provides dynamic af server ID resolution instead of hardcoded "default". +func (s *DIDService) GetAgentFieldServerID() (string, error) { + if s.agentfieldServerID == "" { + return "", fmt.Errorf("af server ID not initialized - call Initialize() first") } - return s.haxenServerID, nil + return s.agentfieldServerID, nil } -// getHaxenServerID is an internal helper that returns the haxen server ID. -func (s *DIDService) getHaxenServerID() (string, error) { - return s.GetHaxenServerID() +// getAgentFieldServerID is an internal helper that returns the af server ID. +func (s *DIDService) getAgentFieldServerID() (string, error) { + return s.GetAgentFieldServerID() } -// validateHaxenServerRegistry ensures that the haxen server registry exists before operations. -func (s *DIDService) validateHaxenServerRegistry() error { - haxenServerID, err := s.getHaxenServerID() +// validateAgentFieldServerRegistry ensures that the af server registry exists before operations. +func (s *DIDService) validateAgentFieldServerRegistry() error { + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { return err } - registry, err := s.registry.GetRegistry(haxenServerID) + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { - return fmt.Errorf("failed to get haxen server registry: %w", err) + return fmt.Errorf("failed to get af server registry: %w", err) } if registry == nil { - return fmt.Errorf("haxen server registry not found for ID: %s - ensure Initialize() was called", haxenServerID) + return fmt.Errorf("af server registry not found for ID: %s - ensure Initialize() was called", agentfieldServerID) } return nil } -// GetRegistry retrieves a DID registry for a haxen server. -func (s *DIDService) GetRegistry(haxenServerID string) (*types.DIDRegistry, error) { +// GetRegistry retrieves a DID registry for a af server. 
+func (s *DIDService) GetRegistry(agentfieldServerID string) (*types.DIDRegistry, error) { if !s.config.Enabled { return nil, fmt.Errorf("DID system is disabled") } - return s.registry.GetRegistry(haxenServerID) + return s.registry.GetRegistry(agentfieldServerID) } // RegisterAgent generates DIDs for an agent node and all its components. @@ -134,11 +134,11 @@ func (s *DIDService) RegisterAgent(req *types.DIDRegistrationRequest) (*types.DI }, nil } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { return &types.DIDRegistrationResponse{ Success: false, - Error: fmt.Sprintf("haxen server registry validation failed: %v", err), + Error: fmt.Sprintf("af server registry validation failed: %v", err), }, nil } @@ -184,17 +184,17 @@ func (s *DIDService) RegisterAgent(req *types.DIDRegistrationRequest) (*types.DI // handleNewRegistration handles registration for new agents (original logic). 
func (s *DIDService) handleNewRegistration(req *types.DIDRegistrationRequest) (*types.DIDRegistrationResponse, error) { - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { return &types.DIDRegistrationResponse{ Success: false, - Error: fmt.Sprintf("failed to get haxen server ID: %v", err), + Error: fmt.Sprintf("failed to get af server ID: %v", err), }, nil } - // Get haxen server registry using dynamic ID - registry, err := s.registry.GetRegistry(haxenServerID) + // Get af server registry using dynamic ID + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { return &types.DIDRegistrationResponse{ Success: false, @@ -202,14 +202,14 @@ func (s *DIDService) handleNewRegistration(req *types.DIDRegistrationRequest) (* }, nil } - // Generate haxen server hash for derivation path - haxenServerHash := s.hashHaxenServerID(registry.HaxenServerID) + // Generate af server hash for derivation path + agentfieldServerHash := s.hashAgentFieldServerID(registry.AgentFieldServerID) // Get next agent index agentIndex := len(registry.AgentNodes) // Generate agent DID - agentPath := fmt.Sprintf("m/44'/%d'/%d'", haxenServerHash, agentIndex) + agentPath := fmt.Sprintf("m/44'/%d'/%d'", agentfieldServerHash, agentIndex) agentDID, agentPrivKey, agentPubKey, err := s.generateDIDWithKeys(registry.MasterSeed, agentPath) if err != nil { return &types.DIDRegistrationResponse{ @@ -232,7 +232,7 @@ func (s *DIDService) handleNewRegistration(req *types.DIDRegistrationRequest) (* continue } - reasonerPath := fmt.Sprintf("m/44'/%d'/%d'/0'/%d'", haxenServerHash, agentIndex, validReasonerIndex) + reasonerPath := fmt.Sprintf("m/44'/%d'/%d'/0'/%d'", agentfieldServerHash, agentIndex, validReasonerIndex) reasonerDID, reasonerPrivKey, reasonerPubKey, err := s.generateDIDWithKeys(registry.MasterSeed, reasonerPath) if err != nil { return 
&types.DIDRegistrationResponse{ @@ -278,7 +278,7 @@ func (s *DIDService) handleNewRegistration(req *types.DIDRegistrationRequest) (* continue } - skillPath := fmt.Sprintf("m/44'/%d'/%d'/1'/%d'", haxenServerHash, agentIndex, validSkillIndex) + skillPath := fmt.Sprintf("m/44'/%d'/%d'/1'/%d'", agentfieldServerHash, agentIndex, validSkillIndex) skillDID, skillPrivKey, skillPubKey, err := s.generateDIDWithKeys(registry.MasterSeed, skillPath) if err != nil { return &types.DIDRegistrationResponse{ @@ -345,9 +345,9 @@ func (s *DIDService) handleNewRegistration(req *types.DIDRegistrationRequest) (* DerivationPath: agentPath, ComponentType: "agent", }, - ReasonerDIDs: reasonerDIDs, - SkillDIDs: skillDIDs, - HaxenServerID: registry.HaxenServerID, + ReasonerDIDs: reasonerDIDs, + SkillDIDs: skillDIDs, + AgentFieldServerID: registry.AgentFieldServerID, } // Debug log the response structure @@ -370,24 +370,24 @@ func (s *DIDService) ResolveDID(did string) (*types.DIDIdentity, error) { return nil, fmt.Errorf("DID system is disabled") } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { - return nil, fmt.Errorf("haxen server registry validation failed: %w", err) + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { + return nil, fmt.Errorf("af server registry validation failed: %w", err) } - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - return nil, fmt.Errorf("failed to get haxen server ID: %w", err) + return nil, fmt.Errorf("failed to get af server ID: %w", err) } - // Get haxen server registry using dynamic ID - registry, err := s.registry.GetRegistry(haxenServerID) + // Get af server registry using dynamic ID + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { return nil, fmt.Errorf("failed to get DID registry: %w", 
err) } - // Check if this is the haxen server root DID + // Check if this is the af server root DID if registry.RootDID == did { // Regenerate private key for root DID using root derivation path privateKeyJWK, err := s.regeneratePrivateKeyJWK(registry.MasterSeed, "m/44'/0'") @@ -406,7 +406,7 @@ func (s *DIDService) ResolveDID(did string) (*types.DIDIdentity, error) { PrivateKeyJWK: privateKeyJWK, PublicKeyJWK: publicKeyJWK, DerivationPath: "m/44'/0'", - ComponentType: "haxen_server", + ComponentType: "agentfield_server", }, nil } @@ -574,10 +574,10 @@ func (s *DIDService) ed25519PublicKeyToJWK(publicKey ed25519.PublicKey) (string, return string(jwkBytes), nil } -// hashHaxenServerID creates a deterministic hash of haxen server ID for derivation paths. -func (s *DIDService) hashHaxenServerID(haxenServerID string) uint32 { +// hashAgentFieldServerID creates a deterministic hash of af server ID for derivation paths. +func (s *DIDService) hashAgentFieldServerID(agentfieldServerID string) uint32 { h := fnv.New32a() - h.Write([]byte(haxenServerID)) + h.Write([]byte(agentfieldServerID)) return h.Sum32() % (1 << 31) // Ensure it fits in BIP32 hardened derivation } @@ -624,19 +624,19 @@ func (s *DIDService) ListAllAgentDIDs() ([]string, error) { return nil, fmt.Errorf("DID system is disabled") } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { - return nil, fmt.Errorf("haxen server registry validation failed: %w", err) + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { + return nil, fmt.Errorf("af server registry validation failed: %w", err) } - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - return nil, fmt.Errorf("failed to get haxen server ID: %w", err) + return nil, fmt.Errorf("failed to get af server ID: %w", err) } - // Get haxen 
server registry using dynamic ID - registry, err := s.registry.GetRegistry(haxenServerID) + // Get af server registry using dynamic ID + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { return nil, fmt.Errorf("failed to get DID registry: %w", err) } @@ -669,19 +669,19 @@ func (s *DIDService) BackfillExistingNodes(ctx context.Context, storageProvider return nil } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { - return fmt.Errorf("haxen server registry validation failed: %w", err) + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { + return fmt.Errorf("af server registry validation failed: %w", err) } - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - return fmt.Errorf("failed to get haxen server ID: %w", err) + return fmt.Errorf("failed to get af server ID: %w", err) } // Get current DID registry using dynamic ID - registry, err := s.GetRegistry(haxenServerID) + registry, err := s.GetRegistry(agentfieldServerID) if err != nil { return fmt.Errorf("failed to get DID registry: %w", err) } @@ -728,19 +728,19 @@ func (s *DIDService) GetExistingAgentDID(agentNodeID string) (*types.AgentDIDInf return nil, fmt.Errorf("DID system is disabled") } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { - return nil, fmt.Errorf("haxen server registry validation failed: %w", err) + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { + return nil, fmt.Errorf("af server registry validation failed: %w", err) } - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - return nil, 
fmt.Errorf("failed to get haxen server ID: %w", err) + return nil, fmt.Errorf("failed to get af server ID: %w", err) } - // Get haxen server registry using dynamic ID - registry, err := s.registry.GetRegistry(haxenServerID) + // Get af server registry using dynamic ID + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { return nil, fmt.Errorf("failed to get DID registry: %w", err) } @@ -868,22 +868,22 @@ func (s *DIDService) findSkillByID(skills []types.SkillDefinition, id string) *t // generateReasonerPath generates a derivation path for a reasoner. func (s *DIDService) generateReasonerPath(agentNodeID, reasonerID string) string { - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - logger.Logger.Error().Err(err).Msg("Failed to get haxen server ID for reasoner path generation") + logger.Logger.Error().Err(err).Msg("Failed to get af server ID for reasoner path generation") return "" } // Get registry to find agent index - registry, err := s.registry.GetRegistry(haxenServerID) + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { logger.Logger.Error().Err(err).Msg("Failed to get registry for reasoner path generation") return "" } - // Generate haxen server hash for derivation path - haxenServerHash := s.hashHaxenServerID(registry.HaxenServerID) + // Generate af server hash for derivation path + agentfieldServerHash := s.hashAgentFieldServerID(registry.AgentFieldServerID) // Find agent index (this is a simplified approach - in production you might want to store this) agentIndex := 0 @@ -898,27 +898,27 @@ func (s *DIDService) generateReasonerPath(agentNodeID, reasonerID string) string existingAgent := registry.AgentNodes[agentNodeID] reasonerIndex := len(existingAgent.Reasoners) - return fmt.Sprintf("m/44'/%d'/%d'/0'/%d'", haxenServerHash, agentIndex, reasonerIndex) + return 
fmt.Sprintf("m/44'/%d'/%d'/0'/%d'", agentfieldServerHash, agentIndex, reasonerIndex) } // generateSkillPath generates a derivation path for a skill. func (s *DIDService) generateSkillPath(agentNodeID, skillID string) string { - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - logger.Logger.Error().Err(err).Msg("Failed to get haxen server ID for skill path generation") + logger.Logger.Error().Err(err).Msg("Failed to get af server ID for skill path generation") return "" } // Get registry to find agent index - registry, err := s.registry.GetRegistry(haxenServerID) + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { logger.Logger.Error().Err(err).Msg("Failed to get registry for skill path generation") return "" } - // Generate haxen server hash for derivation path - haxenServerHash := s.hashHaxenServerID(registry.HaxenServerID) + // Generate af server hash for derivation path + agentfieldServerHash := s.hashAgentFieldServerID(registry.AgentFieldServerID) // Find agent index (this is a simplified approach - in production you might want to store this) agentIndex := 0 @@ -933,16 +933,16 @@ func (s *DIDService) generateSkillPath(agentNodeID, skillID string) string { existingAgent := registry.AgentNodes[agentNodeID] skillIndex := len(existingAgent.Skills) - return fmt.Sprintf("m/44'/%d'/%d'/1'/%d'", haxenServerHash, agentIndex, skillIndex) + return fmt.Sprintf("m/44'/%d'/%d'/1'/%d'", agentfieldServerHash, agentIndex, skillIndex) } // buildExistingIdentityPackage builds an identity package from existing agent DID info. 
func (s *DIDService) buildExistingIdentityPackage(existingAgent *types.AgentDIDInfo) types.DIDIdentityPackage { - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { - logger.Logger.Error().Err(err).Msg("Failed to get haxen server ID for identity package") - haxenServerID = "unknown" + logger.Logger.Error().Err(err).Msg("Failed to get af server ID for identity package") + agentfieldServerID = "unknown" } // Build reasoner DIDs map @@ -979,9 +979,9 @@ func (s *DIDService) buildExistingIdentityPackage(existingAgent *types.AgentDIDI DerivationPath: existingAgent.DerivationPath, ComponentType: "agent", }, - ReasonerDIDs: reasonerDIDs, - SkillDIDs: skillDIDs, - HaxenServerID: haxenServerID, + ReasonerDIDs: reasonerDIDs, + SkillDIDs: skillDIDs, + AgentFieldServerID: agentfieldServerID, } } @@ -1054,25 +1054,25 @@ func (s *DIDService) PartialRegisterAgent(req *types.PartialDIDRegistrationReque }, nil } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { return &types.DIDRegistrationResponse{ Success: false, - Error: fmt.Sprintf("haxen server registry validation failed: %v", err), + Error: fmt.Sprintf("af server registry validation failed: %v", err), }, nil } - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { return &types.DIDRegistrationResponse{ Success: false, - Error: fmt.Sprintf("failed to get haxen server ID: %v", err), + Error: fmt.Sprintf("failed to get af server ID: %v", err), }, nil } - // Get haxen server registry using dynamic ID - registry, err := s.registry.GetRegistry(haxenServerID) + // Get af server registry using dynamic ID + registry, err := 
s.registry.GetRegistry(agentfieldServerID) if err != nil { return &types.DIDRegistrationResponse{ Success: false, @@ -1220,9 +1220,9 @@ func (s *DIDService) PartialRegisterAgent(req *types.PartialDIDRegistrationReque DerivationPath: existingAgent.DerivationPath, ComponentType: "agent", }, - ReasonerDIDs: newReasonerDIDs, - SkillDIDs: newSkillDIDs, - HaxenServerID: registry.HaxenServerID, + ReasonerDIDs: newReasonerDIDs, + SkillDIDs: newSkillDIDs, + AgentFieldServerID: registry.AgentFieldServerID, } logger.Logger.Debug().Msgf("βœ… Partial registration successful for agent %s: %d new reasoners, %d new skills", @@ -1244,25 +1244,25 @@ func (s *DIDService) DeregisterComponents(req *types.ComponentDeregistrationRequ }, nil } - // Validate haxen server registry exists - if err := s.validateHaxenServerRegistry(); err != nil { + // Validate af server registry exists + if err := s.validateAgentFieldServerRegistry(); err != nil { return &types.ComponentDeregistrationResponse{ Success: false, - Error: fmt.Sprintf("haxen server registry validation failed: %v", err), + Error: fmt.Sprintf("af server registry validation failed: %v", err), }, nil } - // Get haxen server ID dynamically - haxenServerID, err := s.getHaxenServerID() + // Get af server ID dynamically + agentfieldServerID, err := s.getAgentFieldServerID() if err != nil { return &types.ComponentDeregistrationResponse{ Success: false, - Error: fmt.Sprintf("failed to get haxen server ID: %v", err), + Error: fmt.Sprintf("failed to get af server ID: %v", err), }, nil } - // Get haxen server registry using dynamic ID - registry, err := s.registry.GetRegistry(haxenServerID) + // Get af server registry using dynamic ID + registry, err := s.registry.GetRegistry(agentfieldServerID) if err != nil { return &types.ComponentDeregistrationResponse{ Success: false, diff --git a/control-plane/internal/services/did_service_test.go b/control-plane/internal/services/did_service_test.go index 6b346378..f7cbc9b7 100644 --- 
a/control-plane/internal/services/did_service_test.go +++ b/control-plane/internal/services/did_service_test.go @@ -5,9 +5,9 @@ import ( "path/filepath" "testing" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -27,14 +27,14 @@ func setupDIDTestEnvironment(t *testing.T) (*DIDService, *DIDRegistry, storage.S service := NewDIDService(cfg, ks, registry) - haxenID := "haxen-test" - require.NoError(t, service.Initialize(haxenID)) + agentfieldID := "agentfield-test" + require.NoError(t, service.Initialize(agentfieldID)) - return service, registry, provider, ctx, haxenID + return service, registry, provider, ctx, agentfieldID } func TestDIDServiceRegisterAgentAndResolve(t *testing.T) { - service, registry, provider, ctx, haxenID := setupDIDTestEnvironment(t) + service, registry, provider, ctx, agentfieldID := setupDIDTestEnvironment(t) req := &types.DIDRegistrationRequest{ AgentNodeID: "agent-alpha", @@ -49,7 +49,7 @@ func TestDIDServiceRegisterAgentAndResolve(t *testing.T) { require.Contains(t, resp.IdentityPackage.ReasonerDIDs, "reasoner.fn") require.Contains(t, resp.IdentityPackage.SkillDIDs, "skill.fn") - storedRegistry, err := registry.GetRegistry(haxenID) + storedRegistry, err := registry.GetRegistry(agentfieldID) require.NoError(t, err) require.NotNil(t, storedRegistry) require.Contains(t, storedRegistry.AgentNodes, "agent-alpha") @@ -86,14 +86,14 @@ func TestDIDServiceValidateRegistryFailure(t *testing.T) { cfg := &config.DIDConfig{Enabled: true, Keystore: config.KeystoreConfig{Path: keystoreDir, Type: "local"}} service := NewDIDService(cfg, ks, registry) - err = service.validateHaxenServerRegistry() + 
err = service.validateAgentFieldServerRegistry() require.Error(t, err) - haxenID := "haxen-validate" - require.NoError(t, service.Initialize(haxenID)) - require.NoError(t, service.validateHaxenServerRegistry()) + agentfieldID := "agentfield-validate" + require.NoError(t, service.Initialize(agentfieldID)) + require.NoError(t, service.validateAgentFieldServerRegistry()) - stored, err := registry.GetRegistry(haxenID) + stored, err := registry.GetRegistry(agentfieldID) require.NoError(t, err) require.NotNil(t, stored) require.False(t, stored.CreatedAt.IsZero()) diff --git a/control-plane/internal/services/execution_metrics.go b/control-plane/internal/services/execution_metrics.go index 5f2739ee..2af9046f 100644 --- a/control-plane/internal/services/execution_metrics.go +++ b/control-plane/internal/services/execution_metrics.go @@ -4,7 +4,7 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -12,33 +12,33 @@ import ( var ( queueDepthGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "haxen_gateway_queue_depth", + Name: "agentfield_gateway_queue_depth", Help: "Number of workflow steps currently queued or in-flight for execution.", }) - workerInflightGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "haxen_worker_inflight", + workerInflightGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ //nolint:unused // Reserved for future use + Name: "agentfield_worker_inflight", Help: "Number of active worker executions grouped by agent node.", }, []string{"agent"}) - stepDurationHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Name: "haxen_step_duration_seconds", + stepDurationHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{ //nolint:unused // Reserved for future use + Name: "agentfield_step_duration_seconds", Help: "Duration of workflow 
step executions split by terminal status.", Buckets: prometheus.DefBuckets, }, []string{"status"}) - stepRetriesCounter = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "haxen_step_retries_total", + stepRetriesCounter = promauto.NewCounterVec(prometheus.CounterOpts{ //nolint:unused // Reserved for future use + Name: "agentfield_step_retries_total", Help: "Total number of workflow step retry attempts grouped by agent node.", }, []string{"agent"}) waiterInflightGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "haxen_waiters_inflight", + Name: "agentfield_waiters_inflight", Help: "Number of synchronous waiter channels currently registered.", }) backpressureCounter = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "haxen_gateway_backpressure_total", + Name: "agentfield_gateway_backpressure_total", Help: "Count of backpressure events emitted by the execution gateway grouped by reason.", }, []string{"reason"}) ) @@ -57,19 +57,23 @@ func recordWaiterCount(count int) { waiterInflightGauge.Set(float64(count)) } +//nolint:unused // Reserved for future use func recordWorkerAcquire(agent string) { workerInflightGauge.WithLabelValues(normalizeAgentLabel(agent)).Inc() } +//nolint:unused // Reserved for future use func recordWorkerRelease(agent string) { workerInflightGauge.WithLabelValues(normalizeAgentLabel(agent)).Dec() } +//nolint:unused // Reserved for future use func observeStepDuration(status string, duration time.Duration) { normalized := types.NormalizeExecutionStatus(status) stepDurationHistogram.WithLabelValues(normalized).Observe(duration.Seconds()) } +//nolint:unused // Reserved for future use func incrementStepRetry(agent string) { stepRetriesCounter.WithLabelValues(normalizeAgentLabel(agent)).Inc() } diff --git a/control-plane/internal/services/executions_ui_service.go b/control-plane/internal/services/executions_ui_service.go index f028a3c4..46511ca5 100644 --- a/control-plane/internal/services/executions_ui_service.go +++ 
b/control-plane/internal/services/executions_ui_service.go @@ -1,14 +1,15 @@ package services import ( - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" "context" "fmt" "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + + "github.com/Agent-Field/agentfield/control-plane/internal/logger" ) // ExecutionEvent represents a real-time event related to executions. @@ -311,6 +312,7 @@ func (s *ExecutionsUIService) convertToUISummary(exec *types.WorkflowExecution) } } +//nolint:unused // retained for future UI sorting enhancements func (s *ExecutionsUIService) sortExecutions(executions []ExecutionSummaryForUI, sortBy, sortOrder string) { // Implementation for sorting executions // TODO: Implement sorting logic based on sortBy and sortOrder diff --git a/control-plane/internal/services/health_monitor.go b/control-plane/internal/services/health_monitor.go index ef3375a0..e239e1d4 100644 --- a/control-plane/internal/services/health_monitor.go +++ b/control-plane/internal/services/health_monitor.go @@ -1,15 +1,16 @@ package services import ( - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" "context" "sync" "time" + + "github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + 
"github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // HealthMonitorConfig holds configuration for the health monitor service diff --git a/control-plane/internal/services/keystore_service.go b/control-plane/internal/services/keystore_service.go index 97793e53..670d64c0 100644 --- a/control-plane/internal/services/keystore_service.go +++ b/control-plane/internal/services/keystore_service.go @@ -9,7 +9,7 @@ import ( "os" "path/filepath" - "github.com/your-org/haxen/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/config" ) // KeystoreService handles secure storage and management of cryptographic keys. diff --git a/control-plane/internal/services/keystore_service_test.go b/control-plane/internal/services/keystore_service_test.go index b8104e20..9a5175eb 100644 --- a/control-plane/internal/services/keystore_service_test.go +++ b/control-plane/internal/services/keystore_service_test.go @@ -6,7 +6,7 @@ import ( "path/filepath" "testing" - "github.com/your-org/haxen/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/config" "github.com/stretchr/testify/require" ) diff --git a/control-plane/internal/services/presence_manager.go b/control-plane/internal/services/presence_manager.go index 6b49e22f..f165e414 100644 --- a/control-plane/internal/services/presence_manager.go +++ b/control-plane/internal/services/presence_manager.go @@ -5,8 +5,8 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) type PresenceManagerConfig struct { diff --git a/control-plane/internal/services/status_manager.go b/control-plane/internal/services/status_manager.go index 264d129e..e49ac067 100644 --- 
a/control-plane/internal/services/status_manager.go +++ b/control-plane/internal/services/status_manager.go @@ -6,11 +6,11 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // StatusManagerConfig holds configuration for the status manager @@ -683,7 +683,12 @@ func (sm *StatusManager) checkTransitionTimeouts() { ctx := context.Background() if status, err := sm.GetAgentStatus(ctx, nodeID); err == nil { status.CompleteTransition() - sm.persistStatus(ctx, nodeID, status) + if err := sm.persistStatus(ctx, nodeID, status); err != nil { + logger.Logger.Warn(). + Err(err). + Str("node_id", nodeID). 
+ Msg("failed to persist status during transition timeout") + } } delete(sm.activeTransitions, nodeID) diff --git a/control-plane/internal/services/status_manager_test.go b/control-plane/internal/services/status_manager_test.go index 44b5e83a..f4d9a62a 100644 --- a/control-plane/internal/services/status_manager_test.go +++ b/control-plane/internal/services/status_manager_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -59,8 +59,8 @@ func setupStatusManagerStorage(t *testing.T) (storage.StorageProvider, context.C cfg := storage.StorageConfig{ Mode: "local", Local: storage.LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } diff --git a/control-plane/internal/services/ui_service.go b/control-plane/internal/services/ui_service.go index aeff917d..d2327019 100644 --- a/control-plane/internal/services/ui_service.go +++ b/control-plane/internal/services/ui_service.go @@ -1,17 +1,18 @@ package services import ( - "github.com/your-org/haxen/control-plane/internal/core/domain" - "github.com/your-org/haxen/control-plane/internal/core/interfaces" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" "context" "encoding/json" "fmt" "sync" "time" + + 
"github.com/Agent-Field/agentfield/control-plane/internal/core/domain" + "github.com/Agent-Field/agentfield/control-plane/internal/core/interfaces" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // NodeEvent represents a real-time event related to an agent node. @@ -265,7 +266,7 @@ func (s *UIService) DeregisterClient(clientChan chan NodeEvent) { // Use a safe close approach defer func() { if r := recover(); r != nil { - // Channel was already closed, ignore the panic + logger.Logger.Debug().Msg("attempted to close an already-closed SSE client channel") } }() close(clientChan) diff --git a/control-plane/internal/services/vc_service.go b/control-plane/internal/services/vc_service.go index 15189ef9..f7e15990 100644 --- a/control-plane/internal/services/vc_service.go +++ b/control-plane/internal/services/vc_service.go @@ -10,10 +10,10 @@ import ( "strings" "time" - "github.com/your-org/haxen/control-plane/internal/config" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/config" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // VCService handles verifiable credential generation, verification, and management. 
@@ -143,21 +143,21 @@ func (s *VCService) GenerateExecutionVC(ctx *types.ExecutionContext, inputData, // Create execution VC executionVC := &types.ExecutionVC{ - VCID: s.generateVCID(), - ExecutionID: ctx.ExecutionID, - WorkflowID: ctx.WorkflowID, - SessionID: ctx.SessionID, - IssuerDID: ctx.CallerDID, - TargetDID: ctx.TargetDID, - CallerDID: ctx.CallerDID, - VCDocument: json.RawMessage(vcDocBytes), - Signature: signature, - StorageURI: "", + VCID: s.generateVCID(), + ExecutionID: ctx.ExecutionID, + WorkflowID: ctx.WorkflowID, + SessionID: ctx.SessionID, + IssuerDID: ctx.CallerDID, + TargetDID: ctx.TargetDID, + CallerDID: ctx.CallerDID, + VCDocument: json.RawMessage(vcDocBytes), + Signature: signature, + StorageURI: "", DocumentSize: int64(len(vcDocBytes)), - InputHash: inputHash, - OutputHash: outputHash, - Status: dbStatus, - CreatedAt: time.Now(), + InputHash: inputHash, + OutputHash: outputHash, + Status: dbStatus, + CreatedAt: time.Now(), } // Store VC @@ -339,8 +339,8 @@ func (s *VCService) createVCDocument(ctx *types.ExecutionContext, callerIdentity InputDataHash: inputHash, OutputDataHash: outputHash, Metadata: map[string]interface{}{ - "haxen_version": "1.0.0", - "vc_version": "1.0", + "agentfield_version": "1.0.0", + "vc_version": "1.0", }, }, } @@ -352,13 +352,13 @@ func (s *VCService) createVCDocument(ctx *types.ExecutionContext, callerIdentity return &types.VCDocument{ Context: []string{ "https://www.w3.org/2018/credentials/v1", - "https://haxen.example.com/contexts/execution/v1", + "https://agentfield.example.com/contexts/execution/v1", }, Type: []string{ "VerifiableCredential", - "HaxenExecutionCredential", + "AgentFieldExecutionCredential", }, - ID: fmt.Sprintf("urn:haxen:vc:%s", vcID), + ID: fmt.Sprintf("urn:agentfield:vc:%s", vcID), Issuer: ctx.CallerDID, IssuanceDate: time.Now().UTC().Format(time.RFC3339), CredentialSubject: credentialSubject, @@ -498,15 +498,15 @@ func (s *VCService) generateWorkflowVCDocument(workflowID string, executionVCs [ 
startTime = time.Now() } - // Get haxen server DID as issuer using dynamic resolution - haxenServerID, err := s.didService.GetHaxenServerID() + // Get af server DID as issuer using dynamic resolution + agentfieldServerID, err := s.didService.GetAgentFieldServerID() if err != nil { - return nil, fmt.Errorf("failed to get haxen server ID: %w", err) + return nil, fmt.Errorf("failed to get af server ID: %w", err) } - registry, err := s.didService.GetRegistry(haxenServerID) + registry, err := s.didService.GetRegistry(agentfieldServerID) if err != nil { - return nil, fmt.Errorf("failed to get haxen server DID: %w", err) + return nil, fmt.Errorf("failed to get af server DID: %w", err) } issuerDID := registry.RootDID @@ -581,17 +581,17 @@ func (s *VCService) createWorkflowVCDocument(workflowID, sessionID string, compo SnapshotTime: time.Now().UTC().Format(time.RFC3339), Orchestrator: types.VCCaller{ DID: issuerDID, - Type: "haxen_server", + Type: "agentfield_server", AgentNodeDID: issuerDID, }, Audit: types.VCAudit{ InputDataHash: "", // Workflow-level doesn't have specific input/output OutputDataHash: "", Metadata: map[string]interface{}{ - "haxen_version": "1.0.0", - "vc_version": "1.0", - "workflow_type": "agent_execution_chain", - "total_executions": len(componentVCIDs), + "agentfield_version": "1.0.0", + "vc_version": "1.0", + "workflow_type": "agent_execution_chain", + "total_executions": len(componentVCIDs), }, }, } @@ -604,13 +604,13 @@ func (s *VCService) createWorkflowVCDocument(workflowID, sessionID string, compo return &types.WorkflowVCDocument{ Context: []string{ "https://www.w3.org/2018/credentials/v1", - "https://haxen.example.com/contexts/workflow/v1", + "https://agentfield.example.com/contexts/workflow/v1", }, Type: []string{ "VerifiableCredential", - "HaxenWorkflowCredential", + "AgentFieldWorkflowCredential", }, - ID: fmt.Sprintf("urn:haxen:workflow-vc:%s", vcID), + ID: fmt.Sprintf("urn:agentfield:workflow-vc:%s", vcID), Issuer: issuerDID, IssuanceDate: 
time.Now().UTC().Format(time.RFC3339), CredentialSubject: credentialSubject, @@ -850,11 +850,11 @@ type SecurityAnalysis struct { // ComplianceChecks represents compliance and audit verification results type ComplianceChecks struct { - W3CCompliance bool `json:"w3c_compliance"` - HaxenStandardCompliance bool `json:"haxen_standard_compliance"` - AuditTrailIntegrity bool `json:"audit_trail_integrity"` - DataIntegrityChecks bool `json:"data_integrity_checks"` - Issues []VerificationIssue `json:"issues"` + W3CCompliance bool `json:"w3c_compliance"` + AgentFieldStandardCompliance bool `json:"agentfield_standard_compliance"` + AuditTrailIntegrity bool `json:"audit_trail_integrity"` + DataIntegrityChecks bool `json:"data_integrity_checks"` + Issues []VerificationIssue `json:"issues"` } // VerifyExecutionVCComprehensive performs comprehensive verification of an execution VC @@ -1034,7 +1034,7 @@ func (s *VCService) performIntegrityChecks(execVC *types.ExecutionVC, vcDoc *typ }) } - // CRITICAL CHECK 7: Status consistency (with Haxen system status mapping) + // CRITICAL CHECK 7: Status consistency (with AgentField system status mapping) if !s.isStatusConsistent(execVC.Status, vcDoc.CredentialSubject.Execution.Status) { result.FieldConsistency = false result.Issues = append(result.Issues, VerificationIssue{ @@ -1170,11 +1170,11 @@ func (s *VCService) performSecurityAnalysis(execVC *types.ExecutionVC, vcDoc *ty // performComplianceChecks performs compliance verification func (s *VCService) performComplianceChecks(vcDoc *types.VCDocument) ComplianceChecks { result := ComplianceChecks{ - W3CCompliance: true, - HaxenStandardCompliance: true, - AuditTrailIntegrity: true, - DataIntegrityChecks: true, - Issues: []VerificationIssue{}, + W3CCompliance: true, + AgentFieldStandardCompliance: true, + AuditTrailIntegrity: true, + DataIntegrityChecks: true, + Issues: []VerificationIssue{}, } // Check W3C compliance @@ -1188,14 +1188,14 @@ func (s *VCService) performComplianceChecks(vcDoc 
*types.VCDocument) ComplianceC }) } - // Check Haxen standard compliance - if !s.checkHaxenStandardCompliance(vcDoc) { - result.HaxenStandardCompliance = false + // Check AgentField standard compliance + if !s.checkAgentFieldStandardCompliance(vcDoc) { + result.AgentFieldStandardCompliance = false result.Issues = append(result.Issues, VerificationIssue{ - Type: "haxen_compliance_failure", + Type: "agentfield_compliance_failure", Severity: "warning", Component: vcDoc.ID, - Description: "VC does not meet Haxen standard requirements", + Description: "VC does not meet AgentField standard requirements", }) } @@ -1275,9 +1275,9 @@ func (s *VCService) checkW3CCompliance(vcDoc *types.VCDocument) bool { return true } -func (s *VCService) checkHaxenStandardCompliance(vcDoc *types.VCDocument) bool { - // Check Haxen-specific compliance requirements - requiredTypes := []string{"VerifiableCredential", "HaxenExecutionCredential"} +func (s *VCService) checkAgentFieldStandardCompliance(vcDoc *types.VCDocument) bool { + // Check AgentField-specific compliance requirements + requiredTypes := []string{"VerifiableCredential", "AgentFieldExecutionCredential"} for _, required := range requiredTypes { found := false for _, vcType := range vcDoc.Type { @@ -1312,7 +1312,7 @@ func (s *VCService) calculateOverallScore(result *ComprehensiveVCVerificationRes return score } -// isStatusConsistent checks if status values are consistent, accounting for Haxen system status mapping +// isStatusConsistent checks if status values are consistent, accounting for AgentField system status mapping func (s *VCService) isStatusConsistent(metadataStatus, vcDocStatus string) bool { return types.NormalizeExecutionStatus(metadataStatus) == types.NormalizeExecutionStatus(vcDocStatus) } @@ -1366,11 +1366,11 @@ func (s *VCService) VerifyWorkflowVCComprehensive(workflowID string) (*Comprehen } allComplianceChecks := ComplianceChecks{ - W3CCompliance: true, - HaxenStandardCompliance: true, - AuditTrailIntegrity: 
true, - DataIntegrityChecks: true, - Issues: []VerificationIssue{}, + W3CCompliance: true, + AgentFieldStandardCompliance: true, + AuditTrailIntegrity: true, + DataIntegrityChecks: true, + Issues: []VerificationIssue{}, } // Verify each execution VC in the workflow @@ -1422,8 +1422,8 @@ func (s *VCService) VerifyWorkflowVCComprehensive(workflowID string) (*Comprehen if !complianceChecks.W3CCompliance { allComplianceChecks.W3CCompliance = false } - if !complianceChecks.HaxenStandardCompliance { - allComplianceChecks.HaxenStandardCompliance = false + if !complianceChecks.AgentFieldStandardCompliance { + allComplianceChecks.AgentFieldStandardCompliance = false } if !complianceChecks.AuditTrailIntegrity { allComplianceChecks.AuditTrailIntegrity = false @@ -1482,12 +1482,12 @@ func (s *VCService) VerifyWorkflowVCComprehensive(workflowID string) (*Comprehen // Check workflow VC compliance if !s.checkWorkflowVCCompliance(&workflowVCDoc) { - allComplianceChecks.HaxenStandardCompliance = false + allComplianceChecks.AgentFieldStandardCompliance = false allComplianceChecks.Issues = append(allComplianceChecks.Issues, VerificationIssue{ Type: "workflow_compliance_failure", Severity: "warning", Component: vcChain.WorkflowVC.WorkflowVCID, - Description: "Workflow VC does not meet Haxen standard requirements", + Description: "Workflow VC does not meet AgentField standard requirements", }) } } @@ -1559,10 +1559,10 @@ func (s *VCService) verifyWorkflowVCSignature(vcDoc *types.WorkflowVCDocument, i return ed25519.Verify(publicKey, canonicalBytes, signatureBytes), nil } -// checkWorkflowVCCompliance checks if a workflow VC meets Haxen standard compliance +// checkWorkflowVCCompliance checks if a workflow VC meets AgentField standard compliance func (s *VCService) checkWorkflowVCCompliance(vcDoc *types.WorkflowVCDocument) bool { - // Check Haxen-specific compliance requirements for workflow VCs - requiredTypes := []string{"VerifiableCredential", "HaxenWorkflowCredential"} + // Check 
AgentField-specific compliance requirements for workflow VCs + requiredTypes := []string{"VerifiableCredential", "AgentFieldWorkflowCredential"} for _, required := range requiredTypes { found := false for _, vcType := range vcDoc.Type { diff --git a/control-plane/internal/services/vc_storage.go b/control-plane/internal/services/vc_storage.go index 8a94c463..21a6c845 100644 --- a/control-plane/internal/services/vc_storage.go +++ b/control-plane/internal/services/vc_storage.go @@ -1,298 +1,298 @@ package services import ( - "context" - "encoding/json" - "fmt" + "context" + "encoding/json" + "fmt" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/internal/storage" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/internal/storage" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // VCStorage manages the storage and retrieval of verifiable credentials. type VCStorage struct { - storageProvider storage.StorageProvider + storageProvider storage.StorageProvider } // NewVCStorageWithStorage creates a new VC storage instance backed by the configured storage provider. func NewVCStorageWithStorage(storageProvider storage.StorageProvider) *VCStorage { - return &VCStorage{storageProvider: storageProvider} + return &VCStorage{storageProvider: storageProvider} } // Initialize performs a lightweight sanity check that the storage provider is available. func (s *VCStorage) Initialize() error { - if s.storageProvider == nil { - logger.Logger.Warn().Msg("No storage provider available - VC persistence disabled") - } - return nil + if s.storageProvider == nil { + logger.Logger.Warn().Msg("No storage provider available - VC persistence disabled") + } + return nil } // StoreExecutionVC persists an execution VC using the backing storage provider. 
func (s *VCStorage) StoreExecutionVC(ctx context.Context, vc *types.ExecutionVC) error { - if s.storageProvider == nil { - return fmt.Errorf("no storage provider configured for VC storage") - } - - documentSizeBytes := vc.DocumentSize - if documentSizeBytes == 0 && len(vc.VCDocument) > 0 { - documentSizeBytes = int64(len(vc.VCDocument)) - } - - return s.storageProvider.StoreExecutionVC( - ctx, - vc.VCID, - vc.ExecutionID, - vc.WorkflowID, - vc.SessionID, - vc.IssuerDID, - vc.TargetDID, - vc.CallerDID, - vc.InputHash, - vc.OutputHash, - vc.Status, - []byte(vc.VCDocument), - vc.Signature, - vc.StorageURI, - documentSizeBytes, - ) + if s.storageProvider == nil { + return fmt.Errorf("no storage provider configured for VC storage") + } + + documentSizeBytes := vc.DocumentSize + if documentSizeBytes == 0 && len(vc.VCDocument) > 0 { + documentSizeBytes = int64(len(vc.VCDocument)) + } + + return s.storageProvider.StoreExecutionVC( + ctx, + vc.VCID, + vc.ExecutionID, + vc.WorkflowID, + vc.SessionID, + vc.IssuerDID, + vc.TargetDID, + vc.CallerDID, + vc.InputHash, + vc.OutputHash, + vc.Status, + []byte(vc.VCDocument), + vc.Signature, + vc.StorageURI, + documentSizeBytes, + ) } // GetExecutionVC fetches a single execution VC by its VC identifier. func (s *VCStorage) GetExecutionVC(vcID string) (*types.ExecutionVC, error) { - if s.storageProvider == nil { - return nil, fmt.Errorf("no storage provider configured for VC storage") - } + if s.storageProvider == nil { + return nil, fmt.Errorf("no storage provider configured for VC storage") + } - ctx := context.Background() - vcInfo, err := s.storageProvider.GetExecutionVC(ctx, vcID) - if err != nil { - return nil, err - } + ctx := context.Background() + vcInfo, err := s.storageProvider.GetExecutionVC(ctx, vcID) + if err != nil { + return nil, err + } - return s.convertVCInfoToExecutionVC(vcInfo) + return s.convertVCInfoToExecutionVC(vcInfo) } // GetExecutionVCsByWorkflow returns all execution VCs associated with a workflow. 
func (s *VCStorage) GetExecutionVCsByWorkflow(workflowID string) ([]types.ExecutionVC, error) { - filters := types.VCFilters{WorkflowID: &workflowID} - return s.loadExecutionVCsFromDatabaseWithFilters(filters) + filters := types.VCFilters{WorkflowID: &workflowID} + return s.loadExecutionVCsFromDatabaseWithFilters(filters) } // GetExecutionVCsBySession returns all execution VCs associated with a session. func (s *VCStorage) GetExecutionVCsBySession(sessionID string) ([]types.ExecutionVC, error) { - filters := types.VCFilters{SessionID: &sessionID} - return s.loadExecutionVCsFromDatabaseWithFilters(filters) + filters := types.VCFilters{SessionID: &sessionID} + return s.loadExecutionVCsFromDatabaseWithFilters(filters) } // GetExecutionVCByExecutionID fetches the most recent VC for a specific execution ID. func (s *VCStorage) GetExecutionVCByExecutionID(executionID string) (*types.ExecutionVC, error) { - filters := types.VCFilters{ExecutionID: &executionID, Limit: 1} - vcs, err := s.loadExecutionVCsFromDatabaseWithFilters(filters) - if err != nil { - return nil, err - } - if len(vcs) == 0 { - return nil, fmt.Errorf("execution VC not found for execution_id: %s", executionID) - } - return &vcs[0], nil + filters := types.VCFilters{ExecutionID: &executionID, Limit: 1} + vcs, err := s.loadExecutionVCsFromDatabaseWithFilters(filters) + if err != nil { + return nil, err + } + if len(vcs) == 0 { + return nil, fmt.Errorf("execution VC not found for execution_id: %s", executionID) + } + return &vcs[0], nil } // QueryExecutionVCs runs a filtered VC query against the backing store. 
func (s *VCStorage) QueryExecutionVCs(filters *types.VCFilters) ([]types.ExecutionVC, error) { - var applied types.VCFilters - if filters != nil { - applied = *filters - } - return s.loadExecutionVCsFromDatabaseWithFilters(applied) + var applied types.VCFilters + if filters != nil { + applied = *filters + } + return s.loadExecutionVCsFromDatabaseWithFilters(applied) } // StoreWorkflowVC persists workflow-level VC metadata. func (s *VCStorage) StoreWorkflowVC(ctx context.Context, vc *types.WorkflowVC) error { - if s.storageProvider == nil { - return fmt.Errorf("no storage provider configured for VC storage") - } - - documentSizeBytes := vc.DocumentSize - if documentSizeBytes == 0 && len(vc.VCDocument) > 0 { - documentSizeBytes = int64(len(vc.VCDocument)) - } - - return s.storageProvider.StoreWorkflowVC( - ctx, - vc.WorkflowVCID, - vc.WorkflowID, - vc.SessionID, - vc.ComponentVCs, - vc.Status, - &vc.StartTime, - vc.EndTime, - vc.TotalSteps, - vc.CompletedSteps, - vc.StorageURI, - documentSizeBytes, - ) + if s.storageProvider == nil { + return fmt.Errorf("no storage provider configured for VC storage") + } + + documentSizeBytes := vc.DocumentSize + if documentSizeBytes == 0 && len(vc.VCDocument) > 0 { + documentSizeBytes = int64(len(vc.VCDocument)) + } + + return s.storageProvider.StoreWorkflowVC( + ctx, + vc.WorkflowVCID, + vc.WorkflowID, + vc.SessionID, + vc.ComponentVCs, + vc.Status, + &vc.StartTime, + vc.EndTime, + vc.TotalSteps, + vc.CompletedSteps, + vc.StorageURI, + documentSizeBytes, + ) } // GetWorkflowVC fetches the latest workflow VC for a workflow identifier. 
func (s *VCStorage) GetWorkflowVC(workflowID string) (*types.WorkflowVC, error) { - if s.storageProvider == nil { - return nil, fmt.Errorf("no storage provider configured for VC storage") - } - - ctx := context.Background() - infos, err := s.storageProvider.ListWorkflowVCs(ctx, workflowID) - if err != nil { - return nil, err - } - if len(infos) == 0 { - return nil, fmt.Errorf("workflow VC not found: %s", workflowID) - } - - return s.convertWorkflowVCInfo(infos[0]) + if s.storageProvider == nil { + return nil, fmt.Errorf("no storage provider configured for VC storage") + } + + ctx := context.Background() + infos, err := s.storageProvider.ListWorkflowVCs(ctx, workflowID) + if err != nil { + return nil, err + } + if len(infos) == 0 { + return nil, fmt.Errorf("workflow VC not found: %s", workflowID) + } + + return s.convertWorkflowVCInfo(infos[0]) } // ListWorkflowVCs returns all workflow VCs. func (s *VCStorage) ListWorkflowVCs() ([]*types.WorkflowVC, error) { - if s.storageProvider == nil { - return []*types.WorkflowVC{}, fmt.Errorf("no storage provider configured for VC storage") - } - - ctx := context.Background() - infos, err := s.storageProvider.ListWorkflowVCs(ctx, "") - if err != nil { - return nil, err - } - - results := make([]*types.WorkflowVC, 0, len(infos)) - for _, info := range infos { - vc, err := s.convertWorkflowVCInfo(info) - if err != nil { - logger.Logger.Warn().Err(err).Str("workflow_vc_id", info.WorkflowVCID).Msg("failed to convert workflow VC info") - continue - } - results = append(results, vc) - } - - return results, nil + if s.storageProvider == nil { + return []*types.WorkflowVC{}, fmt.Errorf("no storage provider configured for VC storage") + } + + ctx := context.Background() + infos, err := s.storageProvider.ListWorkflowVCs(ctx, "") + if err != nil { + return nil, err + } + + results := make([]*types.WorkflowVC, 0, len(infos)) + for _, info := range infos { + vc, err := s.convertWorkflowVCInfo(info) + if err != nil { + 
logger.Logger.Warn().Err(err).Str("workflow_vc_id", info.WorkflowVCID).Msg("failed to convert workflow VC info") + continue + } + results = append(results, vc) + } + + return results, nil } // DeleteExecutionVC is currently a no-op placeholder. func (s *VCStorage) DeleteExecutionVC(vcID string) error { - logger.Logger.Debug().Str("vc_id", vcID).Msg("DeleteExecutionVC is not implemented - skipping") - return nil + logger.Logger.Debug().Str("vc_id", vcID).Msg("DeleteExecutionVC is not implemented - skipping") + return nil } // DeleteWorkflowVC is currently a no-op placeholder. func (s *VCStorage) DeleteWorkflowVC(workflowID string) error { - logger.Logger.Debug().Str("workflow_id", workflowID).Msg("DeleteWorkflowVC is not implemented - skipping") - return nil + logger.Logger.Debug().Str("workflow_id", workflowID).Msg("DeleteWorkflowVC is not implemented - skipping") + return nil } // GetVCStats returns simple metrics about stored VCs. func (s *VCStorage) GetVCStats() map[string]interface{} { - stats := map[string]interface{}{ - "execution_vcs": 0, - "workflow_vcs": 0, - } - - if s.storageProvider == nil { - return stats - } - - ctx := context.Background() - - executionInfos, err := s.storageProvider.ListExecutionVCs(ctx, types.VCFilters{}) - if err == nil { - stats["execution_vcs"] = len(executionInfos) - } else { - logger.Logger.Warn().Err(err).Msg("failed to collect execution VC stats") - } - - workflowInfos, err := s.storageProvider.ListWorkflowVCs(ctx, "") - if err == nil { - stats["workflow_vcs"] = len(workflowInfos) - } else { - logger.Logger.Warn().Err(err).Msg("failed to collect workflow VC stats") - } - - return stats + stats := map[string]interface{}{ + "execution_vcs": 0, + "workflow_vcs": 0, + } + + if s.storageProvider == nil { + return stats + } + + ctx := context.Background() + + executionInfos, err := s.storageProvider.ListExecutionVCs(ctx, types.VCFilters{}) + if err == nil { + stats["execution_vcs"] = len(executionInfos) + } else { + 
logger.Logger.Warn().Err(err).Msg("failed to collect execution VC stats") + } + + workflowInfos, err := s.storageProvider.ListWorkflowVCs(ctx, "") + if err == nil { + stats["workflow_vcs"] = len(workflowInfos) + } else { + logger.Logger.Warn().Err(err).Msg("failed to collect workflow VC stats") + } + + return stats } // convertVCInfoToExecutionVC hydrates a full ExecutionVC from summary data. func (s *VCStorage) convertVCInfoToExecutionVC(vcInfo *types.ExecutionVCInfo) (*types.ExecutionVC, error) { - if vcInfo == nil { - return nil, fmt.Errorf("execution VC info is nil") - } - - vcDocument, signature, err := s.getFullVCFromDatabase(vcInfo.VCID) - if err != nil { - return nil, fmt.Errorf("failed to load VC document for %s: %w", vcInfo.VCID, err) - } - - return &types.ExecutionVC{ - VCID: vcInfo.VCID, - ExecutionID: vcInfo.ExecutionID, - WorkflowID: vcInfo.WorkflowID, - SessionID: vcInfo.SessionID, - IssuerDID: vcInfo.IssuerDID, - TargetDID: vcInfo.TargetDID, - CallerDID: vcInfo.CallerDID, - VCDocument: vcDocument, - Signature: signature, - StorageURI: vcInfo.StorageURI, - DocumentSize: vcInfo.DocumentSize, - InputHash: vcInfo.InputHash, - OutputHash: vcInfo.OutputHash, - Status: vcInfo.Status, - CreatedAt: vcInfo.CreatedAt, - }, nil + if vcInfo == nil { + return nil, fmt.Errorf("execution VC info is nil") + } + + vcDocument, signature, err := s.getFullVCFromDatabase(vcInfo.VCID) + if err != nil { + return nil, fmt.Errorf("failed to load VC document for %s: %w", vcInfo.VCID, err) + } + + return &types.ExecutionVC{ + VCID: vcInfo.VCID, + ExecutionID: vcInfo.ExecutionID, + WorkflowID: vcInfo.WorkflowID, + SessionID: vcInfo.SessionID, + IssuerDID: vcInfo.IssuerDID, + TargetDID: vcInfo.TargetDID, + CallerDID: vcInfo.CallerDID, + VCDocument: vcDocument, + Signature: signature, + StorageURI: vcInfo.StorageURI, + DocumentSize: vcInfo.DocumentSize, + InputHash: vcInfo.InputHash, + OutputHash: vcInfo.OutputHash, + Status: vcInfo.Status, + CreatedAt: vcInfo.CreatedAt, + }, nil 
} // convertWorkflowVCInfo hydrates a WorkflowVC struct from stored metadata. func (s *VCStorage) convertWorkflowVCInfo(info *types.WorkflowVCInfo) (*types.WorkflowVC, error) { - if info == nil { - return nil, fmt.Errorf("workflow VC info is nil") - } - - return &types.WorkflowVC{ - WorkflowID: info.WorkflowID, - SessionID: info.SessionID, - ComponentVCs: info.ComponentVCIDs, - WorkflowVCID: info.WorkflowVCID, - Status: info.Status, - StartTime: info.StartTime, - EndTime: info.EndTime, - TotalSteps: info.TotalSteps, - CompletedSteps: info.CompletedSteps, - StorageURI: info.StorageURI, - DocumentSize: info.DocumentSize, - }, nil + if info == nil { + return nil, fmt.Errorf("workflow VC info is nil") + } + + return &types.WorkflowVC{ + WorkflowID: info.WorkflowID, + SessionID: info.SessionID, + ComponentVCs: info.ComponentVCIDs, + WorkflowVCID: info.WorkflowVCID, + Status: info.Status, + StartTime: info.StartTime, + EndTime: info.EndTime, + TotalSteps: info.TotalSteps, + CompletedSteps: info.CompletedSteps, + StorageURI: info.StorageURI, + DocumentSize: info.DocumentSize, + }, nil } // loadExecutionVCsFromDatabaseWithFilters retrieves execution VCs that match the provided filters. 
func (s *VCStorage) loadExecutionVCsFromDatabaseWithFilters(filters types.VCFilters) ([]types.ExecutionVC, error) { - if s.storageProvider == nil { - return []types.ExecutionVC{}, fmt.Errorf("no storage provider configured for VC storage") - } - - ctx := context.Background() - vcInfos, err := s.storageProvider.ListExecutionVCs(ctx, filters) - if err != nil { - return nil, fmt.Errorf("failed to list execution VCs from database: %w", err) - } - - result := make([]types.ExecutionVC, 0, len(vcInfos)) - for _, info := range vcInfos { - vc, err := s.convertVCInfoToExecutionVC(info) - if err != nil { - logger.Logger.Warn().Err(err).Str("vc_id", info.VCID).Msg("failed to convert execution VC info") - continue - } - result = append(result, *vc) - } - - return result, nil + if s.storageProvider == nil { + return []types.ExecutionVC{}, fmt.Errorf("no storage provider configured for VC storage") + } + + ctx := context.Background() + vcInfos, err := s.storageProvider.ListExecutionVCs(ctx, filters) + if err != nil { + return nil, fmt.Errorf("failed to list execution VCs from database: %w", err) + } + + result := make([]types.ExecutionVC, 0, len(vcInfos)) + for _, info := range vcInfos { + vc, err := s.convertVCInfoToExecutionVC(info) + if err != nil { + logger.Logger.Warn().Err(err).Str("vc_id", info.VCID).Msg("failed to convert execution VC info") + continue + } + result = append(result, *vc) + } + + return result, nil } // getFullVCFromDatabase retrieves the full VC document and signature from the storage provider. @@ -307,5 +307,5 @@ func (s *VCStorage) getFullVCFromDatabase(vcID string) (json.RawMessage, string, // getFullVCFromLocalStorage retrieves the VC payload from local SQLite storage. 
func (s *VCStorage) getFullVCFromLocalStorage(localStorage *storage.LocalStorage, vcID string) (json.RawMessage, string, error) { - return localStorage.GetFullExecutionVC(vcID) + return localStorage.GetFullExecutionVC(vcID) } diff --git a/control-plane/internal/services/webhook_dispatcher.go b/control-plane/internal/services/webhook_dispatcher.go index 5f30487e..e5d30bb4 100644 --- a/control-plane/internal/services/webhook_dispatcher.go +++ b/control-plane/internal/services/webhook_dispatcher.go @@ -14,8 +14,8 @@ import ( "sync" "time" - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) type WebhookStore interface { @@ -316,7 +316,7 @@ func (d *webhookDispatcher) process(job webhookJob) { req.Header.Set(trimmedKey, value) } if webhook.Secret != nil { - req.Header.Set("X-Haxen-Signature", generateWebhookSignature(*webhook.Secret, body)) + req.Header.Set("X-AgentField-Signature", generateWebhookSignature(*webhook.Secret, body)) } var ( diff --git a/control-plane/internal/services/workflowstatus/aggregator.go b/control-plane/internal/services/workflowstatus/aggregator.go index be4ffc07..6200f0c3 100644 --- a/control-plane/internal/services/workflowstatus/aggregator.go +++ b/control-plane/internal/services/workflowstatus/aggregator.go @@ -1,9 +1,10 @@ package workflowstatus import ( - "github.com/your-org/haxen/control-plane/pkg/types" "strings" "time" + + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // AggregatedWorkflowStatus captures lifecycle information derived from execution nodes and pending steps. 
diff --git a/control-plane/internal/services/workflowstatus/aggregator_test.go b/control-plane/internal/services/workflowstatus/aggregator_test.go index 809dea8f..bd862cc8 100644 --- a/control-plane/internal/services/workflowstatus/aggregator_test.go +++ b/control-plane/internal/services/workflowstatus/aggregator_test.go @@ -1,9 +1,10 @@ package workflowstatus import ( - "github.com/your-org/haxen/control-plane/pkg/types" "testing" "time" + + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) func makeExecution(status string, startedAt time.Time, completedAt *time.Time, parent bool) *types.WorkflowExecution { diff --git a/control-plane/internal/storage/events.go b/control-plane/internal/storage/events.go index 941e640c..5d13fa44 100644 --- a/control-plane/internal/storage/events.go +++ b/control-plane/internal/storage/events.go @@ -1,7 +1,6 @@ package storage import ( - "github.com/your-org/haxen/control-plane/pkg/types" "context" "database/sql" "encoding/json" @@ -11,6 +10,8 @@ import ( "sync" "time" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + "github.com/boltdb/bolt" ) @@ -74,14 +75,11 @@ func (ls *LocalStorage) startEventCleanup() { ticker := time.NewTicker(1 * time.Hour) // Clean up every hour defer ticker.Stop() - for { - select { - case <-ticker.C: - if ls.mode == "postgres" { - ls.cleanupExpiredEventsPostgres() - } else { - ls.cleanupExpiredEvents() - } + for range ticker.C { + if ls.mode == "postgres" { + ls.cleanupExpiredEventsPostgres() + } else { + ls.cleanupExpiredEvents() } } } diff --git a/control-plane/internal/storage/execution_records.go b/control-plane/internal/storage/execution_records.go index 7d14990c..0456ee41 100644 --- a/control-plane/internal/storage/execution_records.go +++ b/control-plane/internal/storage/execution_records.go @@ -1,14 +1,15 @@ package storage import ( - "github.com/your-org/haxen/control-plane/internal/logger" - "github.com/your-org/haxen/control-plane/pkg/types" "context" "database/sql" 
"encoding/json" "fmt" "strings" "time" + + "github.com/Agent-Field/agentfield/control-plane/internal/logger" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // CreateExecutionRecord inserts a new execution row using the simplified schema. @@ -103,7 +104,7 @@ func (ls *LocalStorage) UpdateExecutionRecord(ctx context.Context, executionID s if err != nil { return nil, fmt.Errorf("begin transaction: %w", err) } - defer tx.Rollback() + defer rollbackTx(tx, "UpdateExecutionRecord:"+executionID) row := tx.QueryRowContext(ctx, ` SELECT execution_id, run_id, parent_execution_id, @@ -270,10 +271,6 @@ func (ls *LocalStorage) QueryExecutionRecords(ctx context.Context, filter types. orderColumn = "created_at" case "updated_at": orderColumn = "updated_at" - case "started_at": - fallthrough - default: - orderColumn = "started_at" } orderDirection := "DESC" if !filter.SortDescending { @@ -361,7 +358,7 @@ func (ls *LocalStorage) MarkStaleExecutions(ctx context.Context, staleAfter time if err != nil { return 0, fmt.Errorf("begin stale execution transaction: %w", err) } - defer tx.Rollback() + defer rollbackTx(tx, "MarkStaleExecutions") updateStmt, err := tx.PrepareContext(ctx, ` UPDATE executions diff --git a/control-plane/internal/storage/execution_state_validation.go b/control-plane/internal/storage/execution_state_validation.go index a3746063..8651ddc6 100644 --- a/control-plane/internal/storage/execution_state_validation.go +++ b/control-plane/internal/storage/execution_state_validation.go @@ -3,7 +3,7 @@ package storage import ( "fmt" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) type InvalidExecutionStateTransitionError struct { diff --git a/control-plane/internal/storage/execution_webhooks.go b/control-plane/internal/storage/execution_webhooks.go index 4a2bf594..1f24183b 100644 --- a/control-plane/internal/storage/execution_webhooks.go +++ 
b/control-plane/internal/storage/execution_webhooks.go @@ -1,13 +1,14 @@ package storage import ( - "github.com/your-org/haxen/control-plane/pkg/types" "context" "database/sql" "encoding/json" "fmt" "strings" "time" + + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // RegisterExecutionWebhook stores or updates the webhook registration for an execution. diff --git a/control-plane/internal/storage/local.go b/control-plane/internal/storage/local.go index 34b93551..eb98e0c0 100644 --- a/control-plane/internal/storage/local.go +++ b/control-plane/internal/storage/local.go @@ -7,8 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/pkg/types" "log" "net/url" "os" @@ -20,6 +18,9 @@ import ( "sync" "time" + "github.com/Agent-Field/agentfield/control-plane/internal/events" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + "github.com/boltdb/bolt" "github.com/jackc/pgx/v5/pgconn" _ "github.com/jackc/pgx/v5/stdlib" // Import pgx driver for PostgreSQL @@ -68,7 +69,7 @@ func (e *ValidationError) Error() string { // getWorkflowExecutionByID is a helper function that retrieves a workflow execution using DBTX interface func (ls *LocalStorage) getWorkflowExecutionByID(ctx context.Context, q DBTX, executionID string) (*types.WorkflowExecution, error) { query := ` - SELECT workflow_id, execution_id, haxen_request_id, run_id, session_id, actor_id, + SELECT workflow_id, execution_id, agentfield_request_id, run_id, session_id, actor_id, agent_node_id, parent_workflow_id, parent_execution_id, root_workflow_id, workflow_depth, reasoner_id, input_data, output_data, input_size, output_size, status, started_at, completed_at, duration_ms, @@ -88,7 +89,7 @@ func (ls *LocalStorage) getWorkflowExecutionByID(ctx context.Context, q DBTX, ex var leaseOwner sql.NullString var leaseExpires sql.NullTime err := row.Scan( - &execution.WorkflowID, &execution.ExecutionID, 
&execution.HaxenRequestID, + &execution.WorkflowID, &execution.ExecutionID, &execution.AgentFieldRequestID, &runID, &execution.SessionID, &execution.ActorID, &execution.AgentNodeID, &execution.ParentWorkflowID, &execution.ParentExecutionID, &execution.RootWorkflowID, &execution.WorkflowDepth, &execution.ReasonerID, &inputData, &outputData, @@ -518,7 +519,7 @@ func (ls *LocalStorage) initializeSQLite(ctx context.Context) error { log.Printf("πŸ“ Initializing SQLite database at: %s", dbPath) - busyTimeout := resolveEnvInt("HAXEN_SQLITE_BUSY_TIMEOUT_MS", 60000) + busyTimeout := resolveEnvInt("AGENTFIELD_SQLITE_BUSY_TIMEOUT_MS", 60000) if busyTimeout <= 0 { busyTimeout = 60000 } @@ -533,12 +534,12 @@ func (ls *LocalStorage) initializeSQLite(ctx context.Context) error { ls.db = newSQLDatabase(db, "local") - maxOpen := resolveEnvInt("HAXEN_SQLITE_MAX_OPEN_CONNS", 1) + maxOpen := resolveEnvInt("AGENTFIELD_SQLITE_MAX_OPEN_CONNS", 1) if maxOpen <= 0 { maxOpen = 1 } ls.db.SetMaxOpenConns(maxOpen) - idleConns := resolveEnvInt("HAXEN_SQLITE_MAX_IDLE_CONNS", 1) + idleConns := resolveEnvInt("AGENTFIELD_SQLITE_MAX_IDLE_CONNS", 1) if idleConns < 0 { idleConns = 0 } @@ -1122,7 +1123,7 @@ func (ls *LocalStorage) ensureSQLiteIndexes() error { "CREATE INDEX IF NOT EXISTS idx_agent_nodes_health ON agent_nodes(health_status)", "CREATE INDEX IF NOT EXISTS idx_agent_nodes_lifecycle ON agent_nodes(lifecycle_status)", "CREATE INDEX IF NOT EXISTS idx_agent_dids_agent_node ON agent_dids(agent_node_id)", - "CREATE INDEX IF NOT EXISTS idx_agent_dids_haxen_server ON agent_dids(haxen_server_id)", + "CREATE INDEX IF NOT EXISTS idx_agent_dids_agentfield_server ON agent_dids(agentfield_server_id)", "CREATE INDEX IF NOT EXISTS idx_component_dids_agent_did ON component_dids(agent_did)", "CREATE INDEX IF NOT EXISTS idx_component_dids_type ON component_dids(component_type)", "CREATE INDEX IF NOT EXISTS idx_execution_vcs_execution_id ON execution_vcs(execution_id)", @@ -1283,7 +1284,7 @@ func (ls 
*LocalStorage) ensureExecutionVCSchema() error { committed := false defer func() { if !committed { - _ = tx.Rollback() + rollbackTx(tx, "migrate_execution_vcs") } }() @@ -1349,7 +1350,7 @@ func (ls *LocalStorage) ensureWorkflowVCSchema() error { committed := false defer func() { if !committed { - _ = tx.Rollback() + rollbackTx(tx, "migrate_workflow_vcs") } }() @@ -1839,7 +1840,7 @@ func (ls *LocalStorage) storeWorkflowExecutionInternal(ctx context.Context, exec } return fmt.Errorf("failed to begin transaction: %w", err) } - defer tx.Rollback() // Will be no-op if tx.Commit() succeeds + defer rollbackTx(tx, "storeWorkflowExecution:"+execution.ExecutionID) // Execute the workflow insert using the transaction if err := ls.executeWorkflowInsert(ctx, tx, execution); err != nil { @@ -1942,7 +1943,7 @@ func (ls *LocalStorage) retryDatabaseOperation(ctx context.Context, operationID // sqliteWorkflowExecutionInsertQuery captures the column order for workflow execution inserts. const sqliteWorkflowExecutionInsertQuery = `INSERT INTO workflow_executions ( - workflow_id, execution_id, haxen_request_id, run_id, session_id, actor_id, + workflow_id, execution_id, agentfield_request_id, run_id, session_id, actor_id, agent_node_id, parent_workflow_id, parent_execution_id, root_workflow_id, workflow_depth, reasoner_id, input_data, output_data, input_size, output_size, status, started_at, completed_at, duration_ms, @@ -2030,7 +2031,7 @@ func (ls *LocalStorage) executeWorkflowInsert(ctx context.Context, q DBTX, execu // Execute INSERT query using the DBTX interface _, err = q.ExecContext(ctx, insertQuery, - execution.WorkflowID, execution.ExecutionID, execution.HaxenRequestID, execution.RunID, + execution.WorkflowID, execution.ExecutionID, execution.AgentFieldRequestID, execution.RunID, execution.SessionID, execution.ActorID, execution.AgentNodeID, execution.ParentWorkflowID, execution.ParentExecutionID, execution.RootWorkflowID, execution.WorkflowDepth, execution.ReasonerID, 
execution.InputData, execution.OutputData, @@ -2100,7 +2101,7 @@ func (ls *LocalStorage) attemptWorkflowExecutionUpdate(ctx context.Context, exec if err != nil { return fmt.Errorf("failed to begin transaction: %w", err) } - defer tx.Rollback() // Will be no-op if tx.Commit() succeeds + defer rollbackTx(tx, "attemptWorkflowExecutionUpdate:"+executionID) // Read the current execution within the transaction currentExecution, err := ls.getWorkflowExecutionWithTx(txCtx, tx, executionID) @@ -2296,7 +2297,7 @@ func (ls *LocalStorage) QueryWorkflowExecutions(ctx context.Context, filters typ baseQuery := ` SELECT workflow_executions.id, workflow_executions.workflow_id, workflow_executions.execution_id, - workflow_executions.haxen_request_id, workflow_executions.run_id, workflow_executions.session_id, workflow_executions.actor_id, + workflow_executions.agentfield_request_id, workflow_executions.run_id, workflow_executions.session_id, workflow_executions.actor_id, workflow_executions.agent_node_id, workflow_executions.parent_workflow_id, workflow_executions.parent_execution_id, workflow_executions.root_workflow_id, workflow_executions.workflow_depth, workflow_executions.reasoner_id, workflow_executions.input_data, workflow_executions.output_data, @@ -2424,7 +2425,7 @@ func (ls *LocalStorage) QueryWorkflowExecutions(ctx context.Context, filters typ err := rows.Scan( &execution.ID, &execution.WorkflowID, &execution.ExecutionID, - &execution.HaxenRequestID, &runID, &execution.SessionID, &execution.ActorID, + &execution.AgentFieldRequestID, &runID, &execution.SessionID, &execution.ActorID, &execution.AgentNodeID, &execution.ParentWorkflowID, &execution.ParentExecutionID, &execution.RootWorkflowID, &execution.WorkflowDepth, &execution.ReasonerID, &inputData, &outputData, &execution.InputSize, &execution.OutputSize, @@ -2498,7 +2499,7 @@ func (ls *LocalStorage) QueryWorkflowDAG(ctx context.Context, rootWorkflowID str WITH RECURSIVE workflow_dag AS ( -- Base case: Find the root 
execution(s) SELECT - id, workflow_id, execution_id, haxen_request_id, run_id, session_id, actor_id, + id, workflow_id, execution_id, agentfield_request_id, run_id, session_id, actor_id, agent_node_id, parent_workflow_id, parent_execution_id, root_workflow_id, workflow_depth, reasoner_id, input_data, output_data, input_size, output_size, status, started_at, completed_at, duration_ms, @@ -2516,7 +2517,7 @@ func (ls *LocalStorage) QueryWorkflowDAG(ctx context.Context, rootWorkflowID str -- Recursive case: Find children of current level SELECT - we.id, we.workflow_id, we.execution_id, we.haxen_request_id, we.run_id, we.session_id, we.actor_id, + we.id, we.workflow_id, we.execution_id, we.agentfield_request_id, we.run_id, we.session_id, we.actor_id, we.agent_node_id, we.parent_workflow_id, we.parent_execution_id, we.root_workflow_id, we.workflow_depth, we.reasoner_id, we.input_data, we.output_data, we.input_size, we.output_size, we.status, we.started_at, we.completed_at, we.duration_ms, @@ -2532,7 +2533,7 @@ func (ls *LocalStorage) QueryWorkflowDAG(ctx context.Context, rootWorkflowID str AND wd.path NOT LIKE '%' || we.execution_id || '%' -- Cycle detection ) SELECT - id, workflow_id, execution_id, haxen_request_id, run_id, session_id, actor_id, + id, workflow_id, execution_id, agentfield_request_id, run_id, session_id, actor_id, agent_node_id, parent_workflow_id, parent_execution_id, root_workflow_id, workflow_depth, reasoner_id, input_data, output_data, input_size, output_size, status, started_at, completed_at, duration_ms, @@ -2565,7 +2566,7 @@ func (ls *LocalStorage) QueryWorkflowDAG(ctx context.Context, rootWorkflowID str err := rows.Scan( &execution.ID, &execution.WorkflowID, &execution.ExecutionID, - &execution.HaxenRequestID, &runID, &execution.SessionID, &execution.ActorID, + &execution.AgentFieldRequestID, &runID, &execution.SessionID, &execution.ActorID, &execution.AgentNodeID, &execution.ParentWorkflowID, &execution.ParentExecutionID, 
&execution.RootWorkflowID, &execution.WorkflowDepth, &execution.ReasonerID, &inputData, &outputData, &execution.InputSize, &execution.OutputSize, @@ -2675,7 +2676,7 @@ func (ls *LocalStorage) CleanupOldExecutions(ctx context.Context, retentionPerio if err != nil { return 0, fmt.Errorf("failed to begin cleanup transaction: %w", err) } - defer tx.Rollback() + defer rollbackTx(tx, "cleanupOldExecutions") // Delete executions in batch // Use placeholders for safe deletion @@ -2727,14 +2728,14 @@ func (ls *LocalStorage) CleanupWorkflow(ctx context.Context, identifier string, if trimmedID == "" { errMsg := "workflow ID cannot be empty" result.ErrorMessage = &errMsg - return result, fmt.Errorf(errMsg) + return result, errors.New(errMsg) } targets, err := ls.resolveWorkflowCleanupTargets(ctx, trimmedID) if err != nil { errMsg := fmt.Sprintf("failed to resolve workflow cleanup targets: %v", err) result.ErrorMessage = &errMsg - return result, fmt.Errorf(errMsg) + return result, errors.New(errMsg) } if targets.primaryWorkflowID != "" { @@ -2759,20 +2760,20 @@ func (ls *LocalStorage) CleanupWorkflow(ctx context.Context, identifier string, if err != nil { errMsg := fmt.Sprintf("failed to begin cleanup transaction: %v", err) result.ErrorMessage = &errMsg - return result, fmt.Errorf(errMsg) + return result, errors.New(errMsg) } - defer tx.Rollback() + defer rollbackTx(tx, "CleanupWorkflow:"+trimmedID) if err := ls.performWorkflowCleanup(ctx, tx, targets); err != nil { errMsg := fmt.Sprintf("failed to cleanup workflow: %v", err) result.ErrorMessage = &errMsg - return result, fmt.Errorf(errMsg) + return result, errors.New(errMsg) } if err := tx.Commit(); err != nil { errMsg := fmt.Sprintf("failed to commit cleanup transaction: %v", err) result.ErrorMessage = &errMsg - return result, fmt.Errorf(errMsg) + return result, errors.New(errMsg) } result.Success = true @@ -3083,6 +3084,7 @@ func (ls *LocalStorage) deleteWorkflowExecutions(ctx context.Context, tx DBTX, w return int(rows), 
nil } +//nolint:unused // retained for future workflow cleanup optimizations func (ls *LocalStorage) deleteWorkflowRuns(ctx context.Context, tx DBTX, primaryWorkflowID string, workflowIDs, runIDs []string) (int, error) { conditions := []string{} args := []interface{}{} @@ -3932,7 +3934,7 @@ func (ls *LocalStorage) RegisterAgent(ctx context.Context, agent *types.AgentNod if err != nil { return fmt.Errorf("failed to begin transaction for agent registration: %w", err) } - defer tx.Rollback() // Will be no-op if tx.Commit() succeeds + defer rollbackTx(tx, "RegisterAgent:"+agent.ID) // Execute the agent registration using the transaction if err := ls.executeRegisterAgent(ctx, tx, agent); err != nil { @@ -4098,7 +4100,7 @@ func (ls *LocalStorage) ListAgents(ctx context.Context, filters types.AgentFilte // Add WHERE clause if there are conditions if len(conditions) > 0 { - query += " WHERE " + fmt.Sprintf("%s", conditions[0]) + query += " WHERE " + conditions[0] for i := 1; i < len(conditions); i++ { query += " AND " + conditions[i] } @@ -4183,7 +4185,7 @@ func (ls *LocalStorage) UpdateAgentHealth(ctx context.Context, id string, status if err != nil { return fmt.Errorf("failed to begin transaction for agent health update: %w", err) } - defer tx.Rollback() // Will be no-op if tx.Commit() succeeds + defer rollbackTx(tx, "UpdateAgentHealth:"+id) // Execute the health update using the transaction if err := ls.executeUpdateAgentHealth(ctx, tx, id, status); err != nil { @@ -4276,7 +4278,7 @@ func (ls *LocalStorage) UpdateAgentHeartbeat(ctx context.Context, id string, hea if err != nil { return fmt.Errorf("failed to begin transaction for agent heartbeat update: %w", err) } - defer tx.Rollback() // Will be no-op if tx.Commit() succeeds + defer rollbackTx(tx, "UpdateAgentHeartbeat:"+id) // Execute the heartbeat update using the transaction if err := ls.executeUpdateAgentHeartbeat(ctx, tx, id, heartbeatTime); err != nil { @@ -4319,7 +4321,7 @@ func (ls *LocalStorage) 
UpdateAgentLifecycleStatus(ctx context.Context, id strin if err != nil { return fmt.Errorf("failed to begin transaction for agent lifecycle update: %w", err) } - defer tx.Rollback() // Will be no-op if tx.Commit() succeeds + defer rollbackTx(tx, "UpdateAgentLifecycleStatus:"+id) // Execute the lifecycle status update using the transaction if err := ls.executeUpdateAgentLifecycleStatus(ctx, tx, id, status); err != nil { @@ -4979,6 +4981,8 @@ func (ls *LocalStorage) GetReasonerPerformanceMetrics(ctx context.Context, reaso } // executeReasonerMetricsQuery performs the reasoner metrics query within a transaction +// +//nolint:unused // retained for upcoming analytics endpoints func (ls *LocalStorage) executeReasonerMetricsQuery(tx DBTX, nodeID, localReasonerID string) (*types.ReasonerPerformanceMetrics, error) { // Query for metrics from workflow_executions table using separate node_id and reasoner_id metricsQuery := ` @@ -5154,6 +5158,8 @@ func (ls *LocalStorage) GetReasonerExecutionHistory(ctx context.Context, reasone } // executeReasonerHistoryQuery performs the reasoner history query within a transaction +// +//nolint:unused // retained for upcoming analytics endpoints func (ls *LocalStorage) executeReasonerHistoryQuery(tx DBTX, nodeID, localReasonerID string, page, limit, offset int) (*types.ReasonerExecutionHistory, error) { // Use a single optimized query with window functions to get both count and data efficiently // This reduces lock time and improves performance @@ -5337,20 +5343,20 @@ func (ls *LocalStorage) GetWorkflowExecutionEventBus() *events.EventBus[*types.W return ls.workflowExecutionEventBus } -// Haxen Server DID operations -func (ls *LocalStorage) StoreHaxenServerDID(ctx context.Context, haxenServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error { +// AgentField Server DID operations +func (ls *LocalStorage) StoreAgentFieldServerDID(ctx context.Context, agentfieldServerID, rootDID string, masterSeed []byte, 
createdAt, lastKeyRotation time.Time) error { // Check context cancellation early if err := ctx.Err(); err != nil { - return fmt.Errorf("context cancelled during store haxen server DID: %w", err) + return fmt.Errorf("context cancelled during store af server DID: %w", err) } // Validate input parameters - if haxenServerID == "" { + if agentfieldServerID == "" { return &ValidationError{ - Field: "haxen_server_id", - Value: haxenServerID, - Reason: "haxen server ID cannot be empty", - Context: "StoreHaxenServerDID", + Field: "agentfield_server_id", + Value: agentfieldServerID, + Reason: "af server ID cannot be empty", + Context: "StoreAgentFieldServerDID", } } if rootDID == "" { @@ -5358,7 +5364,7 @@ func (ls *LocalStorage) StoreHaxenServerDID(ctx context.Context, haxenServerID, Field: "root_did", Value: rootDID, Reason: "root DID cannot be empty", - Context: "StoreHaxenServerDID", + Context: "StoreAgentFieldServerDID", } } if len(masterSeed) == 0 { @@ -5366,7 +5372,7 @@ func (ls *LocalStorage) StoreHaxenServerDID(ctx context.Context, haxenServerID, Field: "master_seed", Value: "", Reason: "master seed cannot be empty", - Context: "StoreHaxenServerDID", + Context: "StoreAgentFieldServerDID", } } @@ -5377,21 +5383,21 @@ func (ls *LocalStorage) StoreHaxenServerDID(ctx context.Context, haxenServerID, } defer func() { if err != nil { - tx.Rollback() + rollbackTx(tx, "StoreAgentFieldServerDID") } }() // Execute with retry logic err = ls.retryOnConstraintFailure(ctx, func() error { query := ` - INSERT OR REPLACE INTO did_registry (haxen_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation, total_dids) + INSERT OR REPLACE INTO did_registry (agentfield_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation, total_dids) VALUES (?, ?, ?, ?, ?, 0) ` if ls.mode == "postgres" { query = ` - INSERT INTO did_registry (haxen_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation, total_dids) + INSERT INTO did_registry 
(agentfield_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation, total_dids) VALUES (?, ?, ?, ?, ?, 0) - ON CONFLICT (haxen_server_id) DO UPDATE SET + ON CONFLICT (agentfield_server_id) DO UPDATE SET root_did = EXCLUDED.root_did, master_seed_encrypted = EXCLUDED.master_seed_encrypted, created_at = EXCLUDED.created_at, @@ -5399,9 +5405,9 @@ func (ls *LocalStorage) StoreHaxenServerDID(ctx context.Context, haxenServerID, total_dids = did_registry.total_dids ` } - _, execErr := tx.ExecContext(ctx, query, haxenServerID, rootDID, masterSeed, createdAt, lastKeyRotation) + _, execErr := tx.ExecContext(ctx, query, agentfieldServerID, rootDID, masterSeed, createdAt, lastKeyRotation) if execErr != nil { - return fmt.Errorf("failed to store haxen server DID: %w", execErr) + return fmt.Errorf("failed to store af server DID: %w", execErr) } return nil }, 3) // Retry up to 3 times for transient errors @@ -5415,19 +5421,19 @@ func (ls *LocalStorage) StoreHaxenServerDID(ctx context.Context, haxenServerID, return fmt.Errorf("failed to commit transaction: %w", err) } - log.Printf("Successfully stored haxen server DID: haxen_server_id=%s, root_did=%s", haxenServerID, rootDID) + log.Printf("Successfully stored af server DID: agentfield_server_id=%s, root_did=%s", agentfieldServerID, rootDID) return nil } // StoreAgentDIDWithComponents stores an agent DID along with its component DIDs in a single transaction -func (ls *LocalStorage) StoreAgentDIDWithComponents(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int, components []ComponentDIDRequest) error { +func (ls *LocalStorage) StoreAgentDIDWithComponents(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int, components []ComponentDIDRequest) error { // Check context cancellation early if err := ctx.Err(); err != nil { return fmt.Errorf("context cancelled during store agent DID with components: %w", err) } // Pre-storage 
validation - if err := ls.validateHaxenServerExists(ctx, haxenServerDID); err != nil { + if err := ls.validateAgentFieldServerExists(ctx, agentfieldServerDID); err != nil { return fmt.Errorf("pre-storage validation failed: %w", err) } @@ -5438,7 +5444,7 @@ func (ls *LocalStorage) StoreAgentDIDWithComponents(ctx context.Context, agentID } defer func() { if err != nil { - tx.Rollback() + rollbackTx(tx, "StoreAgentDIDWithComponents") } }() @@ -5446,24 +5452,24 @@ func (ls *LocalStorage) StoreAgentDIDWithComponents(ctx context.Context, agentID err = ls.retryOnConstraintFailure(ctx, func() error { query := ` INSERT INTO agent_dids ( - agent_node_id, did, haxen_server_id, public_key_jwk, derivation_path, registered_at, status + agent_node_id, did, agentfield_server_id, public_key_jwk, derivation_path, registered_at, status ) VALUES (?, ?, ?, ?, ?, ?, ?)` derivationPath := fmt.Sprintf("m/44'/0'/0'/%d", derivationIndex) - _, execErr := tx.ExecContext(ctx, query, agentID, agentDID, haxenServerDID, publicKeyJWK, derivationPath, time.Now(), "active") + _, execErr := tx.ExecContext(ctx, query, agentID, agentDID, agentfieldServerDID, publicKeyJWK, derivationPath, time.Now(), "active") if execErr != nil { if strings.Contains(execErr.Error(), "UNIQUE constraint failed") || strings.Contains(execErr.Error(), "agent_dids") { return &DuplicateDIDError{ - DID: fmt.Sprintf("agent:%s@%s", agentID, haxenServerDID), + DID: fmt.Sprintf("agent:%s@%s", agentID, agentfieldServerDID), Type: "agent", } } if strings.Contains(execErr.Error(), "FOREIGN KEY constraint failed") { return &ForeignKeyConstraintError{ Table: "agent_dids", - Column: "haxen_server_id", + Column: "agentfield_server_id", ReferencedTable: "did_registry", - ReferencedValue: haxenServerDID, + ReferencedValue: agentfieldServerDID, Operation: "INSERT", } } @@ -5521,56 +5527,56 @@ func (ls *LocalStorage) StoreAgentDIDWithComponents(ctx context.Context, agentID return nil } -func (ls *LocalStorage) GetHaxenServerDID(ctx 
context.Context, haxenServerID string) (*types.HaxenServerDIDInfo, error) { +func (ls *LocalStorage) GetAgentFieldServerDID(ctx context.Context, agentfieldServerID string) (*types.AgentFieldServerDIDInfo, error) { // Check context cancellation early if err := ctx.Err(); err != nil { - return nil, fmt.Errorf("context cancelled during get haxen server DID: %w", err) + return nil, fmt.Errorf("context cancelled during get af server DID: %w", err) } query := ` - SELECT haxen_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation - FROM did_registry WHERE haxen_server_id = ? + SELECT agentfield_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation + FROM did_registry WHERE agentfield_server_id = ? ` - row := ls.db.QueryRowContext(ctx, query, haxenServerID) - info := &types.HaxenServerDIDInfo{} + row := ls.db.QueryRowContext(ctx, query, agentfieldServerID) + info := &types.AgentFieldServerDIDInfo{} - err := row.Scan(&info.HaxenServerID, &info.RootDID, &info.MasterSeed, &info.CreatedAt, &info.LastKeyRotation) + err := row.Scan(&info.AgentFieldServerID, &info.RootDID, &info.MasterSeed, &info.CreatedAt, &info.LastKeyRotation) if err != nil { if err == sql.ErrNoRows { return nil, nil // Return nil, nil for "not found" } - return nil, fmt.Errorf("failed to get haxen server DID: %w", err) + return nil, fmt.Errorf("failed to get af server DID: %w", err) } return info, nil } -func (ls *LocalStorage) ListHaxenServerDIDs(ctx context.Context) ([]*types.HaxenServerDIDInfo, error) { +func (ls *LocalStorage) ListAgentFieldServerDIDs(ctx context.Context) ([]*types.AgentFieldServerDIDInfo, error) { // Check context cancellation early if err := ctx.Err(); err != nil { - return nil, fmt.Errorf("context cancelled during list haxen server DIDs: %w", err) + return nil, fmt.Errorf("context cancelled during list af server DIDs: %w", err) } query := ` - SELECT haxen_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation + SELECT 
agentfield_server_id, root_did, master_seed_encrypted, created_at, last_key_rotation FROM did_registry ORDER BY created_at DESC ` rows, err := ls.db.QueryContext(ctx, query) if err != nil { - return nil, fmt.Errorf("failed to list haxen server DIDs: %w", err) + return nil, fmt.Errorf("failed to list af server DIDs: %w", err) } defer rows.Close() - var infos []*types.HaxenServerDIDInfo + var infos []*types.AgentFieldServerDIDInfo for rows.Next() { // Check context cancellation during iteration if err := ctx.Err(); err != nil { - return nil, fmt.Errorf("context cancelled during haxen server DID list iteration: %w", err) + return nil, fmt.Errorf("context cancelled during af server DID list iteration: %w", err) } - info := &types.HaxenServerDIDInfo{} - err := rows.Scan(&info.HaxenServerID, &info.RootDID, &info.MasterSeed, &info.CreatedAt, &info.LastKeyRotation) + info := &types.AgentFieldServerDIDInfo{} + err := rows.Scan(&info.AgentFieldServerID, &info.RootDID, &info.MasterSeed, &info.CreatedAt, &info.LastKeyRotation) if err != nil { - return nil, fmt.Errorf("failed to scan haxen server DID: %w", err) + return nil, fmt.Errorf("failed to scan af server DID: %w", err) } infos = append(infos, info) } @@ -5669,31 +5675,31 @@ func (ls *LocalStorage) ListDIDs(ctx context.Context) ([]*types.DIDRegistryEntry return entries, nil } -// validateHaxenServerExists checks if a haxen server registry exists -func (ls *LocalStorage) validateHaxenServerExists(ctx context.Context, haxenServerID string) error { - if haxenServerID == "" { +// validateAgentFieldServerExists checks if a af server registry exists +func (ls *LocalStorage) validateAgentFieldServerExists(ctx context.Context, agentfieldServerID string) error { + if agentfieldServerID == "" { return &ValidationError{ - Field: "haxen_server_id", - Value: haxenServerID, - Reason: "haxen server ID cannot be empty", + Field: "agentfield_server_id", + Value: agentfieldServerID, + Reason: "af server ID cannot be empty", Context: 
"pre-storage validation", } } - query := `SELECT 1 FROM did_registry WHERE haxen_server_id = ? LIMIT 1` + query := `SELECT 1 FROM did_registry WHERE agentfield_server_id = ? LIMIT 1` var exists int - err := ls.db.QueryRowContext(ctx, query, haxenServerID).Scan(&exists) + err := ls.db.QueryRowContext(ctx, query, agentfieldServerID).Scan(&exists) if err != nil { if err == sql.ErrNoRows { return &ForeignKeyConstraintError{ Table: "agent_dids", - Column: "haxen_server_id", + Column: "agentfield_server_id", ReferencedTable: "did_registry", - ReferencedValue: haxenServerID, + ReferencedValue: agentfieldServerID, Operation: "INSERT", } } - return fmt.Errorf("failed to validate haxen server existence: %w", err) + return fmt.Errorf("failed to validate af server existence: %w", err) } return nil } @@ -5770,14 +5776,14 @@ func (ls *LocalStorage) retryOnConstraintFailure(ctx context.Context, operation } // Agent DID operations -func (ls *LocalStorage) StoreAgentDID(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int) error { +func (ls *LocalStorage) StoreAgentDID(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int) error { // Check context cancellation early if err := ctx.Err(); err != nil { return fmt.Errorf("context cancelled during store agent DID: %w", err) } // Pre-storage validation - if err := ls.validateHaxenServerExists(ctx, haxenServerDID); err != nil { + if err := ls.validateAgentFieldServerExists(ctx, agentfieldServerDID); err != nil { return fmt.Errorf("pre-storage validation failed: %w", err) } @@ -5814,7 +5820,7 @@ func (ls *LocalStorage) StoreAgentDID(ctx context.Context, agentID, agentDID, ha } defer func() { if err != nil { - tx.Rollback() + rollbackTx(tx, "StoreAgentDID") } }() @@ -5823,17 +5829,17 @@ func (ls *LocalStorage) StoreAgentDID(ctx context.Context, agentID, agentDID, ha // INSERT-only query - no ON CONFLICT clause for security query := ` INSERT INTO 
agent_dids ( - agent_node_id, did, haxen_server_id, public_key_jwk, derivation_path, registered_at, status + agent_node_id, did, agentfield_server_id, public_key_jwk, derivation_path, registered_at, status ) VALUES (?, ?, ?, ?, ?, ?, ?)` derivationPath := fmt.Sprintf("m/44'/0'/0'/%d", derivationIndex) - _, execErr := tx.ExecContext(ctx, query, agentID, agentDID, haxenServerDID, publicKeyJWK, derivationPath, time.Now(), "active") + _, execErr := tx.ExecContext(ctx, query, agentID, agentDID, agentfieldServerDID, publicKeyJWK, derivationPath, time.Now(), "active") if execErr != nil { // Check if this is a unique constraint violation (duplicate agent DID) if strings.Contains(execErr.Error(), "UNIQUE constraint failed") || strings.Contains(execErr.Error(), "agent_dids") { - log.Printf("Duplicate agent DID entry detected: agent_id=%s, haxen_server_id=%s", agentID, haxenServerDID) + log.Printf("Duplicate agent DID entry detected: agent_id=%s, agentfield_server_id=%s", agentID, agentfieldServerDID) return &DuplicateDIDError{ - DID: fmt.Sprintf("agent:%s@%s", agentID, haxenServerDID), + DID: fmt.Sprintf("agent:%s@%s", agentID, agentfieldServerDID), Type: "agent", } } @@ -5841,9 +5847,9 @@ func (ls *LocalStorage) StoreAgentDID(ctx context.Context, agentID, agentDID, ha if strings.Contains(execErr.Error(), "FOREIGN KEY constraint failed") { return &ForeignKeyConstraintError{ Table: "agent_dids", - Column: "haxen_server_id", + Column: "agentfield_server_id", ReferencedTable: "did_registry", - ReferencedValue: haxenServerDID, + ReferencedValue: agentfieldServerDID, Operation: "INSERT", } } @@ -5872,7 +5878,7 @@ func (ls *LocalStorage) GetAgentDID(ctx context.Context, agentID string) (*types } query := ` - SELECT agent_node_id, did, haxen_server_id, public_key_jwk, derivation_path, + SELECT agent_node_id, did, agentfield_server_id, public_key_jwk, derivation_path, reasoners, skills, status, registered_at FROM agent_dids WHERE agent_node_id = ?` @@ -5880,7 +5886,7 @@ func (ls 
*LocalStorage) GetAgentDID(ctx context.Context, agentID string) (*types info := &types.AgentDIDInfo{} var reasonersJSON, skillsJSON, publicKeyJWK string - err := row.Scan(&info.AgentNodeID, &info.DID, &info.HaxenServerID, &publicKeyJWK, + err := row.Scan(&info.AgentNodeID, &info.DID, &info.AgentFieldServerID, &publicKeyJWK, &info.DerivationPath, &reasonersJSON, &skillsJSON, &info.Status, &info.RegisteredAt) if err != nil { if err == sql.ErrNoRows { @@ -5917,7 +5923,7 @@ func (ls *LocalStorage) ListAgentDIDs(ctx context.Context) ([]*types.AgentDIDInf } query := ` - SELECT agent_node_id, did, haxen_server_id, public_key_jwk, derivation_path, + SELECT agent_node_id, did, agentfield_server_id, public_key_jwk, derivation_path, reasoners, skills, status, registered_at FROM agent_dids ORDER BY registered_at DESC` @@ -5936,7 +5942,7 @@ func (ls *LocalStorage) ListAgentDIDs(ctx context.Context) ([]*types.AgentDIDInf info := &types.AgentDIDInfo{} var reasonersJSON, skillsJSON, publicKeyJWK string - err := rows.Scan(&info.AgentNodeID, &info.DID, &info.HaxenServerID, &publicKeyJWK, + err := rows.Scan(&info.AgentNodeID, &info.DID, &info.AgentFieldServerID, &publicKeyJWK, &info.DerivationPath, &reasonersJSON, &skillsJSON, &info.Status, &info.RegisteredAt) if err != nil { return nil, fmt.Errorf("failed to scan agent DID: %w", err) @@ -6020,7 +6026,7 @@ func (ls *LocalStorage) StoreComponentDID(ctx context.Context, componentID, comp } defer func() { if err != nil { - tx.Rollback() + rollbackTx(tx, "StoreComponentDID") } }() @@ -6453,7 +6459,7 @@ func (ls *LocalStorage) StoreWorkflowExecutionEvent(ctx context.Context, event * if err != nil { return fmt.Errorf("failed to begin transaction: %w", err) } - defer tx.Rollback() + defer rollbackTx(tx, "StoreWorkflowExecutionEvent:"+event.ExecutionID) if err := ls.storeWorkflowExecutionEventTx(ctx, tx, event); err != nil { return err diff --git a/control-plane/internal/storage/local_agent_execution_test.go 
b/control-plane/internal/storage/local_agent_execution_test.go index 07e001e0..761fa926 100644 --- a/control-plane/internal/storage/local_agent_execution_test.go +++ b/control-plane/internal/storage/local_agent_execution_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) diff --git a/control-plane/internal/storage/local_cleanup_test.go b/control-plane/internal/storage/local_cleanup_test.go index 5f278890..2a7cd9bc 100644 --- a/control-plane/internal/storage/local_cleanup_test.go +++ b/control-plane/internal/storage/local_cleanup_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -20,8 +20,8 @@ func TestLocalStorageCleanupWorkflowByRunID(t *testing.T) { cfg := StorageConfig{ Mode: "local", Local: LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } @@ -67,22 +67,22 @@ func TestLocalStorageCleanupWorkflowByRunID(t *testing.T) { } exec := &types.WorkflowExecution{ - WorkflowID: workflowID, - ExecutionID: "exec_cleanup_test", - HaxenRequestID: "req_cleanup_test", - RunID: &runID, - AgentNodeID: "agent_cleanup", - ReasonerID: "reasoner.cleanup", - InputData: json.RawMessage("{}"), - OutputData: json.RawMessage("{}"), - InputSize: 0, - OutputSize: 0, - Status: string(types.ExecutionStatusRunning), - StartedAt: now, - CreatedAt: now, - UpdatedAt: now, - WorkflowDepth: 0, - WorkflowTags: []string{}, + WorkflowID: workflowID, + ExecutionID: "exec_cleanup_test", + AgentFieldRequestID: "req_cleanup_test", + RunID: &runID, + AgentNodeID: "agent_cleanup", + ReasonerID: 
"reasoner.cleanup", + InputData: json.RawMessage("{}"), + OutputData: json.RawMessage("{}"), + InputSize: 0, + OutputSize: 0, + Status: string(types.ExecutionStatusRunning), + StartedAt: now, + CreatedAt: now, + UpdatedAt: now, + WorkflowDepth: 0, + WorkflowTags: []string{}, } if err := ls.StoreWorkflowExecution(ctx, exec); err != nil { t.Fatalf("store workflow execution: %v", err) @@ -157,8 +157,8 @@ func TestLocalStorageCleanupOldExecutions(t *testing.T) { cfg := StorageConfig{ Mode: "local", Local: LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } @@ -179,17 +179,17 @@ func TestLocalStorageCleanupOldExecutions(t *testing.T) { insertExecution := func(executionID string, completedAt time.Time) { exec := &types.WorkflowExecution{ - WorkflowID: workflowID, - ExecutionID: executionID, - HaxenRequestID: executionID + "_req", - AgentNodeID: "agent", - ReasonerID: "reasoner", - Status: "completed", - StartedAt: completedAt, - CreatedAt: completedAt, - UpdatedAt: completedAt, - WorkflowDepth: 0, - WorkflowTags: []string{}, + WorkflowID: workflowID, + ExecutionID: executionID, + AgentFieldRequestID: executionID + "_req", + AgentNodeID: "agent", + ReasonerID: "reasoner", + Status: "completed", + StartedAt: completedAt, + CreatedAt: completedAt, + UpdatedAt: completedAt, + WorkflowDepth: 0, + WorkflowTags: []string{}, } exec.CompletedAt = &completedAt require.NoError(t, ls.StoreWorkflowExecution(ctx, exec)) diff --git a/control-plane/internal/storage/local_query_test.go b/control-plane/internal/storage/local_query_test.go index 14d21fa0..ecd93da0 100644 --- a/control-plane/internal/storage/local_query_test.go +++ b/control-plane/internal/storage/local_query_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + 
"github.com/Agent-Field/agentfield/control-plane/pkg/types" "github.com/stretchr/testify/require" ) @@ -20,8 +20,8 @@ func setupLocalStorage(t *testing.T) (*LocalStorage, context.Context) { cfg := StorageConfig{ Mode: "local", Local: LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } @@ -65,32 +65,32 @@ func TestQueryWorkflowExecutionsFiltersAndSearch(t *testing.T) { executions := []*types.WorkflowExecution{ { - WorkflowID: "wf-root", - ExecutionID: "exec-alpha", - HaxenRequestID: "req-1", - RunID: &runID, - AgentNodeID: "agent-one", - ReasonerID: "reasoner.alpha", - WorkflowName: &alphaName, - Status: runningStatus, - StartedAt: now.Add(-5 * time.Minute), - CreatedAt: now.Add(-5 * time.Minute), - UpdatedAt: now.Add(-4 * time.Minute), - DurationMS: &alphaDuration, + WorkflowID: "wf-root", + ExecutionID: "exec-alpha", + AgentFieldRequestID: "req-1", + RunID: &runID, + AgentNodeID: "agent-one", + ReasonerID: "reasoner.alpha", + WorkflowName: &alphaName, + Status: runningStatus, + StartedAt: now.Add(-5 * time.Minute), + CreatedAt: now.Add(-5 * time.Minute), + UpdatedAt: now.Add(-4 * time.Minute), + DurationMS: &alphaDuration, }, { - WorkflowID: "wf-root", - ExecutionID: "exec-beta", - HaxenRequestID: "req-2", - RunID: &runID, - AgentNodeID: "agent-two", - ReasonerID: "reasoner.beta", - WorkflowName: &betaName, - Status: succeededStatus, - StartedAt: now.Add(-3 * time.Minute), - CreatedAt: now.Add(-3 * time.Minute), - UpdatedAt: now.Add(-2 * time.Minute), - DurationMS: &betaDuration, + WorkflowID: "wf-root", + ExecutionID: "exec-beta", + AgentFieldRequestID: "req-2", + RunID: &runID, + AgentNodeID: "agent-two", + ReasonerID: "reasoner.beta", + WorkflowName: &betaName, + Status: succeededStatus, + StartedAt: now.Add(-3 * time.Minute), + CreatedAt: now.Add(-3 * time.Minute), + 
UpdatedAt: now.Add(-2 * time.Minute), + DurationMS: &betaDuration, }, } @@ -155,34 +155,34 @@ func TestQueryWorkflowDAGReturnsHierarchy(t *testing.T) { require.NoError(t, ls.StoreWorkflowRun(ctx, run)) root := &types.WorkflowExecution{ - WorkflowID: "wf-root", - ExecutionID: "root-exec", - HaxenRequestID: "req-root", - RunID: &runID, - AgentNodeID: "agent-root", - ReasonerID: "root", - Status: string(types.ExecutionStatusRunning), - StartedAt: now.Add(-2 * time.Minute), - CreatedAt: now.Add(-2 * time.Minute), - UpdatedAt: now.Add(-2 * time.Minute), + WorkflowID: "wf-root", + ExecutionID: "root-exec", + AgentFieldRequestID: "req-root", + RunID: &runID, + AgentNodeID: "agent-root", + ReasonerID: "root", + Status: string(types.ExecutionStatusRunning), + StartedAt: now.Add(-2 * time.Minute), + CreatedAt: now.Add(-2 * time.Minute), + UpdatedAt: now.Add(-2 * time.Minute), } require.NoError(t, ls.StoreWorkflowExecution(ctx, root)) parentID := root.ExecutionID child := &types.WorkflowExecution{ - WorkflowID: "wf-root", - ExecutionID: "child-exec", - HaxenRequestID: "req-child", - RunID: &runID, - AgentNodeID: "agent-child", - ReasonerID: "child", - ParentExecutionID: &parentID, - RootWorkflowID: &root.WorkflowID, - WorkflowDepth: 1, - Status: string(types.ExecutionStatusRunning), - StartedAt: now.Add(-time.Minute), - CreatedAt: now.Add(-time.Minute), - UpdatedAt: now.Add(-time.Minute), + WorkflowID: "wf-root", + ExecutionID: "child-exec", + AgentFieldRequestID: "req-child", + RunID: &runID, + AgentNodeID: "agent-child", + ReasonerID: "child", + ParentExecutionID: &parentID, + RootWorkflowID: &root.WorkflowID, + WorkflowDepth: 1, + Status: string(types.ExecutionStatusRunning), + StartedAt: now.Add(-time.Minute), + CreatedAt: now.Add(-time.Minute), + UpdatedAt: now.Add(-time.Minute), } require.NoError(t, ls.StoreWorkflowExecution(ctx, child)) diff --git a/control-plane/internal/storage/local_storage_test.go b/control-plane/internal/storage/local_storage_test.go index 
6b699dff..9b783c3d 100644 --- a/control-plane/internal/storage/local_storage_test.go +++ b/control-plane/internal/storage/local_storage_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/your-org/haxen/control-plane/pkg/types" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) func TestLocalStorageStoreWorkflowExecutionPersistsLifecycleFields(t *testing.T) { @@ -17,8 +17,8 @@ func TestLocalStorageStoreWorkflowExecutionPersistsLifecycleFields(t *testing.T) cfg := StorageConfig{ Mode: "local", Local: LocalStorageConfig{ - DatabasePath: filepath.Join(tempDir, "haxen.db"), - KVStorePath: filepath.Join(tempDir, "haxen.bolt"), + DatabasePath: filepath.Join(tempDir, "agentfield.db"), + KVStorePath: filepath.Join(tempDir, "agentfield.bolt"), }, } @@ -50,21 +50,21 @@ func TestLocalStorageStoreWorkflowExecutionPersistsLifecycleFields(t *testing.T) } execID := "exec_test" - haxenRequestID := "req_test" + agentfieldRequestID := "req_test" agentID := "agent_1" reasonerID := "reasoner.alpha" exec := &types.WorkflowExecution{ - WorkflowID: workflowID, - ExecutionID: execID, - HaxenRequestID: haxenRequestID, - RunID: &runID, - AgentNodeID: agentID, - ReasonerID: reasonerID, - Status: string(types.ExecutionStatusPending), - StartedAt: now, - CreatedAt: now, - UpdatedAt: now, + WorkflowID: workflowID, + ExecutionID: execID, + AgentFieldRequestID: agentfieldRequestID, + RunID: &runID, + AgentNodeID: agentID, + ReasonerID: reasonerID, + Status: string(types.ExecutionStatusPending), + StartedAt: now, + CreatedAt: now, + UpdatedAt: now, } if err := ls.StoreWorkflowExecution(ctx, exec); err != nil { diff --git a/control-plane/internal/storage/locks.go b/control-plane/internal/storage/locks.go index 09b1297c..01496192 100644 --- a/control-plane/internal/storage/locks.go +++ b/control-plane/internal/storage/locks.go @@ -1,18 +1,19 @@ package storage import ( - "github.com/your-org/haxen/control-plane/pkg/types" "context" "database/sql" "fmt" "time" + 
"github.com/Agent-Field/agentfield/control-plane/pkg/types" + "github.com/boltdb/bolt" "github.com/google/uuid" ) const ( - locksBucket = "locks" + locksBucket = "locks" //nolint:unused // Reserved for future use ) // AcquireLock attempts to acquire a distributed lock. diff --git a/control-plane/internal/storage/models.go b/control-plane/internal/storage/models.go index 328a21cf..dfbd0c3e 100644 --- a/control-plane/internal/storage/models.go +++ b/control-plane/internal/storage/models.go @@ -106,7 +106,7 @@ type WorkflowExecutionModel struct { ID int64 `gorm:"column:id;primaryKey;autoIncrement"` WorkflowID string `gorm:"column:workflow_id;not null;index;index:idx_workflow_executions_workflow_status,priority:1"` ExecutionID string `gorm:"column:execution_id;not null;uniqueIndex"` - HaxenRequestID string `gorm:"column:haxen_request_id;not null;index"` + AgentFieldRequestID string `gorm:"column:agentfield_request_id;not null;index"` RunID *string `gorm:"column:run_id;index"` SessionID *string `gorm:"column:session_id;index;index:idx_workflow_executions_session_status,priority:1;index:idx_workflow_executions_session_status_time,priority:1;index:idx_workflow_executions_session_time,priority:1"` ActorID *string `gorm:"column:actor_id;index;index:idx_workflow_executions_actor_status,priority:1;index:idx_workflow_executions_actor_status_time,priority:1;index:idx_workflow_executions_actor_time,priority:1"` @@ -260,7 +260,7 @@ type SessionModel struct { func (SessionModel) TableName() string { return "sessions" } type DIDRegistryModel struct { - HaxenServerID string `gorm:"column:haxen_server_id;primaryKey"` + AgentFieldServerID string `gorm:"column:agentfield_server_id;primaryKey"` MasterSeedEncrypted []byte `gorm:"column:master_seed_encrypted;not null"` RootDID string `gorm:"column:root_did;not null;unique"` AgentNodes string `gorm:"column:agent_nodes;default:'{}'"` @@ -272,17 +272,17 @@ type DIDRegistryModel struct { func (DIDRegistryModel) TableName() string { return 
"did_registry" } type AgentDIDModel struct { - DID string `gorm:"column:did;primaryKey"` - AgentNodeID string `gorm:"column:agent_node_id;not null;index"` - HaxenServerID string `gorm:"column:haxen_server_id;not null;index"` - PublicKeyJWK string `gorm:"column:public_key_jwk;not null"` - DerivationPath string `gorm:"column:derivation_path;not null"` - Reasoners string `gorm:"column:reasoners;default:'{}'"` - Skills string `gorm:"column:skills;default:'{}'"` - Status string `gorm:"column:status;not null;default:'active'"` - RegisteredAt time.Time `gorm:"column:registered_at;autoCreateTime"` - CreatedAt time.Time `gorm:"column:created_at;autoCreateTime"` - UpdatedAt time.Time `gorm:"column:updated_at;autoUpdateTime"` + DID string `gorm:"column:did;primaryKey"` + AgentNodeID string `gorm:"column:agent_node_id;not null;index"` + AgentFieldServerID string `gorm:"column:agentfield_server_id;not null;index"` + PublicKeyJWK string `gorm:"column:public_key_jwk;not null"` + DerivationPath string `gorm:"column:derivation_path;not null"` + Reasoners string `gorm:"column:reasoners;default:'{}'"` + Skills string `gorm:"column:skills;default:'{}'"` + Status string `gorm:"column:status;not null;default:'active'"` + RegisteredAt time.Time `gorm:"column:registered_at;autoCreateTime"` + CreatedAt time.Time `gorm:"column:created_at;autoCreateTime"` + UpdatedAt time.Time `gorm:"column:updated_at;autoUpdateTime"` } func (AgentDIDModel) TableName() string { return "agent_dids" } diff --git a/control-plane/internal/storage/storage.go b/control-plane/internal/storage/storage.go index 40cf8722..20a05cd3 100644 --- a/control-plane/internal/storage/storage.go +++ b/control-plane/internal/storage/storage.go @@ -1,12 +1,13 @@ package storage import ( - "github.com/your-org/haxen/control-plane/internal/events" - "github.com/your-org/haxen/control-plane/pkg/types" "context" "fmt" "os" "time" + + "github.com/Agent-Field/agentfield/control-plane/internal/events" + 
"github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // StorageProvider is the interface for the primary data storage backend. @@ -124,13 +125,13 @@ type StorageProvider interface { GetDID(ctx context.Context, did string) (*types.DIDRegistryEntry, error) ListDIDs(ctx context.Context) ([]*types.DIDRegistryEntry, error) - // Haxen Server DID operations - StoreHaxenServerDID(ctx context.Context, haxenServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error - GetHaxenServerDID(ctx context.Context, haxenServerID string) (*types.HaxenServerDIDInfo, error) - ListHaxenServerDIDs(ctx context.Context) ([]*types.HaxenServerDIDInfo, error) + // AgentField Server DID operations + StoreAgentFieldServerDID(ctx context.Context, agentfieldServerID, rootDID string, masterSeed []byte, createdAt, lastKeyRotation time.Time) error + GetAgentFieldServerDID(ctx context.Context, agentfieldServerID string) (*types.AgentFieldServerDIDInfo, error) + ListAgentFieldServerDIDs(ctx context.Context) ([]*types.AgentFieldServerDIDInfo, error) // Agent DID operations - StoreAgentDID(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int) error + StoreAgentDID(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int) error GetAgentDID(ctx context.Context, agentID string) (*types.AgentDIDInfo, error) ListAgentDIDs(ctx context.Context) ([]*types.AgentDIDInfo, error) @@ -140,7 +141,7 @@ type StorageProvider interface { ListComponentDIDs(ctx context.Context, agentDID string) ([]*types.ComponentDIDInfo, error) // Multi-step DID operations with transaction safety - StoreAgentDIDWithComponents(ctx context.Context, agentID, agentDID, haxenServerDID, publicKeyJWK string, derivationIndex int, components []ComponentDIDRequest) error + StoreAgentDIDWithComponents(ctx context.Context, agentID, agentDID, agentfieldServerDID, publicKeyJWK string, derivationIndex int, components 
[]ComponentDIDRequest) error // Execution VC operations StoreExecutionVC(ctx context.Context, vcID, executionID, workflowID, sessionID, issuerDID, targetDID, callerDID, inputHash, outputHash, status string, vcDocument []byte, signature string, storageURI string, documentSizeBytes int64) error @@ -226,7 +227,7 @@ func (sf *StorageFactory) CreateStorage(config StorageConfig) (StorageProvider, } // Allow environment variable to override mode - if envMode := os.Getenv("HAXEN_STORAGE_MODE"); envMode != "" { + if envMode := os.Getenv("AGENTFIELD_STORAGE_MODE"); envMode != "" { mode = envMode } diff --git a/control-plane/internal/storage/tx_utils.go b/control-plane/internal/storage/tx_utils.go new file mode 100644 index 00000000..8918cc1c --- /dev/null +++ b/control-plane/internal/storage/tx_utils.go @@ -0,0 +1,26 @@ +package storage + +import ( + "database/sql" + "errors" + + "github.com/Agent-Field/agentfield/control-plane/internal/logger" +) + +type rollbacker interface { + Rollback() error +} + +// rollbackTx attempts to rollback the transaction and logs a warning when the rollback fails. +func rollbackTx(tx rollbacker, context string) { + if tx == nil { + return + } + + if err := tx.Rollback(); err != nil && !errors.Is(err, sql.ErrTxDone) { + logger.Logger.Warn(). + Err(err). + Str("context", context). 
+ Msg("transaction rollback failed") + } +} diff --git a/control-plane/internal/storage/unitofwork.go b/control-plane/internal/storage/unitofwork.go index 76ba0195..6d6b0092 100644 --- a/control-plane/internal/storage/unitofwork.go +++ b/control-plane/internal/storage/unitofwork.go @@ -1,12 +1,13 @@ package storage import ( - "github.com/your-org/haxen/control-plane/pkg/types" "context" "fmt" "strings" "sync" "time" + + "github.com/Agent-Field/agentfield/control-plane/pkg/types" ) // UnitOfWork manages a collection of changes as a single transaction @@ -209,11 +210,7 @@ func (uow *unitOfWorkImpl) executeCommit() error { uow.tx = tx } - defer func() { - if uow.tx != nil { - uow.tx.Rollback() // Will be no-op if tx.Commit() succeeds - } - }() + defer rollbackTx(uow.tx, "unitOfWork:executeCommit") // Execute all changes in order for i, change := range uow.changes { diff --git a/control-plane/internal/storage/unitofwork_test.go b/control-plane/internal/storage/unitofwork_test.go index d553d183..4053d3b4 100644 --- a/control-plane/internal/storage/unitofwork_test.go +++ b/control-plane/internal/storage/unitofwork_test.go @@ -4,13 +4,14 @@ package storage import ( - "github.com/your-org/haxen/control-plane/pkg/types" "context" // Add context import "encoding/json" "os" "testing" "time" + "github.com/Agent-Field/agentfield/control-plane/pkg/types" + _ "modernc.org/sqlite" ) @@ -120,31 +121,31 @@ func TestWorkflowUnitOfWork_AtomicWorkflowExecution(t *testing.T) { } execution := &types.WorkflowExecution{ - WorkflowID: "test-workflow-001", - ExecutionID: "test-execution-001", - HaxenRequestID: "test-request-001", - SessionID: ptrString("test-session-001"), - ActorID: ptrString("test-actor-001"), - AgentNodeID: "test-agent-001", - ParentWorkflowID: nil, - ParentExecutionID: nil, - RootWorkflowID: ptrString("test-workflow-001"), - WorkflowDepth: 0, - ReasonerID: "test-reasoner", - InputData: []byte(`{"input": "test"}`), - OutputData: []byte(`{"output": "test"}`), - InputSize: 
18, - OutputSize: 19, - Status: string(types.ExecutionStatusSucceeded), - StartedAt: time.Now().Add(-time.Minute), - CompletedAt: ptrTime(time.Now()), - DurationMS: ptrInt64(60000), - ErrorMessage: nil, - RetryCount: 0, - WorkflowName: ptrString("Test Workflow"), - WorkflowTags: []string{"test", "unit-test"}, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), + WorkflowID: "test-workflow-001", + ExecutionID: "test-execution-001", + AgentFieldRequestID: "test-request-001", + SessionID: ptrString("test-session-001"), + ActorID: ptrString("test-actor-001"), + AgentNodeID: "test-agent-001", + ParentWorkflowID: nil, + ParentExecutionID: nil, + RootWorkflowID: ptrString("test-workflow-001"), + WorkflowDepth: 0, + ReasonerID: "test-reasoner", + InputData: []byte(`{"input": "test"}`), + OutputData: []byte(`{"output": "test"}`), + InputSize: 18, + OutputSize: 19, + Status: string(types.ExecutionStatusSucceeded), + StartedAt: time.Now().Add(-time.Minute), + CompletedAt: ptrTime(time.Now()), + DurationMS: ptrInt64(60000), + ErrorMessage: nil, + RetryCount: 0, + WorkflowName: ptrString("Test Workflow"), + WorkflowTags: []string{"test", "unit-test"}, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), } // Create workflow unit of work @@ -238,22 +239,22 @@ func TestWorkflowUnitOfWork_UpdateWorkflowStatus(t *testing.T) { // Create new execution execution := &types.WorkflowExecution{ - WorkflowID: "test-workflow-002", - ExecutionID: "test-execution-002", - HaxenRequestID: "test-request-002", - AgentNodeID: "test-agent-002", - ReasonerID: "test-reasoner", - InputData: json.RawMessage(`{"test": "input"}`), - OutputData: json.RawMessage(`{"test": "output"}`), - InputSize: 15, - OutputSize: 16, - Status: string(types.ExecutionStatusSucceeded), - StartedAt: time.Now().Add(-time.Minute), - CompletedAt: ptrTime(time.Now()), - DurationMS: ptrInt64(60000), - WorkflowName: ptrString("Test Workflow"), - CreatedAt: time.Now(), - UpdatedAt: time.Now(), + WorkflowID: "test-workflow-002", + 
ExecutionID: "test-execution-002", + AgentFieldRequestID: "test-request-002", + AgentNodeID: "test-agent-002", + ReasonerID: "test-reasoner", + InputData: json.RawMessage(`{"test": "input"}`), + OutputData: json.RawMessage(`{"test": "output"}`), + InputSize: 15, + OutputSize: 16, + Status: string(types.ExecutionStatusSucceeded), + StartedAt: time.Now().Add(-time.Minute), + CompletedAt: ptrTime(time.Now()), + DurationMS: ptrInt64(60000), + WorkflowName: ptrString("Test Workflow"), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } // Update workflow status with new execution atomically diff --git a/control-plane/internal/storage/workflow_execution_queries_test.go b/control-plane/internal/storage/workflow_execution_queries_test.go index ad375a80..6ce45cd5 100644 --- a/control-plane/internal/storage/workflow_execution_queries_test.go +++ b/control-plane/internal/storage/workflow_execution_queries_test.go @@ -8,7 +8,7 @@ import ( var workflowExecutionLifecycleColumns = []string{ "workflow_id", "execution_id", - "haxen_request_id", + "agentfield_request_id", "run_id", "session_id", "actor_id", diff --git a/control-plane/internal/templates/go/.env.example.tmpl b/control-plane/internal/templates/go/.env.example.tmpl index 83de98a8..45f58364 100644 --- a/control-plane/internal/templates/go/.env.example.tmpl +++ b/control-plane/internal/templates/go/.env.example.tmpl @@ -7,5 +7,5 @@ # Google AI Configuration # GOOGLE_API_KEY=... -# Haxen Server (optional override) -# HAXEN_SERVER_URL=http://localhost:8080 +# AgentField Server (optional override) +# AGENTFIELD_SERVER_URL=http://localhost:8080 diff --git a/control-plane/internal/templates/go/README.md.tmpl b/control-plane/internal/templates/go/README.md.tmpl index 5baecb70..6cd1389a 100644 --- a/control-plane/internal/templates/go/README.md.tmpl +++ b/control-plane/internal/templates/go/README.md.tmpl @@ -1,6 +1,6 @@ # {{.ProjectName}} -A Haxen agent created with `haxen init`. +An AgentField agent created with `af init`.
## Quick Start @@ -9,9 +9,9 @@ A Haxen agent created with `haxen init`. go mod download ``` -2. **Start the Haxen server** (in another terminal): +2. **Start the AgentField server** (in another terminal): ```bash - haxen server + af server ``` 3. **Run the agent:** @@ -19,7 +19,7 @@ A Haxen agent created with `haxen init`. go run . ``` - The agent will auto-discover an available port and register with Haxen. + The agent will auto-discover an available port and register with AgentField. ## Test the Agent @@ -54,8 +54,8 @@ curl -X POST http://localhost:8080/api/v1/execute/{{.NodeID}}.analyze_sentiment ## Learn More -- [Haxen Documentation](https://docs.haxen.ai) -- [SDK Reference](https://docs.haxen.ai/sdk) +- [AgentField Documentation](https://docs.agentfield.ai) +- [SDK Reference](https://docs.agentfield.ai/sdk) --- diff --git a/control-plane/internal/templates/go/go.mod.tmpl b/control-plane/internal/templates/go/go.mod.tmpl index d0a35454..f61a766d 100644 --- a/control-plane/internal/templates/go/go.mod.tmpl +++ b/control-plane/internal/templates/go/go.mod.tmpl @@ -2,4 +2,4 @@ module {{.GoModule}} go 1.23 -require github.com/agentfield/haxen/sdk/go v0.1.0 +require github.com/Agent-Field/agentfield/sdk/go v0.1.0 diff --git a/control-plane/internal/templates/go/main.go.tmpl b/control-plane/internal/templates/go/main.go.tmpl index ff82bdc0..0f788d89 100644 --- a/control-plane/internal/templates/go/main.go.tmpl +++ b/control-plane/internal/templates/go/main.go.tmpl @@ -5,15 +5,15 @@ import ( "log" "os" - "github.com/agentfield/haxen/sdk/go/agent" - "github.com/agentfield/haxen/sdk/go/ai" + "github.com/Agent-Field/agentfield/sdk/go/agent" + "github.com/Agent-Field/agentfield/sdk/go/ai" ) func main() { cfg := agent.Config{ NodeID: "{{.NodeID}}", Version: "1.0.0", - HaxenURL: "http://localhost:8080", + AgentFieldURL: "http://localhost:8080", ListenAddress: ":0", // Auto-discover available port // πŸ”§ Uncomment to enable AI features: diff --git 
a/control-plane/internal/templates/go/reasoners.go.tmpl b/control-plane/internal/templates/go/reasoners.go.tmpl index 17887236..1a36f7f3 100644 --- a/control-plane/internal/templates/go/reasoners.go.tmpl +++ b/control-plane/internal/templates/go/reasoners.go.tmpl @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/agentfield/haxen/sdk/go/agent" - "github.com/agentfield/haxen/sdk/go/ai" + "github.com/Agent-Field/agentfield/sdk/go/agent" + "github.com/Agent-Field/agentfield/sdk/go/ai" ) func registerReasoners(app *agent.Agent) { diff --git a/control-plane/internal/templates/python/.env.example.tmpl b/control-plane/internal/templates/python/.env.example.tmpl index 83de98a8..45f58364 100644 --- a/control-plane/internal/templates/python/.env.example.tmpl +++ b/control-plane/internal/templates/python/.env.example.tmpl @@ -7,5 +7,5 @@ # Google AI Configuration # GOOGLE_API_KEY=... -# Haxen Server (optional override) -# HAXEN_SERVER_URL=http://localhost:8080 +# AgentField Server (optional override) +# AGENTFIELD_SERVER_URL=http://localhost:8080 diff --git a/control-plane/internal/templates/python/README.md.tmpl b/control-plane/internal/templates/python/README.md.tmpl index 5b42e2bc..f191a533 100644 --- a/control-plane/internal/templates/python/README.md.tmpl +++ b/control-plane/internal/templates/python/README.md.tmpl @@ -1,6 +1,6 @@ # {{.ProjectName}} -A Haxen agent created with `haxen init`. +An AgentField agent created with `af init`. ## Quick Start @@ -9,9 +9,9 @@ A Haxen agent created with `haxen init`. pip install -r requirements.txt ``` -2. **Start the Haxen server** (in another terminal): +2. **Start the AgentField server** (in another terminal): ```bash - haxen server + af server ``` 3. **Run the agent:** @@ -19,7 +19,7 @@ A Haxen agent created with `haxen init`. python main.py ``` - The agent will auto-discover an available port and register with Haxen. + The agent will auto-discover an available port and register with AgentField.
## Test the Agent @@ -54,8 +54,8 @@ curl -X POST http://localhost:8080/api/v1/execute/{{.NodeID}}.demo_analyze_senti ## Learn More -- [Haxen Documentation](https://docs.haxen.ai) -- [SDK Reference](https://docs.haxen.ai/sdk) +- [AgentField Documentation](https://docs.agentfield.ai) +- [SDK Reference](https://docs.agentfield.ai/sdk) --- diff --git a/control-plane/internal/templates/python/main.py.tmpl b/control-plane/internal/templates/python/main.py.tmpl index 47bb88eb..db91d49f 100644 --- a/control-plane/internal/templates/python/main.py.tmpl +++ b/control-plane/internal/templates/python/main.py.tmpl @@ -1,10 +1,10 @@ -from haxen_sdk import Agent, AIConfig +from agentfield import Agent, AIConfig from reasoners import reasoners_router # Basic agent setup - works immediately app = Agent( node_id="{{.NodeID}}", - haxen_server="http://localhost:8080", + agentfield_server="http://localhost:8080", version="1.0.0", dev_mode=True, diff --git a/control-plane/internal/templates/python/reasoners.py.tmpl b/control-plane/internal/templates/python/reasoners.py.tmpl index 199d6a2b..3a8ddaa8 100644 --- a/control-plane/internal/templates/python/reasoners.py.tmpl +++ b/control-plane/internal/templates/python/reasoners.py.tmpl @@ -1,4 +1,4 @@ -from haxen_sdk import AgentRouter +from agentfield import AgentRouter from pydantic import BaseModel, Field # Group related reasoners with a router diff --git a/control-plane/internal/templates/python/requirements.txt.tmpl b/control-plane/internal/templates/python/requirements.txt.tmpl index ec4d5b9d..fe1f7429 100644 --- a/control-plane/internal/templates/python/requirements.txt.tmpl +++ b/control-plane/internal/templates/python/requirements.txt.tmpl @@ -1 +1 @@ -haxen-sdk +agentfield diff --git a/control-plane/internal/utils/id_generator.go b/control-plane/internal/utils/id_generator.go index 84da9dc7..36d12911 100644 --- a/control-plane/internal/utils/id_generator.go +++ b/control-plane/internal/utils/id_generator.go @@ -1,8 +1,9 @@ package 
utils import ( - "crypto/rand" + cryptoRand "crypto/rand" "fmt" + mathrand "math/rand" "time" ) @@ -27,8 +28,8 @@ func GenerateRunID() string { return fmt.Sprintf("run_%s_%s", timestamp, random) } -// GenerateHaxenRequestID generates a new haxen request ID -func GenerateHaxenRequestID() string { +// GenerateAgentFieldRequestID generates a new agentfield request ID +func GenerateAgentFieldRequestID() string { timestamp := time.Now().Format("20060102_150405") random := generateRandomString(8) return fmt.Sprintf("req_%s_%s", timestamp, random) @@ -44,7 +45,14 @@ func ValidateWorkflowID(workflowID string) bool { func generateRandomString(length int) string { const charset = "abcdefghijklmnopqrstuvwxyz0123456789" b := make([]byte, length) - rand.Read(b) + if _, err := cryptoRand.Read(b); err != nil { + // Fallback to pseudo-random source if crypto source is unavailable. + src := mathrand.New(mathrand.NewSource(time.Now().UnixNano())) + for i := range b { + b[i] = charset[src.Intn(len(charset))] + } + return string(b) + } for i := range b { b[i] = charset[b[i]%byte(len(charset))] } diff --git a/control-plane/internal/utils/paths.go b/control-plane/internal/utils/paths.go index 1d8ce2a9..3cc23ca2 100644 --- a/control-plane/internal/utils/paths.go +++ b/control-plane/internal/utils/paths.go @@ -6,9 +6,9 @@ import ( "runtime" ) -// DataDirectories holds all the standardized paths for Haxen data storage +// DataDirectories holds all the standardized paths for AgentField data storage type DataDirectories struct { - HaxenHome string + AgentFieldHome string DataDir string DatabaseDir string KeysDir string @@ -23,49 +23,49 @@ type DataDirectories struct { PayloadsDir string } -// GetHaxenDataDirectories returns the standardized data directories for Haxen +// GetAgentFieldDataDirectories returns the standardized data directories for AgentField // It respects environment variables and provides sensible defaults -func GetHaxenDataDirectories() (*DataDirectories, error) { - // 
Determine Haxen home directory - haxenHome := os.Getenv("HAXEN_HOME") - if haxenHome == "" { +func GetAgentFieldDataDirectories() (*DataDirectories, error) { + // Determine AgentField home directory + agentfieldHome := os.Getenv("AGENTFIELD_HOME") + if agentfieldHome == "" { homeDir, err := os.UserHomeDir() if err != nil { return nil, err } - haxenHome = filepath.Join(homeDir, ".haxen") + agentfieldHome = filepath.Join(homeDir, ".agentfield") } // Create the data directories structure dirs := &DataDirectories{ - HaxenHome: haxenHome, - DataDir: filepath.Join(haxenHome, "data"), - DatabaseDir: filepath.Join(haxenHome, "data"), - KeysDir: filepath.Join(haxenHome, "data", "keys"), - DIDRegistriesDir: filepath.Join(haxenHome, "data", "did_registries"), - VCsDir: filepath.Join(haxenHome, "data", "vcs"), - VCsExecutionsDir: filepath.Join(haxenHome, "data", "vcs", "executions"), - VCsWorkflowsDir: filepath.Join(haxenHome, "data", "vcs", "workflows"), - AgentsDir: filepath.Join(haxenHome, "agents"), - LogsDir: filepath.Join(haxenHome, "logs"), - ConfigDir: filepath.Join(haxenHome, "config"), - TempDir: filepath.Join(haxenHome, "temp"), - PayloadsDir: filepath.Join(haxenHome, "data", "payloads"), + AgentFieldHome: agentfieldHome, + DataDir: filepath.Join(agentfieldHome, "data"), + DatabaseDir: filepath.Join(agentfieldHome, "data"), + KeysDir: filepath.Join(agentfieldHome, "data", "keys"), + DIDRegistriesDir: filepath.Join(agentfieldHome, "data", "did_registries"), + VCsDir: filepath.Join(agentfieldHome, "data", "vcs"), + VCsExecutionsDir: filepath.Join(agentfieldHome, "data", "vcs", "executions"), + VCsWorkflowsDir: filepath.Join(agentfieldHome, "data", "vcs", "workflows"), + AgentsDir: filepath.Join(agentfieldHome, "agents"), + LogsDir: filepath.Join(agentfieldHome, "logs"), + ConfigDir: filepath.Join(agentfieldHome, "config"), + TempDir: filepath.Join(agentfieldHome, "temp"), + PayloadsDir: filepath.Join(agentfieldHome, "data", "payloads"), } return dirs, nil } -// 
EnsureDataDirectories creates all necessary Haxen data directories +// EnsureDataDirectories creates all necessary AgentField data directories func EnsureDataDirectories() (*DataDirectories, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return nil, err } // Create all directories with appropriate permissions directoriesToCreate := []string{ - dirs.HaxenHome, + dirs.AgentFieldHome, dirs.DataDir, dirs.DatabaseDir, dirs.KeysDir, @@ -101,36 +101,36 @@ func EnsureDataDirectories() (*DataDirectories, error) { return dirs, nil } -// GetDatabasePath returns the path to the main Haxen database +// GetDatabasePath returns the path to the main AgentField database func GetDatabasePath() (string, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return "", err } - return filepath.Join(dirs.DatabaseDir, "haxen.db"), nil + return filepath.Join(dirs.DatabaseDir, "agentfield.db"), nil } -// GetKVStorePath returns the path to the Haxen key-value store +// GetKVStorePath returns the path to the AgentField key-value store func GetKVStorePath() (string, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return "", err } - return filepath.Join(dirs.DatabaseDir, "haxen.bolt"), nil + return filepath.Join(dirs.DatabaseDir, "agentfield.bolt"), nil } // GetAgentRegistryPath returns the path to the agent registry file func GetAgentRegistryPath() (string, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return "", err } - return filepath.Join(dirs.HaxenHome, "installed.json"), nil + return filepath.Join(dirs.AgentFieldHome, "installed.json"), nil } // GetConfigPath returns the path to a configuration file func GetConfigPath(filename string) (string, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if 
err != nil { return "", err } @@ -139,7 +139,7 @@ func GetConfigPath(filename string) (string, error) { // GetLogPath returns the path to a log file func GetLogPath(filename string) (string, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return "", err } @@ -148,7 +148,7 @@ func GetLogPath(filename string) (string, error) { // GetTempPath returns the path to a temporary file func GetTempPath(filename string) (string, error) { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return "", err } @@ -181,13 +181,13 @@ func GetPlatformSpecificPaths() map[string]string { // ValidatePaths checks if all required paths are accessible func ValidatePaths() error { - dirs, err := GetHaxenDataDirectories() + dirs, err := GetAgentFieldDataDirectories() if err != nil { return err } - // Check if we can write to the Haxen home directory - testFile := filepath.Join(dirs.HaxenHome, ".write_test") + // Check if we can write to the AgentField home directory + testFile := filepath.Join(dirs.AgentFieldHome, ".write_test") if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil { return err } diff --git a/control-plane/migrations/000_migration_runner.sql b/control-plane/migrations/000_migration_runner.sql index 87990c42..04e07872 100644 --- a/control-plane/migrations/000_migration_runner.sql +++ b/control-plane/migrations/000_migration_runner.sql @@ -1,9 +1,9 @@ -- Migration Runner: DID Schema Migration Script --- Description: Complete DID database schema setup for the Haxen platform +-- Description: Complete DID database schema setup for the AgentField platform -- Created: 2025-01-08 -- -- This script creates all necessary tables for the DID (Decentralized Identity) implementation --- in the Haxen platform, enabling the transition from file-based to database-backed storage. 
+-- in the AgentField platform, enabling the transition from file-based to database-backed storage. -- Create migrations tracking table CREATE TABLE IF NOT EXISTS schema_migrations ( diff --git a/control-plane/migrations/008_fix_haxen_server_id_schema.sql b/control-plane/migrations/008_fix_agentfield_server_id_schema.sql similarity index 50% rename from control-plane/migrations/008_fix_haxen_server_id_schema.sql rename to control-plane/migrations/008_fix_agentfield_server_id_schema.sql index c6734a8e..eaca2af6 100644 --- a/control-plane/migrations/008_fix_haxen_server_id_schema.sql +++ b/control-plane/migrations/008_fix_agentfield_server_id_schema.sql @@ -1,21 +1,21 @@ --- Migration: Fix Haxen Server ID Schema Inconsistency --- Description: Update schema to use haxen_server_id consistently instead of organization_id +-- Migration: Fix AgentField Server ID Schema Inconsistency +-- Description: Update schema to use agentfield_server_id consistently instead of organization_id -- Created: 2025-01-21 --- Step 1: Add haxen_server_id column to did_registry table -ALTER TABLE did_registry ADD COLUMN haxen_server_id TEXT; +-- Step 1: Add agentfield_server_id column to did_registry table +ALTER TABLE did_registry ADD COLUMN agentfield_server_id TEXT; --- Step 2: Copy organization_id values to haxen_server_id for existing records -UPDATE did_registry SET haxen_server_id = organization_id WHERE haxen_server_id IS NULL; +-- Step 2: Copy organization_id values to agentfield_server_id for existing records +UPDATE did_registry SET agentfield_server_id = organization_id WHERE agentfield_server_id IS NULL; --- Step 3: Make haxen_server_id NOT NULL and add unique constraint --- First, ensure all records have haxen_server_id populated -UPDATE did_registry SET haxen_server_id = 'default' WHERE haxen_server_id IS NULL OR haxen_server_id = ''; +-- Step 3: Make agentfield_server_id NOT NULL and add unique constraint +-- First, ensure all records have agentfield_server_id populated +UPDATE 
did_registry SET agentfield_server_id = 'default' WHERE agentfield_server_id IS NULL OR agentfield_server_id = ''; -- Now make it NOT NULL -- Note: SQLite doesn't support ALTER COLUMN, so we need to recreate the table CREATE TABLE did_registry_new ( - haxen_server_id TEXT PRIMARY KEY, + agentfield_server_id TEXT PRIMARY KEY, organization_id TEXT, -- Keep for backward compatibility during transition master_seed_encrypted BLOB NOT NULL, root_did TEXT NOT NULL UNIQUE, @@ -26,8 +26,8 @@ CREATE TABLE did_registry_new ( ); -- Copy data from old table to new table -INSERT INTO did_registry_new (haxen_server_id, organization_id, master_seed_encrypted, root_did, agent_nodes, total_dids, created_at, last_key_rotation) -SELECT haxen_server_id, organization_id, master_seed_encrypted, root_did, agent_nodes, total_dids, created_at, last_key_rotation +INSERT INTO did_registry_new (agentfield_server_id, organization_id, master_seed_encrypted, root_did, agent_nodes, total_dids, created_at, last_key_rotation) +SELECT agentfield_server_id, organization_id, master_seed_encrypted, root_did, agent_nodes, total_dids, created_at, last_key_rotation FROM did_registry; -- Drop old table and rename new table @@ -40,20 +40,20 @@ CREATE INDEX IF NOT EXISTS idx_did_registry_created_at ON did_registry(created_a CREATE INDEX IF NOT EXISTS idx_did_registry_last_key_rotation ON did_registry(last_key_rotation); CREATE INDEX IF NOT EXISTS idx_did_registry_organization_id ON did_registry(organization_id); -- For backward compatibility --- Step 4: Update agent_dids table to use haxen_server_id -ALTER TABLE agent_dids ADD COLUMN haxen_server_id TEXT; +-- Step 4: Update agent_dids table to use agentfield_server_id +ALTER TABLE agent_dids ADD COLUMN agentfield_server_id TEXT; --- Copy organization_id values to haxen_server_id for existing records -UPDATE agent_dids SET haxen_server_id = organization_id WHERE haxen_server_id IS NULL; +-- Copy organization_id values to agentfield_server_id for existing 
records +UPDATE agent_dids SET agentfield_server_id = organization_id WHERE agentfield_server_id IS NULL; --- Ensure all records have haxen_server_id populated -UPDATE agent_dids SET haxen_server_id = 'default' WHERE haxen_server_id IS NULL OR haxen_server_id = ''; +-- Ensure all records have agentfield_server_id populated +UPDATE agent_dids SET agentfield_server_id = 'default' WHERE agentfield_server_id IS NULL OR agentfield_server_id = ''; --- Recreate agent_dids table with haxen_server_id as the foreign key +-- Recreate agent_dids table with agentfield_server_id as the foreign key CREATE TABLE agent_dids_new ( did TEXT PRIMARY KEY, agent_node_id TEXT NOT NULL, - haxen_server_id TEXT NOT NULL, + agentfield_server_id TEXT NOT NULL, organization_id TEXT, -- Keep for backward compatibility during transition public_key_jwk TEXT NOT NULL, -- JSON Web Key format derivation_path TEXT NOT NULL, @@ -65,12 +65,12 @@ CREATE TABLE agent_dids_new ( updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- Foreign key constraints - FOREIGN KEY (haxen_server_id) REFERENCES did_registry(haxen_server_id) ON DELETE CASCADE + FOREIGN KEY (agentfield_server_id) REFERENCES did_registry(agentfield_server_id) ON DELETE CASCADE ); -- Copy data from old table to new table -INSERT INTO agent_dids_new (did, agent_node_id, haxen_server_id, organization_id, public_key_jwk, derivation_path, reasoners, skills, status, registered_at, created_at, updated_at) -SELECT did, agent_node_id, haxen_server_id, organization_id, public_key_jwk, derivation_path, reasoners, skills, status, registered_at, created_at, updated_at +INSERT INTO agent_dids_new (did, agent_node_id, agentfield_server_id, organization_id, public_key_jwk, derivation_path, reasoners, skills, status, registered_at, created_at, updated_at) +SELECT did, agent_node_id, agentfield_server_id, organization_id, public_key_jwk, derivation_path, reasoners, skills, status, registered_at, created_at, updated_at FROM agent_dids; -- Drop old table and 
rename new table @@ -79,8 +79,8 @@ ALTER TABLE agent_dids_new RENAME TO agent_dids; -- Recreate indexes for performance CREATE INDEX IF NOT EXISTS idx_agent_dids_agent_node_id ON agent_dids(agent_node_id); -CREATE INDEX IF NOT EXISTS idx_agent_dids_haxen_server_id ON agent_dids(haxen_server_id); +CREATE INDEX IF NOT EXISTS idx_agent_dids_agentfield_server_id ON agent_dids(agentfield_server_id); CREATE INDEX IF NOT EXISTS idx_agent_dids_organization_id ON agent_dids(organization_id); -- For backward compatibility CREATE INDEX IF NOT EXISTS idx_agent_dids_status ON agent_dids(status); CREATE INDEX IF NOT EXISTS idx_agent_dids_registered_at ON agent_dids(registered_at); -CREATE UNIQUE INDEX IF NOT EXISTS idx_agent_dids_agent_node_haxen_server ON agent_dids(agent_node_id, haxen_server_id); \ No newline at end of file +CREATE UNIQUE INDEX IF NOT EXISTS idx_agent_dids_agent_node_agentfield_server ON agent_dids(agent_node_id, agentfield_server_id); \ No newline at end of file diff --git a/control-plane/migrations/README.md b/control-plane/migrations/README.md index 0ceb4dcb..620ad3c2 100644 --- a/control-plane/migrations/README.md +++ b/control-plane/migrations/README.md @@ -1,6 +1,6 @@ # DID Database Schema Migrations -This directory contains SQL migration files for the DID (Decentralized Identity) implementation in the Haxen platform. These migrations create the necessary database tables to support DID-based authentication, verifiable credentials, and workflow execution tracking. +This directory contains SQL migration files for the DID (Decentralized Identity) implementation in the AgentField platform. These migrations create the necessary database tables to support DID-based authentication, verifiable credentials, and workflow execution tracking. 
## Migration Files @@ -106,18 +106,18 @@ Execute the migration files in order, or use the complete migration runner: ```sql -- Option 1: Run complete migration -.read haxen/migrations/000_migration_runner.sql +.read agentfield/migrations/000_migration_runner.sql -- Option 2: Run individual migrations -.read haxen/migrations/001_create_did_registry.sql -.read haxen/migrations/002_create_agent_dids.sql -.read haxen/migrations/003_create_component_dids.sql -.read haxen/migrations/004_create_execution_vcs.sql -.read haxen/migrations/005_create_workflow_vcs.sql +.read agentfield/migrations/001_create_did_registry.sql +.read agentfield/migrations/002_create_agent_dids.sql +.read agentfield/migrations/003_create_component_dids.sql +.read agentfield/migrations/004_create_execution_vcs.sql +.read agentfield/migrations/005_create_workflow_vcs.sql ``` ### Integration with Go Services -The existing DID services in `haxen/internal/services/` and types in `haxen/pkg/types/did_types.go` are designed to work with these database tables. The migration from file-based storage to database storage should be seamless. +The existing DID services in `agentfield/internal/services/` and types in `agentfield/pkg/types/did_types.go` are designed to work with these database tables. The migration from file-based storage to database storage should be seamless. 
## Migration from File Storage diff --git a/control-plane/pkg/adminpb/reasoner_admin.pb.go b/control-plane/pkg/adminpb/reasoner_admin.pb.go index 68cb79ee..339074ac 100644 --- a/control-plane/pkg/adminpb/reasoner_admin.pb.go +++ b/control-plane/pkg/adminpb/reasoner_admin.pb.go @@ -211,7 +211,7 @@ const file_proto_admin_reasoner_admin_proto_rawDesc = "" + "\x15ListReasonersResponse\x120\n" + "\treasoners\x18\x01 \x03(\v2\x12.admin.v1.ReasonerR\treasoners2h\n" + "\x14AdminReasonerService\x12P\n" + - "\rListReasoners\x12\x1e.admin.v1.ListReasonersRequest\x1a\x1f.admin.v1.ListReasonersResponseB\x1bZ\x19haxen/pkg/adminpb;adminpbb\x06proto3" + "\rListReasoners\x12\x1e.admin.v1.ListReasonersRequest\x1a\x1f.admin.v1.ListReasonersResponseB\x1bZ\x19agentfield/pkg/adminpb;adminpbb\x06proto3" var ( file_proto_admin_reasoner_admin_proto_rawDescOnce sync.Once diff --git a/control-plane/pkg/types/configuration.go b/control-plane/pkg/types/configuration.go index 2573d728..f6cb848d 100644 --- a/control-plane/pkg/types/configuration.go +++ b/control-plane/pkg/types/configuration.go @@ -32,19 +32,19 @@ const ( // AgentPackage represents an installed agent package type AgentPackage struct { - ID string `json:"id" db:"id"` - Name string `json:"name" db:"name"` - Version string `json:"version" db:"version"` - Description *string `json:"description,omitempty" db:"description"` - Author *string `json:"author,omitempty" db:"author"` - Repository *string `json:"repository,omitempty" db:"repository"` - InstallPath string `json:"install_path" db:"install_path"` - ConfigurationSchema json.RawMessage `json:"configuration_schema" db:"configuration_schema"` - Status PackageStatus `json:"status" db:"status"` - ConfigurationStatus ConfigurationStatus `json:"configuration_status" db:"configuration_status"` - InstalledAt time.Time `json:"installed_at" db:"installed_at"` - UpdatedAt time.Time `json:"updated_at" db:"updated_at"` - Metadata PackageMetadata `json:"metadata" db:"metadata"` + ID string 
`json:"id" db:"id"` + Name string `json:"name" db:"name"` + Version string `json:"version" db:"version"` + Description *string `json:"description,omitempty" db:"description"` + Author *string `json:"author,omitempty" db:"author"` + Repository *string `json:"repository,omitempty" db:"repository"` + InstallPath string `json:"install_path" db:"install_path"` + ConfigurationSchema json.RawMessage `json:"configuration_schema" db:"configuration_schema"` + Status PackageStatus `json:"status" db:"status"` + ConfigurationStatus ConfigurationStatus `json:"configuration_status" db:"configuration_status"` + InstalledAt time.Time `json:"installed_at" db:"installed_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` + Metadata PackageMetadata `json:"metadata" db:"metadata"` } // PackageStatus represents the status of an agent package @@ -60,29 +60,29 @@ const ( // PackageMetadata holds extensible metadata for an agent package type PackageMetadata struct { - Dependencies []string `json:"dependencies,omitempty"` - Runtime *RuntimeMetadata `json:"runtime,omitempty"` - Configuration *ConfigurationMetadata `json:"configuration,omitempty"` - Custom map[string]interface{} `json:"custom,omitempty"` + Dependencies []string `json:"dependencies,omitempty"` + Runtime *RuntimeMetadata `json:"runtime,omitempty"` + Configuration *ConfigurationMetadata `json:"configuration,omitempty"` + Custom map[string]interface{} `json:"custom,omitempty"` } // RuntimeMetadata holds runtime-related metadata for a package type RuntimeMetadata struct { - Language string `json:"language"` - Version string `json:"version"` - Environment map[string]string `json:"environment,omitempty"` - ProcessID *int `json:"process_id,omitempty"` - StartedAt *time.Time `json:"started_at,omitempty"` - HealthCheckURL *string `json:"health_check_url,omitempty"` + Language string `json:"language"` + Version string `json:"version"` + Environment map[string]string `json:"environment,omitempty"` + ProcessID *int 
`json:"process_id,omitempty"` + StartedAt *time.Time `json:"started_at,omitempty"` + HealthCheckURL *string `json:"health_check_url,omitempty"` } // ConfigurationMetadata holds configuration-related metadata type ConfigurationMetadata struct { - RequiredFields []string `json:"required_fields,omitempty"` - OptionalFields []string `json:"optional_fields,omitempty"` - SecretFields []string `json:"secret_fields,omitempty"` - LastValidated *time.Time `json:"last_validated,omitempty"` - ValidationErrors []string `json:"validation_errors,omitempty"` + RequiredFields []string `json:"required_fields,omitempty"` + OptionalFields []string `json:"optional_fields,omitempty"` + SecretFields []string `json:"secret_fields,omitempty"` + LastValidated *time.Time `json:"last_validated,omitempty"` + ValidationErrors []string `json:"validation_errors,omitempty"` } // ConfigurationFilters holds filters for querying agent configurations diff --git a/control-plane/pkg/types/did_types.go b/control-plane/pkg/types/did_types.go index 50dfbebf..f49fa25c 100644 --- a/control-plane/pkg/types/did_types.go +++ b/control-plane/pkg/types/did_types.go @@ -5,28 +5,28 @@ import ( "time" ) -// DIDRegistry represents the master DID registry for a haxen server. +// DIDRegistry represents the master DID registry for an AgentField server.
type DIDRegistry struct { - HaxenServerID string `json:"haxen_server_id" db:"haxen_server_id"` - MasterSeed []byte `json:"master_seed" db:"master_seed_encrypted"` - RootDID string `json:"root_did" db:"root_did"` - AgentNodes map[string]AgentDIDInfo `json:"agent_nodes" db:"agent_nodes"` - TotalDIDs int `json:"total_dids" db:"total_dids"` - CreatedAt time.Time `json:"created_at" db:"created_at"` - LastKeyRotation time.Time `json:"last_key_rotation" db:"last_key_rotation"` + AgentFieldServerID string `json:"agentfield_server_id" db:"agentfield_server_id"` + MasterSeed []byte `json:"master_seed" db:"master_seed_encrypted"` + RootDID string `json:"root_did" db:"root_did"` + AgentNodes map[string]AgentDIDInfo `json:"agent_nodes" db:"agent_nodes"` + TotalDIDs int `json:"total_dids" db:"total_dids"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + LastKeyRotation time.Time `json:"last_key_rotation" db:"last_key_rotation"` } // AgentDIDInfo represents DID information for an agent node. 
type AgentDIDInfo struct { - DID string `json:"did" db:"did"` - AgentNodeID string `json:"agent_node_id" db:"agent_node_id"` - HaxenServerID string `json:"haxen_server_id" db:"haxen_server_id"` - PublicKeyJWK json.RawMessage `json:"public_key_jwk" db:"public_key_jwk"` - DerivationPath string `json:"derivation_path" db:"derivation_path"` - Reasoners map[string]ReasonerDIDInfo `json:"reasoners" db:"reasoners"` - Skills map[string]SkillDIDInfo `json:"skills" db:"skills"` - Status AgentDIDStatus `json:"status" db:"status"` - RegisteredAt time.Time `json:"registered_at" db:"registered_at"` + DID string `json:"did" db:"did"` + AgentNodeID string `json:"agent_node_id" db:"agent_node_id"` + AgentFieldServerID string `json:"agentfield_server_id" db:"agentfield_server_id"` + PublicKeyJWK json.RawMessage `json:"public_key_jwk" db:"public_key_jwk"` + DerivationPath string `json:"derivation_path" db:"derivation_path"` + Reasoners map[string]ReasonerDIDInfo `json:"reasoners" db:"reasoners"` + Skills map[string]SkillDIDInfo `json:"skills" db:"skills"` + Status AgentDIDStatus `json:"status" db:"status"` + RegisteredAt time.Time `json:"registered_at" db:"registered_at"` } // ReasonerDIDInfo represents DID information for a reasoner. @@ -62,23 +62,23 @@ const ( // ExecutionVC represents a verifiable credential for an execution. 
type ExecutionVC struct { - VCID string `json:"vc_id" db:"vc_id"` - ExecutionID string `json:"execution_id" db:"execution_id"` - WorkflowID string `json:"workflow_id" db:"workflow_id"` - SessionID string `json:"session_id" db:"session_id"` - IssuerDID string `json:"issuer_did" db:"issuer_did"` - TargetDID string `json:"target_did" db:"target_did"` - CallerDID string `json:"caller_did" db:"caller_did"` - VCDocument json.RawMessage `json:"vc_document" db:"vc_document"` - Signature string `json:"signature" db:"signature"` - StorageURI string `json:"storage_uri" db:"storage_uri"` - DocumentSize int64 `json:"document_size_bytes" db:"document_size_bytes"` - InputHash string `json:"input_hash" db:"input_hash"` - OutputHash string `json:"output_hash" db:"output_hash"` - Status string `json:"status" db:"status"` - CreatedAt time.Time `json:"created_at" db:"created_at"` - ParentVCID *string `json:"parent_vc_id,omitempty" db:"parent_vc_id"` - ChildVCIDs []string `json:"child_vc_ids,omitempty" db:"child_vc_ids"` + VCID string `json:"vc_id" db:"vc_id"` + ExecutionID string `json:"execution_id" db:"execution_id"` + WorkflowID string `json:"workflow_id" db:"workflow_id"` + SessionID string `json:"session_id" db:"session_id"` + IssuerDID string `json:"issuer_did" db:"issuer_did"` + TargetDID string `json:"target_did" db:"target_did"` + CallerDID string `json:"caller_did" db:"caller_did"` + VCDocument json.RawMessage `json:"vc_document" db:"vc_document"` + Signature string `json:"signature" db:"signature"` + StorageURI string `json:"storage_uri" db:"storage_uri"` + DocumentSize int64 `json:"document_size_bytes" db:"document_size_bytes"` + InputHash string `json:"input_hash" db:"input_hash"` + OutputHash string `json:"output_hash" db:"output_hash"` + Status string `json:"status" db:"status"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + ParentVCID *string `json:"parent_vc_id,omitempty" db:"parent_vc_id"` + ChildVCIDs []string `json:"child_vc_ids,omitempty" 
db:"child_vc_ids"` } // WorkflowVC represents a workflow-level verifiable credential. @@ -102,10 +102,10 @@ type WorkflowVC struct { // DIDIdentityPackage represents the complete DID identity package for an agent. type DIDIdentityPackage struct { - AgentDID DIDIdentity `json:"agent_did"` - ReasonerDIDs map[string]DIDIdentity `json:"reasoner_dids"` - SkillDIDs map[string]DIDIdentity `json:"skill_dids"` - HaxenServerID string `json:"haxen_server_id"` + AgentDID DIDIdentity `json:"agent_did"` + ReasonerDIDs map[string]DIDIdentity `json:"reasoner_dids"` + SkillDIDs map[string]DIDIdentity `json:"skill_dids"` + AgentFieldServerID string `json:"agentfield_server_id"` } // DIDIdentity represents a single DID identity with keys. @@ -120,24 +120,24 @@ type DIDIdentity struct { // ExecutionContext represents the context for DID-enabled execution. type ExecutionContext struct { - ExecutionID string `json:"execution_id"` - WorkflowID string `json:"workflow_id"` - SessionID string `json:"session_id"` - CallerDID string `json:"caller_did"` - TargetDID string `json:"target_did"` - AgentNodeDID string `json:"agent_node_did"` - Timestamp time.Time `json:"timestamp"` + ExecutionID string `json:"execution_id"` + WorkflowID string `json:"workflow_id"` + SessionID string `json:"session_id"` + CallerDID string `json:"caller_did"` + TargetDID string `json:"target_did"` + AgentNodeDID string `json:"agent_node_did"` + Timestamp time.Time `json:"timestamp"` } // VCDocument represents a complete verifiable credential document. 
type VCDocument struct { - Context []string `json:"@context"` - Type []string `json:"type"` - ID string `json:"id"` - Issuer string `json:"issuer"` - IssuanceDate string `json:"issuanceDate"` - CredentialSubject VCCredentialSubject `json:"credentialSubject"` - Proof VCProof `json:"proof"` + Context []string `json:"@context"` + Type []string `json:"type"` + ID string `json:"id"` + Issuer string `json:"issuer"` + IssuanceDate string `json:"issuanceDate"` + CredentialSubject VCCredentialSubject `json:"credentialSubject"` + Proof VCProof `json:"proof"` } // WorkflowVCDocument represents a complete workflow-level verifiable credential document. @@ -153,28 +153,28 @@ type WorkflowVCDocument struct { // VCCredentialSubject represents the subject of a verifiable credential. type VCCredentialSubject struct { - ExecutionID string `json:"executionId"` - WorkflowID string `json:"workflowId"` - SessionID string `json:"sessionId"` - Caller VCCaller `json:"caller"` - Target VCTarget `json:"target"` + ExecutionID string `json:"executionId"` + WorkflowID string `json:"workflowId"` + SessionID string `json:"sessionId"` + Caller VCCaller `json:"caller"` + Target VCTarget `json:"target"` Execution VCExecution `json:"execution"` - Audit VCAudit `json:"audit"` + Audit VCAudit `json:"audit"` } // WorkflowVCCredentialSubject represents the subject of a workflow-level verifiable credential. 
type WorkflowVCCredentialSubject struct { - WorkflowID string `json:"workflowId"` - SessionID string `json:"sessionId"` - ComponentVCIDs []string `json:"componentVcIds"` - TotalSteps int `json:"totalSteps"` - CompletedSteps int `json:"completedSteps"` - Status string `json:"status"` - StartTime string `json:"startTime"` - EndTime *string `json:"endTime,omitempty"` - SnapshotTime string `json:"snapshotTime"` - Orchestrator VCCaller `json:"orchestrator"` - Audit VCAudit `json:"audit"` + WorkflowID string `json:"workflowId"` + SessionID string `json:"sessionId"` + ComponentVCIDs []string `json:"componentVcIds"` + TotalSteps int `json:"totalSteps"` + CompletedSteps int `json:"completedSteps"` + Status string `json:"status"` + StartTime string `json:"startTime"` + EndTime *string `json:"endTime,omitempty"` + SnapshotTime string `json:"snapshotTime"` + Orchestrator VCCaller `json:"orchestrator"` + Audit VCAudit `json:"audit"` } // VCCaller represents the caller information in a VC. @@ -219,15 +219,15 @@ type VCProof struct { // DIDFilters holds filters for querying DIDs. 
type DIDFilters struct { - HaxenServerID *string `json:"haxen_server_id,omitempty"` - AgentNodeID *string `json:"agent_node_id,omitempty"` - ComponentType *string `json:"component_type,omitempty"` - Status *AgentDIDStatus `json:"status,omitempty"` - ExposureLevel *string `json:"exposure_level,omitempty"` - CreatedAfter *time.Time `json:"created_after,omitempty"` - CreatedBefore *time.Time `json:"created_before,omitempty"` - Limit int `json:"limit,omitempty"` - Offset int `json:"offset,omitempty"` + AgentFieldServerID *string `json:"agentfield_server_id,omitempty"` + AgentNodeID *string `json:"agent_node_id,omitempty"` + ComponentType *string `json:"component_type,omitempty"` + Status *AgentDIDStatus `json:"status,omitempty"` + ExposureLevel *string `json:"exposure_level,omitempty"` + CreatedAfter *time.Time `json:"created_after,omitempty"` + CreatedBefore *time.Time `json:"created_before,omitempty"` + Limit int `json:"limit,omitempty"` + Offset int `json:"offset,omitempty"` } // VCFilters holds filters for querying VCs. @@ -245,12 +245,11 @@ type VCFilters struct { Offset int `json:"offset,omitempty"` } - // DIDRegistrationRequest represents a request to register an agent with DIDs. type DIDRegistrationRequest struct { - AgentNodeID string `json:"agent_node_id"` - Reasoners []ReasonerDefinition `json:"reasoners"` - Skills []SkillDefinition `json:"skills"` + AgentNodeID string `json:"agent_node_id"` + Reasoners []ReasonerDefinition `json:"reasoners"` + Skills []SkillDefinition `json:"skills"` } // DIDRegistrationResponse represents the response to a DID registration request. @@ -268,11 +267,11 @@ type VCVerificationRequest struct { // VCVerificationResponse represents the response to a VC verification request. 
type VCVerificationResponse struct { - Valid bool `json:"valid"` - IssuerDID string `json:"issuer_did,omitempty"` - IssuedAt string `json:"issued_at,omitempty"` - Message string `json:"message,omitempty"` - Error string `json:"error,omitempty"` + Valid bool `json:"valid"` + IssuerDID string `json:"issuer_did,omitempty"` + IssuedAt string `json:"issued_at,omitempty"` + Message string `json:"message,omitempty"` + Error string `json:"error,omitempty"` } // WorkflowVCChainRequest represents a request to get a workflow VC chain. @@ -293,7 +292,7 @@ type WorkflowVCChainResponse struct { // DIDResolutionEntry represents a resolved DID with its public key for offline verification. type DIDResolutionEntry struct { - Method string `json:"method"` // "key", "web", etc. + Method string `json:"method"` // "key", "web", etc. PublicKeyJWK json.RawMessage `json:"public_key_jwk"` ResolvedFrom string `json:"resolved_from"` // "bundled", "web", "resolver" ResolvedAt string `json:"resolved_at"` // ISO 8601 timestamp @@ -313,54 +312,54 @@ type DIDRegistryEntry struct { // ComponentDIDInfo represents DID information for a component (reasoner or skill). 
type ComponentDIDInfo struct { - ComponentID string `json:"component_id" db:"component_id"` - ComponentDID string `json:"component_did" db:"component_did"` - AgentDID string `json:"agent_did" db:"agent_did"` - ComponentType string `json:"component_type" db:"component_type"` - ComponentName string `json:"component_name" db:"component_name"` - DerivationIndex int `json:"derivation_index" db:"derivation_index"` - CreatedAt time.Time `json:"created_at" db:"created_at"` + ComponentID string `json:"component_id" db:"component_id"` + ComponentDID string `json:"component_did" db:"component_did"` + AgentDID string `json:"agent_did" db:"agent_did"` + ComponentType string `json:"component_type" db:"component_type"` + ComponentName string `json:"component_name" db:"component_name"` + DerivationIndex int `json:"derivation_index" db:"derivation_index"` + CreatedAt time.Time `json:"created_at" db:"created_at"` } // ExecutionVCInfo represents information about an execution VC stored in database. type ExecutionVCInfo struct { - VCID string `json:"vc_id" db:"vc_id"` - ExecutionID string `json:"execution_id" db:"execution_id"` - WorkflowID string `json:"workflow_id" db:"workflow_id"` - SessionID string `json:"session_id" db:"session_id"` - IssuerDID string `json:"issuer_did" db:"issuer_did"` - TargetDID string `json:"target_did" db:"target_did"` - CallerDID string `json:"caller_did" db:"caller_did"` - InputHash string `json:"input_hash" db:"input_hash"` - OutputHash string `json:"output_hash" db:"output_hash"` - Status string `json:"status" db:"status"` - CreatedAt time.Time `json:"created_at" db:"created_at"` - StorageURI string `json:"storage_uri" db:"storage_uri"` - DocumentSize int64 `json:"document_size_bytes" db:"document_size_bytes"` + VCID string `json:"vc_id" db:"vc_id"` + ExecutionID string `json:"execution_id" db:"execution_id"` + WorkflowID string `json:"workflow_id" db:"workflow_id"` + SessionID string `json:"session_id" db:"session_id"` + IssuerDID string 
`json:"issuer_did" db:"issuer_did"` + TargetDID string `json:"target_did" db:"target_did"` + CallerDID string `json:"caller_did" db:"caller_did"` + InputHash string `json:"input_hash" db:"input_hash"` + OutputHash string `json:"output_hash" db:"output_hash"` + Status string `json:"status" db:"status"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + StorageURI string `json:"storage_uri" db:"storage_uri"` + DocumentSize int64 `json:"document_size_bytes" db:"document_size_bytes"` } // WorkflowVCInfo represents information about a workflow VC. type WorkflowVCInfo struct { - WorkflowVCID string `json:"workflow_vc_id" db:"workflow_vc_id"` - WorkflowID string `json:"workflow_id" db:"workflow_id"` - SessionID string `json:"session_id" db:"session_id"` - ComponentVCIDs []string `json:"component_vc_ids" db:"component_vc_ids"` - Status string `json:"status" db:"status"` - StartTime time.Time `json:"start_time" db:"start_time"` - EndTime *time.Time `json:"end_time" db:"end_time"` - TotalSteps int `json:"total_steps" db:"total_steps"` - CompletedSteps int `json:"completed_steps" db:"completed_steps"` - StorageURI string `json:"storage_uri" db:"storage_uri"` - DocumentSize int64 `json:"document_size_bytes" db:"document_size_bytes"` -} - -// HaxenServerDIDInfo represents haxen server-level DID information stored in the database. 
-type HaxenServerDIDInfo struct { - HaxenServerID string `json:"haxen_server_id" db:"haxen_server_id"` - RootDID string `json:"root_did" db:"root_did"` - MasterSeed []byte `json:"master_seed" db:"master_seed_encrypted"` - CreatedAt time.Time `json:"created_at" db:"created_at"` - LastKeyRotation time.Time `json:"last_key_rotation" db:"last_key_rotation"` + WorkflowVCID string `json:"workflow_vc_id" db:"workflow_vc_id"` + WorkflowID string `json:"workflow_id" db:"workflow_id"` + SessionID string `json:"session_id" db:"session_id"` + ComponentVCIDs []string `json:"component_vc_ids" db:"component_vc_ids"` + Status string `json:"status" db:"status"` + StartTime time.Time `json:"start_time" db:"start_time"` + EndTime *time.Time `json:"end_time" db:"end_time"` + TotalSteps int `json:"total_steps" db:"total_steps"` + CompletedSteps int `json:"completed_steps" db:"completed_steps"` + StorageURI string `json:"storage_uri" db:"storage_uri"` + DocumentSize int64 `json:"document_size_bytes" db:"document_size_bytes"` +} + +// AgentFieldServerDIDInfo represents af server-level DID information stored in the database. +type AgentFieldServerDIDInfo struct { + AgentFieldServerID string `json:"agentfield_server_id" db:"agentfield_server_id"` + RootDID string `json:"root_did" db:"root_did"` + MasterSeed []byte `json:"master_seed" db:"master_seed_encrypted"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + LastKeyRotation time.Time `json:"last_key_rotation" db:"last_key_rotation"` } // RegistrationType represents the type of DID registration being performed. @@ -374,11 +373,11 @@ const ( // EnhancedDIDRegistrationRequest represents an enhanced request to register an agent with DIDs. 
type EnhancedDIDRegistrationRequest struct { - AgentNodeID string `json:"agent_node_id"` - Reasoners []ReasonerDefinition `json:"reasoners"` - Skills []SkillDefinition `json:"skills"` - RegistrationType RegistrationType `json:"registration_type"` - ForceOverwrite bool `json:"force_overwrite,omitempty"` + AgentNodeID string `json:"agent_node_id"` + Reasoners []ReasonerDefinition `json:"reasoners"` + Skills []SkillDefinition `json:"skills"` + RegistrationType RegistrationType `json:"registration_type"` + ForceOverwrite bool `json:"force_overwrite,omitempty"` } // DifferentialAnalysisResult represents the result of comparing existing vs new reasoners/skills. @@ -394,13 +393,13 @@ type DifferentialAnalysisResult struct { // PartialDIDRegistrationRequest represents a request for partial DID registration. type PartialDIDRegistrationRequest struct { - AgentNodeID string `json:"agent_node_id"` - NewReasonerIDs []string `json:"new_reasoner_ids"` - NewSkillIDs []string `json:"new_skill_ids"` - UpdatedReasonerIDs []string `json:"updated_reasoner_ids"` - UpdatedSkillIDs []string `json:"updated_skill_ids"` - AllReasoners []ReasonerDefinition `json:"all_reasoners"` - AllSkills []SkillDefinition `json:"all_skills"` + AgentNodeID string `json:"agent_node_id"` + NewReasonerIDs []string `json:"new_reasoner_ids"` + NewSkillIDs []string `json:"new_skill_ids"` + UpdatedReasonerIDs []string `json:"updated_reasoner_ids"` + UpdatedSkillIDs []string `json:"updated_skill_ids"` + AllReasoners []ReasonerDefinition `json:"all_reasoners"` + AllSkills []SkillDefinition `json:"all_skills"` } // ComponentDeregistrationRequest represents a request to deregister specific components. 
diff --git a/control-plane/pkg/types/status.go b/control-plane/pkg/types/status.go index b44872f4..9888c0bb 100644 --- a/control-plane/pkg/types/status.go +++ b/control-plane/pkg/types/status.go @@ -46,7 +46,7 @@ var executionStatusAliases = map[string]ExecutionStatus{ "processing": ExecutionStatusRunning, } -// NormalizeExecutionStatus maps arbitrary status strings onto the canonical execution statuses used by the Haxen platform. +// NormalizeExecutionStatus maps arbitrary status strings onto the canonical execution statuses used by the AgentField platform. // Unknown or unsupported statuses resolve to ExecutionStatusUnknown. func NormalizeExecutionStatus(status string) string { normalized := ExecutionStatus(strings.ToLower(strings.TrimSpace(status))) diff --git a/control-plane/pkg/types/types.go b/control-plane/pkg/types/types.go index 21f5e3b1..e341089c 100644 --- a/control-plane/pkg/types/types.go +++ b/control-plane/pkg/types/types.go @@ -139,7 +139,7 @@ type AgentNode struct { Version string `json:"version" db:"version"` // Serverless support - DeploymentType string `json:"deployment_type" db:"deployment_type"` // "long_running" or "serverless" + DeploymentType string `json:"deployment_type" db:"deployment_type"` // "long_running" or "serverless" InvocationURL *string `json:"invocation_url,omitempty" db:"invocation_url"` // For serverless agents CallbackDiscovery *CallbackDiscoveryInfo `json:"callback_discovery,omitempty" db:"-"` @@ -157,7 +157,7 @@ type AgentNode struct { Metadata AgentMetadata `json:"metadata" db:"metadata"` } -// CallbackDiscoveryInfo captures how the Haxen server resolved an agent callback URL. +// CallbackDiscoveryInfo captures how the AgentField server resolved an agent callback URL. 
type CallbackDiscoveryInfo struct { Mode string `json:"mode,omitempty"` Preferred string `json:"preferred,omitempty"` @@ -599,13 +599,13 @@ type WorkflowExecution struct { ID int64 `json:"id" db:"id"` // Core IDs - WorkflowID string `json:"workflow_id" db:"workflow_id"` - ExecutionID string `json:"execution_id" db:"execution_id"` - HaxenRequestID string `json:"haxen_request_id" db:"haxen_request_id"` - RunID *string `json:"run_id,omitempty" db:"run_id"` - SessionID *string `json:"session_id,omitempty" db:"session_id"` - ActorID *string `json:"actor_id,omitempty" db:"actor_id"` - AgentNodeID string `json:"agent_node_id" db:"agent_node_id"` + WorkflowID string `json:"workflow_id" db:"workflow_id"` + ExecutionID string `json:"execution_id" db:"execution_id"` + AgentFieldRequestID string `json:"agentfield_request_id" db:"agentfield_request_id"` + RunID *string `json:"run_id,omitempty" db:"run_id"` + SessionID *string `json:"session_id,omitempty" db:"session_id"` + ActorID *string `json:"actor_id,omitempty" db:"actor_id"` + AgentNodeID string `json:"agent_node_id" db:"agent_node_id"` // DAG Relationship Fields ParentWorkflowID *string `json:"parent_workflow_id,omitempty" db:"parent_workflow_id"` diff --git a/control-plane/pkg/types/webhook.go b/control-plane/pkg/types/webhook.go index 3994f9f4..211a1d39 100644 --- a/control-plane/pkg/types/webhook.go +++ b/control-plane/pkg/types/webhook.go @@ -38,7 +38,7 @@ type ExecutionWebhookStateUpdate struct { LastError *string } -// ExecutionWebhookPayload defines the shape Haxen sends to webhook consumers. +// ExecutionWebhookPayload defines the shape AgentField sends to webhook consumers. 
type ExecutionWebhookPayload struct { Event string `json:"event"` ExecutionID string `json:"execution_id"` diff --git a/control-plane/proto/admin/reasoner_admin.proto b/control-plane/proto/admin/reasoner_admin.proto index 9a9389b0..d06b7ad8 100644 --- a/control-plane/proto/admin/reasoner_admin.proto +++ b/control-plane/proto/admin/reasoner_admin.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package admin.v1; -option go_package = "github.com/your-org/haxen/control-plane/pkg/adminpb;adminpb"; +option go_package = "github.com/Agent-Field/agentfield/control-plane/pkg/adminpb;adminpb"; // AdminReasonerService exposes control-surface operations for reasoning components. service AdminReasonerService { diff --git a/control-plane/scripts/README.md b/control-plane/scripts/README.md index 43b8ff9e..17a1d4be 100644 --- a/control-plane/scripts/README.md +++ b/control-plane/scripts/README.md @@ -1,6 +1,6 @@ -# Haxen Server Release Scripts +# AgentField Server Release Scripts -This directory contains automation scripts for building and releasing Haxen Server binaries to GitHub. +This directory contains automation scripts for building and releasing AgentField Server binaries to GitHub. ## Overview @@ -95,13 +95,13 @@ User-friendly installation script for end users. 
**Usage:** ```bash # Quick install (user-specific, no sudo) -curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install.sh | bash +curl -sSL https://raw.githubusercontent.com/Agent-Field/agentfield/main/scripts/install.sh | bash # System-wide install (requires sudo) -curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install.sh | bash -s -- --system +curl -sSL https://raw.githubusercontent.com/Agent-Field/agentfield/main/scripts/install.sh | bash -s -- --system # Custom directory -curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install.sh | bash -s -- --dir ~/bin +curl -sSL https://raw.githubusercontent.com/Agent-Field/agentfield/main/scripts/install.sh | bash -s -- --dir ~/bin ``` ## Workflow @@ -110,7 +110,7 @@ curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install. 1. **Prepare for release:** ```bash - cd apps/platform/haxen/scripts + cd apps/platform/agentfield/scripts ./release.sh dry-run # Check prerequisites and preview ``` @@ -142,10 +142,10 @@ curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install. 
### Build Output Each release creates these assets: -- `haxen-linux-amd64` - Linux Intel/AMD binary -- `haxen-linux-arm64` - Linux ARM binary -- `haxen-darwin-amd64` - macOS Intel binary -- `haxen-darwin-arm64` - macOS Apple Silicon binary +- `agentfield-linux-amd64` - Linux Intel/AMD binary +- `agentfield-linux-arm64` - Linux ARM binary +- `agentfield-darwin-amd64` - macOS Intel binary +- `agentfield-darwin-arm64` - macOS Apple Silicon binary - `checksums.txt` - SHA256 checksums - `build-info.txt` - Build metadata - `README.md` - Installation instructions @@ -153,18 +153,18 @@ Each release creates these assets: ## Configuration ### GitHub Repository -The scripts are configured for: `your-org/haxen` +The scripts are configured for: `Agent-Field/agentfield` To change the repository, set the environment variable: ```bash -export GITHUB_REPO="your-org/your-repo" +export GITHUB_REPO="Agent-Field/your-repo" ./release.sh ``` ### Build Configuration The release script uses the existing `../build-single-binary.sh` with these settings: - Embedded UI included -- Universal path management (stores data in `~/.haxen/`) +- Universal path management (stores data in `~/.agentfield/`) - Cross-platform binaries - Single binary deployment @@ -248,7 +248,7 @@ gh release delete v0.1.0-alpha.X ### Complete Release Workflow ```bash # 1. Check current state -cd apps/platform/haxen/scripts +cd apps/platform/agentfield/scripts ./version-manager.sh info # 2. 
Test the release process @@ -264,10 +264,10 @@ gh release list ### User Installation Testing ```bash # Test the installation script -curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install.sh | bash +curl -sSL https://raw.githubusercontent.com/Agent-Field/agentfield/main/scripts/install.sh | bash # Verify installation -haxen --help +agentfield --help ``` ### Build Only (No Release) @@ -282,7 +282,7 @@ ls -la ../dist/releases/ ## File Structure ``` -apps/platform/haxen/scripts/ +apps/platform/agentfield/scripts/ β”œβ”€β”€ README.md # This documentation β”œβ”€β”€ .version # Version tracking file β”œβ”€β”€ version-manager.sh # Version management script diff --git a/control-plane/scripts/quick-mcp-test.sh b/control-plane/scripts/quick-mcp-test.sh index 3eff5251..50f26d19 100755 --- a/control-plane/scripts/quick-mcp-test.sh +++ b/control-plane/scripts/quick-mcp-test.sh @@ -4,34 +4,34 @@ # Quick MCP Endpoints Test Script # ============================================================================= # A simplified version for quick testing of MCP endpoints -# Usage: ./haxen/scripts/quick-mcp-test.sh +# Usage: ./agentfield/scripts/quick-mcp-test.sh # ============================================================================= -HAXEN_SERVER="${HAXEN_SERVER:-http://localhost:8080}" +AGENTFIELD_SERVER="${AGENTFIELD_SERVER:-http://localhost:8080}" echo "🧠 Quick MCP Endpoints Test" echo "==========================" -echo "Server: $HAXEN_SERVER" +echo "Server: $AGENTFIELD_SERVER" echo "" # Check if server is running -echo "1. Checking Haxen server..." -if curl -s --connect-timeout 5 "$HAXEN_SERVER/health" > /dev/null; then - echo "βœ… Haxen server is running" +echo "1. Checking AgentField server..." 
+if curl -s --connect-timeout 5 "$AGENTFIELD_SERVER/health" > /dev/null; then + echo "βœ… AgentField server is running" else - echo "❌ Haxen server is not responding" + echo "❌ AgentField server is not responding" exit 1 fi # Test overall MCP status echo "" echo "2. Testing overall MCP status..." -curl -s "$HAXEN_SERVER/api/ui/v1/mcp/status" | jq . 2>/dev/null || echo "❌ Failed to get MCP status" +curl -s "$AGENTFIELD_SERVER/api/ui/v1/mcp/status" | jq . 2>/dev/null || echo "❌ Failed to get MCP status" # Get first node ID echo "" echo "3. Getting available nodes..." -FIRST_NODE=$(curl -s "$HAXEN_SERVER/api/ui/v1/nodes" | jq -r '.[0].id // empty' 2>/dev/null) +FIRST_NODE=$(curl -s "$AGENTFIELD_SERVER/api/ui/v1/nodes" | jq -r '.[0].id // empty' 2>/dev/null) if [ -n "$FIRST_NODE" ] && [ "$FIRST_NODE" != "null" ]; then echo "βœ… Found node: $FIRST_NODE" @@ -39,12 +39,12 @@ if [ -n "$FIRST_NODE" ] && [ "$FIRST_NODE" != "null" ]; then # Test node MCP health echo "" echo "4. Testing node MCP health..." - curl -s "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE/mcp/health" | jq . 2>/dev/null || echo "❌ Failed to get node MCP health" + curl -s "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE/mcp/health" | jq . 2>/dev/null || echo "❌ Failed to get node MCP health" # Test developer mode echo "" echo "5. Testing developer mode..." - curl -s "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE/mcp/health?mode=developer" | jq . 2>/dev/null || echo "❌ Failed to get developer mode health" + curl -s "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE/mcp/health?mode=developer" | jq . 2>/dev/null || echo "❌ Failed to get developer mode health" else echo "⚠️ No nodes found - skipping node-specific tests" fi @@ -53,4 +53,4 @@ echo "" echo "πŸŽ‰ Quick test completed!" 
echo "" echo "For comprehensive testing, run:" -echo " ./haxen/scripts/test-mcp-endpoints.sh" \ No newline at end of file +echo " ./agentfield/scripts/test-mcp-endpoints.sh" \ No newline at end of file diff --git a/control-plane/scripts/release.sh b/control-plane/scripts/release.sh index a2721c97..d963494b 100755 --- a/control-plane/scripts/release.sh +++ b/control-plane/scripts/release.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Haxen Release Automation Script +# AgentField Release Automation Script # Builds binaries and creates GitHub releases with auto-incrementing versions set -e @@ -21,9 +21,9 @@ BUILD_SCRIPT="$PROJECT_ROOT/build-single-binary.sh" DIST_DIR="$PROJECT_ROOT/dist/releases" # GitHub configuration -GITHUB_REPO="your-org/haxen" -GITHUB_OWNER="your-org" -GITHUB_REPO_NAME="haxen" +GITHUB_REPO="Agent-Field/agentfield" +GITHUB_OWNER="Agent-Field" +GITHUB_REPO_NAME="agentfield" print_header() { echo -e "${CYAN}================================${NC}" @@ -59,35 +59,35 @@ command_exists() { # Check prerequisites check_prerequisites() { print_header "Checking Prerequisites" - + local missing_deps=() - + # Check GitHub CLI if ! command_exists gh; then missing_deps+=("GitHub CLI (gh) - https://cli.github.com/") else print_success "GitHub CLI found: $(gh --version | head -n1)" fi - + # Check jq if ! command_exists jq; then missing_deps+=("jq - JSON processor") else print_success "jq found: $(jq --version)" fi - + # Check git if ! command_exists git; then missing_deps+=("git") else print_success "git found: $(git --version | head -n1)" fi - + # Check if we're in a git repository if ! git rev-parse --git-dir > /dev/null 2>&1; then missing_deps+=("Must be run from within a git repository") fi - + # Check version manager if [ ! -f "$VERSION_MANAGER" ]; then missing_deps+=("Version manager script not found: $VERSION_MANAGER") @@ -96,7 +96,7 @@ check_prerequisites() { else print_success "Version manager found" fi - + # Check build script if [ ! 
-f "$BUILD_SCRIPT" ]; then missing_deps+=("Build script not found: $BUILD_SCRIPT") @@ -105,7 +105,7 @@ check_prerequisites() { else print_success "Build script found" fi - + if [ ${#missing_deps[@]} -ne 0 ]; then print_error "Missing dependencies:" for dep in "${missing_deps[@]}"; do @@ -117,82 +117,82 @@ check_prerequisites() { print_info "jq: brew install jq (macOS) or apt-get install jq (Ubuntu)" exit 1 fi - + print_success "All prerequisites satisfied!" } # Check GitHub authentication check_github_auth() { print_header "Checking GitHub Authentication" - + if ! gh auth status >/dev/null 2>&1; then print_error "GitHub CLI not authenticated" print_info "Please run: gh auth login" exit 1 fi - + print_success "GitHub CLI authenticated" } # Get version information get_version_info() { print_header "Version Information" - + # Get current version info CURRENT_VERSION=$("$VERSION_MANAGER" current) CURRENT_TAG=$("$VERSION_MANAGER" current-tag) NEXT_VERSION=$("$VERSION_MANAGER" next) NEXT_TAG=$("$VERSION_MANAGER" next-tag) - + print_status "Current version: $CURRENT_TAG" print_status "Next version: $NEXT_TAG" - + # Check if tag already exists if git tag -l | grep -q "^$NEXT_TAG$"; then print_error "Tag $NEXT_TAG already exists" print_info "Use '$VERSION_MANAGER set ' to set a different version" exit 1 fi - + # Check if GitHub release already exists if gh release view "$NEXT_TAG" >/dev/null 2>&1; then print_error "GitHub release $NEXT_TAG already exists" exit 1 fi - + print_success "Version $NEXT_TAG is available for release" } # Build binaries build_binaries() { print_header "Building Binaries" - + # Set version for build script export VERSION="$NEXT_VERSION" - + # Navigate to project root and run build script cd "$PROJECT_ROOT" - + print_status "Running build script with version: $VERSION" if ! "$BUILD_SCRIPT"; then print_error "Build script failed" exit 1 fi - + # Verify build outputs if [ ! 
-d "$DIST_DIR" ]; then print_error "Build output directory not found: $DIST_DIR" exit 1 fi - + # Check for expected binaries local expected_binaries=( - "haxen-linux-amd64" - "haxen-linux-arm64" - "haxen-darwin-amd64" - "haxen-darwin-arm64" + "agentfield-linux-amd64" + "agentfield-linux-arm64" + "agentfield-darwin-amd64" + "agentfield-darwin-arm64" ) - + local missing_binaries=() local available_binaries=() for binary in "${expected_binaries[@]}"; do @@ -202,7 +202,7 @@ build_binaries() { available_binaries+=("$binary") fi done - + if [ ${#missing_binaries[@]} -ne 0 ]; then print_warning "Missing binaries:" for binary in "${missing_binaries[@]}"; do @@ -210,40 +210,40 @@ build_binaries() { done print_warning "Continuing with available binaries..." fi - + if [ ${#available_binaries[@]} -eq 0 ]; then print_error "No binaries were built successfully" exit 1 fi - + print_success "Build completed with ${#available_binaries[@]} of ${#expected_binaries[@]} binaries" - + # Show build summary print_status "Built files:" - ls -la "$DIST_DIR" | grep -E "(haxen-|checksums|build-info|README)" + ls -la "$DIST_DIR" | grep -E "(agentfield-|checksums|build-info|README)" } # Generate release notes generate_release_notes() { print_header "Generating Release Notes" - + local release_notes_file="$DIST_DIR/release-notes.md" - + # Get git log since last tag local last_tag="" if git tag -l | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$" | sort -V | tail -n1 | read -r tag; then last_tag="$tag" fi - + cat > "$release_notes_file" << EOF -# Haxen $NEXT_TAG (Pre-release) +# AgentField $NEXT_TAG (Pre-release) -This is a pre-release version of Haxen for testing purposes. +This is a pre-release version of AgentField for testing purposes. 
## πŸš€ What's New EOF - + if [ -n "$last_tag" ]; then echo "### Changes since $last_tag" >> "$release_notes_file" echo "" >> "$release_notes_file" @@ -252,50 +252,50 @@ EOF else echo "### Initial Alpha Release" >> "$release_notes_file" echo "" >> "$release_notes_file" - echo "- Initial release of Haxen Server" >> "$release_notes_file" + echo "- Initial release of AgentField Server" >> "$release_notes_file" echo "- Single binary deployment with embedded UI" >> "$release_notes_file" - echo "- Universal path management (stores data in ~/.haxen/)" >> "$release_notes_file" + echo "- Universal path management (stores data in ~/.agentfield/)" >> "$release_notes_file" echo "- Cross-platform support (Linux, macOS)" >> "$release_notes_file" fi - + cat >> "$release_notes_file" << 'EOF' ## πŸ“¦ Installation ### Quick Install (Recommended) ```bash - curl -sSL https://raw.githubusercontent.com/your-org/haxen/main/scripts/install.sh | bash + curl -sSL https://raw.githubusercontent.com/Agent-Field/agentfield/main/scripts/install.sh | bash ``` ### Manual Download 1. Download the appropriate binary for your platform from the assets below -2. Make it executable: `chmod +x haxen-*` -3. Run: `./haxen-linux-amd64` (or your platform's binary) +2. Make it executable: `chmod +x agentfield-*` +3. Run: `./agentfield-linux-amd64` (or your platform's binary) 4. 
Open http://localhost:8080 in your browser ## πŸ—οΈ Available Binaries -- **haxen-linux-amd64** - Linux (Intel/AMD 64-bit) -- **haxen-linux-arm64** - Linux (ARM 64-bit) -- **haxen-darwin-amd64** - macOS (Intel) -- **haxen-darwin-arm64** - macOS (Apple Silicon) +- **agentfield-linux-amd64** - Linux (Intel/AMD 64-bit) +- **agentfield-linux-arm64** - Linux (ARM 64-bit) +- **agentfield-darwin-amd64** - macOS (Intel) +- **agentfield-darwin-arm64** - macOS (Apple Silicon) ## πŸ”§ Features - **Single Binary**: Everything bundled in one executable -- **Universal Storage**: All data stored in `~/.haxen/` directory +- **Universal Storage**: All data stored in `~/.agentfield/` directory - **Embedded UI**: Web interface included in binary - **Cross-Platform**: Works on Linux and macOS - **Portable**: Run from anywhere, data stays consistent ## πŸ“ Data Directory -All Haxen data is stored in `~/.haxen/`: +All AgentField data is stored in `~/.agentfield/`: ``` -~/.haxen/ +~/.agentfield/ β”œβ”€β”€ data/ -β”‚ β”œβ”€β”€ haxen.db # Main database -β”‚ β”œβ”€β”€ haxen.bolt # Cache +β”‚ β”œβ”€β”€ agentfield.db # Main database +β”‚ β”œβ”€β”€ agentfield.bolt # Cache β”‚ β”œβ”€β”€ keys/ # Cryptographic keys β”‚ β”œβ”€β”€ did_registries/ # DID registries β”‚ └── vcs/ # Verifiable credentials @@ -310,59 +310,59 @@ This is an alpha pre-release intended for testing and development. Not recommend ## πŸ› Issues & Support -Report issues at: https://github.com/your-org/haxen/issues +Report issues at: https://github.com/Agent-Field/agentfield/issues EOF - + print_success "Release notes generated: $release_notes_file" } # Create GitHub release create_github_release() { print_header "Creating GitHub Release" - + local release_notes_file="$DIST_DIR/release-notes.md" - + # Increment version print_status "Incrementing version..." 
"$VERSION_MANAGER" increment >/dev/null - + # Create git tag print_status "Creating git tag: $NEXT_TAG" git tag -a "$NEXT_TAG" -m "Release $NEXT_TAG" - + # Push tag to remote print_status "Pushing tag to remote..." git push origin "$NEXT_TAG" - + # Create GitHub release print_status "Creating GitHub release..." gh release create "$NEXT_TAG" \ - --title "Haxen $NEXT_TAG" \ + --title "AgentField $NEXT_TAG" \ --notes-file "$release_notes_file" \ --prerelease \ --repo "$GITHUB_REPO" - + print_success "GitHub release created: $NEXT_TAG" } # Upload release assets upload_assets() { print_header "Uploading Release Assets" - + cd "$DIST_DIR" - + # List of assets to upload local assets=( - "haxen-linux-amd64" - "haxen-linux-arm64" - "haxen-darwin-amd64" - "haxen-darwin-arm64" + "agentfield-linux-amd64" + "agentfield-linux-arm64" + "agentfield-darwin-amd64" + "agentfield-darwin-arm64" "checksums.txt" "build-info.txt" "README.md" ) - + # Upload each asset for asset in "${assets[@]}"; do if [ -f "$asset" ]; then @@ -373,25 +373,25 @@ upload_assets() { print_warning "Asset not found: $asset" fi done - + print_success "All assets uploaded" } # Show release summary show_summary() { print_header "Release Summary" - + print_success "πŸŽ‰ Release $NEXT_TAG created successfully!" 
echo "" print_status "Release URL: https://github.com/$GITHUB_REPO/releases/tag/$NEXT_TAG" print_status "Version: $NEXT_TAG" print_status "Type: Pre-release" - + if [ -d "$DIST_DIR" ]; then local total_size=$(du -sh "$DIST_DIR" | cut -f1) print_status "Total package size: $total_size" fi - + echo "" print_status "Users can install with:" echo " curl -sSL https://raw.githubusercontent.com/$GITHUB_REPO/main/ops/scripts/install.sh | bash" @@ -413,8 +413,8 @@ cleanup() { # Main release function main() { - print_header "Haxen Server Release Automation" - + print_header "AgentField Server Release Automation" + echo "This script will:" echo " β€’ Check prerequisites and authentication" echo " β€’ Auto-increment version number" @@ -422,10 +422,10 @@ main() { echo " β€’ Create GitHub release with assets" echo " β€’ Tag as pre-release" echo "" - + # Set up cleanup trap trap cleanup EXIT - + # Run release steps check_prerequisites check_github_auth @@ -435,7 +435,7 @@ main() { create_github_release upload_assets show_summary - + # Remove cleanup trap on success trap - EXIT } @@ -459,7 +459,7 @@ case "${1:-}" in print_success "Build completed - no release created" ;; "help"|"-h"|"--help") - echo "Haxen Release Automation Script" + echo "AgentField Release Automation Script" echo "" echo "Usage:" echo " $0 Create full release (build + GitHub release)" diff --git a/control-plane/scripts/test-mcp-endpoints.sh b/control-plane/scripts/test-mcp-endpoints.sh index 3d088af4..94dbf789 100755 --- a/control-plane/scripts/test-mcp-endpoints.sh +++ b/control-plane/scripts/test-mcp-endpoints.sh @@ -4,25 +4,25 @@ # MCP Endpoints Testing Script # ============================================================================= # This script tests all the new MCP health integration endpoints implemented -# in the Haxen server. It includes comprehensive testing for all 4 endpoints +# in the AgentField server. 
It includes comprehensive testing for all 4 endpoints # with both success and error scenarios. # # Prerequisites: -# - Haxen server running on http://localhost:8080 (default) +# - AgentField server running on http://localhost:8080 (default) # - jq installed for JSON formatting (brew install jq on macOS) # - curl available (should be pre-installed) # - At least one agent node running and registered # # Usage: -# chmod +x haxen/scripts/test-mcp-endpoints.sh -# ./haxen/scripts/test-mcp-endpoints.sh +# chmod +x agentfield/scripts/test-mcp-endpoints.sh +# ./agentfield/scripts/test-mcp-endpoints.sh # # ============================================================================= set -e # Exit on any error # Configuration -HAXEN_SERVER="${HAXEN_SERVER:-http://localhost:8080}" +AGENTFIELD_SERVER="${AGENTFIELD_SERVER:-http://localhost:8080}" VERBOSE="${VERBOSE:-false}" SLEEP_BETWEEN_TESTS="${SLEEP_BETWEEN_TESTS:-2}" @@ -74,16 +74,16 @@ check_dependencies() { log_success "All dependencies are available" } -# Check if Haxen server is running -check_haxen_server() { - log_info "Checking if Haxen server is running at $HAXEN_SERVER..." +# Check if AgentField server is running +check_agentfield_server() { + log_info "Checking if AgentField server is running at $AGENTFIELD_SERVER..." 
- if curl -s --connect-timeout 5 "$HAXEN_SERVER/health" > /dev/null 2>&1; then - log_success "Haxen server is running" + if curl -s --connect-timeout 5 "$AGENTFIELD_SERVER/health" > /dev/null 2>&1; then + log_success "AgentField server is running" else - log_error "Haxen server is not responding at $HAXEN_SERVER" - log_error "Please ensure the Haxen server is running:" - log_error " cd haxen/apps/platform/haxen && go run cmd/haxen-server/main.go" + log_error "AgentField server is not responding at $AGENTFIELD_SERVER" + log_error "Please ensure the AgentField server is running:" + log_error " cd agentfield/apps/platform/agentfield && go run cmd/agentfield-server/main.go" exit 1 fi } @@ -93,10 +93,10 @@ get_test_nodes() { log_info "Fetching available nodes for testing..." local response - response=$(curl -s "$HAXEN_SERVER/api/ui/v1/nodes" 2>/dev/null) + response=$(curl -s "$AGENTFIELD_SERVER/api/ui/v1/nodes" 2>/dev/null) if [ $? -ne 0 ] || [ -z "$response" ]; then - log_error "Failed to fetch nodes from Haxen server" + log_error "Failed to fetch nodes from AgentField server" return 1 fi @@ -185,7 +185,7 @@ test_overall_mcp_status() { log_separator execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/mcp/status" \ + "$AGENTFIELD_SERVER/api/ui/v1/mcp/status" \ "Get overall MCP status across all nodes" \ "" \ "200" @@ -203,7 +203,7 @@ test_node_mcp_health_user() { fi execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health" \ "Get MCP health for node $FIRST_NODE_ID (user mode)" \ "" \ "200" @@ -221,7 +221,7 @@ test_node_mcp_health_developer() { fi execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health?mode=developer" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health?mode=developer" \ "Get MCP health for node $FIRST_NODE_ID (developer mode)" \ "" \ "200" @@ -241,7 +241,7 @@ test_mcp_server_restart() { # First, try to get available MCP servers for 
this node log_info "Getting available MCP servers for node $FIRST_NODE_ID..." local health_response - health_response=$(curl -s "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health?mode=developer" 2>/dev/null) + health_response=$(curl -s "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health?mode=developer" 2>/dev/null) if [ $? -eq 0 ] && [ -n "$health_response" ]; then local server_alias @@ -250,14 +250,14 @@ test_mcp_server_restart() { if [ -n "$server_alias" ] && [ "$server_alias" != "null" ]; then log_info "Found MCP server alias: $server_alias" execute_curl "POST" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/$server_alias/restart" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/$server_alias/restart" \ "Restart MCP server '$server_alias' on node $FIRST_NODE_ID" \ "" \ "200" else log_warning "No MCP servers found for node $FIRST_NODE_ID, testing with dummy alias" execute_curl "POST" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/restart" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/restart" \ "Restart MCP server 'test-server' on node $FIRST_NODE_ID (should fail)" \ "" \ "404" @@ -265,7 +265,7 @@ test_mcp_server_restart() { else log_warning "Could not fetch MCP servers, testing with dummy alias" execute_curl "POST" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/restart" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/restart" \ "Restart MCP server 'test-server' on node $FIRST_NODE_ID (should fail)" \ "" \ "404" @@ -286,7 +286,7 @@ test_mcp_tools_listing() { # First, try to get available MCP servers for this node log_info "Getting available MCP servers for node $FIRST_NODE_ID..." 
local health_response - health_response=$(curl -s "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health?mode=developer" 2>/dev/null) + health_response=$(curl -s "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/health?mode=developer" 2>/dev/null) if [ $? -eq 0 ] && [ -n "$health_response" ]; then local server_alias @@ -295,14 +295,14 @@ test_mcp_tools_listing() { if [ -n "$server_alias" ] && [ "$server_alias" != "null" ]; then log_info "Found MCP server alias: $server_alias" execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/$server_alias/tools" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/$server_alias/tools" \ "Get tools for MCP server '$server_alias' on node $FIRST_NODE_ID" \ "" \ "200" else log_warning "No MCP servers found for node $FIRST_NODE_ID, testing with dummy alias" execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/tools" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/tools" \ "Get tools for MCP server 'test-server' on node $FIRST_NODE_ID (should fail)" \ "" \ "404" @@ -310,7 +310,7 @@ test_mcp_tools_listing() { else log_warning "Could not fetch MCP servers, testing with dummy alias" execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/tools" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/test-server/tools" \ "Get tools for MCP server 'test-server' on node $FIRST_NODE_ID (should fail)" \ "" \ "404" @@ -325,7 +325,7 @@ test_error_cases() { # Test with invalid node ID execute_curl "GET" \ - "$HAXEN_SERVER/api/ui/v1/nodes/invalid-node-id/mcp/health" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/invalid-node-id/mcp/health" \ "Get MCP health for invalid node ID (should fail)" \ "" \ "404" @@ -333,13 +333,13 @@ test_error_cases() { # Test with non-existent server alias if [ -n "$FIRST_NODE_ID" ]; then execute_curl "GET" \ - 
"$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/non-existent-server/tools" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/non-existent-server/tools" \ "Get tools for non-existent MCP server (should fail)" \ "" \ "404" execute_curl "POST" \ - "$HAXEN_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/non-existent-server/restart" \ + "$AGENTFIELD_SERVER/api/ui/v1/nodes/$FIRST_NODE_ID/mcp/servers/non-existent-server/restart" \ "Restart non-existent MCP server (should fail)" \ "" \ "404" @@ -357,7 +357,7 @@ test_sse_events() { # Test SSE connection briefly timeout 5 curl -s -H "Accept: text/event-stream" \ - "$HAXEN_SERVER/api/ui/v1/events" 2>/dev/null || true + "$AGENTFIELD_SERVER/api/ui/v1/events" 2>/dev/null || true log_info "SSE connection test completed (use browser or SSE client for full testing)" } @@ -371,13 +371,13 @@ main() { echo -e "${NC}" log_info "Starting MCP endpoints testing..." - log_info "Haxen Server: $HAXEN_SERVER" + log_info "AgentField Server: $AGENTFIELD_SERVER" log_info "Verbose Mode: $VERBOSE" echo "" # Pre-flight checks check_dependencies - check_haxen_server + check_agentfield_server get_test_nodes # Run all tests @@ -404,10 +404,10 @@ main() { log_info " - Server restart: Success confirmation message" log_info "" log_info "Common troubleshooting:" - log_info " - Ensure Haxen server is running: go run cmd/haxen-server/main.go" + log_info " - Ensure AgentField server is running: go run cmd/agentfield-server/main.go" log_info " - Ensure at least one agent is running and registered" log_info " - Check agent MCP server configurations" - log_info " - Verify network connectivity to $HAXEN_SERVER" + log_info " - Verify network connectivity to $AGENTFIELD_SERVER" echo "" } @@ -420,7 +420,7 @@ while [[ $# -gt 0 ]]; do shift ;; -s|--server) - HAXEN_SERVER="$2" + AGENTFIELD_SERVER="$2" shift 2 ;; -h|--help) @@ -428,11 +428,11 @@ while [[ $# -gt 0 ]]; do echo "" echo "Options:" echo " -v, --verbose Enable verbose curl 
output" - echo " -s, --server URL Set Haxen server URL (default: http://localhost:8080)" + echo " -s, --server URL Set AgentField server URL (default: http://localhost:8080)" echo " -h, --help Show this help message" echo "" echo "Environment variables:" - echo " HAXEN_SERVER Haxen server URL" + echo " AGENTFIELD_SERVER AgentField server URL" echo " VERBOSE Enable verbose mode (true/false)" echo " SLEEP_BETWEEN_TESTS Seconds to sleep between tests (default: 2)" exit 0 diff --git a/control-plane/scripts/version-manager.sh b/control-plane/scripts/version-manager.sh index d975198b..2fe32641 100755 --- a/control-plane/scripts/version-manager.sh +++ b/control-plane/scripts/version-manager.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Haxen Version Manager +# AgentField Version Manager # Handles version tracking and incrementing for releases set -e @@ -237,7 +237,7 @@ main() { show_version_info ;; "help"|"-h"|"--help") - echo "Haxen Version Manager" + echo "AgentField Version Manager" echo "" echo "Usage:" echo " $0 current Show current version" diff --git a/control-plane/tools/perf/README.md b/control-plane/tools/perf/README.md index aaea4680..de81e9af 100644 --- a/control-plane/tools/perf/README.md +++ b/control-plane/tools/perf/README.md @@ -5,20 +5,20 @@ path introduced in the orchestration refactor. ## Quick start -1. Build the gateway binary (or run `go run ./cmd/haxen-server`). The production +1. Build the gateway binary (or run `go run ./cmd/agentfield-server`). The production build script is still the source of truth: ```bash - cd haxen/apps/platform/haxen + cd agentfield/apps/platform/agentfield ./build-single-binary.sh - ./dist/releases/haxen-darwin-arm64 --config ./configs/local.yaml + ./dist/releases/agentfield-darwin-arm64 --config ./configs/local.yaml ``` 2. 
Install the load-test dependencies in a virtual environment (or build the optional Docker image described below): ```bash - cd haxen/apps/platform/haxen/tools/perf + cd agentfield/apps/platform/agentfield/tools/perf python -m venv .venv source .venv/bin/activate pip install -r requirements.txt @@ -48,7 +48,7 @@ path introduced in the orchestration refactor. Build the utility image from the repo root: ```bash -docker build -t haxen-perf ./apps/platform/haxen/tools/perf +docker build -t agentfield-perf ./apps/platform/agentfield/tools/perf ``` Run it by either supplying CLI arguments or setting environment variables. @@ -56,7 +56,7 @@ Examples (Linux users may need `--add-host host.docker.internal:host-gateway`): ```bash # Pass arguments directly -docker run --rm --network host haxen-perf \ +docker run --rm --network host agentfield-perf \ --base-url http://host.docker.internal:8080 \ --target demo-agent.synthetic_nested \ --mode async \ @@ -74,7 +74,7 @@ docker run --rm --network host \ -e PRINT_FAILURES=true \ -e METRICS_URL=http://host.docker.internal:8080/metrics \ -e METRICS=process_resident_memory_bytes,go_goroutines \ - haxen-perf + agentfield-perf ``` Supported environment variables when no CLI arguments are provided: @@ -101,24 +101,24 @@ so you can mix and match as needed. 
### Local gateway stack (SQLite/GORM) -To exercise the harness against a containerised Haxen server backed by the new +To exercise the harness against a containerised AgentField server backed by the new SQLite + GORM storage layer, reuse the Docker image from -`apps/platform/haxen/docker`: +`apps/platform/agentfield/docker`: ```bash -# 1) Build and run the Haxen server (see docker/README.md for details) -docker build -t haxen-local apps/platform/haxen/docker -docker run --rm -d --name haxen-local \ +# 1) Build and run the AgentField server (see docker/README.md for details) +docker build -t agentfield-local apps/platform/agentfield/docker +docker run --rm -d --name agentfield-local \ -p 8080:8080 \ - -v "$(pwd)/apps/platform/haxen/data:/app/data" \ - haxen-local + -v "$(pwd)/apps/platform/agentfield/data:/app/data" \ + agentfield-local # 2) Drive load with the harness (inside this directory) python nested_workflow_stress.py --base-url http://localhost:8080 \ --target demo-agent.synthetic_nested --requests 200 --concurrency 16 -# 3) Stop the Haxen container when finished -docker stop haxen-local +# 3) Stop the AgentField container when finished +docker stop agentfield-local ``` Because the server persists all durable state in SQLite, no external services or @@ -163,7 +163,7 @@ Example `scenarios.json`: - **Backpressure verification**: Drive queue overload by setting `--requests` well above steady-state capacity. Watch for HTTP 429 responses (`status_counts` will include `queue_full`) and confirm Prometheus metrics - (`haxen_gateway_queue_depth`, `haxen_gateway_backpressure_total`) move as + (`agentfield_gateway_queue_depth`, `agentfield_gateway_backpressure_total`) move as expected. - **Memory/CPU sampling**: While the stress harness is running, capture runtime stats (`go tool pprof`, `top`, `ps`, or `gops stats`). 
Persist the JSON metrics diff --git a/control-plane/tools/perf/nested_workflow_stress.py b/control-plane/tools/perf/nested_workflow_stress.py index d22d5d4a..6bfee488 100644 --- a/control-plane/tools/perf/nested_workflow_stress.py +++ b/control-plane/tools/perf/nested_workflow_stress.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -"""Lightweight load generator for Haxen durable execution flows. +"""Lightweight load generator for AgentField durable execution flows. This script exercises the /execute and /execute/async gateways with configurable concurrency, nested payload parameters, and adaptive polling. It records latency @@ -57,8 +57,8 @@ "process_resident_memory_bytes", "go_memstats_heap_alloc_bytes", "go_goroutines", - "haxen_gateway_queue_depth", - "haxen_worker_inflight", + "agentfield_gateway_queue_depth", + "agentfield_worker_inflight", ] import httpx @@ -89,8 +89,8 @@ def load_template(path: Optional[Path]) -> Optional[str]: def build_parser() -> argparse.ArgumentParser: - parser = argparse.ArgumentParser(description="Haxen durable execution load tester") - parser.add_argument("--base-url", default=os.getenv("HAXEN_BASE_URL", "http://localhost:8080")) + parser = argparse.ArgumentParser(description="AgentField durable execution load tester") + parser.add_argument("--base-url", default=os.getenv("AGENTFIELD_BASE_URL", "http://localhost:8080")) parser.add_argument("--target", required=True, help="Target in node.reasoner form") parser.add_argument("--mode", choices=["sync", "async"], default="sync") parser.add_argument("--requests", type=int, default=100) diff --git a/control-plane/web/client/.env b/control-plane/web/client/.env index 02d5f110..90e49d23 100644 --- a/control-plane/web/client/.env +++ b/control-plane/web/client/.env @@ -1,4 +1,4 @@ -# Development environment variables for Haxen UI +# Development environment variables for AgentField UI # These are defaults that can be overridden by .env.local or system environment variables # API Configuration @@ 
-15,5 +15,5 @@ VITE_BUILD_OUT_DIR=dist VITE_BUILD_SOURCEMAP=false # Application Configuration -VITE_APP_TITLE=Haxen Server Dashboard -VITE_APP_DESCRIPTION=Haxen AI Agent Orchestration Platform +VITE_APP_TITLE=AgentField Server Dashboard +VITE_APP_DESCRIPTION=AgentField AI Agent Orchestration Platform diff --git a/control-plane/web/client/.env.example b/control-plane/web/client/.env.example index 7b85a095..be677a80 100644 --- a/control-plane/web/client/.env.example +++ b/control-plane/web/client/.env.example @@ -1,4 +1,4 @@ -# Haxen UI Environment Variables +# AgentField UI Environment Variables # Copy this file to .env.local and modify values as needed # API Configuration @@ -15,8 +15,8 @@ VITE_BUILD_OUT_DIR=dist VITE_BUILD_SOURCEMAP=false # Application Configuration -VITE_APP_TITLE=Haxen Server Dashboard -VITE_APP_DESCRIPTION=Haxen AI Agent Orchestration Platform +VITE_APP_TITLE=AgentField Server Dashboard +VITE_APP_DESCRIPTION=AgentField AI Agent Orchestration Platform VITE_APP_VERSION=1.0.0 # Feature Flags (optional) diff --git a/control-plane/web/client/.env.production b/control-plane/web/client/.env.production index 9349073e..78f90f41 100644 --- a/control-plane/web/client/.env.production +++ b/control-plane/web/client/.env.production @@ -1,4 +1,4 @@ -# Production environment variables for Haxen UI +# Production environment variables for AgentField UI # API Configuration VITE_API_BASE_URL=/api/ui/v1 @@ -9,5 +9,5 @@ VITE_BUILD_OUT_DIR=dist VITE_BUILD_SOURCEMAP=false # Application Configuration -VITE_APP_TITLE=Haxen Server Dashboard -VITE_APP_DESCRIPTION=Haxen AI Agent Orchestration Platform +VITE_APP_TITLE=AgentField Server Dashboard +VITE_APP_DESCRIPTION=AgentField AI Agent Orchestration Platform diff --git a/control-plane/web/client/index.html b/control-plane/web/client/index.html index 9acc2ce7..af21b8d5 100644 --- a/control-plane/web/client/index.html +++ b/control-plane/web/client/index.html @@ -4,7 +4,7 @@ - Haxen - Open infrastructure for autonomous 
software + AgentField - Open infrastructure for autonomous software
diff --git a/control-plane/web/client/src/components/AgentNodesTable.tsx b/control-plane/web/client/src/components/AgentNodesTable.tsx index 5c204262..772ea7e3 100644 --- a/control-plane/web/client/src/components/AgentNodesTable.tsx +++ b/control-plane/web/client/src/components/AgentNodesTable.tsx @@ -1,5 +1,5 @@ import React, { useState } from 'react'; -import type { AgentNodeSummary, AgentNode } from '../types/haxen'; +import type { AgentNodeSummary, AgentNode } from '../types/agentfield'; import { Table, TableBody, @@ -69,7 +69,7 @@ const AgentNodesTable: React.FC = ({ nodes, isLoading, err

No Agent Nodes

-

No agent nodes are currently registered with the Haxen server.

+

No agent nodes are currently registered with the AgentField server.

); } diff --git a/control-plane/web/client/src/components/HealthBadge.tsx b/control-plane/web/client/src/components/HealthBadge.tsx index d9978954..9501e5ca 100644 --- a/control-plane/web/client/src/components/HealthBadge.tsx +++ b/control-plane/web/client/src/components/HealthBadge.tsx @@ -1,5 +1,5 @@ import React from 'react'; -import type { HealthStatus } from '../types/haxen'; +import type { HealthStatus } from '../types/agentfield'; import { cn } from '@/lib/utils'; interface HealthBadgeProps { diff --git a/control-plane/web/client/src/components/Navigation/Sidebar.tsx b/control-plane/web/client/src/components/Navigation/Sidebar.tsx index 6c703933..0ad9f604 100644 --- a/control-plane/web/client/src/components/Navigation/Sidebar.tsx +++ b/control-plane/web/client/src/components/Navigation/Sidebar.tsx @@ -54,7 +54,7 @@ export function Sidebar({ sections, isCollapsed = false }: SidebarProps) { typography["heading-sm"], "text-foreground font-semibold" )}> - Haxen + AgentField

- Haxen + AgentField Open Control Plane
diff --git a/control-plane/web/client/src/components/NodeCard.tsx b/control-plane/web/client/src/components/NodeCard.tsx index 114c4097..238374ad 100644 --- a/control-plane/web/client/src/components/NodeCard.tsx +++ b/control-plane/web/client/src/components/NodeCard.tsx @@ -14,7 +14,7 @@ import { startAgent, stopAgent, reconcileAgent } from "../services/configuration import { AgentControlButton, type AgentState } from "@/components/ui/AgentControlButton"; import { useDIDStatus } from "../hooks/useDIDInfo"; import { cn } from "../lib/utils"; -import type { AgentNodeSummary } from "../types/haxen"; +import type { AgentNodeSummary } from "../types/agentfield"; import type { DensityMode } from "./DensityToggle"; import { CompositeDIDStatus } from "./did/DIDStatusBadge"; import { DIDIdentityBadge } from "./did/DIDDisplay"; diff --git a/control-plane/web/client/src/components/NodesList.tsx b/control-plane/web/client/src/components/NodesList.tsx index 1bd16cff..a45839f7 100644 --- a/control-plane/web/client/src/components/NodesList.tsx +++ b/control-plane/web/client/src/components/NodesList.tsx @@ -1,5 +1,5 @@ import React, { useState, useEffect, useCallback } from 'react'; -import type { AgentNodeSummary } from '../types/haxen'; +import type { AgentNodeSummary } from '../types/agentfield'; import { getNodesSummary, streamNodeEvents } from '../services/api'; import AgentNodesTable from './AgentNodesTable'; import { Badge } from '@/components/ui/badge'; @@ -28,7 +28,7 @@ const NodesList: React.FC = () => { setTotalCount(data.count); } catch (err) { console.error('Failed to load nodes summary:', err); - setError('Failed to load agent nodes. Please ensure the Haxen server is running and accessible.'); + setError('Failed to load agent nodes. 
Please ensure the AgentField server is running and accessible.'); } finally { setIsLoading(false); } diff --git a/control-plane/web/client/src/components/NodesStatusSummary.tsx b/control-plane/web/client/src/components/NodesStatusSummary.tsx index d640f0ac..3a7a9eca 100644 --- a/control-plane/web/client/src/components/NodesStatusSummary.tsx +++ b/control-plane/web/client/src/components/NodesStatusSummary.tsx @@ -3,7 +3,7 @@ import { summarizeNodeStatuses, getNodeStatusPresentation } from "@/utils/node-s import { Checklist } from "@/components/ui/icon-bridge"; import { cn } from "@/lib/utils"; import { Card } from "@/components/ui/card"; -import type { AgentNodeSummary } from "../types/haxen"; +import type { AgentNodeSummary } from "../types/agentfield"; interface NodesStatusSummaryProps { nodes: AgentNodeSummary[]; diff --git a/control-plane/web/client/src/components/NodesVirtualList.tsx b/control-plane/web/client/src/components/NodesVirtualList.tsx index 10412899..2fe47984 100644 --- a/control-plane/web/client/src/components/NodesVirtualList.tsx +++ b/control-plane/web/client/src/components/NodesVirtualList.tsx @@ -10,7 +10,7 @@ import { EmptyMedia, EmptyTitle, } from "@/components/ui/empty"; -import type { AgentNodeSummary } from '../types/haxen'; +import type { AgentNodeSummary } from '../types/agentfield'; import { NodeCard } from './NodeCard'; import type { DensityMode } from './DensityToggle'; import { getStatusPriority } from './ui/status-indicator'; diff --git a/control-plane/web/client/src/components/ReasonersList.tsx b/control-plane/web/client/src/components/ReasonersList.tsx index 63833357..496fc75f 100644 --- a/control-plane/web/client/src/components/ReasonersList.tsx +++ b/control-plane/web/client/src/components/ReasonersList.tsx @@ -1,5 +1,5 @@ import React from 'react'; -import type { ReasonerDefinition } from '../types/haxen'; +import type { ReasonerDefinition } from '../types/agentfield'; import { Badge } from '@/components/ui/badge'; import { 
WatsonxAi } from '@/components/ui/icon-bridge'; diff --git a/control-plane/web/client/src/components/ReasonersSkillsTable.tsx b/control-plane/web/client/src/components/ReasonersSkillsTable.tsx index d53f199b..9bfcd4af 100644 --- a/control-plane/web/client/src/components/ReasonersSkillsTable.tsx +++ b/control-plane/web/client/src/components/ReasonersSkillsTable.tsx @@ -19,7 +19,7 @@ import { Identification, } from "@/components/ui/icon-bridge"; import { cn } from "@/lib/utils"; -import type { ReasonerDefinition, SkillDefinition } from "@/types/haxen"; +import type { ReasonerDefinition, SkillDefinition } from "@/types/agentfield"; import type { ReasonerDIDInfo, SkillDIDInfo } from "@/types/did"; interface ReasonersSkillsTableProps { diff --git a/control-plane/web/client/src/components/SkillsList.tsx b/control-plane/web/client/src/components/SkillsList.tsx index fd77a972..5d98c3b4 100644 --- a/control-plane/web/client/src/components/SkillsList.tsx +++ b/control-plane/web/client/src/components/SkillsList.tsx @@ -3,7 +3,7 @@ import { Button } from "@/components/ui/button"; import { Card, CardContent } from "@/components/ui/card"; import { Function, Information } from "@/components/ui/icon-bridge"; import React, { useState } from "react"; -import type { SkillDefinition } from "../types/haxen"; +import type { SkillDefinition } from "../types/agentfield"; import type { AgentDIDInfo, SkillDIDInfo } from "../types/did"; import { DIDIdentityBadge, DIDStatusBadge } from "./did/DIDStatusBadge"; import { useDIDNotifications } from "./ui/notification"; diff --git a/control-plane/web/client/src/components/did/DIDIdentityCard.tsx b/control-plane/web/client/src/components/did/DIDIdentityCard.tsx index 6a87c5b0..f541c4c5 100644 --- a/control-plane/web/client/src/components/did/DIDIdentityCard.tsx +++ b/control-plane/web/client/src/components/did/DIDIdentityCard.tsx @@ -223,7 +223,7 @@ export function DIDIdentityCard({ - Haxen Server: {didInfo.haxen_server_id} + AgentField Server: 
{didInfo.agentfield_server_id} diff --git a/control-plane/web/client/src/components/did/DIDInfoModal.tsx b/control-plane/web/client/src/components/did/DIDInfoModal.tsx index 4f159be8..e381a019 100644 --- a/control-plane/web/client/src/components/did/DIDInfoModal.tsx +++ b/control-plane/web/client/src/components/did/DIDInfoModal.tsx @@ -228,10 +228,10 @@ export function DIDInfoModal({ nodeId, isOpen, onClose }: DIDInfoModalProps) {
- Haxen Server: + AgentField Server: - {didInfo.haxen_server_id} + {didInfo.agentfield_server_id}
@@ -398,10 +398,10 @@ export function DIDInfoModal({ nodeId, isOpen, onClose }: DIDInfoModalProps) {
- Haxen Server: + AgentField Server:
- {didInfo.haxen_server_id} + {didInfo.agentfield_server_id}
diff --git a/control-plane/web/client/src/components/execution/EnhancedExecutionHeader.tsx b/control-plane/web/client/src/components/execution/EnhancedExecutionHeader.tsx index 57209435..0c469e97 100644 --- a/control-plane/web/client/src/components/execution/EnhancedExecutionHeader.tsx +++ b/control-plane/web/client/src/components/execution/EnhancedExecutionHeader.tsx @@ -375,11 +375,11 @@ export function EnhancedExecutionHeader({
Request: - {execution.haxen_request_id ? truncateId(execution.haxen_request_id) : 'n/a'} + {execution.agentfield_request_id ? truncateId(execution.agentfield_request_id) : 'n/a'} - {execution.haxen_request_id && ( + {execution.agentfield_request_id && ( Request: - {execution.haxen_request_id - ? truncateId(execution.haxen_request_id) + {execution.agentfield_request_id + ? truncateId(execution.agentfield_request_id) : "n/a"} - {execution.haxen_request_id && ( + {execution.agentfield_request_id && ( , title: "Getting started", - body: "Launch an agent node to register reasoners with Haxen. They will appear here as soon as they are online.", + body: "Launch an agent node to register reasoners with AgentField. They will appear here as soon as they are online.", }, }; diff --git a/control-plane/web/client/src/components/status/StatusBadge.tsx b/control-plane/web/client/src/components/status/StatusBadge.tsx index c65106a8..2aa04430 100644 --- a/control-plane/web/client/src/components/status/StatusBadge.tsx +++ b/control-plane/web/client/src/components/status/StatusBadge.tsx @@ -4,7 +4,7 @@ import type { AgentStatus, HealthStatus, LifecycleStatus, -} from "@/types/haxen"; +} from "@/types/agentfield"; import { cn } from "@/lib/utils"; import { statusTone, type StatusTone } from "@/lib/theme"; import { diff --git a/control-plane/web/client/src/components/status/StatusRefreshButton.tsx b/control-plane/web/client/src/components/status/StatusRefreshButton.tsx index 8a1caf8a..65d0ed99 100644 --- a/control-plane/web/client/src/components/status/StatusRefreshButton.tsx +++ b/control-plane/web/client/src/components/status/StatusRefreshButton.tsx @@ -1,6 +1,6 @@ import { Button } from "@/components/ui/button"; import { bulkNodeStatus, refreshNodeStatus } from "@/services/api"; -import type { AgentStatus } from "@/types/haxen"; +import type { AgentStatus } from "@/types/agentfield"; import { ArrowClockwise } from "@/components/ui/icon-bridge"; import { useState } from "react"; 
import { cn } from "@/lib/utils"; diff --git a/control-plane/web/client/src/components/status/UnifiedStatusIndicator.tsx b/control-plane/web/client/src/components/status/UnifiedStatusIndicator.tsx index 435ba18a..9b441070 100644 --- a/control-plane/web/client/src/components/status/UnifiedStatusIndicator.tsx +++ b/control-plane/web/client/src/components/status/UnifiedStatusIndicator.tsx @@ -1,5 +1,5 @@ import { Badge } from "@/components/ui/badge"; -import type { AgentState, AgentStatus } from "@/types/haxen"; +import type { AgentState, AgentStatus } from "@/types/agentfield"; import { cn } from "@/lib/utils"; import { statusTone, type StatusTone } from "@/lib/theme"; import type { ComponentProps } from "react"; diff --git a/control-plane/web/client/src/components/status/index.ts b/control-plane/web/client/src/components/status/index.ts index 453dd289..2dfcfcdc 100644 --- a/control-plane/web/client/src/components/status/index.ts +++ b/control-plane/web/client/src/components/status/index.ts @@ -11,4 +11,4 @@ export { } from './StatusBadge'; // Re-export types for convenience -export type { AgentStatus, AgentState, AgentStatusUpdate, StatusSource } from '../../types/haxen'; \ No newline at end of file +export type { AgentStatus, AgentState, AgentStatusUpdate, StatusSource } from '../../types/agentfield'; \ No newline at end of file diff --git a/control-plane/web/client/src/components/ui/data-formatters.tsx b/control-plane/web/client/src/components/ui/data-formatters.tsx index e6420ed4..a63a7c90 100644 --- a/control-plane/web/client/src/components/ui/data-formatters.tsx +++ b/control-plane/web/client/src/components/ui/data-formatters.tsx @@ -4,7 +4,7 @@ import { cn } from "../../lib/utils"; /** * Reusable data formatting components for consistent data display - * across the Haxen application. These components ensure uniform + * across the AgentField application. 
These components ensure uniform * formatting of timestamps, durations, file sizes, and other common data types. */ diff --git a/control-plane/web/client/src/components/ui/icon-bridge.tsx b/control-plane/web/client/src/components/ui/icon-bridge.tsx index 62a83ec1..981b5d44 100644 --- a/control-plane/web/client/src/components/ui/icon-bridge.tsx +++ b/control-plane/web/client/src/components/ui/icon-bridge.tsx @@ -159,7 +159,7 @@ export const BracketsCurly = BracketsCurlyIcon; export const CornersIn = CornersInIcon; export const Copy = CopySimpleIcon; export const CopySimple = CopySimpleIcon; -// Phosphor does not expose a dedicated Haxen glyph; reuse the Brain icon asset. +// Phosphor does not expose a dedicated AgentField glyph; reuse the Brain icon asset. export const Cognitive = CognitivePhosphorIcon; export const DataBase = DatabaseIcon; export const Database = DatabaseIcon; diff --git a/control-plane/web/client/src/components/ui/status-indicator.tsx b/control-plane/web/client/src/components/ui/status-indicator.tsx index 6b49fbe5..3e12a718 100644 --- a/control-plane/web/client/src/components/ui/status-indicator.tsx +++ b/control-plane/web/client/src/components/ui/status-indicator.tsx @@ -1,6 +1,6 @@ import { cn } from "@/lib/utils"; import React from "react"; -import type { HealthStatus, LifecycleStatus } from "../../types/haxen"; +import type { HealthStatus, LifecycleStatus } from "../../types/agentfield"; import type { CanonicalStatus } from "../../utils/status"; import { getStatusLabel, getStatusTheme, normalizeExecutionStatus } from "../../utils/status"; diff --git a/control-plane/web/client/src/components/vc/VerifiableCredentialBadge.tsx b/control-plane/web/client/src/components/vc/VerifiableCredentialBadge.tsx index 2c96cc37..3418a8a7 100644 --- a/control-plane/web/client/src/components/vc/VerifiableCredentialBadge.tsx +++ b/control-plane/web/client/src/components/vc/VerifiableCredentialBadge.tsx @@ -276,8 +276,8 @@ function VerificationModal({ )}
- Haxen Standard - {verificationResult.compliance_checks.haxen_standard_compliance ? ( + AgentField Standard + {verificationResult.compliance_checks.agentfield_standard_compliance ? ( ) : ( diff --git a/control-plane/web/client/src/contexts/ModeContext.tsx b/control-plane/web/client/src/contexts/ModeContext.tsx index 57bd7959..38f4cf4b 100644 --- a/control-plane/web/client/src/contexts/ModeContext.tsx +++ b/control-plane/web/client/src/contexts/ModeContext.tsx @@ -18,7 +18,7 @@ interface ModeProviderProps { export function ModeProvider({ children }: ModeProviderProps) { const [mode, setModeState] = useState(() => { // Load mode from localStorage on initialization - const savedMode = localStorage.getItem("haxen-app-mode"); + const savedMode = localStorage.getItem("agentfield-app-mode"); return savedMode === "developer" || savedMode === "user" ? savedMode : "developer"; @@ -26,7 +26,7 @@ export function ModeProvider({ children }: ModeProviderProps) { const setMode = (newMode: AppMode) => { setModeState(newMode); - localStorage.setItem("haxen-app-mode", newMode); + localStorage.setItem("agentfield-app-mode", newMode); }; const toggleMode = () => { @@ -36,7 +36,7 @@ export function ModeProvider({ children }: ModeProviderProps) { // Persist mode changes to localStorage useEffect(() => { - localStorage.setItem("haxen-app-mode", mode); + localStorage.setItem("agentfield-app-mode", mode); }, [mode]); return ( diff --git a/control-plane/web/client/src/hooks/useMCPHealth.ts b/control-plane/web/client/src/hooks/useMCPHealth.ts index ba70f621..72ef25dd 100644 --- a/control-plane/web/client/src/hooks/useMCPHealth.ts +++ b/control-plane/web/client/src/hooks/useMCPHealth.ts @@ -5,7 +5,7 @@ import type { MCPSummaryForUI, MCPHealthEvent, AppMode -} from '../types/haxen'; +} from '../types/agentfield'; import { getMCPHealthModeAware } from '../services/api'; import { useMode } from '../contexts/ModeContext'; import { useMCPHealthSSE } from './useSSE'; diff --git 
a/control-plane/web/client/src/hooks/useMCPMetrics.ts b/control-plane/web/client/src/hooks/useMCPMetrics.ts index 4e29d993..b0e6c035 100644 --- a/control-plane/web/client/src/hooks/useMCPMetrics.ts +++ b/control-plane/web/client/src/hooks/useMCPMetrics.ts @@ -3,7 +3,7 @@ import type { MCPServerMetrics, MCPNodeMetrics, MCPServerMetricsResponse -} from '../types/haxen'; +} from '../types/agentfield'; import { getMCPServerMetrics } from '../services/api'; import { calculatePerformanceMetrics } from '../utils/mcpUtils'; diff --git a/control-plane/web/client/src/hooks/useMCPServers.ts b/control-plane/web/client/src/hooks/useMCPServers.ts index c509f463..04ce2b65 100644 --- a/control-plane/web/client/src/hooks/useMCPServers.ts +++ b/control-plane/web/client/src/hooks/useMCPServers.ts @@ -3,7 +3,7 @@ import type { MCPServerActionResponse, MCPServerAction, MCPServerHealthForUI -} from '../types/haxen'; +} from '../types/agentfield'; import { startMCPServer, stopMCPServer, diff --git a/control-plane/web/client/src/hooks/useMCPTools.ts b/control-plane/web/client/src/hooks/useMCPTools.ts index 24efa853..5307d03c 100644 --- a/control-plane/web/client/src/hooks/useMCPTools.ts +++ b/control-plane/web/client/src/hooks/useMCPTools.ts @@ -3,7 +3,7 @@ import type { MCPTool, MCPToolsResponse, MCPToolTestResponse -} from '../types/haxen'; +} from '../types/agentfield'; import { getMCPTools, testMCPTool } from '../services/api'; import { validateToolParameters } from '../utils/mcpUtils'; diff --git a/control-plane/web/client/src/mcp/README.md b/control-plane/web/client/src/mcp/README.md index e609e032..6256d891 100644 --- a/control-plane/web/client/src/mcp/README.md +++ b/control-plane/web/client/src/mcp/README.md @@ -20,22 +20,22 @@ The MCP UI system is built with a modular architecture consisting of: ``` MCP UI System -β”œβ”€β”€ Components (haxen/web/client/src/components/mcp/) +β”œβ”€β”€ Components (agentfield/web/client/src/components/mcp/) β”‚ β”œβ”€β”€ MCPHealthIndicator - Status 
indicators and health displays β”‚ β”œβ”€β”€ MCPServerCard - Individual server information cards β”‚ β”œβ”€β”€ MCPServerList - List of all MCP servers β”‚ β”œβ”€β”€ MCPToolExplorer - Tool discovery and exploration β”‚ β”œβ”€β”€ MCPToolTester - Interactive tool testing interface β”‚ └── MCPServerControls - Bulk server management controls -β”œβ”€β”€ Hooks (haxen/web/client/src/hooks/) +β”œβ”€β”€ Hooks (agentfield/web/client/src/hooks/) β”‚ β”œβ”€β”€ useMCPHealth - Health monitoring and real-time updates β”‚ β”œβ”€β”€ useMCPServers - Server management operations β”‚ β”œβ”€β”€ useMCPTools - Tool discovery and execution β”‚ β”œβ”€β”€ useMCPMetrics - Performance metrics monitoring β”‚ └── useSSE - Server-Sent Events for real-time updates -β”œβ”€β”€ Utilities (haxen/web/client/src/utils/) +β”œβ”€β”€ Utilities (agentfield/web/client/src/utils/) β”‚ └── mcpUtils - Formatting, validation, and helper functions -└── Integration (haxen/web/client/src/mcp/) +└── Integration (agentfield/web/client/src/mcp/) └── index.ts - Centralized exports and integration patterns ``` @@ -345,19 +345,19 @@ if (process.env.NODE_ENV === 'development') { ### Adding New Components -1. Create component in `haxen/web/client/src/components/mcp/` +1. Create component in `agentfield/web/client/src/components/mcp/` 2. Add proper TypeScript types 3. Include accessibility features 4. Add error boundary support -5. Export from `haxen/web/client/src/components/mcp/index.ts` +5. Export from `agentfield/web/client/src/components/mcp/index.ts` ### Adding New Hooks -1. Create hook in `haxen/web/client/src/hooks/` +1. Create hook in `agentfield/web/client/src/hooks/` 2. Follow existing patterns for state management 3. Include proper cleanup and error handling 4. Add TypeScript documentation -5. Export from `haxen/web/client/src/mcp/index.ts` +5. 
Export from `agentfield/web/client/src/mcp/index.ts` ## API Reference diff --git a/control-plane/web/client/src/mcp/index.ts b/control-plane/web/client/src/mcp/index.ts index 09ef048c..fb6ffcc3 100644 --- a/control-plane/web/client/src/mcp/index.ts +++ b/control-plane/web/client/src/mcp/index.ts @@ -123,4 +123,4 @@ export type { MCPHealthResponseUser, MCPHealthResponseDeveloper, AppMode -} from '../types/haxen'; +} from '../types/agentfield'; diff --git a/control-plane/web/client/src/pages/ExecutionDetailPage.tsx b/control-plane/web/client/src/pages/ExecutionDetailPage.tsx index dfaafe96..c38829ca 100644 --- a/control-plane/web/client/src/pages/ExecutionDetailPage.tsx +++ b/control-plane/web/client/src/pages/ExecutionDetailPage.tsx @@ -224,9 +224,9 @@ export function ExecutionDetailPage() { onClick={execution.session_id ? navigateToSession : undefined} /> { const [selectedPackage, setSelectedPackage] = useState( diff --git a/control-plane/web/client/src/services/api.ts b/control-plane/web/client/src/services/api.ts index a8058acd..82d72f50 100644 --- a/control-plane/web/client/src/services/api.ts +++ b/control-plane/web/client/src/services/api.ts @@ -19,7 +19,7 @@ import type { ConfigSchemaResponse, AgentStatus, AgentStatusUpdate -} from '../types/haxen'; +} from '../types/agentfield'; const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || '/api/ui/v1'; diff --git a/control-plane/web/client/src/services/configurationApi.ts b/control-plane/web/client/src/services/configurationApi.ts index afbad36e..66a5132e 100644 --- a/control-plane/web/client/src/services/configurationApi.ts +++ b/control-plane/web/client/src/services/configurationApi.ts @@ -1,4 +1,4 @@ -import type { ConfigurationSchema, AgentConfiguration, AgentPackage, AgentLifecycleInfo } from '../types/haxen'; +import type { ConfigurationSchema, AgentConfiguration, AgentPackage, AgentLifecycleInfo } from '../types/agentfield'; const API_BASE = '/api/ui/v1'; diff --git 
a/control-plane/web/client/src/services/executionsApi.ts b/control-plane/web/client/src/services/executionsApi.ts index 68e4dbd3..21b173d0 100644 --- a/control-plane/web/client/src/services/executionsApi.ts +++ b/control-plane/web/client/src/services/executionsApi.ts @@ -155,7 +155,7 @@ function transformExecutionDetailsResponse(raw: any): WorkflowExecution { id: raw.id, workflow_id: raw.workflow_id, execution_id: raw.execution_id, - haxen_request_id: raw.haxen_request_id ?? "", + agentfield_request_id: raw.agentfield_request_id ?? "", session_id: raw.session_id ?? undefined, actor_id: raw.actor_id ?? undefined, agent_node_id: raw.agent_node_id, diff --git a/control-plane/web/client/src/services/searchService.ts b/control-plane/web/client/src/services/searchService.ts index 383c23fe..8cf92c77 100644 --- a/control-plane/web/client/src/services/searchService.ts +++ b/control-plane/web/client/src/services/searchService.ts @@ -1,5 +1,5 @@ import { getNodesSummary } from './api'; -import type { AgentNodeSummary } from '../types/haxen'; +import type { AgentNodeSummary } from '../types/agentfield'; export interface SearchResult { id: string; diff --git a/control-plane/web/client/src/types/haxen.ts b/control-plane/web/client/src/types/agentfield.ts similarity index 100% rename from control-plane/web/client/src/types/haxen.ts rename to control-plane/web/client/src/types/agentfield.ts diff --git a/control-plane/web/client/src/types/did.ts b/control-plane/web/client/src/types/did.ts index 555780f6..5de371b6 100644 --- a/control-plane/web/client/src/types/did.ts +++ b/control-plane/web/client/src/types/did.ts @@ -3,7 +3,7 @@ export interface AgentDIDInfo { did: string; agent_node_id: string; - haxen_server_id: string; + agentfield_server_id: string; public_key_jwk: any; derivation_path: string; reasoners: Record; @@ -76,7 +76,7 @@ export interface DIDIdentityPackage { agent_did: DIDIdentity; reasoner_dids: Record; skill_dids: Record; - haxen_server_id: string; + 
agentfield_server_id: string; } export interface DIDIdentity { @@ -222,7 +222,7 @@ export interface SecurityAnalysis { export interface ComplianceChecks { w3c_compliance: boolean; - haxen_standard_compliance: boolean; + agentfield_standard_compliance: boolean; audit_trail_integrity: boolean; data_integrity_checks: boolean; issues: VerificationIssue[]; @@ -265,7 +265,7 @@ export interface DIDResolutionEntry { } export interface DIDFilters { - haxen_server_id?: string; + agentfield_server_id?: string; agent_node_id?: string; component_type?: string; status?: AgentDIDStatus; diff --git a/control-plane/web/client/src/types/executions.ts b/control-plane/web/client/src/types/executions.ts index 92ced2a7..27861dc3 100644 --- a/control-plane/web/client/src/types/executions.ts +++ b/control-plane/web/client/src/types/executions.ts @@ -98,7 +98,7 @@ export interface WorkflowExecution { id: number; workflow_id: string; execution_id: string; - haxen_request_id: string; + agentfield_request_id: string; session_id?: string; actor_id?: string; agent_node_id: string; diff --git a/control-plane/web/client/src/utils/mcpUtils.ts b/control-plane/web/client/src/utils/mcpUtils.ts index d08c7e15..69f19002 100644 --- a/control-plane/web/client/src/utils/mcpUtils.ts +++ b/control-plane/web/client/src/utils/mcpUtils.ts @@ -5,7 +5,7 @@ import type { MCPServerStatus, MCPHealthEvent, AppMode -} from '../types/haxen'; +} from '../types/agentfield'; /** * Status color mapping for MCP servers diff --git a/control-plane/web/client/src/utils/node-status.ts b/control-plane/web/client/src/utils/node-status.ts index 35286a1c..126a39a7 100644 --- a/control-plane/web/client/src/utils/node-status.ts +++ b/control-plane/web/client/src/utils/node-status.ts @@ -2,7 +2,7 @@ import type { AgentNodeSummary, HealthStatus, LifecycleStatus, -} from "@/types/haxen"; +} from "@/types/agentfield"; import { getStatusTheme, type CanonicalStatus, diff --git a/control-plane/web/client/ui_embed.go 
b/control-plane/web/client/ui_embed.go index 6a67b50f..c44697ca 100644 --- a/control-plane/web/client/ui_embed.go +++ b/control-plane/web/client/ui_embed.go @@ -1,4 +1,4 @@ -// UI embedding and route registration for Haxen +// UI embedding and route registration for AgentField package client @@ -29,7 +29,7 @@ func RegisterUIRoutes(router *gin.Engine) { router.GET("/ui/*filepath", func(c *gin.Context) { path := c.Param("filepath") - + // If accessing root UI path or a directory, serve index.html if path == "/" || path == "" || strings.HasSuffix(path, "/") { indexHTML, err := UIFiles.ReadFile("dist/index.html") @@ -43,7 +43,7 @@ func RegisterUIRoutes(router *gin.Engine) { c.String(http.StatusOK, string(indexHTML)) return } - + // Check if it's a static asset by looking for common web asset file extensions // This prevents reasoner IDs with dots (like "deepresearchagent.meta_research_methodology_reasoner") // from being treated as static assets @@ -71,7 +71,7 @@ func RegisterUIRoutes(router *gin.Engine) { http.StripPrefix("/ui", fileServer).ServeHTTP(c.Writer, c.Request) return } - + // For all other paths (SPA routes), serve index.html indexHTML, err := UIFiles.ReadFile("dist/index.html") if err != nil { diff --git a/deployments/docker/Dockerfile.control-plane b/deployments/docker/Dockerfile.control-plane index cddbe7a0..2a27c4c1 100644 --- a/deployments/docker/Dockerfile.control-plane +++ b/deployments/docker/Dockerfile.control-plane @@ -6,7 +6,7 @@ RUN npm install COPY control-plane/web/client/ . 
RUN npm run build -FROM golang:1.23-alpine AS go-builder +FROM golang:1.24-alpine AS go-builder WORKDIR /app COPY control-plane/go.mod control-plane/go.sum ./control-plane/ @@ -17,10 +17,10 @@ COPY control-plane/ ./control-plane/ COPY --from=ui-builder /app/web/dist ./control-plane/web/client/dist RUN --mount=type=cache,target=/root/.cache/go-build --mount=type=cache,target=/go/pkg/mod \ - cd control-plane && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /app/bin/haxen-server ./cmd/haxen-server + cd control-plane && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /app/bin/agentfield-server ./cmd/agentfield-server FROM gcr.io/distroless/base-debian12 -COPY --from=go-builder /app/bin/haxen-server /usr/local/bin/haxen-server -COPY --from=go-builder /app/control-plane/config /etc/haxen/config +COPY --from=go-builder /app/bin/agentfield-server /usr/local/bin/agentfield-server +COPY --from=go-builder /app/control-plane/config /etc/agentfield/config USER nonroot:nonroot -ENTRYPOINT ["/usr/local/bin/haxen-server"] +ENTRYPOINT ["/usr/local/bin/agentfield-server"] diff --git a/deployments/docker/Dockerfile.go-agent b/deployments/docker/Dockerfile.go-agent index eee342fc..3e1d8bb3 100644 --- a/deployments/docker/Dockerfile.go-agent +++ b/deployments/docker/Dockerfile.go-agent @@ -1,4 +1,4 @@ -FROM golang:1.23-alpine +FROM golang:1.24-alpine WORKDIR /workspace @@ -9,4 +9,4 @@ RUN --mount=type=cache,target=/root/.cache/go-build --mount=type=cache,target=/g ENV GO111MODULE=on -CMD ["sh", "-c", "echo 'Haxen Go agent image ready. Mount or COPY your agent source and override CMD to run it.' && tail -f /dev/null"] +CMD ["sh", "-c", "echo 'AgentField Go agent image ready. Mount or COPY your agent source and override CMD to run it.' 
&& tail -f /dev/null"] diff --git a/deployments/docker/Dockerfile.python-agent b/deployments/docker/Dockerfile.python-agent index 7892a408..dedae718 100644 --- a/deployments/docker/Dockerfile.python-agent +++ b/deployments/docker/Dockerfile.python-agent @@ -11,4 +11,4 @@ RUN pip install --no-cache-dir --upgrade pip && \ pip install --no-cache-dir /tmp/python-sdk && \ rm -rf /tmp/python-sdk -CMD ["python", "-c", "print('Haxen Python agent image ready. Override CMD to run your agent.')"] +CMD ["python", "-c", "print('AgentField Python agent image ready. Override CMD to run your agent.')"] diff --git a/deployments/docker/README.md b/deployments/docker/README.md index 8275b4b1..73f2adb5 100644 --- a/deployments/docker/README.md +++ b/deployments/docker/README.md @@ -1,4 +1,4 @@ -# Haxen Docker Deployments +# AgentField Docker Deployments This directory contains reference Dockerfiles and a Compose stack for local development. diff --git a/deployments/docker/docker-compose.yml b/deployments/docker/docker-compose.yml index 5dcfa79c..3c6788ef 100644 --- a/deployments/docker/docker-compose.yml +++ b/deployments/docker/docker-compose.yml @@ -5,9 +5,9 @@ services: image: postgres:15-alpine restart: unless-stopped environment: - POSTGRES_USER: haxen - POSTGRES_PASSWORD: haxen - POSTGRES_DB: haxen + POSTGRES_USER: agentfield + POSTGRES_PASSWORD: agentfield + POSTGRES_DB: agentfield volumes: - pgdata:/var/lib/postgresql/data @@ -17,8 +17,8 @@ services: context: ../.. 
dockerfile: deployments/docker/Dockerfile.control-plane environment: - HAXEN_DATABASE_URL: postgres://haxen:haxen@postgres:5432/haxen?sslmode=disable - HAXEN_HTTP_ADDR: 0.0.0.0:8080 + AGENTFIELD_DATABASE_URL: postgres://agentfield:agentfield@postgres:5432/agentfield?sslmode=disable + AGENTFIELD_HTTP_ADDR: 0.0.0.0:8080 ports: - "8080:8080" depends_on: diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 2e1bb2f4..9209ef46 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -1,6 +1,6 @@ -# Haxen Architecture +# AgentField Architecture -Haxen provides a modular platform for orchestrating AI agents. The system is composed of a Go-based control plane, SDKs for client languages, and optional runtime services. +AgentField provides a modular platform for orchestrating AI agents. The system is composed of a Go-based control plane, SDKs for client languages, and optional runtime services. ## High-Level Overview diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index f9e807b1..da8b26d8 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing Guide -Thank you for your interest in contributing to Haxen! This guide outlines how to propose changes, report issues, and participate in the community. +Thank you for your interest in contributing to AgentField! This guide outlines how to propose changes, report issues, and participate in the community. ## Ground Rules diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md index 75f86698..585c0941 100644 --- a/docs/DEVELOPMENT.md +++ b/docs/DEVELOPMENT.md @@ -1,6 +1,6 @@ # Development Guide -This document provides instructions for working on the Haxen monorepo locally. +This document provides instructions for working on the AgentField monorepo locally. ## Prerequisites @@ -12,8 +12,8 @@ This document provides instructions for working on the Haxen monorepo locally. 
## Initial Setup ```bash -git clone https://github.com/your-org/haxen.git -cd haxen +git clone https://github.com/Agent-Field/agentfield.git +cd agentfield ./scripts/install.sh ``` @@ -33,29 +33,29 @@ The install script performs: ## Useful Commands -| Action | Command | -| --- | --- | -| Build everything | `./scripts/build-all.sh` | -| Run tests | `./scripts/test-all.sh` | -| Format Go code | `make fmt` | -| Tidy Go modules | `make tidy` | -| Run the control plane | `cd control-plane && go run cmd/server/main.go` | -| Run UI in development | `cd control-plane/web && npm run dev` | -| Start local stack | `docker compose -f deployments/docker/docker-compose.yml up` | +| Action | Command | +| --------------------- | ------------------------------------------------------------ | +| Build everything | `./scripts/build-all.sh` | +| Run tests | `./scripts/test-all.sh` | +| Format Go code | `make fmt` | +| Tidy Go modules | `make tidy` | +| Run the control plane | `cd control-plane && go run cmd/server/main.go` | +| Run UI in development | `cd control-plane/web && npm run dev` | +| Start local stack | `docker compose -f deployments/docker/docker-compose.yml up` | ## Environment Variables Copy `control-plane/config/.env.example` to `.env` (if available) and adjust: -- `HAXEN_DATABASE_URL` β€” PostgreSQL connection string. -- `HAXEN_JWT_SECRET` β€” Authentication secret (development only). +- `AGENTFIELD_DATABASE_URL` β€” PostgreSQL connection string. +- `AGENTFIELD_JWT_SECRET` β€” Authentication secret (development only). 
## Database Migrations ```bash cd control-plane -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" status -goose -dir ./migrations postgres "$HAXEN_DATABASE_URL" up +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" status +goose -dir ./migrations postgres "$AGENTFIELD_DATABASE_URL" up ``` ## Frontend Development diff --git a/docs/RELEASING.md b/docs/RELEASING.md index e820fb6d..2d328655 100644 --- a/docs/RELEASING.md +++ b/docs/RELEASING.md @@ -1,13 +1,13 @@ -# Releasing Haxen +# Releasing AgentField -This guide explains how maintainers publish new versions of the Haxen control plane, SDKs, and container images. +This guide explains how maintainers publish new versions of the AgentField control plane, SDKs, and container images. ## Versioning - Tag releases with semantic versions: `vMAJOR.MINOR.PATCH`. - The tag drives the automation. Tagging `main` (or dispatching the workflow manually) kicks off Go binary builds, Python package publishing, and Docker image pushes. - Update component versions before tagging: - - `sdk/python/pyproject.toml` & `sdk/python/haxen_sdk/__init__.py` + - `sdk/python/pyproject.toml` & `sdk/python/agentfield/__init__.py` - Release notes in `CHANGELOG.md` ## Required Secrets @@ -35,11 +35,11 @@ What happens: 3. Build the control plane UI (`npm install && npm run build`). 4. Run [GoReleaser](https://goreleaser.com) using `.goreleaser.yml` to produce multi-platform binaries and attach them to the GitHub release. 5. Build the Python SDK (`python -m build`) and, if enabled, publish to PyPI with `twine upload`. -6. Build and push the `Dockerfile.control-plane` image (defaults to `ghcr.io//haxen-control-plane:`). +6. Build and push the `Dockerfile.control-plane` image (defaults to `ghcr.io//agentfield-control-plane:`). Artifacts: -- Release binaries (`haxen-server` for Linux/Darwin/Windows, amd64/arm64). +- Release binaries (`agentfield-server` for Linux/Darwin/Windows, amd64/arm64). 
- Python SDK wheel & sdist on PyPI (and attached to the release for manual runs). - Multi-architecture Docker image. @@ -54,9 +54,9 @@ Use `workflow_dispatch` to stage a release without pushing external artifacts: ## Testing Release Artifacts -- **Go binaries**: download from the release page or workflow artifacts and run `haxen-server --help`. Cross-platform builds are generated for Linux (amd64/arm64), Darwin (amd64/arm64), and Windows (amd64). -- **Python package**: install locally via `pip install --index-url https://test.pypi.org/simple haxen-sdk` if you push to TestPyPI first, or install from the generated wheel. -- **Docker image**: `docker run --rm ghcr.io//haxen-control-plane: --help`. +- **Go binaries**: download from the release page or workflow artifacts and run `agentfield-server --help`. Cross-platform builds are generated for Linux (amd64/arm64), Darwin (amd64/arm64), and Windows (amd64). +- **Python package**: install locally via `pip install --index-url https://test.pypi.org/simple agentfield` if you push to TestPyPI first, or install from the generated wheel. +- **Docker image**: `docker run --rm ghcr.io//agentfield-control-plane: --help`. ## Emergency Fixes diff --git a/sdk/go/README.md b/sdk/go/README.md index 0de17fa8..53442f00 100644 --- a/sdk/go/README.md +++ b/sdk/go/README.md @@ -1,11 +1,11 @@ -# Haxen Go SDK +# AgentField Go SDK -The Haxen Go SDK provides idiomatic Go bindings for interacting with the Haxen control plane. +The AgentField Go SDK provides idiomatic Go bindings for interacting with the AgentField control plane. 
## Installation ```bash -go get github.com/agentfield/haxen/sdk/go +go get github.com/Agent-Field/agentfield/sdk/go ``` ## Quick Start @@ -17,13 +17,13 @@ import ( "context" "log" - haxenagent "github.com/agentfield/haxen/sdk/go/agent" + agentfieldagent "github.com/Agent-Field/agentfield/sdk/go/agent" ) func main() { - agent, err := haxenagent.New(haxenagent.Config{ + agent, err := agentfieldagent.New(agentfieldagent.Config{ NodeID: "example-agent", - HaxenURL: "http://localhost:8080", + AgentFieldURL: "http://localhost:8080", }) if err != nil { log.Fatal(err) @@ -41,8 +41,8 @@ func main() { ## Modules -- `agent`: Build Haxen-compatible agents and register reasoners/skills. -- `client`: Low-level HTTP client for the Haxen control plane. +- `agent`: Build AgentField-compatible agents and register reasoners/skills. +- `client`: Low-level HTTP client for the AgentField control plane. - `types`: Shared data structures and contracts. - `ai`: Helpers for interacting with AI providers via the control plane. diff --git a/sdk/go/agent/agent.go b/sdk/go/agent/agent.go index 3f9a2801..f52019a6 100644 --- a/sdk/go/agent/agent.go +++ b/sdk/go/agent/agent.go @@ -17,14 +17,14 @@ import ( "syscall" "time" - "github.com/agentfield/haxen/sdk/go/ai" - "github.com/agentfield/haxen/sdk/go/client" - "github.com/agentfield/haxen/sdk/go/types" + "github.com/Agent-Field/agentfield/sdk/go/ai" + "github.com/Agent-Field/agentfield/sdk/go/client" + "github.com/Agent-Field/agentfield/sdk/go/types" ) type executionContextKey struct{} -// ExecutionContext captures the headers Haxen sends with each execution request. +// ExecutionContext captures the headers AgentField sends with each execution request. 
type ExecutionContext struct { RunID string ExecutionID string @@ -74,7 +74,7 @@ type Config struct { NodeID string Version string TeamID string - HaxenURL string + AgentFieldURL string ListenAddress string PublicURL string Token string @@ -121,8 +121,8 @@ func New(cfg Config) (*Agent, error) { if cfg.TeamID == "" { cfg.TeamID = "default" } - if cfg.HaxenURL == "" { - return nil, errors.New("config.HaxenURL is required") + if cfg.AgentFieldURL == "" { + return nil, errors.New("config.AgentFieldURL is required") } if cfg.ListenAddress == "" { cfg.ListenAddress = ":8001" @@ -141,7 +141,7 @@ func New(cfg Config) (*Agent, error) { Timeout: 15 * time.Second, } - c, err := client.New(cfg.HaxenURL, client.WithHTTPClient(httpClient), client.WithBearerToken(cfg.Token)) + c, err := client.New(cfg.AgentFieldURL, client.WithHTTPClient(httpClient), client.WithBearerToken(cfg.Token)) if err != nil { return nil, err } @@ -203,7 +203,7 @@ func (a *Agent) RegisterReasoner(name string, handler HandlerFunc, opts ...Reaso a.reasoners[name] = meta } -// Initialize registers the agent with the Haxen control plane without starting a listener. +// Initialize registers the agent with the AgentField control plane without starting a listener. func (a *Agent) Initialize(ctx context.Context) error { a.initMu.Lock() defer a.initMu.Unlock() @@ -295,7 +295,7 @@ func (a *Agent) registerNode(ctx context.Context) error { return err } - a.logger.Printf("node %s registered with Haxen", a.cfg.NodeID) + a.logger.Printf("node %s registered with AgentField", a.cfg.NodeID) return nil } @@ -399,7 +399,7 @@ func (a *Agent) handleReasoner(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, result) } -// Call invokes another reasoner via the Haxen control plane, preserving execution context. +// Call invokes another reasoner via the AgentField control plane, preserving execution context. 
func (a *Agent) Call(ctx context.Context, target string, input map[string]any) (map[string]any, error) { if !strings.Contains(target, ".") { target = fmt.Sprintf("%s.%s", a.cfg.NodeID, strings.TrimPrefix(target, ".")) @@ -417,7 +417,7 @@ func (a *Agent) Call(ctx context.Context, target string, input map[string]any) ( return nil, fmt.Errorf("marshal call payload: %w", err) } - url := fmt.Sprintf("%s/api/v1/execute/%s", strings.TrimSuffix(a.cfg.HaxenURL, "/"), strings.TrimPrefix(target, "/")) + url := fmt.Sprintf("%s/api/v1/execute/%s", strings.TrimSuffix(a.cfg.AgentFieldURL, "/"), strings.TrimPrefix(target, "/")) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) if err != nil { return nil, fmt.Errorf("build request: %w", err) diff --git a/sdk/go/ai/README.md b/sdk/go/ai/README.md index b5cf1acf..dc80d364 100644 --- a/sdk/go/ai/README.md +++ b/sdk/go/ai/README.md @@ -1,6 +1,6 @@ # Go SDK AI Package -This package provides AI/LLM capabilities for the Haxen Go SDK, supporting both OpenAI and OpenRouter APIs with structured output support. +This package provides AI/LLM capabilities for the AgentField Go SDK, supporting both OpenAI and OpenRouter APIs with structured output support. 
## Features @@ -18,8 +18,8 @@ This package provides AI/LLM capabilities for the Haxen Go SDK, supporting both ```go import ( "context" - "github.com/agentfield/haxen/sdk/go/agent" - "github.com/agentfield/haxen/sdk/go/ai" + "github.com/Agent-Field/agentfield/sdk/go/agent" + "github.com/Agent-Field/agentfield/sdk/go/ai" ) // Create agent with AI configured @@ -27,7 +27,7 @@ aiConfig := ai.DefaultConfig() // Reads from env vars agent, err := agent.New(agent.Config{ NodeID: "my-agent", Version: "1.0.0", - HaxenURL: "http://localhost:8080", + AgentFieldURL: "http://localhost:8080", AIConfig: aiConfig, }) @@ -205,14 +205,14 @@ agent.RegisterReasoner("smart_reasoner", func(ctx context.Context, input map[str The Go SDK provides similar functionality to the Python SDK's `agent.ai()` method: -| Feature | Python SDK | Go SDK | -|---------|-----------|--------| -| Simple text calls | `agent.ai("prompt")` | `agent.AI(ctx, "prompt")` | -| System prompts | `system="..."` kwarg | `ai.WithSystem("...")` option | -| Structured outputs | `schema=Model` kwarg | `ai.WithSchema(Model{})` option | -| Streaming | `stream=True` kwarg | `agent.AIStream()` method | -| Model override | `model="..."` kwarg | `ai.WithModel("...")` option | -| Temperature | `temperature=0.7` kwarg | `ai.WithTemperature(0.7)` option | +| Feature | Python SDK | Go SDK | +| ------------------ | ----------------------- | -------------------------------- | +| Simple text calls | `agent.ai("prompt")` | `agent.AI(ctx, "prompt")` | +| System prompts | `system="..."` kwarg | `ai.WithSystem("...")` option | +| Structured outputs | `schema=Model` kwarg | `ai.WithSchema(Model{})` option | +| Streaming | `stream=True` kwarg | `agent.AIStream()` method | +| Model override | `model="..."` kwarg | `ai.WithModel("...")` option | +| Temperature | `temperature=0.7` kwarg | `ai.WithTemperature(0.7)` option | ## Error Handling diff --git a/sdk/go/client/client.go b/sdk/go/client/client.go index d2e5ec1a..49699df3 100644 --- 
a/sdk/go/client/client.go +++ b/sdk/go/client/client.go @@ -12,10 +12,10 @@ import ( "strings" "time" - "github.com/agentfield/haxen/sdk/go/types" + "github.com/Agent-Field/agentfield/sdk/go/types" ) -// Client provides a thin wrapper over the Haxen control plane REST API. +// Client provides a thin wrapper over the AgentField control plane REST API. type Client struct { baseURL *url.URL httpClient *http.Client @@ -193,12 +193,12 @@ func (c *Client) legacyHeartbeat(ctx context.Context, nodeID string, payload typ }, nil } -// APIError captures non-success responses from the Haxen API. +// APIError captures non-success responses from the AgentField API. type APIError struct { StatusCode int Body []byte } func (e *APIError) Error() string { - return fmt.Sprintf("haxen api error (%d): %s", e.StatusCode, strings.TrimSpace(string(e.Body))) + return fmt.Sprintf("agentfield api error (%d): %s", e.StatusCode, strings.TrimSpace(string(e.Body))) } diff --git a/sdk/go/go.mod b/sdk/go/go.mod index ee0b8de9..f5fa7aba 100644 --- a/sdk/go/go.mod +++ b/sdk/go/go.mod @@ -1,3 +1,3 @@ -module github.com/agentfield/haxen/sdk/go +module github.com/Agent-Field/agentfield/sdk/go go 1.21 diff --git a/sdk/go/types/types.go b/sdk/go/types/types.go index d67e2f24..c20cab5d 100644 --- a/sdk/go/types/types.go +++ b/sdk/go/types/types.go @@ -5,7 +5,7 @@ import ( "time" ) -// ReasonerDefinition mirrors the Haxen server registration contract. +// ReasonerDefinition mirrors the AgentField server registration contract. type ReasonerDefinition struct { ID string `json:"id"` InputSchema json.RawMessage `json:"input_schema"` diff --git a/sdk/python/CHANGELOG.md b/sdk/python/CHANGELOG.md index 9b65c3aa..640219d4 100644 --- a/sdk/python/CHANGELOG.md +++ b/sdk/python/CHANGELOG.md @@ -1,5 +1,5 @@ # Changelog ## [0.1.0] - 2024-XX-XX -- Initial public release of the Haxen Python SDK. +- Initial public release of the AgentField Python SDK. 
- Provides agent runtime, workflow helpers, async execution, and credential tooling. diff --git a/sdk/python/MANIFEST.in b/sdk/python/MANIFEST.in index 8593f925..fd760729 100644 --- a/sdk/python/MANIFEST.in +++ b/sdk/python/MANIFEST.in @@ -1,7 +1,7 @@ include README.md include LICENSE include requirements.txt -recursive-include haxen_sdk *.py +recursive-include agentfield *.py exclude examples/* exclude tests/* global-exclude *.pyc diff --git a/sdk/python/README.md b/sdk/python/README.md index 783c0944..940605bf 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -1,29 +1,29 @@ -# Haxen Python SDK +# AgentField Python SDK -The Haxen SDK provides a production-ready Python interface for registering agents, executing workflows, and integrating with the Haxen control plane. +The AgentField SDK provides a production-ready Python interface for registering agents, executing workflows, and integrating with the AgentField control plane. ## Installation ```bash -pip install haxen-sdk +pip install agentfield ``` To work on the SDK locally: ```bash -git clone https://github.com/your-org/haxen.git -cd haxen/sdk/python +git clone https://github.com/Agent-Field/agentfield.git +cd agentfield/sdk/python python -m pip install -e .[dev] ``` ## Quick Start ```python -from haxen_sdk import Agent +from agentfield import Agent agent = Agent( node_id="example-agent", - haxen_server="http://localhost:8080", + agentfield_server="http://localhost:8080", dev_mode=True, ) @@ -50,7 +50,7 @@ pytest To run coverage locally: ```bash -pytest --cov=haxen_sdk --cov-report=term-missing +pytest --cov=agentfield --cov-report=term-missing ``` ## License diff --git a/sdk/python/haxen_sdk/README_stdio_bridge.md b/sdk/python/agentfield/README_stdio_bridge.md similarity index 93% rename from sdk/python/haxen_sdk/README_stdio_bridge.md rename to sdk/python/agentfield/README_stdio_bridge.md index 18202f36..5857fd34 100644 --- a/sdk/python/haxen_sdk/README_stdio_bridge.md +++ 
b/sdk/python/agentfield/README_stdio_bridge.md @@ -4,7 +4,7 @@ The `mcp_stdio_bridge.py` module provides a bridge that converts stdio-based MCP ## Overview -Some MCP servers (like `@modelcontextprotocol/server-sequential-thinking`) use stdio transport instead of HTTP. The current Haxen SDK implementation assumes all servers are HTTP-based, causing failures when trying to communicate with stdio servers. This bridge solves that problem. +Some MCP servers (like `@modelcontextprotocol/server-sequential-thinking`) use stdio transport instead of HTTP. The current AgentField SDK implementation assumes all servers are HTTP-based, causing failures when trying to communicate with stdio servers. This bridge solves that problem. ## How It Works @@ -28,7 +28,7 @@ Some MCP servers (like `@modelcontextprotocol/server-sequential-thinking`) use s ```python import asyncio -from haxen_sdk.mcp_stdio_bridge import StdioMCPBridge +from agentfield.mcp_stdio_bridge import StdioMCPBridge async def main(): # Configure your stdio MCP server @@ -95,8 +95,8 @@ async def test_bridge(): The bridge is designed to work seamlessly with the existing `MCPClient`: ```python -from haxen_sdk.mcp_client import MCPClient -from haxen_sdk.mcp_stdio_bridge import StdioMCPBridge +from agentfield.mcp_client import MCPClient +from agentfield.mcp_stdio_bridge import StdioMCPBridge # Start the bridge bridge = StdioMCPBridge(server_config, port=8200) diff --git a/sdk/python/haxen_sdk/__init__.py b/sdk/python/agentfield/__init__.py similarity index 100% rename from sdk/python/haxen_sdk/__init__.py rename to sdk/python/agentfield/__init__.py diff --git a/sdk/python/haxen_sdk/agent.py b/sdk/python/agentfield/agent.py similarity index 93% rename from sdk/python/haxen_sdk/agent.py rename to sdk/python/agentfield/agent.py index 9cc44b46..305df4a9 100644 --- a/sdk/python/haxen_sdk/agent.py +++ b/sdk/python/agentfield/agent.py @@ -20,29 +20,29 @@ Dict, Literal, ) -from haxen_sdk.agent_ai import AgentAI -from 
haxen_sdk.agent_haxen import AgentHaxen -from haxen_sdk.agent_mcp import AgentMCP -from haxen_sdk.agent_registry import clear_current_agent, set_current_agent -from haxen_sdk.agent_server import AgentServer -from haxen_sdk.agent_workflow import AgentWorkflow -from haxen_sdk.client import HaxenClient -from haxen_sdk.dynamic_skills import DynamicMCPSkillManager -from haxen_sdk.execution_context import ExecutionContext, get_current_context -from haxen_sdk.did_manager import DIDManager -from haxen_sdk.vc_generator import VCGenerator -from haxen_sdk.mcp_client import MCPClientRegistry -from haxen_sdk.mcp_manager import MCPManager -from haxen_sdk.memory import MemoryClient, MemoryInterface -from haxen_sdk.memory_events import MemoryEventClient -from haxen_sdk.logger import log_debug, log_error, log_info, log_warn -from haxen_sdk.router import AgentRouter -from haxen_sdk.connection_manager import ConnectionManager -from haxen_sdk.types import AgentStatus, AIConfig, MemoryConfig -from haxen_sdk.multimodal_response import MultimodalResponse -from haxen_sdk.async_config import AsyncConfig -from haxen_sdk.async_execution_manager import AsyncExecutionManager -from haxen_sdk.pydantic_utils import convert_function_args, should_convert_args +from agentfield.agent_ai import AgentAI +from agentfield.agent_field_handler import AgentFieldHandler +from agentfield.agent_mcp import AgentMCP +from agentfield.agent_registry import clear_current_agent, set_current_agent +from agentfield.agent_server import AgentServer +from agentfield.agent_workflow import AgentWorkflow +from agentfield.client import AgentFieldClient +from agentfield.dynamic_skills import DynamicMCPSkillManager +from agentfield.execution_context import ExecutionContext, get_current_context +from agentfield.did_manager import DIDManager +from agentfield.vc_generator import VCGenerator +from agentfield.mcp_client import MCPClientRegistry +from agentfield.mcp_manager import MCPManager +from agentfield.memory import 
MemoryClient, MemoryInterface +from agentfield.memory_events import MemoryEventClient +from agentfield.logger import log_debug, log_error, log_info, log_warn +from agentfield.router import AgentRouter +from agentfield.connection_manager import ConnectionManager +from agentfield.types import AgentStatus, AIConfig, MemoryConfig +from agentfield.multimodal_response import MultimodalResponse +from agentfield.async_config import AsyncConfig +from agentfield.async_execution_manager import AsyncExecutionManager +from agentfield.pydantic_utils import convert_function_args, should_convert_args from fastapi import FastAPI, Request, HTTPException from pydantic import create_model, BaseModel, ValidationError @@ -294,15 +294,15 @@ def _resolve_callback_url(callback_url: Optional[str], port: int) -> str: class Agent(FastAPI): """ - Haxen Agent - FastAPI subclass for creating AI agent nodes. + AgentField Agent - FastAPI subclass for creating AI agent nodes. - The Agent class is the core component of the Haxen SDK that enables developers to create + The Agent class is the core component of the AgentField SDK that enables developers to create intelligent agent nodes. It inherits from FastAPI to provide HTTP endpoints and integrates - with the Haxen ecosystem for distributed AI workflows. + with the AgentField ecosystem for distributed AI workflows. 
Key Features: - Decorator-based reasoner and skill registration - - Cross-agent communication via the Haxen execution gateway + - Cross-agent communication via the AgentField execution gateway - Memory interface for persistent and session-based storage - MCP (Model Context Protocol) server integration - Automatic workflow tracking and DAG building @@ -310,12 +310,12 @@ class Agent(FastAPI): Example: ```python - from haxen_sdk import Agent + from agentfield import Agent # Create an agent instance app = Agent( node_id="my_agent", - haxen_server="http://localhost:8080" + agentfield_server="http://localhost:8080" ) # Define a reasoner (AI-powered function) @@ -341,7 +341,7 @@ def format_response(sentiment: str, confidence: float) -> str: def __init__( self, node_id: str, - haxen_server: str = "http://localhost:8080", + agentfield_server: str = "http://localhost:8080", version: str = "1.0.0", ai_config: Optional[AIConfig] = None, memory_config: Optional[MemoryConfig] = None, @@ -352,25 +352,25 @@ def __init__( **kwargs, ): """ - Initialize a new Haxen Agent instance. + Initialize a new AgentField Agent instance. Sets log level to DEBUG if dev_mode is True, else INFO. """ # Set logging level based on dev_mode - from haxen_sdk.logger import set_log_level + from agentfield.logger import set_log_level set_log_level("DEBUG" if dev_mode else "INFO") """ Creates a new agent node that can host reasoners (AI-powered functions) and skills - (deterministic functions) while integrating with the Haxen ecosystem for distributed + (deterministic functions) while integrating with the AgentField ecosystem for distributed AI workflows and cross-agent communication. Args: node_id (str): Unique identifier for this agent node. Used for routing and cross-agent communication. Should be descriptive and unique - within your Haxen ecosystem. - haxen_server (str, optional): URL of the Haxen server for registration and + within your AgentField ecosystem. 
+ agentfield_server (str, optional): URL of the AgentField server for registration and execution gateway. Defaults to "http://localhost:8080". version (str, optional): Version string for this agent. Used for compatibility checking and deployment tracking. Defaults to "1.0.0". @@ -382,7 +382,7 @@ def __init__( dev_mode (bool, optional): Enable development mode with verbose logging and debugging features. Defaults to False. async_config (AsyncConfig, optional): Configuration for async execution behavior. - callback_url (str, optional): Explicit callback URL for Haxen server to reach this agent. + callback_url (str, optional): Explicit callback URL for AgentField server to reach this agent. If not provided, will use AGENT_CALLBACK_URL environment variable, auto-detection for containers, or fallback to localhost. **kwargs: Additional keyword arguments passed to FastAPI constructor. @@ -395,7 +395,7 @@ def __init__( # Advanced configuration app = Agent( node_id="advanced_agent", - haxen_server="https://haxen.company.com", + agentfield_server="https://agentfield.company.com", version="2.1.0", ai_config=AIConfig( provider="openai", @@ -419,7 +419,7 @@ def __init__( super().__init__(**kwargs) self.node_id = node_id - self.haxen_server = haxen_server + self.agentfield_server = agentfield_server self.version = version self.reasoners = [] self.skills = [] @@ -431,7 +431,7 @@ def __init__( self._heartbeat_thread = None self._heartbeat_stop_event = threading.Event() self.dev_mode = dev_mode - self.haxen_connected = False + self.agentfield_connected = False self.auto_register = ( auto_register # Auto-register on first invocation (serverless mode) ) @@ -447,8 +447,10 @@ def __init__( # Initialize async configuration self.async_config = async_config or AsyncConfig.from_environment() - # Initialize HaxenClient with async configuration - self.client = HaxenClient(base_url=haxen_server, async_config=self.async_config) + # Initialize AgentFieldClient with async configuration + 
self.client = AgentFieldClient( + base_url=agentfield_server, async_config=self.async_config + ) self._current_execution_context: Optional[ExecutionContext] = None # Initialize async execution manager (will be lazily created when needed) @@ -481,13 +483,13 @@ def __init__( self.vc_generator: Optional[VCGenerator] = None self.did_enabled = False - # Add connection management for resilient Haxen server connectivity + # Add connection management for resilient AgentField server connectivity self.connection_manager: Optional[ConnectionManager] = None # Initialize handlers self.ai_handler = AgentAI(self) self.mcp_handler = AgentMCP(self) - self.haxen_handler = AgentHaxen(self) + self.agentfield_handler = AgentFieldHandler(self) self.workflow_handler = AgentWorkflow(self) self.server_handler = AgentServer(self) @@ -519,8 +521,8 @@ def __init__( # Initialize DID components self._initialize_did_system() - # Setup standard Haxen routes and memory event listeners - self.server_handler.setup_haxen_routes() + # Setup standard AgentField routes and memory event listeners + self.server_handler.setup_agentfield_routes() self._register_memory_event_listeners() # Register this agent instance for automatic workflow tracking @@ -528,7 +530,7 @@ def __init__( # Limit concurrent outbound calls to avoid overloading the local runtime. 
default_limit = max(1, min(self.async_config.connection_pool_size, 256)) - max_calls_env = os.getenv("HAXEN_AGENT_MAX_CONCURRENT_CALLS") + max_calls_env = os.getenv("AGENTFIELD_AGENT_MAX_CONCURRENT_CALLS") if max_calls_env: try: parsed_limit = int(max_calls_env) @@ -536,7 +538,7 @@ def __init__( except ValueError: self._max_concurrent_calls = default_limit log_warn( - f"Invalid HAXEN_AGENT_MAX_CONCURRENT_CALLS='{max_calls_env}', defaulting to {default_limit}" + f"Invalid AGENTFIELD_AGENT_MAX_CONCURRENT_CALLS='{max_calls_env}', defaulting to {default_limit}" ) else: self._max_concurrent_calls = default_limit @@ -553,7 +555,7 @@ def handle_serverless(self, event: dict) -> dict: and returns the result. Special Endpoints: - - /discover: Returns agent metadata for Haxen server registration + - /discover: Returns agent metadata for AgentField server registration - /execute: Executes reasoners and skills Args: @@ -569,7 +571,7 @@ def handle_serverless(self, event: dict) -> dict: Example: ```python # AWS Lambda handler with API Gateway - from haxen_sdk import Agent + from agentfield import Agent app = Agent("my_agent", auto_register=False) @@ -589,15 +591,15 @@ def lambda_handler(event, context): action = event.get("action", "") if path == "/discover" or action == "discover": - # Return agent metadata for Haxen server registration + # Return agent metadata for AgentField server registration return self._handle_discovery() - # Auto-register with Haxen if needed (for execution requests) - if self.auto_register and not self.haxen_connected: + # Auto-register with AgentField if needed (for execution requests) + if self.auto_register and not self.agentfield_connected: try: # Attempt registration (non-blocking) - self.haxen_handler._register_agent() - self.haxen_connected = True + self.agentfield_handler._register_agent() + self.agentfield_connected = True except Exception as e: if self.dev_mode: log_warn(f"Auto-registration failed: {e}") @@ -661,7 +663,7 @@ def 
_handle_discovery(self) -> dict: Handle discovery requests for serverless agent registration. Returns agent metadata including reasoners, skills, and configuration - for automatic registration with the Haxen server. + for automatic registration with the AgentField server. Returns: dict: Agent metadata for registration @@ -693,10 +695,10 @@ def _initialize_did_system(self): """Initialize DID and VC components.""" try: # Initialize DID Manager - self.did_manager = DIDManager(self.haxen_server, self.node_id) + self.did_manager = DIDManager(self.agentfield_server, self.node_id) # Initialize VC Generator - self.vc_generator = VCGenerator(self.haxen_server) + self.vc_generator = VCGenerator(self.agentfield_server) if self.dev_mode: log_debug("DID system initialized") @@ -711,7 +713,7 @@ def _register_memory_event_listeners(self): """Scans for methods decorated with @on_change and registers them as listeners.""" if not self.memory_event_client: self.memory_event_client = MemoryEventClient( - self.haxen_server, self._get_current_execution_context() + self.agentfield_server, self._get_current_execution_context() ) for name, method in inspect.getmembers(self, predicate=inspect.ismethod): @@ -828,7 +830,7 @@ def get_conversation_summary() -> dict: memory_client = MemoryClient(self.client, self._current_execution_context) if not self.memory_event_client: self.memory_event_client = MemoryEventClient( - self.haxen_server, self._get_current_execution_context() + self.agentfield_server, self._get_current_execution_context() ) return MemoryInterface(memory_client, self.memory_event_client) @@ -905,7 +907,7 @@ def _build_callback_discovery_payload(self) -> Optional[Dict[str, Any]]: return payload def _apply_discovery_response(self, payload: Optional[Dict[str, Any]]) -> None: - """Update agent networking state from Haxen discovery response.""" + """Update agent networking state from AgentField discovery response.""" if not payload: return @@ -925,7 +927,7 @@ def 
_apply_discovery_response(self, payload: Optional[Dict[str, Any]]) -> None: ) if resolved and resolved != self.base_url: - log_debug(f"Applying resolved callback URL from Haxen: {resolved}") + log_debug(f"Applying resolved callback URL from AgentField: {resolved}") self.base_url = resolved if isinstance(discovery_section, dict): @@ -1017,20 +1019,20 @@ def _register_mcp_servers_with_registry(self) -> None: if self.dev_mode: log_debug("MCP server registration disabled - old modules removed") - def _setup_haxen_routes(self): + def _setup_agentfield_routes(self): """Delegate to server handler for route setup""" - return self.server_handler.setup_haxen_routes() + return self.server_handler.setup_agentfield_routes() def reasoner(self, path: Optional[str] = None, name: Optional[str] = None): """ Decorator to register a reasoner function. A reasoner is an AI-powered function that takes input and produces structured output using LLMs. - It automatically handles input/output schema generation and integrates with the Haxen's AI capabilities. + It automatically handles input/output schema generation and integrates with the AgentField's AI capabilities. Args: path (str, optional): The API endpoint path for this reasoner. Defaults to /reasoners/{function_name}. - name (str, optional): Explicit Haxen registration ID. Defaults to the function name. + name (str, optional): Explicit AgentField registration ID. Defaults to the function name. 
""" def decorator(func: Callable) -> Callable: @@ -1067,7 +1069,7 @@ def decorator(func: Callable) -> Callable: async def endpoint(input_data: InputSchema, request: Request): import asyncio import time - from haxen_sdk.execution_context import ( + from agentfield.execution_context import ( set_execution_context, reset_execution_context, ) @@ -1252,7 +1254,7 @@ async def tracked_func(*args, **kwargs): if current_context: # We're in a context managed by the enhanced decorator system # Use the enhanced decorator's tracking mechanism - from haxen_sdk.decorators import _execute_with_tracking + from agentfield.decorators import _execute_with_tracking return await _execute_with_tracking(original_func, *args, **kwargs) else: @@ -1390,7 +1392,7 @@ def skill( - Generates input/output schemas from type hints - Creates FastAPI endpoints with proper validation - Integrates with workflow tracking and execution context - - Enables cross-agent communication via the Haxen execution gateway + - Enables cross-agent communication via the AgentField execution gateway - Provides access to execution context and memory system Args: @@ -1398,10 +1400,10 @@ def skill( Useful for grouping related functionality (e.g., ["database", "user_management"]). path (str, optional): Custom API endpoint path for this skill. Defaults to "/skills/{function_name}". - name (str, optional): Explicit Haxen registration ID. Defaults to the function name. + name (str, optional): Explicit AgentField registration ID. Defaults to the function name. Returns: - Callable: The decorated function with enhanced Haxen integration. + Callable: The decorated function with enhanced AgentField integration. Example: ```python @@ -1944,10 +1946,10 @@ async def ai_with_multimodal( # pragma: no cover - relies on external multimoda async def call(self, target: str, *args, **kwargs) -> dict: """ - Initiates a cross-agent call to another reasoner or skill via the Haxen execution gateway. 
+ Initiates a cross-agent call to another reasoner or skill via the AgentField execution gateway. This method allows agents to seamlessly communicate and utilize reasoners/skills - deployed on other agent nodes within the Haxen ecosystem. It properly propagates + deployed on other agent nodes within the AgentField ecosystem. It properly propagates workflow tracking headers and maintains execution context for DAG building. **Return Type**: Always returns JSON/dict objects, similar to calling any REST API. @@ -2050,8 +2052,8 @@ async def call(self, target: str, *args, **kwargs) -> dict: # πŸ”§ DEBUG: Validate context before creating child if self.dev_mode: - from haxen_sdk.execution_context import get_current_context - from haxen_sdk.logger import log_debug + from agentfield.execution_context import get_current_context + from agentfield.logger import log_debug log_debug(f"πŸ” CALL_DEBUG: Making cross-agent call to {target}") log_debug(f" Current execution_id: {current_context.execution_id}") @@ -2065,25 +2067,25 @@ async def call(self, target: str, *args, **kwargs) -> dict: # Prepare headers with proper workflow tracking headers = current_context.to_headers() - # DISABLED: Same-agent call detection - Force all calls through Haxen server - # This ensures all app.call() requests go through the Haxen server for proper + # DISABLED: Same-agent call detection - Force all calls through AgentField server + # This ensures all app.call() requests go through the AgentField server for proper # workflow tracking, execution context, and distributed processing - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug log_debug(f"Cross-agent call to: {target}") - # Check if Haxen server is available for cross-agent calls - if not self.haxen_connected: - from haxen_sdk.logger import log_warn + # Check if AgentField server is available for cross-agent calls + if not self.agentfield_connected: + from agentfield.logger import log_warn log_warn( - f"Haxen 
server unavailable - cannot make cross-agent call to {target}" + f"AgentField server unavailable - cannot make cross-agent call to {target}" ) raise Exception( - f"Cross-agent call to {target} failed: Haxen server unavailable. Agent is running in local mode." + f"Cross-agent call to {target} failed: AgentField server unavailable. Agent is running in local mode." ) - # Use the enhanced HaxenClient to make the call via execution gateway + # Use the enhanced AgentFieldClient to make the call via execution gateway try: async with self._limit_outbound_calls(): # Check for non-serializable parameters and convert them @@ -2128,7 +2130,8 @@ async def call(self, target: str, *args, **kwargs) -> dict: # Check if async execution is enabled and available use_async_execution = ( - self.async_config.enable_async_execution and self.haxen_connected + self.async_config.enable_async_execution + and self.agentfield_connected ) if use_async_execution: @@ -2237,7 +2240,7 @@ async def _get_async_execution_manager(self) -> AsyncExecutionManager: if self._async_execution_manager is None: # Create async execution manager with the same base URL as the client self._async_execution_manager = AsyncExecutionManager( - base_url=self.haxen_server, config=self.async_config + base_url=self.agentfield_server, config=self.async_config ) # Start the manager await self._async_execution_manager.start() @@ -2268,18 +2271,18 @@ async def _cleanup_async_resources(self) -> None: try: await self.client.aclose() if self.dev_mode: - log_debug("HaxenClient resources closed") + log_debug("AgentFieldClient resources closed") except Exception as e: if self.dev_mode: - log_debug(f"Error closing HaxenClient resources: {e}") + log_debug(f"Error closing AgentFieldClient resources: {e}") def note(self, message: str, tags: List[str] = None) -> None: """ Add a note to the current execution for debugging and tracking purposes. 
- This method sends a note to the Haxen server asynchronously without blocking + This method sends a note to the AgentField server asynchronously without blocking the current execution. The note is automatically associated with the current - execution context and can be viewed in the Haxen UI for debugging and monitoring. + execution context and can be viewed in the AgentField UI for debugging and monitoring. Args: message (str): The note message to log @@ -2336,7 +2339,7 @@ async def _send_note(): ui_api_base = self.client.api_base.replace("/api/v1", "/api/ui/v1") if self.dev_mode: - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug log_debug( f"NOTE DEBUG: Original api_base: {self.client.api_base}" @@ -2355,7 +2358,7 @@ async def _send_note(): headers=headers, ) as response: if self.dev_mode: - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug response_text = await response.text() log_debug( @@ -2381,7 +2384,7 @@ async def _send_note(): ) if self.dev_mode: - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug log_debug( f"NOTE DEBUG (requests): Original api_base: {self.client.api_base}" @@ -2400,7 +2403,7 @@ async def _send_note(): timeout=5.0, ) if self.dev_mode: - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug log_debug( f"NOTE DEBUG (requests): Response status: {response.status_code}" @@ -2418,14 +2421,14 @@ async def _send_note(): ) except Exception as e: if self.dev_mode: - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug log_debug(f"Note request failed: {type(e).__name__}: {e}") except Exception as e: # Silently handle errors to avoid interrupting main workflow if self.dev_mode: - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug log_debug(f"Failed to send note: {type(e).__name__}: {e}") @@ -2462,7 +2465,7 @@ def _get_current_execution_context(self) -> ExecutionContext: 
ExecutionContext: Current or new execution context """ # Check thread-local context first (most reliable) - from haxen_sdk.execution_context import get_current_context + from agentfield.execution_context import get_current_context thread_local_context = get_current_context() @@ -2623,17 +2626,17 @@ def serve( # pragma: no cover - requires full server runtime integration **kwargs, ): """ - Start the agent node server with intelligent port management and Haxen integration. + Start the agent node server with intelligent port management and AgentField integration. This method launches the agent as a FastAPI server that can receive reasoner and skill - requests from other agents via the Haxen execution gateway. It handles automatic - registration with the Haxen server, heartbeat management, and graceful shutdown. + requests from other agents via the AgentField execution gateway. It handles automatic + registration with the AgentField server, heartbeat management, and graceful shutdown. The server provides: - RESTful endpoints for all registered reasoners and skills - Health check endpoints for monitoring - MCP server status and management endpoints - - Automatic Haxen server registration and heartbeat + - Automatic AgentField server registration and heartbeat - Graceful shutdown with proper cleanup Args: @@ -2647,7 +2650,7 @@ def serve( # pragma: no cover - requires full server runtime integration - Auto-reload on code changes (if supported) - Detailed error messages - MCP server debugging information - heartbeat_interval (int): The interval in seconds for sending heartbeats to the Haxen server. + heartbeat_interval (int): The interval in seconds for sending heartbeats to the AgentField server. Defaults to 2 seconds. Lower values provide faster failure detection but increase network overhead. 
auto_port (bool): If True, automatically find an available port starting from the @@ -2708,22 +2711,22 @@ def get_status() -> dict: - `GET /docs`: Interactive API documentation (Swagger UI) - `GET /redoc`: Alternative API documentation - Integration with Haxen: - - Automatically registers with Haxen server on startup + Integration with AgentField: + - Automatically registers with AgentField server on startup - Sends periodic heartbeats to maintain connection - - Receives execution requests via Haxen's routing system + - Receives execution requests via AgentField's routing system - Participates in workflow tracking and DAG building - Handles cross-agent communication seamlessly Lifecycle: 1. Server initialization and route setup 2. MCP server startup (if configured) - 3. Haxen server registration + 3. AgentField server registration 4. Heartbeat loop starts 5. Ready to receive requests 6. Graceful shutdown on SIGINT/SIGTERM 7. MCP server cleanup - 8. Haxen server deregistration + 8. AgentField server deregistration Note: - The server runs indefinitely until interrupted (Ctrl+C) diff --git a/sdk/python/haxen_sdk/agent_ai.py b/sdk/python/agentfield/agent_ai.py similarity index 98% rename from sdk/python/haxen_sdk/agent_ai.py rename to sdk/python/agentfield/agent_ai.py index 92f5ccdb..96f2b961 100644 --- a/sdk/python/haxen_sdk/agent_ai.py +++ b/sdk/python/agentfield/agent_ai.py @@ -4,9 +4,9 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union import requests -from haxen_sdk.agent_utils import AgentUtils -from haxen_sdk.logger import log_debug, log_error, log_warn -from haxen_sdk.rate_limiter import StatelessRateLimiter +from agentfield.agent_utils import AgentUtils +from agentfield.logger import log_debug, log_error, log_warn +from agentfield.rate_limiter import StatelessRateLimiter from httpx import HTTPStatusError from pydantic import BaseModel @@ -32,7 +32,7 @@ class OpenAI: class AgentAI: - """AI/LLM Integration functionality for Haxen Agent""" + 
"""AI/LLM Integration functionality for AgentField Agent""" def __init__(self, agent_instance): """ @@ -450,7 +450,7 @@ async def _execute_with_fallbacks(): def _process_multimodal_args(self, args: tuple) -> List[Dict[str, Any]]: """Process multimodal arguments into LiteLLM-compatible message format""" - from haxen_sdk.multimodal import Audio, File, Image, Text + from agentfield.multimodal import Audio, File, Image, Text messages = [] user_content = [] @@ -824,7 +824,7 @@ async def _generate_tts_audio( Generate audio using LiteLLM's speech function for TTS models. """ import litellm - from haxen_sdk.multimodal_response import ( + from agentfield.multimodal_response import ( AudioOutput, MultimodalResponse, ) @@ -909,7 +909,7 @@ async def _generate_openai_direct_audio( import tempfile from pathlib import Path - from haxen_sdk.multimodal_response import AudioOutput, MultimodalResponse + from agentfield.multimodal_response import AudioOutput, MultimodalResponse from openai import OpenAI # Combine all text inputs @@ -1059,7 +1059,7 @@ async def ai_with_vision( response = await litellm.aimage_generation(**image_params) # Import multimodal response detection - from haxen_sdk.multimodal_response import detect_multimodal_response + from agentfield.multimodal_response import detect_multimodal_response # Detect and wrap multimodal content return detect_multimodal_response(response) diff --git a/sdk/python/haxen_sdk/agent_haxen.py b/sdk/python/agentfield/agent_field_handler.py similarity index 86% rename from sdk/python/haxen_sdk/agent_haxen.py rename to sdk/python/agentfield/agent_field_handler.py index e33d348e..8d4205df 100644 --- a/sdk/python/haxen_sdk/agent_haxen.py +++ b/sdk/python/agentfield/agent_field_handler.py @@ -5,8 +5,8 @@ from datetime import datetime import requests -from haxen_sdk.types import AgentStatus, HeartbeatData -from haxen_sdk.logger import ( +from agentfield.types import AgentStatus, HeartbeatData +from agentfield.logger import ( log_heartbeat, 
log_debug, log_warn, @@ -17,12 +17,12 @@ ) -class AgentHaxen: +class AgentFieldHandler: """ - Haxen Server Communication handler for Agent class. + AgentField Server Communication handler for Agent class. - This class encapsulates all Haxen server communication functionality including: - - Agent registration with Haxen server + This class encapsulates all AgentField server communication functionality including: + - Agent registration with AgentField server - Heartbeat management (both simple and enhanced) - Fast lifecycle management - Graceful shutdown notifications @@ -31,17 +31,17 @@ class AgentHaxen: def __init__(self, agent_instance): """ - Initialize the Haxen handler with a reference to the agent instance. + Initialize the AgentField handler with a reference to the agent instance. Args: agent_instance: The Agent instance this handler belongs to """ self.agent = agent_instance - async def register_with_haxen_server(self, port: int): - """Register this agent node with Haxen server""" + async def register_with_agentfield_server(self, port: int): + """Register this agent node with AgentField server""" # Import the callback URL resolution function - from haxen_sdk.agent import ( + from agentfield.agent import ( _build_callback_candidates, _resolve_callback_url, _is_running_in_container, @@ -118,7 +118,7 @@ async def register_with_haxen_server(self, port: int): try: log_debug( - f"Attempting to register with Haxen server at {self.agent.haxen_server}" + f"Attempting to register with AgentField server at {self.agent.agentfield_server}" ) discovery_payload = self.agent._build_callback_discovery_payload() @@ -132,10 +132,12 @@ async def register_with_haxen_server(self, port: int): if success: if payload: self.agent._apply_discovery_response(payload) - log_success(f"Registered node '{self.agent.node_id}' with Haxen server") - self.agent.haxen_connected = True + log_success( + f"Registered node '{self.agent.node_id}' with AgentField server" + ) + 
self.agent.agentfield_connected = True - # Attempt DID registration after successful Haxen registration + # Attempt DID registration after successful AgentField registration if self.agent.did_manager: did_success = self.agent._register_agent_with_did() if not did_success and self.agent.dev_mode: @@ -144,18 +146,18 @@ async def register_with_haxen_server(self, port: int): ) else: log_error("Registration failed") - self.agent.haxen_connected = False + self.agent.agentfield_connected = False except Exception as e: - self.agent.haxen_connected = False + self.agent.agentfield_connected = False if self.agent.dev_mode: - log_warn(f"Haxen server not available: {e}") + log_warn(f"AgentField server not available: {e}") log_setup("Running in development mode - agent will work standalone") log_info( - f"To connect to Haxen server, start it at {self.agent.haxen_server}" + f"To connect to AgentField server, start it at {self.agent.agentfield_server}" ) else: - log_error(f"Failed to register with Haxen server: {e}") + log_error(f"Failed to register with AgentField server: {e}") if ( isinstance(e, requests.exceptions.RequestException) and e.response is not None @@ -165,13 +167,13 @@ async def register_with_haxen_server(self, port: int): raise def send_heartbeat(self): - """Send heartbeat to Haxen server""" - if not self.agent.haxen_connected: - return # Skip heartbeat if not connected to Haxen + """Send heartbeat to AgentField server""" + if not self.agent.agentfield_connected: + return # Skip heartbeat if not connected to AgentField try: response = requests.post( - f"{self.agent.haxen_server}/api/v1/nodes/{self.agent.node_id}/heartbeat", + f"{self.agent.agentfield_server}/api/v1/nodes/{self.agent.node_id}/heartbeat", headers={"Content-Type": "application/json"}, timeout=5, ) @@ -188,8 +190,10 @@ def heartbeat_worker( self, interval: int = 30 ): # pragma: no cover - long-running thread loop """Background worker that sends periodic heartbeats""" - if not self.agent.haxen_connected: 
- log_heartbeat("Heartbeat worker skipped - not connected to Haxen server") + if not self.agent.agentfield_connected: + log_heartbeat( + "Heartbeat worker skipped - not connected to AgentField server" + ) return log_heartbeat(f"Starting heartbeat worker (interval: {interval}s)") @@ -199,8 +203,8 @@ def heartbeat_worker( def start_heartbeat(self, interval: int = 30): """Start the heartbeat background thread""" - if not self.agent.haxen_connected: - return # Skip heartbeat if not connected to Haxen + if not self.agent.agentfield_connected: + return # Skip heartbeat if not connected to AgentField if ( self.agent._heartbeat_thread is None @@ -226,7 +230,7 @@ async def send_enhanced_heartbeat(self) -> bool: Returns: True if heartbeat was successful, False otherwise """ - if not self.agent.haxen_connected: + if not self.agent.agentfield_connected: return False try: @@ -259,12 +263,12 @@ async def send_enhanced_heartbeat(self) -> bool: async def notify_shutdown(self) -> bool: """ - Notify Haxen server of graceful shutdown. + Notify AgentField server of graceful shutdown. Returns: True if notification was successful, False otherwise """ - if not self.agent.haxen_connected: + if not self.agent.agentfield_connected: return False try: @@ -285,12 +289,12 @@ def setup_fast_lifecycle_signal_handlers( """ Setup signal handler for fast lifecycle status while allowing uvicorn to perform graceful shutdown. - - Only intercepts SIGTERM to mark the agent offline and notify Haxen immediately. + - Only intercepts SIGTERM to mark the agent offline and notify AgentField immediately. - Leaves SIGINT (Ctrl+C) to uvicorn so its shutdown hooks run and resources are cleaned up. 
""" def signal_handler(signum: int, frame) -> None: - """Handle SIGTERM: mark offline, notify Haxen, then re-emit the signal for default handling.""" + """Handle SIGTERM: mark offline, notify AgentField, then re-emit the signal for default handling.""" signal_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT" if self.agent.dev_mode: @@ -302,7 +306,7 @@ def signal_handler(signum: int, frame) -> None: self.agent._shutdown_requested = True self.agent._current_status = AgentStatus.OFFLINE - # Best-effort immediate notification to Haxen + # Best-effort immediate notification to AgentField try: success = self.agent.client.notify_graceful_shutdown_sync( self.agent.node_id @@ -347,7 +351,7 @@ async def register_with_fast_lifecycle( Returns: True if registration was successful, False otherwise """ - from haxen_sdk.agent import _build_callback_candidates, _resolve_callback_url + from agentfield.agent import _build_callback_candidates, _resolve_callback_url if not self.agent.base_url: self.agent.callback_candidates = _build_callback_candidates( @@ -404,7 +408,7 @@ async def register_with_fast_lifecycle( try: if self.agent.dev_mode: log_info( - f"Fast registration with Haxen server at {self.agent.haxen_server}" + f"Fast registration with AgentField server at {self.agent.agentfield_server}" ) log_info(f"Using callback URL: {self.agent.base_url}") @@ -427,9 +431,9 @@ async def register_with_fast_lifecycle( log_success( f"Fast registration successful - Status: {AgentStatus.STARTING.value}" ) - self.agent.haxen_connected = True + self.agent.agentfield_connected = True - # Attempt DID registration after successful Haxen registration + # Attempt DID registration after successful AgentField registration if self.agent.did_manager: did_success = self.agent._register_agent_with_did() if not did_success and self.agent.dev_mode: @@ -441,11 +445,11 @@ async def register_with_fast_lifecycle( else: if self.agent.dev_mode: log_error("Fast registration failed") - 
self.agent.haxen_connected = False + self.agent.agentfield_connected = False return False except Exception as e: - self.agent.haxen_connected = False + self.agent.agentfield_connected = False if self.agent.dev_mode: log_warn(f"Fast registration error: {e}") return False diff --git a/sdk/python/haxen_sdk/agent_mcp.py b/sdk/python/agentfield/agent_mcp.py similarity index 96% rename from sdk/python/haxen_sdk/agent_mcp.py rename to sdk/python/agentfield/agent_mcp.py index d1a9bbf8..552afe74 100644 --- a/sdk/python/haxen_sdk/agent_mcp.py +++ b/sdk/python/agentfield/agent_mcp.py @@ -2,13 +2,13 @@ from datetime import datetime from typing import Any, Dict, List, Optional -from haxen_sdk.agent_utils import AgentUtils -from haxen_sdk.dynamic_skills import DynamicMCPSkillManager -from haxen_sdk.execution_context import ExecutionContext -from haxen_sdk.logger import log_debug, log_error, log_info, log_warn -from haxen_sdk.mcp_client import MCPClientRegistry -from haxen_sdk.mcp_manager import MCPManager -from haxen_sdk.types import AgentStatus, MCPServerHealth +from agentfield.agent_utils import AgentUtils +from agentfield.dynamic_skills import DynamicMCPSkillManager +from agentfield.execution_context import ExecutionContext +from agentfield.logger import log_debug, log_error, log_info, log_warn +from agentfield.mcp_client import MCPClientRegistry +from agentfield.mcp_manager import MCPManager +from agentfield.types import AgentStatus, MCPServerHealth from fastapi import Request @@ -292,7 +292,7 @@ async def mcp_skill_function(**kwargs): # Create FastAPI endpoint @self.agent.post(endpoint_path, response_model=dict) async def mcp_skill_endpoint(input_data: InputSchema, request: Request): - from haxen_sdk.execution_context import ExecutionContext + from agentfield.execution_context import ExecutionContext # Extract execution context from request headers execution_context = ExecutionContext.from_request( @@ -324,7 +324,7 @@ def _create_and_register_mcp_skill( self, server_alias: 
str, tool: Dict[str, Any] ) -> None: """ - Create and register a single MCP tool as a Haxen skill. + Create and register a single MCP tool as a AgentField skill. Args: server_alias: The alias of the MCP server @@ -372,7 +372,7 @@ async def mcp_skill_function( This is an auto-generated skill that wraps the '{tool_name}' tool from the '{server_alias}' MCP server. Args: - execution_context (ExecutionContext, optional): Haxen execution context for workflow tracking + execution_context (ExecutionContext, optional): AgentField execution context for workflow tracking **kwargs: Arguments to pass to the MCP tool Returns: @@ -520,7 +520,7 @@ async def _background_mcp_initialization(self) -> None: self.agent._mcp_initialization_complete = True # Send status update heartbeat - await self.agent.haxen_handler.send_enhanced_heartbeat() + await self.agent.agentfield_handler.send_enhanced_heartbeat() if self.agent.dev_mode: log_info( @@ -531,4 +531,4 @@ async def _background_mcp_initialization(self) -> None: if self.agent.dev_mode: log_error(f"Background MCP initialization error: {e}") self.agent._current_status = AgentStatus.DEGRADED - await self.agent.haxen_handler.send_enhanced_heartbeat() + await self.agent.agentfield_handler.send_enhanced_heartbeat() diff --git a/sdk/python/haxen_sdk/agent_registry.py b/sdk/python/agentfield/agent_registry.py similarity index 100% rename from sdk/python/haxen_sdk/agent_registry.py rename to sdk/python/agentfield/agent_registry.py diff --git a/sdk/python/haxen_sdk/agent_server.py b/sdk/python/agentfield/agent_server.py similarity index 94% rename from sdk/python/haxen_sdk/agent_server.py rename to sdk/python/agentfield/agent_server.py index f499eeab..b7614078 100644 --- a/sdk/python/haxen_sdk/agent_server.py +++ b/sdk/python/agentfield/agent_server.py @@ -6,15 +6,15 @@ from typing import Optional import uvicorn -from haxen_sdk.agent_utils import AgentUtils -from haxen_sdk.logger import log_debug, log_error, log_info, log_success, log_warn -from 
haxen_sdk.utils import get_free_port +from agentfield.agent_utils import AgentUtils +from agentfield.logger import log_debug, log_error, log_info, log_success, log_warn +from agentfield.utils import get_free_port from fastapi import Request from fastapi.routing import APIRoute class AgentServer: - """Server management functionality for Haxen Agent""" + """Server management functionality for AgentField Agent""" def __init__(self, agent_instance): """ @@ -25,8 +25,8 @@ def __init__(self, agent_instance): """ self.agent = agent_instance - def setup_haxen_routes(self): - """Setup standard routes that Haxen server expects""" + def setup_agentfield_routes(self): + """Setup standard routes that AgentField server expects""" @self.agent.get("/health") async def health(): @@ -104,7 +104,7 @@ async def shutdown_agent(request: Request): """ Graceful shutdown endpoint for the agent. - This endpoint allows the Haxen server to request a graceful shutdown + This endpoint allows the AgentField server to request a graceful shutdown instead of using process signals. 
""" try: @@ -123,12 +123,12 @@ async def shutdown_agent(request: Request): ) # Set shutdown status - from haxen_sdk.agent import AgentStatus + from agentfield.agent import AgentStatus self.agent._shutdown_requested = True self.agent._current_status = AgentStatus.OFFLINE - # Notify Haxen server of shutdown initiation + # Notify AgentField server of shutdown initiation try: success = self.agent.client.notify_graceful_shutdown_sync( self.agent.node_id @@ -393,7 +393,7 @@ async def restart_mcp_server(alias: str): @self.agent.get("/health/mcp") async def mcp_health(): - """Get MCP health information in the format expected by Haxen server""" + """Get MCP health information in the format expected by AgentField server""" if not self.agent.mcp_manager: # Return empty response when MCP manager is not available return { @@ -499,7 +499,7 @@ async def mcp_health(): @self.agent.post("/mcp/servers/{alias}/restart") async def restart_mcp_server_alt(alias: str): - """Alternative restart endpoint for Haxen server compatibility""" + """Alternative restart endpoint for AgentField server compatibility""" return await restart_mcp_server(alias) @self.agent.get("/mcp/servers/{alias}/tools") @@ -562,8 +562,11 @@ async def _graceful_shutdown(self, timeout_seconds: int = 30): # Stop heartbeat try: - if hasattr(self.agent, "haxen_handler") and self.agent.haxen_handler: - self.agent.haxen_handler.stop_heartbeat() + if ( + hasattr(self.agent, "agentfield_handler") + and self.agent.agentfield_handler + ): + self.agent.agentfield_handler.stop_heartbeat() if self.agent.dev_mode: log_debug("Heartbeat stopped") except Exception as e: @@ -572,7 +575,7 @@ async def _graceful_shutdown(self, timeout_seconds: int = 30): # Clear agent registry try: - from haxen_sdk.agent_registry import clear_current_agent + from agentfield.agent_registry import clear_current_agent clear_current_agent() except Exception as e: @@ -807,10 +810,10 @@ def serve( """ Start the agent node server with intelligent port management 
and production-ready configuration. - This method implements smart port resolution that seamlessly works with Haxen CLI + This method implements smart port resolution that seamlessly works with AgentField CLI or standalone execution. The port selection priority is: 1. Explicit port parameter (highest priority) - 2. PORT environment variable (Haxen CLI integration) + 2. PORT environment variable (AgentField CLI integration) 3. auto_port=True: find free port automatically 4. Default fallback with availability check @@ -819,7 +822,7 @@ def serve( If specified, this takes highest priority. host (str): The host address for the agent server. Defaults to "0.0.0.0". dev (bool): If True, enables development mode features (e.g., hot reload, debug UI). - heartbeat_interval (int): The interval in seconds for sending heartbeats to the Haxen server. + heartbeat_interval (int): The interval in seconds for sending heartbeats to the AgentField server. Defaults to 2 seconds (fast detection architecture). auto_port (bool): If True, automatically find an available port. Defaults to False. workers (int, optional): Number of worker processes for production. If None, uses single process. 
@@ -831,27 +834,27 @@ def serve( """ # Smart port resolution with priority order if port is None: - # Check for Haxen CLI integration via environment variable + # Check for AgentField CLI integration via environment variable env_port = os.getenv("PORT") if env_port and env_port.isdigit(): suggested_port = int(env_port) if AgentUtils.is_port_available(suggested_port): port = suggested_port if self.agent.dev_mode: - log_debug(f"Using port from Haxen CLI: {port}") + log_debug(f"Using port from AgentField CLI: {port}") else: - # Haxen CLI suggested port is taken, find next available + # AgentField CLI suggested port is taken, find next available try: port = get_free_port(start_port=suggested_port) if self.agent.dev_mode: log_debug( - f"Haxen CLI port {suggested_port} taken, using {port}" + f"AgentField CLI port {suggested_port} taken, using {port}" ) except RuntimeError: port = get_free_port() # Fallback to default range if self.agent.dev_mode: log_debug(f"Using fallback port: {port}") - elif auto_port or os.getenv("HAXEN_AUTO_PORT") == "true": + elif auto_port or os.getenv("AGENTFIELD_AUTO_PORT") == "true": # Auto-port mode: find any available port try: port = get_free_port() @@ -908,7 +911,7 @@ def serve( log_debug(f"Using explicit callback URL: {self.agent.base_url}") # Start heartbeat worker - self.agent.haxen_handler.start_heartbeat(heartbeat_interval) + self.agent.agentfield_handler.start_heartbeat(heartbeat_interval) log_info(f"Agent server running at http://{host}:{port}") log_info("Available endpoints:") @@ -920,19 +923,22 @@ def serve( log_debug(f"Endpoint registered: {method} {route.path}") # Setup fast lifecycle signal handlers - self.agent.haxen_handler.setup_fast_lifecycle_signal_handlers() + self.agent.agentfield_handler.setup_fast_lifecycle_signal_handlers() # Add startup event handler for resilient lifecycle @self.agent.on_event("startup") async def startup_resilient_lifecycle(): - """Resilient lifecycle startup: connection manager handles Haxen server 
connectivity""" + """Resilient lifecycle startup: connection manager handles AgentField server connectivity""" # Initialize connection manager - from haxen_sdk.connection_manager import ConnectionConfig, ConnectionManager + from agentfield.connection_manager import ( + ConnectionConfig, + ConnectionManager, + ) # Configure connection manager with reasonable retry interval config = ConnectionConfig( - retry_interval=10.0, # Check every 10 seconds for Haxen server + retry_interval=10.0, # Check every 10 seconds for AgentField server health_check_interval=30.0, connection_timeout=10.0, ) @@ -942,11 +948,13 @@ async def startup_resilient_lifecycle(): # Set up callbacks for connection state changes def on_connected(): if self.agent.dev_mode: - log_info("Connected to Haxen server - full functionality available") + log_info( + "Connected to AgentField server - full functionality available" + ) # Kick a heartbeat immediately so the control plane renews the lease try: asyncio.create_task( - self.agent.haxen_handler.send_enhanced_heartbeat() + self.agent.agentfield_handler.send_enhanced_heartbeat() ) except RuntimeError: # Event loop not running; the heartbeat worker will recover shortly @@ -957,14 +965,14 @@ def on_connected(): or self.agent._heartbeat_task.done() ): self.agent._heartbeat_task = asyncio.create_task( - self.agent.haxen_handler.enhanced_heartbeat_loop( + self.agent.agentfield_handler.enhanced_heartbeat_loop( heartbeat_interval ) ) def on_disconnected(): if self.agent.dev_mode: - log_warn("Haxen server disconnected - running in local mode") + log_warn("AgentField server disconnected - running in local mode") # Cancel heartbeat task when disconnected if ( hasattr(self.agent, "_heartbeat_task") @@ -979,7 +987,7 @@ def on_disconnected(): connected = await self.agent.connection_manager.start() # Always connect memory event client and start MCP initialization - # These work independently of Haxen server connection + # These work independently of AgentField server 
connection if self.agent.memory_event_client: try: await self.agent.memory_event_client.connect() @@ -992,11 +1000,11 @@ def on_disconnected(): if connected: if self.agent.dev_mode: - log_info("Agent started with Haxen server connection") + log_info("Agent started with AgentField server connection") else: if self.agent.dev_mode: log_info( - "Agent started in local mode - will connect to Haxen server when available" + "Agent started in local mode - will connect to AgentField server when available" ) # Add shutdown event handler for cleanup @@ -1034,10 +1042,10 @@ async def shutdown_cleanup(): await self.agent.client.aclose() except Exception as e: if self.agent.dev_mode: - log_error(f"Haxen client shutdown error: {e}") + log_error(f"AgentField client shutdown error: {e}") # Clear agent from thread-local storage during shutdown - from haxen_sdk.agent_registry import clear_current_agent + from agentfield.agent_registry import clear_current_agent clear_current_agent() @@ -1147,7 +1155,7 @@ async def shutdown_cleanup(): log_info("Agent shutdown initiated...") # Stop heartbeat worker - self.agent.haxen_handler.stop_heartbeat() + self.agent.agentfield_handler.stop_heartbeat() # Stop all MCP servers self.agent.mcp_handler._cleanup_mcp_servers() diff --git a/sdk/python/haxen_sdk/agent_utils.py b/sdk/python/agentfield/agent_utils.py similarity index 100% rename from sdk/python/haxen_sdk/agent_utils.py rename to sdk/python/agentfield/agent_utils.py diff --git a/sdk/python/haxen_sdk/agent_workflow.py b/sdk/python/agentfield/agent_workflow.py similarity index 97% rename from sdk/python/haxen_sdk/agent_workflow.py rename to sdk/python/agentfield/agent_workflow.py index 606c915d..f689ff50 100644 --- a/sdk/python/haxen_sdk/agent_workflow.py +++ b/sdk/python/agentfield/agent_workflow.py @@ -2,7 +2,7 @@ import time from typing import Any, Callable, Dict, Optional -from haxen_sdk.logger import log_debug, log_warn +from agentfield.logger import log_debug, log_warn from 
.execution_context import ( ExecutionContext, @@ -13,7 +13,7 @@ class AgentWorkflow: - """Workflow helper that keeps local execution metadata in sync with Haxen.""" + """Workflow helper that keeps local execution metadata in sync with AgentField.""" def __init__(self, agent_instance): self.agent = agent_instance @@ -174,10 +174,10 @@ async def notify_call_error( await self.fire_and_forget_update(payload) async def fire_and_forget_update(self, payload: Dict[str, Any]) -> None: - """Send workflow update to Haxen when a client is available.""" + """Send workflow update to AgentField when a client is available.""" client = getattr(self.agent, "client", None) - base_url = getattr(self.agent, "haxen_server", None) + base_url = getattr(self.agent, "agentfield_server", None) if not client or not hasattr(client, "_async_request") or not base_url: return @@ -224,7 +224,7 @@ async def _ensure_execution_registered( return context client = getattr(self.agent, "client", None) - base_url = getattr(self.agent, "haxen_server", None) + base_url = getattr(self.agent, "agentfield_server", None) if not client or not hasattr(client, "_async_request") or not base_url: context.registered = True return context diff --git a/sdk/python/haxen_sdk/async_config.py b/sdk/python/agentfield/async_config.py similarity index 97% rename from sdk/python/haxen_sdk/async_config.py rename to sdk/python/agentfield/async_config.py index 401e3e0b..e45d2623 100644 --- a/sdk/python/haxen_sdk/async_config.py +++ b/sdk/python/agentfield/async_config.py @@ -1,5 +1,5 @@ """ -Async execution configuration for the Haxen SDK. +Async execution configuration for the AgentField SDK. This module provides configuration classes for managing async execution behavior, polling strategies, resource limits, and performance tuning parameters. @@ -91,10 +91,10 @@ def from_environment(cls) -> "AsyncConfig": """ Create AsyncConfig from environment variables. 
- Environment variables use the prefix HAXEN_ASYNC_ followed by the + Environment variables use the prefix AGENTFIELD_ASYNC_ followed by the uppercase parameter name. For example: - - HAXEN_ASYNC_MAX_EXECUTION_TIMEOUT=1800 - - HAXEN_ASYNC_BATCH_SIZE=50 + - AGENTFIELD_ASYNC_MAX_EXECUTION_TIMEOUT=1800 + - AGENTFIELD_ASYNC_BATCH_SIZE=50 Returns: AsyncConfig instance with values from environment variables @@ -103,7 +103,7 @@ def from_environment(cls) -> "AsyncConfig": # Helper function to get env var with type conversion def get_env_var(name: str, default_value, converter=None): - env_name = f"HAXEN_ASYNC_{name.upper()}" + env_name = f"AGENTFIELD_ASYNC_{name.upper()}" value = os.getenv(env_name) if value is None: return default_value diff --git a/sdk/python/haxen_sdk/async_execution_manager.py b/sdk/python/agentfield/async_execution_manager.py similarity index 99% rename from sdk/python/haxen_sdk/async_execution_manager.py rename to sdk/python/agentfield/async_execution_manager.py index 3e6ede3a..769ec80f 100644 --- a/sdk/python/haxen_sdk/async_execution_manager.py +++ b/sdk/python/agentfield/async_execution_manager.py @@ -1,5 +1,5 @@ """ -Async Execution Manager for the Haxen SDK. +Async Execution Manager for the AgentField SDK. This module provides the central orchestrator for managing hundreds of concurrent async executions with intelligent polling, resource management, and comprehensive @@ -139,7 +139,7 @@ def __init__( Initialize the async execution manager. 
Args: - base_url: Base URL for the haxen server + base_url: Base URL for the af server config: AsyncConfig instance for configuration parameters connection_manager: Optional ConnectionManager instance result_cache: Optional ResultCache instance diff --git a/sdk/python/haxen_sdk/client.py b/sdk/python/agentfield/client.py similarity index 99% rename from sdk/python/haxen_sdk/client.py rename to sdk/python/agentfield/client.py index 190f1f18..6c3c1d78 100644 --- a/sdk/python/haxen_sdk/client.py +++ b/sdk/python/agentfield/client.py @@ -65,7 +65,7 @@ class _Submission: target_type: Optional[str] = None -class HaxenClient: +class AgentFieldClient: def __init__( self, base_url: str = "http://localhost:8080", @@ -179,7 +179,7 @@ async def get_async_http_client(self) -> "httpx.AsyncClient": client_kwargs = { "headers": { - "User-Agent": "HaxenSDK/1.0", + "User-Agent": "AgentFieldSDK/1.0", "Accept": "application/json", } } @@ -254,7 +254,7 @@ def _sync_request(method: str, url: str, **kwargs): # Add User-Agent if not present if "User-Agent" not in kwargs["headers"]: - kwargs["headers"]["User-Agent"] = "HaxenSDK/1.0" + kwargs["headers"]["User-Agent"] = "AgentFieldSDK/1.0" # DIAGNOSTIC: Log request details logger.debug(f"πŸ” SYNC_REQUEST: Headers: {kwargs.get('headers', {})}") @@ -305,7 +305,7 @@ async def aclose(self) -> None: self._async_http_client_lock = None def register_node(self, node_data: Dict[str, Any]) -> Dict[str, Any]: - """Register agent node with Haxen server""" + """Register agent node with AgentField server""" response = requests.post(f"{self.api_base}/nodes/register", json=node_data) response.raise_for_status() # Raise an exception for bad status codes return response.json() @@ -334,7 +334,7 @@ async def register_agent( base_url: str, discovery: Optional[Dict[str, Any]] = None, ) -> Tuple[bool, Optional[Dict[str, Any]]]: - """Register or update agent information with Haxen server.""" + """Register or update agent information with AgentField server.""" try: 
registration_data = { "id": node_id, @@ -709,7 +709,7 @@ async def send_enhanced_heartbeat( self, node_id: str, heartbeat_data: HeartbeatData ) -> bool: """ - Send enhanced heartbeat with status and MCP information to Haxen server. + Send enhanced heartbeat with status and MCP information to AgentField server. Args: node_id: The agent node ID @@ -758,7 +758,7 @@ def send_enhanced_heartbeat_sync( async def notify_graceful_shutdown(self, node_id: str) -> bool: """ - Notify Haxen server that the agent is shutting down gracefully. + Notify AgentField server that the agent is shutting down gracefully. Args: node_id: The agent node ID @@ -1246,7 +1246,7 @@ async def close_async_execution_manager(self) -> None: """ Close the async execution manager and cleanup resources. - This should be called when the HaxenClient is no longer needed + This should be called when the AgentFieldClient is no longer needed to ensure proper cleanup of background tasks and connections. """ if self._async_execution_manager is not None: diff --git a/sdk/python/haxen_sdk/connection_manager.py b/sdk/python/agentfield/connection_manager.py similarity index 89% rename from sdk/python/haxen_sdk/connection_manager.py rename to sdk/python/agentfield/connection_manager.py index 9f14d180..d965da18 100644 --- a/sdk/python/haxen_sdk/connection_manager.py +++ b/sdk/python/agentfield/connection_manager.py @@ -1,7 +1,7 @@ """ -Haxen SDK Connection Manager +AgentField SDK Connection Manager -Provides resilient connection handling for Haxen server connectivity. +Provides resilient connection handling for AgentField server connectivity. Handles automatic reconnection, graceful degradation, and connection health monitoring. 
""" @@ -10,17 +10,17 @@ from enum import Enum from typing import Optional, Callable, Any, Dict from dataclasses import dataclass -from haxen_sdk.logger import log_debug, log_info, log_warn, log_error +from agentfield.logger import log_debug, log_info, log_warn, log_error class ConnectionState(Enum): - """Connection states for Haxen server connectivity""" + """Connection states for AgentField server connectivity""" DISCONNECTED = "disconnected" CONNECTING = "connecting" CONNECTED = "connected" RECONNECTING = "reconnecting" - DEGRADED = "degraded" # Running locally without Haxen server + DEGRADED = "degraded" # Running locally without AgentField server @dataclass @@ -34,11 +34,11 @@ class ConnectionConfig: class ConnectionManager: """ - Manages resilient connections to Haxen server with automatic reconnection, + Manages resilient connections to AgentField server with automatic reconnection, graceful degradation, and health monitoring. Uses a simple, consistent retry interval to ensure immediate reconnection - when Haxen server becomes available. + when AgentField server becomes available. """ def __init__(self, agent, config: Optional[ConnectionConfig] = None): @@ -104,16 +104,16 @@ async def stop(self): async def _attempt_connection(self) -> bool: """ - Attempt to connect to Haxen server. + Attempt to connect to AgentField server. 
Returns: True if connection successful, False otherwise """ try: self.state = ConnectionState.CONNECTING - log_debug("Attempting connection to Haxen server") + log_debug("Attempting connection to AgentField server") - # Try to register with Haxen server - suppress verbose error logging + # Try to register with AgentField server - suppress verbose error logging import logging # Temporarily suppress httpx and httpcore logging to avoid verbose connection errors @@ -171,7 +171,7 @@ async def _health_check_loop(self): break # Try to send a heartbeat to check connection health - success = await self.agent.haxen_handler.send_enhanced_heartbeat() + success = await self.agent.agentfield_handler.send_enhanced_heartbeat() if not success: log_warn("Health check failed - connection lost") @@ -227,9 +227,9 @@ def _on_connection_success(self): """Handle successful connection""" self.state = ConnectionState.CONNECTED self.last_successful_connection = time.time() - self.agent.haxen_connected = True + self.agent.agentfield_connected = True - log_info("Connected to Haxen server") + log_info("Connected to AgentField server") if self.on_connected: try: @@ -240,9 +240,9 @@ def _on_connection_success(self): def _on_connection_failure(self): """Handle connection failure""" self.state = ConnectionState.DEGRADED - self.agent.haxen_connected = False + self.agent.agentfield_connected = False - log_warn("Haxen server unavailable - running in degraded mode") + log_warn("AgentField server unavailable - running in degraded mode") if self.on_disconnected: try: @@ -251,7 +251,7 @@ def _on_connection_failure(self): log_error(f"Error in disconnection callback: {e}") def is_connected(self) -> bool: - """Check if currently connected to Haxen server""" + """Check if currently connected to AgentField server""" return self.state == ConnectionState.CONNECTED def is_degraded(self) -> bool: diff --git a/sdk/python/haxen_sdk/decorators.py b/sdk/python/agentfield/decorators.py similarity index 99% rename from 
sdk/python/haxen_sdk/decorators.py rename to sdk/python/agentfield/decorators.py index 175ce9bd..fbc2667a 100644 --- a/sdk/python/haxen_sdk/decorators.py +++ b/sdk/python/agentfield/decorators.py @@ -1,5 +1,5 @@ """ -Enhanced decorators for Haxen SDK with automatic workflow tracking. +Enhanced decorators for AgentField SDK with automatic workflow tracking. Provides always-on workflow tracking for reasoner calls. """ @@ -9,7 +9,7 @@ import time from typing import Any, Callable, Dict, List, Optional, Union -from haxen_sdk.logger import log_warn +from agentfield.logger import log_warn from .execution_context import ( ExecutionContext, diff --git a/sdk/python/haxen_sdk/did_manager.py b/sdk/python/agentfield/did_manager.py similarity index 92% rename from sdk/python/haxen_sdk/did_manager.py rename to sdk/python/agentfield/did_manager.py index 89bf3191..08c034d1 100644 --- a/sdk/python/haxen_sdk/did_manager.py +++ b/sdk/python/agentfield/did_manager.py @@ -1,5 +1,5 @@ """ -DID Manager for Haxen SDK +DID Manager for AgentField SDK Handles Decentralized Identity (DID) and Verifiable Credentials (VC) functionality for agent nodes, reasoners, and skills. @@ -34,7 +34,7 @@ class DIDIdentityPackage: agent_did: DIDIdentity reasoner_dids: Dict[str, DIDIdentity] skill_dids: Dict[str, DIDIdentity] - haxen_server_id: str + agentfield_server_id: str @dataclass @@ -52,24 +52,24 @@ class DIDExecutionContext: class DIDManager: """ - Manages DID operations for Haxen SDK agents. + Manages DID operations for AgentField SDK agents. Handles: - - Agent registration with Haxen Server + - Agent registration with AgentField Server - DID resolution and verification - Execution context creation - Integration with agent lifecycle """ - def __init__(self, haxen_server_url: str, agent_node_id: str): + def __init__(self, agentfield_server_url: str, agent_node_id: str): """ Initialize DID Manager. 
Args: - haxen_server_url: URL of the Haxen Server + agentfield_server_url: URL of the AgentField Server agent_node_id: Unique identifier for this agent node """ - self.haxen_server_url = haxen_server_url.rstrip("/") + self.agentfield_server_url = agentfield_server_url.rstrip("/") self.agent_node_id = agent_node_id self.identity_package: Optional[DIDIdentityPackage] = None self.enabled = False @@ -78,7 +78,7 @@ def register_agent( self, reasoners: List[Dict[str, Any]], skills: List[Dict[str, Any]] ) -> bool: """ - Register agent with Haxen Server and obtain DID identity package. + Register agent with AgentField Server and obtain DID identity package. Args: reasoners: List of reasoner definitions @@ -100,9 +100,9 @@ def register_agent( "skills": skills, } - # Send registration request to Haxen Server + # Send registration request to AgentField Server response = requests.post( - f"{self.haxen_server_url}/api/v1/did/register", + f"{self.agentfield_server_url}/api/v1/did/register", json=registration_data, timeout=30, ) @@ -216,7 +216,7 @@ def resolve_did(self, did: str) -> Optional[Dict[str, Any]]: """ try: response = requests.get( - f"{self.haxen_server_url}/api/v1/did/resolve/{did}", timeout=10 + f"{self.agentfield_server_url}/api/v1/did/resolve/{did}", timeout=10 ) if response.status_code == 200: @@ -246,7 +246,7 @@ def get_identity_summary(self) -> Dict[str, Any]: return { "enabled": True, "agent_did": self.identity_package.agent_did.did, - "haxen_server_id": self.identity_package.haxen_server_id, + "agentfield_server_id": self.identity_package.agentfield_server_id, "reasoner_count": len(self.identity_package.reasoner_dids), "skill_count": len(self.identity_package.skill_dids), "reasoner_dids": { @@ -302,7 +302,7 @@ def _parse_identity_package( agent_did=agent_did, reasoner_dids=reasoner_dids, skill_dids=skill_dids, - haxen_server_id=package_data["haxen_server_id"], + agentfield_server_id=package_data["agentfield_server_id"], ) def _get_function_did(self, 
function_name: str) -> Optional[str]: diff --git a/sdk/python/haxen_sdk/dynamic_skills.py b/sdk/python/agentfield/dynamic_skills.py similarity index 94% rename from sdk/python/haxen_sdk/dynamic_skills.py rename to sdk/python/agentfield/dynamic_skills.py index 134d2e2b..12f18a0f 100644 --- a/sdk/python/haxen_sdk/dynamic_skills.py +++ b/sdk/python/agentfield/dynamic_skills.py @@ -4,17 +4,17 @@ from pydantic import BaseModel, create_model from fastapi import Request -from haxen_sdk.agent_utils import AgentUtils -from haxen_sdk.execution_context import ExecutionContext -from haxen_sdk.logger import log_debug, log_error, log_info, log_warn +from agentfield.agent_utils import AgentUtils +from agentfield.execution_context import ExecutionContext +from agentfield.logger import log_debug, log_error, log_info, log_warn class DynamicMCPSkillManager: """ - Dynamic MCP Skill Generator that converts MCP tools into Haxen skills. + Dynamic MCP Skill Generator that converts MCP tools into AgentField skills. This class discovers MCP servers, lists their tools, and dynamically - registers each tool as a Haxen skill with proper schema generation + registers each tool as an AgentField skill with proper schema generation and execution context handling. """ @@ -23,7 +23,7 @@ def __init__(self, agent, dev_mode: bool = False): Initialize the Dynamic MCP Skill Manager. Args: - agent: The Haxen agent instance + agent: The AgentField agent instance dev_mode: Enable development mode logging """ self.agent = agent @@ -32,7 +32,7 @@ def __init__(self, agent, dev_mode: bool = False): async def discover_and_register_all_skills(self) -> None: """ - Discover and register all MCP tools as Haxen skills. + Discover and register all MCP tools as AgentField skills. This method: 1. Checks for MCP client registry availability @@ -40,7 +40,7 @@ async def discover_and_register_all_skills(self) -> None: 3. Waits for server readiness 4. Performs health checks on each server 5. 
Lists tools from healthy servers - 6. Registers each tool as a Haxen skill + 6. Registers each tool as an AgentField skill """ if not self.agent.mcp_client_registry: if self.dev_mode: @@ -119,7 +119,7 @@ async def _register_mcp_tool_as_skill( self, server_alias: str, tool: Dict[str, Any], skill_name: str ) -> None: """ - Register an MCP tool as a Haxen skill. + Register an MCP tool as an AgentField skill. This method: 1. Extracts tool metadata (name, description) diff --git a/sdk/python/haxen_sdk/execution_context.py b/sdk/python/agentfield/execution_context.py similarity index 98% rename from sdk/python/haxen_sdk/execution_context.py rename to sdk/python/agentfield/execution_context.py index d739a9d2..0cf51965 100644 --- a/sdk/python/haxen_sdk/execution_context.py +++ b/sdk/python/agentfield/execution_context.py @@ -55,7 +55,7 @@ def to_headers(self) -> Dict[str, str]: Produce the headers that should be forwarded for downstream executions. We only send the run identifier and the current execution as the parent. - The Haxen backend issues fresh execution IDs for child nodes. + The AgentField backend issues fresh execution IDs for child nodes. """ parent_execution = self.parent_execution_id or self.execution_id @@ -90,7 +90,7 @@ def child_context(self) -> "ExecutionContext": Create an in-process child context for local tracking. The new execution ID is generated locally so callers can reference - it while awaiting downstream responses. The Haxen server will still + it while awaiting downstream responses. The AgentField server will still assign its own execution ID when the child request is submitted. 
""" diff --git a/sdk/python/haxen_sdk/execution_state.py b/sdk/python/agentfield/execution_state.py similarity index 100% rename from sdk/python/haxen_sdk/execution_state.py rename to sdk/python/agentfield/execution_state.py diff --git a/sdk/python/haxen_sdk/http_connection_manager.py b/sdk/python/agentfield/http_connection_manager.py similarity index 98% rename from sdk/python/haxen_sdk/http_connection_manager.py rename to sdk/python/agentfield/http_connection_manager.py index 8bd25763..9f3a5d78 100644 --- a/sdk/python/haxen_sdk/http_connection_manager.py +++ b/sdk/python/agentfield/http_connection_manager.py @@ -3,7 +3,7 @@ This module provides aiohttp session pooling with configurable connection limits, connection reuse, proper cleanup, timeout handling, and connection health monitoring. -Supports both single requests and batch operations for the Haxen SDK async execution. +Supports both single requests and batch operations for the AgentField SDK async execution. """ import asyncio @@ -161,7 +161,7 @@ async def start(self) -> None: connector=self._connector, timeout=timeout, headers={ - "User-Agent": "Haxen-SDK-AsyncClient/1.0", + "User-Agent": "AgentField-SDK-AsyncClient/1.0", "Accept": "application/json", "Content-Type": "application/json", }, diff --git a/sdk/python/haxen_sdk/logger.py b/sdk/python/agentfield/logger.py similarity index 86% rename from sdk/python/haxen_sdk/logger.py rename to sdk/python/agentfield/logger.py index f53a8096..07da3381 100644 --- a/sdk/python/haxen_sdk/logger.py +++ b/sdk/python/agentfield/logger.py @@ -1,7 +1,7 @@ """ -Haxen SDK Logging Utility +AgentField SDK Logging Utility -This module provides a centralized logging system for the Haxen SDK that: +This module provides a centralized logging system for the AgentField SDK that: - Replaces print statements with proper logging - Provides configurable log levels - Truncates long messages and payloads @@ -17,7 +17,7 @@ class LogLevel(Enum): - """Log levels for Haxen SDK""" + """Log 
levels for AgentField SDK""" DEBUG = "DEBUG" INFO = "INFO" @@ -26,23 +26,27 @@ class LogLevel(Enum): ERROR = "ERROR" -class HaxenLogger: +class AgentFieldLogger: """ - Centralized logger for Haxen SDK with configurable verbosity and payload truncation. + Centralized logger for AgentField SDK with configurable verbosity and payload truncation. Supports runtime log level changes (e.g., for dev_mode). """ - def __init__(self, name: str = "haxen_sdk"): + def __init__(self, name: str = "agentfield"): self.logger = logging.getLogger(name) self._setup_logger() # Configuration from environment variables - default to WARNING (only important events) - self.log_level = os.getenv("HAXEN_LOG_LEVEL", "WARNING").upper() - self.truncate_length = int(os.getenv("HAXEN_LOG_TRUNCATE", "200")) - self.show_payloads = os.getenv("HAXEN_LOG_PAYLOADS", "false").lower() == "true" - self.show_tracking = os.getenv("HAXEN_LOG_TRACKING", "false").lower() == "true" - self.show_fire = os.getenv("HAXEN_LOG_FIRE", "false").lower() == "true" + self.log_level = os.getenv("AGENTFIELD_LOG_LEVEL", "WARNING").upper() + self.truncate_length = int(os.getenv("AGENTFIELD_LOG_TRUNCATE", "200")) + self.show_payloads = ( + os.getenv("AGENTFIELD_LOG_PAYLOADS", "false").lower() == "true" + ) + self.show_tracking = ( + os.getenv("AGENTFIELD_LOG_TRACKING", "false").lower() == "true" + ) + self.show_fire = os.getenv("AGENTFIELD_LOG_FIRE", "false").lower() == "true" # Set logger level based on configuration level_map = { @@ -84,7 +88,7 @@ def _truncate_message(self, message: str) -> str: def _format_payload(self, payload: Any) -> str: """Format payload for logging with truncation""" if not self.show_payloads: - return "[payload hidden - set HAXEN_LOG_PAYLOADS=true to show]" + return "[payload hidden - set AGENTFIELD_LOG_PAYLOADS=true to show]" try: if isinstance(payload, dict): @@ -101,12 +105,12 @@ def heartbeat(self, message: str, **kwargs): self.logger.debug(f"πŸ’“ {message}") def track(self, message: str, 
**kwargs): - """Log tracking messages (controlled by HAXEN_LOG_TRACKING)""" + """Log tracking messages (controlled by AGENTFIELD_LOG_TRACKING)""" if self.show_tracking: self.logger.debug(f"πŸ” TRACK: {self._truncate_message(message)}") def fire(self, message: str, payload: Optional[Any] = None, **kwargs): - """Log fire-and-forget workflow messages (controlled by HAXEN_LOG_FIRE)""" + """Log fire-and-forget workflow messages (controlled by AGENTFIELD_LOG_FIRE)""" if self.show_fire: if payload is not None: formatted_payload = self._format_payload(payload) @@ -171,11 +175,11 @@ def security(self, message: str, **kwargs): _global_logger = None -def get_logger(name: str = "haxen_sdk") -> HaxenLogger: - """Get or create a Haxen SDK logger instance""" +def get_logger(name: str = "agentfield") -> AgentFieldLogger: + """Get or create an AgentField SDK logger instance""" global _global_logger if _global_logger is None: - _global_logger = HaxenLogger(name) + _global_logger = AgentFieldLogger(name) return _global_logger diff --git a/sdk/python/haxen_sdk/mcp_client.py b/sdk/python/agentfield/mcp_client.py similarity index 99% rename from sdk/python/haxen_sdk/mcp_client.py rename to sdk/python/agentfield/mcp_client.py index fcf18f7e..9c3a41d5 100644 --- a/sdk/python/haxen_sdk/mcp_client.py +++ b/sdk/python/agentfield/mcp_client.py @@ -3,7 +3,7 @@ import aiohttp from aiohttp import ClientTimeout -from haxen_sdk.logger import log_debug, log_error, log_info, log_warn +from agentfield.logger import log_debug, log_error, log_info, log_warn class MCPClient: diff --git a/sdk/python/haxen_sdk/mcp_manager.py b/sdk/python/agentfield/mcp_manager.py similarity index 100% rename from sdk/python/haxen_sdk/mcp_manager.py rename to sdk/python/agentfield/mcp_manager.py diff --git a/sdk/python/haxen_sdk/mcp_stdio_bridge.py b/sdk/python/agentfield/mcp_stdio_bridge.py similarity index 99% rename from sdk/python/haxen_sdk/mcp_stdio_bridge.py rename to sdk/python/agentfield/mcp_stdio_bridge.py index 
a3373bab..43d3e833 100644 --- a/sdk/python/haxen_sdk/mcp_stdio_bridge.py +++ b/sdk/python/agentfield/mcp_stdio_bridge.py @@ -259,7 +259,7 @@ async def _initialize_mcp_session(self) -> bool: init_params = { "protocolVersion": "2024-11-05", "capabilities": {"roots": {"listChanged": True}}, - "clientInfo": {"name": "haxen-stdio-bridge", "version": "1.0.0"}, + "clientInfo": {"name": "agentfield-stdio-bridge", "version": "1.0.0"}, } response = await self._send_stdio_request("initialize", init_params) diff --git a/sdk/python/haxen_sdk/memory.py b/sdk/python/agentfield/memory.py similarity index 92% rename from sdk/python/haxen_sdk/memory.py rename to sdk/python/agentfield/memory.py index c52ea7df..0bd42f45 100644 --- a/sdk/python/haxen_sdk/memory.py +++ b/sdk/python/agentfield/memory.py @@ -1,5 +1,5 @@ """ -Cross-Agent Persistent Memory Client for Haxen SDK. +Cross-Agent Persistent Memory Client for AgentField SDK. This module provides the memory interface that enables seamless, automatic memory sharing and synchronization across distributed agents. @@ -8,27 +8,29 @@ import asyncio import json from typing import Any, List, Optional, Union -from .client import HaxenClient +from .client import AgentFieldClient from .execution_context import ExecutionContext from .memory_events import MemoryEventClient, ScopedMemoryEventClient class MemoryClient: """ - Core memory client that communicates with the Haxen server's memory API. + Core memory client that communicates with the AgentField server's memory API. This client handles the low-level HTTP operations for memory management and automatically includes execution context headers for proper scoping. 
""" - def __init__(self, haxen_client: HaxenClient, execution_context: ExecutionContext): - self.haxen_client = haxen_client + def __init__( + self, agentfield_client: AgentFieldClient, execution_context: ExecutionContext + ): + self.agentfield_client = agentfield_client self.execution_context = execution_context async def _async_request(self, method: str, url: str, **kwargs): """Internal helper to perform HTTP requests with graceful fallbacks.""" - if hasattr(self.haxen_client, "_async_request"): - return await self.haxen_client._async_request(method, url, **kwargs) + if hasattr(self.agentfield_client, "_async_request"): + return await self.agentfield_client._async_request(method, url, **kwargs) try: import httpx @@ -49,7 +51,7 @@ async def set(self, key: str, data: Any, scope: Optional[str] = None) -> None: data: The data to store (will be JSON serialized) scope: Optional explicit scope override """ - from haxen_sdk.logger import log_debug + from agentfield.logger import log_debug headers = self.execution_context.to_headers() @@ -68,12 +70,12 @@ async def set(self, key: str, data: Any, scope: Optional[str] = None) -> None: ) raise - # Use synchronous requests to avoid event loop conflicts with Haxen SDK - url = f"{self.haxen_client.api_base}/memory/set" + # Use synchronous requests to avoid event loop conflicts with AgentField SDK + url = f"{self.agentfield_client.api_base}/memory/set" try: - if hasattr(self.haxen_client, "_async_request"): - response = await self.haxen_client._async_request( + if hasattr(self.agentfield_client, "_async_request"): + response = await self.agentfield_client._async_request( "POST", url, json=payload, @@ -119,7 +121,7 @@ async def get( response = await self._async_request( "POST", - f"{self.haxen_client.api_base}/memory/get", + f"{self.agentfield_client.api_base}/memory/get", json=payload, headers=headers, timeout=10.0, @@ -178,7 +180,7 @@ async def delete(self, key: str, scope: Optional[str] = None) -> None: response = await 
self._async_request( "DELETE", - f"{self.haxen_client.api_base}/memory/delete", + f"{self.agentfield_client.api_base}/memory/delete", json=payload, headers=headers, timeout=10.0, @@ -199,7 +201,7 @@ async def list_keys(self, scope: str) -> List[str]: response = await self._async_request( "GET", - f"{self.haxen_client.api_base}/memory/list", + f"{self.agentfield_client.api_base}/memory/list", params={"scope": scope}, headers=headers, timeout=10.0, diff --git a/sdk/python/haxen_sdk/memory_events.py b/sdk/python/agentfield/memory_events.py similarity index 99% rename from sdk/python/haxen_sdk/memory_events.py rename to sdk/python/agentfield/memory_events.py index fa2ad63b..2eabe93e 100644 --- a/sdk/python/haxen_sdk/memory_events.py +++ b/sdk/python/agentfield/memory_events.py @@ -7,7 +7,7 @@ import websockets -from haxen_sdk.logger import log_error, log_info +from agentfield.logger import log_error, log_info from .types import MemoryChangeEvent diff --git a/sdk/python/haxen_sdk/multimodal.py b/sdk/python/agentfield/multimodal.py similarity index 100% rename from sdk/python/haxen_sdk/multimodal.py rename to sdk/python/agentfield/multimodal.py diff --git a/sdk/python/haxen_sdk/multimodal_response.py b/sdk/python/agentfield/multimodal_response.py similarity index 99% rename from sdk/python/haxen_sdk/multimodal_response.py rename to sdk/python/agentfield/multimodal_response.py index 06eb000c..26ddc6d7 100644 --- a/sdk/python/haxen_sdk/multimodal_response.py +++ b/sdk/python/agentfield/multimodal_response.py @@ -10,7 +10,7 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Union -from haxen_sdk.logger import log_error, log_warn +from agentfield.logger import log_error, log_warn from pydantic import BaseModel, Field diff --git a/sdk/python/haxen_sdk/pydantic_utils.py b/sdk/python/agentfield/pydantic_utils.py similarity index 98% rename from sdk/python/haxen_sdk/pydantic_utils.py rename to sdk/python/agentfield/pydantic_utils.py index 
3c5e5c49..2e5da377 100644 --- a/sdk/python/haxen_sdk/pydantic_utils.py +++ b/sdk/python/agentfield/pydantic_utils.py @@ -1,12 +1,12 @@ """ -Utility functions for automatic Pydantic model conversion in Haxen SDK. +Utility functions for automatic Pydantic model conversion in AgentField SDK. Provides FastAPI-like automatic conversion of dictionary arguments to Pydantic model instances. """ import inspect from typing import Any, Union, get_args, get_origin, get_type_hints -from haxen_sdk.logger import log_warn +from agentfield.logger import log_warn from pydantic import BaseModel, ValidationError diff --git a/sdk/python/haxen_sdk/rate_limiter.py b/sdk/python/agentfield/rate_limiter.py similarity index 99% rename from sdk/python/haxen_sdk/rate_limiter.py rename to sdk/python/agentfield/rate_limiter.py index 829e6330..168a4190 100644 --- a/sdk/python/haxen_sdk/rate_limiter.py +++ b/sdk/python/agentfield/rate_limiter.py @@ -4,7 +4,7 @@ import random import time from typing import Any, Optional -from haxen_sdk.logger import log_debug +from agentfield.logger import log_debug class RateLimitError(Exception): diff --git a/sdk/python/haxen_sdk/result_cache.py b/sdk/python/agentfield/result_cache.py similarity index 100% rename from sdk/python/haxen_sdk/result_cache.py rename to sdk/python/agentfield/result_cache.py diff --git a/sdk/python/haxen_sdk/router.py b/sdk/python/agentfield/router.py similarity index 100% rename from sdk/python/haxen_sdk/router.py rename to sdk/python/agentfield/router.py diff --git a/sdk/python/haxen_sdk/status.py b/sdk/python/agentfield/status.py similarity index 95% rename from sdk/python/haxen_sdk/status.py rename to sdk/python/agentfield/status.py index 56987b58..6246e0ba 100644 --- a/sdk/python/haxen_sdk/status.py +++ b/sdk/python/agentfield/status.py @@ -1,4 +1,4 @@ -"""Canonical execution status utilities for the Haxen SDK.""" +"""Canonical execution status utilities for the AgentField SDK.""" from __future__ import annotations diff --git 
a/sdk/python/haxen_sdk/types.py b/sdk/python/agentfield/types.py similarity index 99% rename from sdk/python/haxen_sdk/types.py rename to sdk/python/agentfield/types.py index 989df0fe..a6c7c375 100644 --- a/sdk/python/haxen_sdk/types.py +++ b/sdk/python/agentfield/types.py @@ -82,7 +82,7 @@ def to_dict(self) -> Dict[str, Any]: @dataclass class ExecutionHeaders: """ - Simple helper for constructing execution headers when initiating Haxen calls. + Simple helper for constructing execution headers when initiating AgentField calls. This replaces the wide workflow context structure with the minimal information required by the run-based execution pipeline. @@ -494,7 +494,7 @@ def from_env(cls, **overrides) -> "AIConfig": @dataclass class MemoryValue: - """Represents a memory value stored in the Haxen system.""" + """Represents a memory value stored in the AgentField system.""" key: str data: Any diff --git a/sdk/python/haxen_sdk/utils.py b/sdk/python/agentfield/utils.py similarity index 100% rename from sdk/python/haxen_sdk/utils.py rename to sdk/python/agentfield/utils.py diff --git a/sdk/python/haxen_sdk/vc_generator.py b/sdk/python/agentfield/vc_generator.py similarity index 94% rename from sdk/python/haxen_sdk/vc_generator.py rename to sdk/python/agentfield/vc_generator.py index c06e5195..0ae4c861 100644 --- a/sdk/python/haxen_sdk/vc_generator.py +++ b/sdk/python/agentfield/vc_generator.py @@ -1,5 +1,5 @@ """ -VC Generator for Haxen SDK +VC Generator for AgentField SDK Handles Verifiable Credentials (VC) generation and verification for agent executions. """ @@ -58,17 +58,17 @@ class VCGenerator: - Execution VC generation - Workflow VC aggregation - VC verification - - Integration with Haxen Server + - Integration with AgentField Server """ - def __init__(self, haxen_server_url: str): + def __init__(self, agentfield_server_url: str): """ Initialize VC Generator. 
Args: - haxen_server_url: URL of the Haxen Server + agentfield_server_url: URL of the AgentField Server """ - self.haxen_server_url = haxen_server_url.rstrip("/") + self.agentfield_server_url = agentfield_server_url.rstrip("/") self.enabled = False def set_enabled(self, enabled: bool): @@ -126,9 +126,11 @@ def generate_execution_vc( "duration_ms": duration_ms, } - # Send VC generation request to Haxen Server + # Send VC generation request to AgentField Server response = requests.post( - f"{self.haxen_server_url}/api/v1/execution/vc", json=vc_data, timeout=10 + f"{self.agentfield_server_url}/api/v1/execution/vc", + json=vc_data, + timeout=10, ) if response.status_code == 200: @@ -161,7 +163,7 @@ def verify_vc(self, vc_document: Dict[str, Any]) -> Optional[Dict[str, Any]]: verification_data = {"vc_document": vc_document} response = requests.post( - f"{self.haxen_server_url}/api/v1/did/verify", + f"{self.agentfield_server_url}/api/v1/did/verify", json=verification_data, timeout=10, ) @@ -190,7 +192,7 @@ def get_workflow_vc_chain(self, workflow_id: str) -> Optional[Dict[str, Any]]: """ try: response = requests.get( - f"{self.haxen_server_url}/api/v1/did/workflow/{workflow_id}/vc-chain", + f"{self.agentfield_server_url}/api/v1/did/workflow/{workflow_id}/vc-chain", timeout=10, ) @@ -227,7 +229,7 @@ def create_workflow_vc( } response = requests.post( - f"{self.haxen_server_url}/api/v1/did/workflow/{workflow_id}/vc", + f"{self.agentfield_server_url}/api/v1/did/workflow/{workflow_id}/vc", json=workflow_data, timeout=10, ) @@ -261,7 +263,7 @@ def export_vcs( params = filters or {} response = requests.get( - f"{self.haxen_server_url}/api/v1/did/export/vcs", + f"{self.agentfield_server_url}/api/v1/did/export/vcs", params=params, timeout=30, ) diff --git a/sdk/python/haxen_sdk/.DS_Store b/sdk/python/haxen_sdk/.DS_Store deleted file mode 100644 index 72067d75..00000000 Binary files a/sdk/python/haxen_sdk/.DS_Store and /dev/null differ diff --git a/sdk/python/pyproject.toml 
b/sdk/python/pyproject.toml index 5aed4021..f347647c 100644 --- a/sdk/python/pyproject.toml +++ b/sdk/python/pyproject.toml @@ -3,13 +3,13 @@ requires = ["setuptools>=45", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "haxen-sdk" +name = "agentfield" version = "0.1.0" -description = "Python SDK for the Haxen control plane" +description = "Python SDK for the AgentField control plane" readme = "README.md" license = {text = "Apache-2.0"} authors = [ - {name = "Haxen Maintainers"} + {name = "AgentField Maintainers"} ] classifiers = [ "Development Status :: 4 - Beta", @@ -34,16 +34,16 @@ dependencies = [ "aiohttp>=3.8", "websockets" ] -keywords = ["haxen", "sdk", "agents"] +keywords = ["agentfield", "sdk", "agents"] [project.urls] -Homepage = "https://github.com/your-org/haxen" -Documentation = "https://github.com/your-org/haxen/tree/main/docs" -Issues = "https://github.com/your-org/haxen/issues" +Homepage = "https://github.com/Agent-Field/agentfield" +Documentation = "https://github.com/Agent-Field/agentfield/tree/main/docs" +Issues = "https://github.com/Agent-Field/agentfield/issues" [tool.setuptools.packages.find] where = ["."] -include = ["haxen_sdk*"] +include = ["agentfield*"] exclude = ["tests*", "examples*"] [project.optional-dependencies] @@ -72,17 +72,17 @@ markers = [ "integration: tests that can touch network/services", "mcp: tests that exercise MCP/network interactions" ] -addopts = "-ra -q -m \"not mcp\" --strict-markers --strict-config --cov=haxen_sdk.client --cov=haxen_sdk.agent_haxen --cov=haxen_sdk.execution_context --cov=haxen_sdk.execution_state --cov=haxen_sdk.memory --cov=haxen_sdk.rate_limiter --cov=haxen_sdk.result_cache --cov-report=term-missing:skip-covered" +addopts = "-ra -q -m \"not mcp\" --strict-markers --strict-config --cov=agentfield.client --cov=agentfield.agent_field_handler --cov=agentfield.execution_context --cov=agentfield.execution_state --cov=agentfield.memory --cov=agentfield.rate_limiter 
--cov=agentfield.result_cache --cov-report=term-missing:skip-covered" asyncio_mode = "auto" [tool.coverage.run] -source = ["haxen_sdk"] +source = ["agentfield"] omit = [ - "haxen_sdk/agent_mcp.py", - "haxen_sdk/dynamic_skills.py", - "haxen_sdk/mcp_client.py", - "haxen_sdk/mcp_manager.py", - "haxen_sdk/mcp_stdio_bridge.py", - "haxen_sdk/logger.py", - "haxen_sdk/types.py", + "agentfield/agent_mcp.py", + "agentfield/dynamic_skills.py", + "agentfield/mcp_client.py", + "agentfield/mcp_manager.py", + "agentfield/mcp_stdio_bridge.py", + "agentfield/logger.py", + "agentfield/types.py", ] diff --git a/sdk/python/requirements-dev.txt b/sdk/python/requirements-dev.txt index f68b782c..0c1d83f6 100644 --- a/sdk/python/requirements-dev.txt +++ b/sdk/python/requirements-dev.txt @@ -1,4 +1,4 @@ -# Development/test dependencies for Haxen SDK (Python 3.8+) +# Development/test dependencies for AgentField SDK (Python 3.8+) # Install with: pip install -r requirements-dev.txt pytest>=7.4,<9 pytest-asyncio>=0.21,<0.24 diff --git a/sdk/python/tests/conftest.py b/sdk/python/tests/conftest.py index b758d2a0..753d9095 100644 --- a/sdk/python/tests/conftest.py +++ b/sdk/python/tests/conftest.py @@ -1,4 +1,4 @@ -# Pytest configuration and fixtures for Haxen SDK tests +# Pytest configuration and fixtures for AgentField SDK tests """ Shared fixtures used by the actively supported open-source test suite. 
The helpers here focus on deterministic behaviour (frozen time, env patching, @@ -21,8 +21,8 @@ import responses as responses_lib from freezegun import freeze_time -from haxen_sdk.agent import Agent -from haxen_sdk.types import AIConfig, MemoryConfig +from agentfield.agent import Agent +from agentfield.types import AIConfig, MemoryConfig # Optional imports guarded for test envs try: @@ -340,9 +340,9 @@ def _completion_alias(**kwargs): return LiteLLMMockController(module=fake) -class HaxenHTTPMocks: +class AgentFieldHTTPMocks: """ - Helper wrapper that registers common Haxen server endpoints on both: + Helper wrapper that registers common AgentField server endpoints on both: - httpx (via respx) - requests (via responses) @@ -443,9 +443,9 @@ def mock_memory_list(self, keys: List[str], status: int = 200): @pytest.fixture -def http_mocks() -> HaxenHTTPMocks: +def http_mocks() -> AgentFieldHTTPMocks: """ - Returns a helper for mocking Haxen server endpoints on both httpx and requests. + Returns a helper for mocking AgentField server endpoints on both httpx and requests. Note: This works in concert with the autouse respx/responses wrappers already defined @@ -455,9 +455,9 @@ def http_mocks() -> HaxenHTTPMocks: def test_execute_headers_propagation(http_mocks, workflow_context): ctx, headers = workflow_context # minimal by default http_mocks.mock_execute("n.reasoner", json={"result": {"ok": True}}) - # ... call HaxenClient.execute(...), ensure headers were passed ... + # ... call AgentFieldClient.execute(...), ensure headers were passed ... """ - return HaxenHTTPMocks() + return AgentFieldHTTPMocks() # ---------------------------- 4) Sample Agent Fixture ---------------------------- @@ -492,7 +492,7 @@ def test_callback_prefers_env(mock_container_detection): mock_container_detection(is_container=False) ... 
""" - from haxen_sdk import agent as agent_mod + from agentfield import agent as agent_mod def _apply(is_container: bool = False): monkeypatch.setattr( @@ -513,7 +513,7 @@ def test_local_ip_fallback(mock_ip_detection, env_patch): env_patch.unset("AGENT_CALLBACK_URL") ... """ - from haxen_sdk import agent as agent_mod + from agentfield import agent as agent_mod def _apply(container_ip: Optional[str] = None, local_ip: Optional[str] = None): monkeypatch.setattr( @@ -551,7 +551,7 @@ def test_agent_init(sample_agent): agent = Agent( node_id="test-node", - haxen_server="http://localhost:8080", + agentfield_server="http://localhost:8080", version="0.0.0", ai_config=sample_ai_config, memory_config=MemoryConfig( @@ -567,7 +567,7 @@ def test_agent_init(sample_agent): @pytest.fixture def fake_server(monkeypatch, request): """ - Spins up an in-process FastAPI mock server and routes HaxenClient calls to it WITHOUT real sockets. + Spins up an in-process FastAPI mock server and routes AgentFieldClient calls to it WITHOUT real sockets. This is suitable for contract tests while keeping network isolation. Endpoints: @@ -579,7 +579,7 @@ def fake_server(monkeypatch, request): How it works: - Patches httpx.AsyncClient to use httpx.ASGITransport against the in-process FastAPI app. - - HaxenClient(async) calls are transparently routed; no sockets required. + - AgentFieldClient(async) calls are transparently routed; no sockets required. - requests.* fallbacks are NOT routed here; rely on responses/respx for those. 
Returns: @@ -591,7 +591,7 @@ def fake_server(monkeypatch, request): if FastAPI is None or httpx is None: pytest.skip("fastapi/httpx are required for fake_server fixture") - app = FastAPI(title="Haxen Fake Server") + app = FastAPI(title="AgentField Fake Server") memory_store: Dict[str, Any] = {} @@ -609,7 +609,7 @@ async def execute_target(target: str, payload: Dict[str, Any]): "result": {"echo": payload.get("input", {}), "target": target}, "metadata": { "execution_id": "exec_" + uuid.uuid4().hex[:8], - "haxen_request_id": "req_" + uuid.uuid4().hex[:8], + "agentfield_request_id": "req_" + uuid.uuid4().hex[:8], "agent_node_id": target.split(".")[0] if "." in target else "node", "duration_ms": 12, "timestamp": "2024-01-01T00:00:00Z", @@ -673,7 +673,7 @@ def _patched_async_client(*args, **kwargs): # ---------------------------- Notes and Cross-Cutting Concerns ---------------------------- # - Agent.__init__ callback URL resolution is exercised via env_patch + mock_container_detection + mock_ip_detection -# - HaxenClient request/header propagation is covered by http_mocks and fake_server +# - AgentFieldClient request/header propagation is covered by http_mocks and fake_server # - MemoryClient serialization and HTTP fallback paths are supported by http_mocks and fake_server # - AgentAI model limits caching and message trimming rely on litellm_mock + sample_ai_config # - AIConfig parameter merging and fallback logic can be tested via sample_ai_config overrides @@ -696,11 +696,11 @@ def _apply(**values): @pytest.fixture def dummy_headers(): - """Baseline execution headers consumed by memory/haxen client tests.""" + """Baseline execution headers consumed by memory/agentfield client tests.""" return { "X-Workflow-ID": "wf-test", "X-Execution-ID": "exec-test", - "X-Haxen-Request-ID": "req-test", + "X-AgentField-Request-ID": "req-test", } diff --git a/sdk/python/tests/helpers.py b/sdk/python/tests/helpers.py index 8261f878..fea29c1b 100644 --- a/sdk/python/tests/helpers.py +++ 
b/sdk/python/tests/helpers.py @@ -1,4 +1,4 @@ -"""Shared testing utilities for Haxen SDK unit tests.""" +"""Shared testing utilities for AgentField SDK unit tests.""" from __future__ import annotations @@ -8,11 +8,11 @@ from types import SimpleNamespace from typing import Any, Dict, List, Optional, Tuple -from haxen_sdk.types import AgentStatus, HeartbeatData +from agentfield.types import AgentStatus, HeartbeatData -class DummyHaxenClient: - """Simple in-memory haxen client used to capture registration calls.""" +class DummyAgentFieldClient: + """Simple in-memory agentfield client used to capture registration calls.""" def __init__(self): self.register_calls: List[Dict[str, Any]] = [] @@ -59,14 +59,14 @@ class StubAgent: """Light-weight stand-in for Agent used across module tests.""" node_id: str = "stub-node" - haxen_server: str = "http://haxen" + agentfield_server: str = "http://agentfield" callback_url: Optional[str] = None base_url: Optional[str] = None version: str = "0.0.0" dev_mode: bool = False ai_config: Any = None async_config: Any = None - client: DummyHaxenClient = field(default_factory=DummyHaxenClient) + client: DummyAgentFieldClient = field(default_factory=DummyAgentFieldClient) did_manager: Any = None mcp_handler: Any = field( default_factory=lambda: type( @@ -75,7 +75,7 @@ class StubAgent: ) reasoners: List[Dict[str, Any]] = field(default_factory=list) skills: List[Dict[str, Any]] = field(default_factory=list) - haxen_connected: bool = True + agentfield_connected: bool = True _current_status: AgentStatus = AgentStatus.STARTING callback_candidates: List[str] = field(default_factory=list) @@ -140,7 +140,7 @@ def _apply_discovery_response(self, payload: Optional[Dict[str, Any]]) -> None: class DummyAsyncExecutionManager: - """Simple async execution manager used in tests for HaxenClient async flows.""" + """Simple async execution manager used in tests for AgentFieldClient async flows.""" def __init__(self): self.submissions: List[Dict[str, Any]] = [] 
@@ -193,7 +193,7 @@ def set_event_stream_headers(self, headers: Optional[Dict[str, str]]): __all__ = [ - "DummyHaxenClient", + "DummyAgentFieldClient", "DummyAsyncExecutionManager", "StubAgent", "create_test_agent", @@ -206,32 +206,34 @@ def create_test_agent( node_id: str = "test-agent", callback_url: Optional[str] = None, dev_mode: bool = False, -) -> Tuple[Any, DummyHaxenClient]: +) -> Tuple[Any, DummyAgentFieldClient]: """Construct a fully initialized Agent with key dependencies stubbed out. This helper isolates network-bound components so functional tests can exercise - FastAPI routing, workflow notifications, and Haxen registration without + FastAPI routing, workflow notifications, and AgentField registration without touching external services. """ - from haxen_sdk.agent import Agent - from haxen_sdk.agent_workflow import AgentWorkflow + from agentfield.agent import Agent + from agentfield.agent_workflow import AgentWorkflow memory_store: Dict[str, Any] = {} - class _FakeHaxenClient(DummyHaxenClient): + class _FakeAgentFieldClient(DummyAgentFieldClient): def __init__(self, base_url: str, async_config: Any): super().__init__() self.base_url = base_url self.api_base = f"{base_url}/api/v1" self.async_config = async_config - def _haxen_client_factory(base_url: str, async_config: Any) -> _FakeHaxenClient: - return _FakeHaxenClient(base_url, async_config) + def _agentfield_client_factory( + base_url: str, async_config: Any + ) -> _FakeAgentFieldClient: + return _FakeAgentFieldClient(base_url, async_config) class _FakeMemoryClient: - def __init__(self, haxen_client: Any, execution_context: Any): - self.haxen_client = haxen_client + def __init__(self, agentfield_client: Any, execution_context: Any): + self.agentfield_client = agentfield_client self.execution_context = execution_context async def set(self, key: str, data: Any, scope: Optional[str] = None) -> None: @@ -287,8 +289,8 @@ def __init__(self, *args, **kwargs): pass class _FakeDIDManager: - def __init__(self, 
haxen_server: str, node: str): - self.haxen_server = haxen_server + def __init__(self, agentfield_server: str, node: str): + self.agentfield_server = agentfield_server self.node_id = node self.registered: Dict[str, Any] = {} @@ -390,17 +392,17 @@ async def _noop_fire_and_forget_update(self, payload: Dict[str, Any]) -> None: events.append(("update", payload)) self.agent._captured_workflow_events = events - monkeypatch.setattr("haxen_sdk.agent.HaxenClient", _haxen_client_factory) - monkeypatch.setattr("haxen_sdk.agent.MemoryClient", _FakeMemoryClient) - monkeypatch.setattr("haxen_sdk.agent.MemoryEventClient", _FakeMemoryEventClient) - monkeypatch.setattr("haxen_sdk.agent.AgentMCP", _FakeAgentMCP) - monkeypatch.setattr("haxen_sdk.agent.MCPManager", _FakeMCPManager) - monkeypatch.setattr("haxen_sdk.agent.MCPClientRegistry", _FakeMCPClientRegistry) + monkeypatch.setattr("agentfield.agent.AgentFieldClient", _agentfield_client_factory) + monkeypatch.setattr("agentfield.agent.MemoryClient", _FakeMemoryClient) + monkeypatch.setattr("agentfield.agent.MemoryEventClient", _FakeMemoryEventClient) + monkeypatch.setattr("agentfield.agent.AgentMCP", _FakeAgentMCP) + monkeypatch.setattr("agentfield.agent.MCPManager", _FakeMCPManager) + monkeypatch.setattr("agentfield.agent.MCPClientRegistry", _FakeMCPClientRegistry) monkeypatch.setattr( - "haxen_sdk.agent.DynamicMCPSkillManager", _FakeDynamicSkillManager + "agentfield.agent.DynamicMCPSkillManager", _FakeDynamicSkillManager ) - monkeypatch.setattr("haxen_sdk.agent.DIDManager", _FakeDIDManager) - monkeypatch.setattr("haxen_sdk.agent.VCGenerator", _FakeVCGenerator) + monkeypatch.setattr("agentfield.agent.DIDManager", _FakeDIDManager) + monkeypatch.setattr("agentfield.agent.VCGenerator", _FakeVCGenerator) monkeypatch.setattr( AgentWorkflow, "notify_call_start", _record_call_start, raising=False ) @@ -419,7 +421,7 @@ async def _noop_fire_and_forget_update(self, payload: Dict[str, Any]) -> None: agent = Agent( node_id=node_id, - 
haxen_server="http://haxen", + agentfield_server="http://agentfield", version="1.2.3", callback_url=callback_url, dev_mode=dev_mode, diff --git a/sdk/python/tests/integration/conftest.py b/sdk/python/tests/integration/conftest.py index 0b33aba0..39aa3a32 100644 --- a/sdk/python/tests/integration/conftest.py +++ b/sdk/python/tests/integration/conftest.py @@ -18,7 +18,7 @@ import uvicorn if TYPE_CHECKING: - from haxen_sdk.agent import Agent + from agentfield.agent import Agent def _find_free_port() -> int: @@ -27,11 +27,11 @@ def _find_free_port() -> int: return sock.getsockname()[1] -def _write_haxen_config(config_path: Path, db_path: Path, kv_path: Path) -> None: +def _write_agentfield_config(config_path: Path, db_path: Path, kv_path: Path) -> None: db_uri = db_path.as_posix() kv_uri = kv_path.as_posix() config_content = f""" -haxen: +agentfield: port: 0 mode: "local" request_timeout: 60s @@ -59,23 +59,25 @@ def _write_haxen_config(config_path: Path, db_path: Path, kv_path: Path) -> None @dataclass -class HaxenServerInfo: +class AgentFieldServerInfo: base_url: str port: int - haxen_home: Path + agentfield_home: Path @pytest.fixture(scope="session") -def haxen_binary(tmp_path_factory: pytest.TempPathFactory) -> Path: +def agentfield_binary(tmp_path_factory: pytest.TempPathFactory) -> Path: repo_root = Path(__file__).resolve().parents[4] - haxen_go_root = repo_root / "apps" / "platform" / "haxen" - if not haxen_go_root.exists(): - pytest.skip("Haxen server sources not available in this checkout") - build_dir = tmp_path_factory.mktemp("haxen-server-bin") - binary_name = "haxen-test-server.exe" if os.name == "nt" else "haxen-test-server" + agentfield_go_root = repo_root / "apps" / "platform" / "agentfield" + if not agentfield_go_root.exists(): + pytest.skip("AgentField server sources not available in this checkout") + build_dir = tmp_path_factory.mktemp("agentfield-server-bin") + binary_name = ( + "agentfield-test-server.exe" if os.name == "nt" else 
"agentfield-test-server" + ) binary_path = build_dir / binary_name - releases_dir = haxen_go_root / "dist" / "releases" + releases_dir = agentfield_go_root / "dist" / "releases" os_part = sys.platform if os_part.startswith("darwin"): os_part = "darwin" @@ -95,11 +97,11 @@ def haxen_binary(tmp_path_factory: pytest.TempPathFactory) -> Path: prebuilt_path: Optional[Path] = None if os_part: - candidate = releases_dir / f"haxen-{os_part}-{arch_part}" + candidate = releases_dir / f"agentfield-{os_part}-{arch_part}" if candidate.exists(): prebuilt_path = candidate elif os_part == "darwin": - universal = releases_dir / "haxen-darwin-arm64" + universal = releases_dir / "agentfield-darwin-arm64" if universal.exists(): prebuilt_path = universal @@ -108,30 +110,30 @@ def haxen_binary(tmp_path_factory: pytest.TempPathFactory) -> Path: binary_path.chmod(0o755) return binary_path - build_cmd = ["go", "build", "-o", str(binary_path), "./cmd/haxen"] + build_cmd = ["go", "build", "-o", str(binary_path), "./cmd/agentfield"] env = os.environ.copy() env["GOCACHE"] = str(tmp_path_factory.mktemp("go-cache")) env["GOMODCACHE"] = str(tmp_path_factory.mktemp("go-modcache")) - subprocess.run(build_cmd, check=True, cwd=haxen_go_root, env=env) + subprocess.run(build_cmd, check=True, cwd=agentfield_go_root, env=env) return binary_path @pytest.fixture -def haxen_server( - tmp_path_factory: pytest.TempPathFactory, haxen_binary: Path -) -> Generator[HaxenServerInfo, None, None]: +def agentfield_server( + tmp_path_factory: pytest.TempPathFactory, agentfield_binary: Path +) -> Generator[AgentFieldServerInfo, None, None]: repo_root = Path(__file__).resolve().parents[4] - haxen_go_root = repo_root / "apps" / "platform" / "haxen" + agentfield_go_root = repo_root / "apps" / "platform" / "agentfield" - haxen_home = Path(tmp_path_factory.mktemp("haxen-home")) - data_dir = haxen_home / "data" + agentfield_home = Path(tmp_path_factory.mktemp("agentfield-home")) + data_dir = agentfield_home / "data" 
data_dir.mkdir(parents=True, exist_ok=True) - db_path = data_dir / "haxen.db" - kv_path = data_dir / "haxen.bolt" - config_path = haxen_home / "haxen.yaml" + db_path = data_dir / "agentfield.db" + kv_path = data_dir / "agentfield.bolt" + config_path = agentfield_home / "agentfield.yaml" - _write_haxen_config(config_path, db_path, kv_path) + _write_agentfield_config(config_path, db_path, kv_path) port = _find_free_port() base_url = f"http://127.0.0.1:{port}" @@ -139,13 +141,13 @@ def haxen_server( env = os.environ.copy() env.update( { - "HAXEN_HOME": str(haxen_home), - "HAXEN_STORAGE_MODE": "local", + "AGENTFIELD_HOME": str(agentfield_home), + "AGENTFIELD_STORAGE_MODE": "local", } ) cmd = [ - str(haxen_binary), + str(agentfield_binary), "server", "--backend-only", "--port", @@ -155,7 +157,7 @@ def haxen_server( "--no-vc-execution", ] - log_path = haxen_home / "haxen.log" + log_path = agentfield_home / "agentfield.log" log_file = log_path.open("w") process = subprocess.Popen( @@ -163,7 +165,7 @@ def haxen_server( stdout=log_file, stderr=subprocess.STDOUT, env=env, - cwd=haxen_go_root, + cwd=agentfield_go_root, ) try: @@ -171,7 +173,7 @@ def haxen_server( deadline = time.time() + 60 while time.time() < deadline: if process.poll() is not None: - raise RuntimeError("Haxen server exited before becoming healthy") + raise RuntimeError("AgentField server exited before becoming healthy") try: response = requests.get(health_url, timeout=1.0) if response.status_code == 200: @@ -180,9 +182,11 @@ def haxen_server( pass time.sleep(0.5) else: - raise RuntimeError("Haxen server did not become healthy in time") + raise RuntimeError("AgentField server did not become healthy in time") - yield HaxenServerInfo(base_url=base_url, port=port, haxen_home=haxen_home) + yield AgentFieldServerInfo( + base_url=base_url, port=port, agentfield_home=agentfield_home + ) finally: if process.poll() is None: diff --git a/sdk/python/tests/integration/test_haxen_end_to_end.py 
b/sdk/python/tests/integration/test_agentfield_end_to_end.py similarity index 72% rename from sdk/python/tests/integration/test_haxen_end_to_end.py rename to sdk/python/tests/integration/test_agentfield_end_to_end.py index a45c7fc8..f5e42961 100644 --- a/sdk/python/tests/integration/test_haxen_end_to_end.py +++ b/sdk/python/tests/integration/test_agentfield_end_to_end.py @@ -4,8 +4,8 @@ import httpx import pytest -from haxen_sdk.agent import Agent -from haxen_sdk.types import AgentStatus +from agentfield.agent import Agent +from agentfield.types import AgentStatus async def _wait_for_node( @@ -18,7 +18,7 @@ async def _wait_for_node( if payload.get("id") == node_id: return payload await asyncio.sleep(0.5) - raise AssertionError(f"Node {node_id} did not appear in Haxen registry") + raise AssertionError(f"Node {node_id} did not appear in AgentField registry") async def _wait_for_status( @@ -40,10 +40,10 @@ async def _wait_for_status( @pytest.mark.integration @pytest.mark.asyncio -async def test_agent_registration_and_status_propagation(haxen_server, run_agent): +async def test_agent_registration_and_status_propagation(agentfield_server, run_agent): agent = Agent( node_id="integration-agent-status", - haxen_server=haxen_server.base_url, + agentfield_server=agentfield_server.base_url, dev_mode=True, callback_url="http://127.0.0.1", ) @@ -54,15 +54,17 @@ async def ping() -> Dict[str, bool]: runtime = run_agent(agent) - await agent.haxen_handler.register_with_haxen_server(runtime.port) - assert agent.haxen_connected is True + await agent.agentfield_handler.register_with_agentfield_server(runtime.port) + assert agent.agentfield_connected is True - async with httpx.AsyncClient(base_url=haxen_server.base_url, timeout=5.0) as client: + async with httpx.AsyncClient( + base_url=agentfield_server.base_url, timeout=5.0 + ) as client: node = await _wait_for_node(client, agent.node_id) assert any(r["id"] == "ping" for r in node.get("reasoners", [])) agent._current_status = 
AgentStatus.READY - await agent.haxen_handler.send_enhanced_heartbeat() + await agent.agentfield_handler.send_enhanced_heartbeat() status = await _wait_for_status(client, agent.node_id, expected="ready") assert status.get("state") == "active" @@ -71,10 +73,10 @@ async def ping() -> Dict[str, bool]: @pytest.mark.integration @pytest.mark.asyncio -async def test_reasoner_execution_roundtrip(haxen_server, run_agent): +async def test_reasoner_execution_roundtrip(agentfield_server, run_agent): agent = Agent( node_id="integration-agent-reasoner", - haxen_server=haxen_server.base_url, + agentfield_server=agentfield_server.base_url, dev_mode=True, callback_url="http://127.0.0.1", ) @@ -85,11 +87,13 @@ async def double(value: int) -> Dict[str, int]: runtime = run_agent(agent) - await agent.haxen_handler.register_with_haxen_server(runtime.port) + await agent.agentfield_handler.register_with_agentfield_server(runtime.port) agent._current_status = AgentStatus.READY - await agent.haxen_handler.send_enhanced_heartbeat() + await agent.agentfield_handler.send_enhanced_heartbeat() - async with httpx.AsyncClient(base_url=haxen_server.base_url, timeout=5.0) as client: + async with httpx.AsyncClient( + base_url=agentfield_server.base_url, timeout=5.0 + ) as client: await _wait_for_node(client, agent.node_id) await _wait_for_status(client, agent.node_id, expected="ready") diff --git a/sdk/python/tests/test_agent_ai.py b/sdk/python/tests/test_agent_ai.py index 9c21eaa3..547dd59b 100644 --- a/sdk/python/tests/test_agent_ai.py +++ b/sdk/python/tests/test_agent_ai.py @@ -7,7 +7,7 @@ import pytest -from haxen_sdk.agent_ai import AgentAI +from agentfield.agent_ai import AgentAI from tests.helpers import StubAgent @@ -74,7 +74,7 @@ def setup_litellm_stub(monkeypatch): monkeypatch.setitem(sys.modules, "litellm", module) monkeypatch.setitem(sys.modules, "litellm.utils", utils_module) - monkeypatch.setattr("haxen_sdk.agent_ai.litellm", module, raising=False) + 
monkeypatch.setattr("agentfield.agent_ai.litellm", module, raising=False) return module @@ -91,7 +91,7 @@ class DummyLimiter: def __init__(self, **kwargs): created.update(kwargs) - monkeypatch.setattr("haxen_sdk.agent_ai.StatelessRateLimiter", DummyLimiter) + monkeypatch.setattr("agentfield.agent_ai.StatelessRateLimiter", DummyLimiter) ai = AgentAI(agent_with_ai) limiter1 = ai._get_rate_limiter() @@ -128,10 +128,10 @@ async def execute_with_retry(self, func): monkeypatch.setattr(ai, "_ensure_model_limits_cached", lambda: asyncio.sleep(0)) monkeypatch.setattr(ai, "_get_rate_limiter", lambda: DummyLimiter()) monkeypatch.setattr( - "haxen_sdk.agent_ai.AgentUtils.detect_input_type", lambda value: "text" + "agentfield.agent_ai.AgentUtils.detect_input_type", lambda value: "text" ) monkeypatch.setattr( - "haxen_sdk.agent_ai.AgentUtils.serialize_result", lambda value: value + "agentfield.agent_ai.AgentUtils.serialize_result", lambda value: value ) result = await ai.ai("Hello world") @@ -167,10 +167,10 @@ async def execute_with_retry(self, func): monkeypatch.setattr(ai, "_ensure_model_limits_cached", lambda: asyncio.sleep(0)) monkeypatch.setattr(ai, "_get_rate_limiter", lambda: limiter) monkeypatch.setattr( - "haxen_sdk.agent_ai.AgentUtils.detect_input_type", lambda value: "text" + "agentfield.agent_ai.AgentUtils.detect_input_type", lambda value: "text" ) monkeypatch.setattr( - "haxen_sdk.agent_ai.AgentUtils.serialize_result", lambda value: value + "agentfield.agent_ai.AgentUtils.serialize_result", lambda value: value ) result = await ai.ai("hello") @@ -189,10 +189,10 @@ async def test_ai_skips_rate_limiter_when_disabled(monkeypatch, agent_with_ai): ai = AgentAI(agent_with_ai) monkeypatch.setattr(ai, "_ensure_model_limits_cached", lambda: asyncio.sleep(0)) monkeypatch.setattr( - "haxen_sdk.agent_ai.AgentUtils.detect_input_type", lambda value: "text" + "agentfield.agent_ai.AgentUtils.detect_input_type", lambda value: "text" ) monkeypatch.setattr( - 
"haxen_sdk.agent_ai.AgentUtils.serialize_result", lambda value: value + "agentfield.agent_ai.AgentUtils.serialize_result", lambda value: value ) result = await ai.ai("hello") diff --git a/sdk/python/tests/test_agent_call.py b/sdk/python/tests/test_agent_call.py index 0234e854..7ab5b9ba 100644 --- a/sdk/python/tests/test_agent_call.py +++ b/sdk/python/tests/test_agent_call.py @@ -2,15 +2,15 @@ import pytest -from haxen_sdk.agent import Agent -from haxen_sdk.agent_registry import set_current_agent, clear_current_agent +from agentfield.agent import Agent +from agentfield.agent_registry import set_current_agent, clear_current_agent @pytest.mark.asyncio async def test_call_local_reasoner_argument_mapping(): agent = object.__new__(Agent) agent.node_id = "node" - agent.haxen_connected = True + agent.agentfield_connected = True agent.dev_mode = False agent.async_config = SimpleNamespace( enable_async_execution=False, fallback_to_sync=False @@ -49,7 +49,7 @@ async def local_reasoner(self, a, b, execution_context=None, extra=None): async def test_call_remote_target_uses_generic_arg_names(): agent = object.__new__(Agent) agent.node_id = "node" - agent.haxen_connected = True + agent.agentfield_connected = True agent.dev_mode = False agent.async_config = SimpleNamespace( enable_async_execution=False, fallback_to_sync=False @@ -78,10 +78,10 @@ async def fake_execute(target, input_data, headers): @pytest.mark.asyncio -async def test_call_raises_when_haxen_disconnected(): +async def test_call_raises_when_agentfield_disconnected(): agent = object.__new__(Agent) agent.node_id = "node" - agent.haxen_connected = False + agent.agentfield_connected = False agent.dev_mode = False agent.async_config = SimpleNamespace( enable_async_execution=False, fallback_to_sync=False diff --git a/sdk/python/tests/test_agent_core.py b/sdk/python/tests/test_agent_core.py index 98581b89..1b82d41c 100644 --- a/sdk/python/tests/test_agent_core.py +++ b/sdk/python/tests/test_agent_core.py @@ -4,9 +4,9 @@ 
import pytest -from haxen_sdk.agent import Agent -from haxen_sdk.agent_registry import get_current_agent_instance -from haxen_sdk.execution_context import ( +from agentfield.agent import Agent +from agentfield.agent_registry import get_current_agent_instance +from agentfield.execution_context import ( ExecutionContext, set_execution_context, reset_execution_context, @@ -16,14 +16,14 @@ def make_agent_stub(): agent = object.__new__(Agent) agent.node_id = "node" - agent.haxen_server = "http://haxen" + agent.agentfield_server = "http://agentfield" agent.dev_mode = False agent.async_config = SimpleNamespace( enable_async_execution=True, fallback_to_sync=True ) agent._async_execution_manager = None agent._current_execution_context = None - agent.client = SimpleNamespace(api_base="http://haxen/api/v1") + agent.client = SimpleNamespace(api_base="http://agentfield/api/v1") return agent @@ -121,7 +121,7 @@ async def text(self_inner): ClientTimeout=DummyTimeout, ClientSession=DummySession ) monkeypatch.setitem(sys.modules, "aiohttp", stub_aiohttp) - monkeypatch.setattr("haxen_sdk.agent.aiohttp", stub_aiohttp) + monkeypatch.setattr("agentfield.agent.aiohttp", stub_aiohttp) context = SimpleNamespace(to_headers=lambda: {"X-Workflow-ID": "wf"}) monkeypatch.setattr(agent, "_get_current_execution_context", lambda: context) @@ -142,6 +142,6 @@ def create_task(self, coro): agent.note("hello", tags=["debug"]) await asyncio.gather(*tasks) - assert called["url"].startswith("http://haxen/api/ui/v1") + assert called["url"].startswith("http://agentfield/api/ui/v1") assert called["json"]["message"] == "hello" assert called["json"]["tags"] == ["debug"] diff --git a/sdk/python/tests/test_agent_haxen.py b/sdk/python/tests/test_agent_field_handler.py similarity index 52% rename from sdk/python/tests/test_agent_haxen.py rename to sdk/python/tests/test_agent_field_handler.py index 5645609a..109ddbf1 100644 --- a/sdk/python/tests/test_agent_haxen.py +++ 
b/sdk/python/tests/test_agent_field_handler.py @@ -3,135 +3,137 @@ import pytest import requests -from haxen_sdk.agent_haxen import AgentHaxen -from tests.helpers import StubAgent, DummyHaxenClient +from agentfield.agent_field_handler import AgentFieldHandler +from tests.helpers import StubAgent, DummyAgentFieldClient @pytest.mark.asyncio -async def test_register_with_haxen_server_sets_base_url(monkeypatch): +async def test_register_with_agentfield_server_sets_base_url(monkeypatch): agent = StubAgent(callback_url="agent.local", base_url=None) - agent.client = DummyHaxenClient() - agent.haxen_connected = False + agent.client = DummyAgentFieldClient() + agent.agentfield_connected = False monkeypatch.setattr( - "haxen_sdk.agent._resolve_callback_url", + "agentfield.agent._resolve_callback_url", lambda url, port: f"http://resolved:{port}", ) monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", + "agentfield.agent._build_callback_candidates", lambda value, port, include_defaults=True: [f"http://resolved:{port}"], ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) - await haxen.register_with_haxen_server(port=8080) + agentfield = AgentFieldHandler(agent) + await agentfield.register_with_agentfield_server(port=8080) assert agent.base_url == "http://resolved:8080" - assert agent.haxen_connected is True + assert agent.agentfield_connected is True assert agent.client.register_calls[0]["base_url"] == "http://resolved:8080" @pytest.mark.asyncio -async def test_register_with_haxen_server_handles_failure(monkeypatch): +async def test_register_with_agentfield_server_handles_failure(monkeypatch): async def failing_register(*args, **kwargs): raise RuntimeError("boom") agent = StubAgent(callback_url=None, base_url="http://already", dev_mode=True) - agent.client = DummyHaxenClient() + agent.client = DummyAgentFieldClient() 
monkeypatch.setattr(agent.client, "register_agent", failing_register) monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", + "agentfield.agent._build_callback_candidates", lambda value, port, include_defaults=True: [], ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) - agent.haxen_connected = True + agentfield = AgentFieldHandler(agent) + agent.agentfield_connected = True - await haxen.register_with_haxen_server(port=9000) - assert agent.haxen_connected is False + await agentfield.register_with_agentfield_server(port=9000) + assert agent.agentfield_connected is False @pytest.mark.asyncio -async def test_register_with_haxen_updates_existing_port(monkeypatch): +async def test_register_with_agentfield_updates_existing_port(monkeypatch): agent = StubAgent(callback_url=None, base_url="http://host:5000") - agent.client = DummyHaxenClient() + agent.client = DummyAgentFieldClient() monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", + "agentfield.agent._build_callback_candidates", lambda value, port, include_defaults=True: [], ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) - await haxen.register_with_haxen_server(port=6000) + agentfield = AgentFieldHandler(agent) + await agentfield.register_with_agentfield_server(port=6000) assert agent.base_url == "http://host:6000" assert agent.client.register_calls[0]["base_url"] == "http://host:6000" @pytest.mark.asyncio -async def test_register_with_haxen_preserves_container_urls(monkeypatch): +async def test_register_with_agentfield_preserves_container_urls(monkeypatch): agent = StubAgent( callback_url=None, base_url="http://service.railway.internal:5000", dev_mode=True, ) - agent.client = DummyHaxenClient() + 
agent.client = DummyAgentFieldClient() monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", + "agentfield.agent._build_callback_candidates", lambda value, port, include_defaults=True: [], ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: True) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: True) - haxen = AgentHaxen(agent) - await haxen.register_with_haxen_server(port=7000) + agentfield = AgentFieldHandler(agent) + await agentfield.register_with_agentfield_server(port=7000) assert agent.base_url == "http://service.railway.internal:5000" @pytest.mark.asyncio -async def test_register_with_haxen_server_resolves_when_no_candidates(monkeypatch): +async def test_register_with_agentfield_server_resolves_when_no_candidates(monkeypatch): agent = StubAgent(callback_url=None, base_url=None) - agent.client = DummyHaxenClient() + agent.client = DummyAgentFieldClient() monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", lambda *a, **k: [] + "agentfield.agent._build_callback_candidates", lambda *a, **k: [] ) monkeypatch.setattr( - "haxen_sdk.agent._resolve_callback_url", + "agentfield.agent._resolve_callback_url", lambda url, port: f"http://resolved:{port}", ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) - await haxen.register_with_haxen_server(port=7100) + agentfield = AgentFieldHandler(agent) + await agentfield.register_with_agentfield_server(port=7100) assert agent.base_url == "http://resolved:7100" - assert agent.haxen_connected is True + assert agent.agentfield_connected is True @pytest.mark.asyncio -async def test_register_with_haxen_server_reorders_candidates(monkeypatch): +async def test_register_with_agentfield_server_reorders_candidates(monkeypatch): agent = StubAgent(callback_url=None, base_url="http://preferred:8000") - agent.client = 
DummyHaxenClient() + agent.client = DummyAgentFieldClient() agent.callback_candidates = ["http://other:8000", "http://preferred:8000"] monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", + "agentfield.agent._build_callback_candidates", lambda value, port, include_defaults=True: agent.callback_candidates, ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) - await haxen.register_with_haxen_server(port=8000) + agentfield = AgentFieldHandler(agent) + await agentfield.register_with_agentfield_server(port=8000) assert agent.callback_candidates[0] == "http://preferred:8000" @pytest.mark.asyncio -async def test_register_with_haxen_server_propagates_request_exception(monkeypatch): +async def test_register_with_agentfield_server_propagates_request_exception( + monkeypatch, +): class DummyResponse: def __init__(self): self.status_code = 503 @@ -144,49 +146,49 @@ async def failing_register(*args, **kwargs): raise exception agent = StubAgent(callback_url=None, base_url="http://already", dev_mode=False) - agent.client = DummyHaxenClient() + agent.client = DummyAgentFieldClient() monkeypatch.setattr(agent.client, "register_agent", failing_register) monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", lambda *a, **k: [] + "agentfield.agent._build_callback_candidates", lambda *a, **k: [] ) monkeypatch.setattr( - "haxen_sdk.agent._resolve_callback_url", lambda url, port: "http://already" + "agentfield.agent._resolve_callback_url", lambda url, port: "http://already" ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) + agentfield = AgentFieldHandler(agent) with pytest.raises(requests.exceptions.RequestException): - await haxen.register_with_haxen_server(port=9001) - assert 
agent.haxen_connected is False + await agentfield.register_with_agentfield_server(port=9001) + assert agent.agentfield_connected is False @pytest.mark.asyncio -async def test_register_with_haxen_server_unsuccessful_response(monkeypatch): +async def test_register_with_agentfield_server_unsuccessful_response(monkeypatch): agent = StubAgent(callback_url=None, base_url="http://host:5000") - agent.client = DummyHaxenClient() + agent.client = DummyAgentFieldClient() async def register_returns_false(*args, **kwargs): return False, None monkeypatch.setattr(agent.client, "register_agent", register_returns_false) monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", lambda *a, **k: [] + "agentfield.agent._build_callback_candidates", lambda *a, **k: [] ) monkeypatch.setattr( - "haxen_sdk.agent._resolve_callback_url", lambda url, port: "http://host:5000" + "agentfield.agent._resolve_callback_url", lambda url, port: "http://host:5000" ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - haxen = AgentHaxen(agent) - await haxen.register_with_haxen_server(port=5000) - assert agent.haxen_connected is False + agentfield = AgentFieldHandler(agent) + await agentfield.register_with_agentfield_server(port=5000) + assert agent.agentfield_connected is False @pytest.mark.asyncio -async def test_register_with_haxen_applies_discovery_payload(monkeypatch): +async def test_register_with_agentfield_applies_discovery_payload(monkeypatch): from tests.helpers import create_test_agent - agent, haxen_client = create_test_agent(monkeypatch) + agent, agentfield_client = create_test_agent(monkeypatch) agent.callback_candidates = [] async def fake_register(node_id, reasoners, skills, base_url, discovery=None): @@ -197,14 +199,14 @@ async def fake_register(node_id, reasoners, skills, base_url, discovery=None): }, } - monkeypatch.setattr(haxen_client, "register_agent", 
fake_register) + monkeypatch.setattr(agentfield_client, "register_agent", fake_register) monkeypatch.setattr( - "haxen_sdk.agent._build_callback_candidates", + "agentfield.agent._build_callback_candidates", lambda value, port, include_defaults=True: [f"http://detected:{port}"], ) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) - await agent.haxen_handler.register_with_haxen_server(port=9000) + await agent.agentfield_handler.register_with_agentfield_server(port=9000) assert agent.base_url == "https://public:9000" assert agent.callback_candidates[0] == "https://public:9000" @@ -213,7 +215,7 @@ async def fake_register(node_id, reasoners, skills, base_url, discovery=None): def test_send_heartbeat(monkeypatch): agent = StubAgent() - haxen = AgentHaxen(agent) + agentfield = AgentFieldHandler(agent) calls = {} @@ -227,62 +229,62 @@ class Dummy: return Dummy() monkeypatch.setattr("requests.post", fake_post) - haxen.send_heartbeat() + agentfield.send_heartbeat() assert calls["url"].endswith(f"/api/v1/nodes/{agent.node_id}/heartbeat") def test_send_heartbeat_warns_on_non_200(monkeypatch): agent = StubAgent() - agent.haxen_connected = True - haxen = AgentHaxen(agent) + agent.agentfield_connected = True + agentfield = AgentFieldHandler(agent) class Dummy: status_code = 500 text = "error" monkeypatch.setattr("requests.post", lambda *a, **k: Dummy()) - haxen.send_heartbeat() + agentfield.send_heartbeat() @pytest.mark.asyncio async def test_enhanced_heartbeat_returns_false_when_disconnected(): agent = StubAgent() - haxen = AgentHaxen(agent) - agent.haxen_connected = False - assert await haxen.send_enhanced_heartbeat() is False + agentfield = AgentFieldHandler(agent) + agent.agentfield_connected = False + assert await agentfield.send_enhanced_heartbeat() is False def test_start_and_stop_heartbeat(monkeypatch): agent = StubAgent() - haxen = AgentHaxen(agent) + 
agentfield = AgentFieldHandler(agent) called = [] def fake_worker(interval): called.append(interval) - monkeypatch.setattr(haxen, "heartbeat_worker", fake_worker) + monkeypatch.setattr(agentfield, "heartbeat_worker", fake_worker) - haxen.start_heartbeat(interval=1) + agentfield.start_heartbeat(interval=1) assert isinstance(agent._heartbeat_thread, threading.Thread) - haxen.stop_heartbeat() + agentfield.stop_heartbeat() @pytest.mark.asyncio async def test_enhanced_heartbeat_and_shutdown(monkeypatch): agent = StubAgent() - agent.client = DummyHaxenClient() + agent.client = DummyAgentFieldClient() agent.mcp_handler = type( "MCP", (), {"_get_mcp_server_health": lambda self: ["mcp"]} )() agent.dev_mode = True - haxen = AgentHaxen(agent) + agentfield = AgentFieldHandler(agent) - success = await haxen.send_enhanced_heartbeat() + success = await agentfield.send_enhanced_heartbeat() assert success is True assert agent.client.heartbeat_calls - success_shutdown = await haxen.notify_shutdown() + success_shutdown = await agentfield.notify_shutdown() assert success_shutdown is True assert agent.client.shutdown_calls == [agent.node_id] @@ -290,48 +292,48 @@ async def test_enhanced_heartbeat_and_shutdown(monkeypatch): @pytest.mark.asyncio async def test_enhanced_heartbeat_failure_returns_false(monkeypatch): agent = StubAgent() - agent.client = DummyHaxenClient() - haxen = AgentHaxen(agent) + agent.client = DummyAgentFieldClient() + agentfield = AgentFieldHandler(agent) async def boom(*args, **kwargs): raise RuntimeError("boom") monkeypatch.setattr(agent.client, "send_enhanced_heartbeat", boom) - agent.haxen_connected = True + agent.agentfield_connected = True agent.dev_mode = True - assert await haxen.send_enhanced_heartbeat() is False + assert await agentfield.send_enhanced_heartbeat() is False @pytest.mark.asyncio async def test_notify_shutdown_failure_returns_false(monkeypatch): agent = StubAgent() - agent.client = DummyHaxenClient() - haxen = AgentHaxen(agent) + agent.client = 
DummyAgentFieldClient() + agentfield = AgentFieldHandler(agent) async def boom(*args, **kwargs): raise RuntimeError("boom") monkeypatch.setattr(agent.client, "notify_graceful_shutdown", boom) - agent.haxen_connected = True + agent.agentfield_connected = True agent.dev_mode = True - assert await haxen.notify_shutdown() is False + assert await agentfield.notify_shutdown() is False def test_send_heartbeat_handles_error(monkeypatch): agent = StubAgent() - agent.haxen_connected = True - haxen = AgentHaxen(agent) + agent.agentfield_connected = True + agentfield = AgentFieldHandler(agent) def boom(*args, **kwargs): raise requests.RequestException("boom") monkeypatch.setattr("requests.post", boom) - haxen.send_heartbeat() + agentfield.send_heartbeat() def test_start_heartbeat_skips_when_disconnected(): agent = StubAgent() - agent.haxen_connected = False - haxen = AgentHaxen(agent) - haxen.start_heartbeat() + agent.agentfield_connected = False + agentfield = AgentFieldHandler(agent) + agentfield.start_heartbeat() assert agent._heartbeat_thread is None diff --git a/sdk/python/tests/test_agent_helpers.py b/sdk/python/tests/test_agent_helpers.py index 8d5ec42b..9b0de310 100644 --- a/sdk/python/tests/test_agent_helpers.py +++ b/sdk/python/tests/test_agent_helpers.py @@ -1,4 +1,4 @@ -from haxen_sdk.agent import _resolve_callback_url, _build_callback_candidates +from agentfield.agent import _resolve_callback_url, _build_callback_candidates def test_resolve_callback_url_prefers_explicit_url(): @@ -23,9 +23,9 @@ def test_resolve_callback_url_handles_container_overrides(monkeypatch): monkeypatch.setenv("RAILWAY_SERVICE_NAME", "my-service") monkeypatch.setenv("RAILWAY_ENVIRONMENT", "prod") - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: True) - monkeypatch.setattr("haxen_sdk.agent._detect_container_ip", lambda: None) - monkeypatch.setattr("haxen_sdk.agent._detect_local_ip", lambda: "10.0.0.5") + 
monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: True) + monkeypatch.setattr("agentfield.agent._detect_container_ip", lambda: None) + monkeypatch.setattr("agentfield.agent._detect_local_ip", lambda: "10.0.0.5") url = _resolve_callback_url(None, port=4500) assert url == "http://my-service.railway.internal:4500" @@ -36,14 +36,14 @@ def test_resolve_callback_url_fallback_to_detected_ips(monkeypatch): monkeypatch.delenv("RAILWAY_SERVICE_NAME", raising=False) monkeypatch.delenv("RAILWAY_ENVIRONMENT", raising=False) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: True) - monkeypatch.setattr("haxen_sdk.agent._detect_container_ip", lambda: "203.0.113.10") + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: True) + monkeypatch.setattr("agentfield.agent._detect_container_ip", lambda: "203.0.113.10") url = _resolve_callback_url(None, port=3200) assert url == "http://203.0.113.10:3200" - monkeypatch.setattr("haxen_sdk.agent._detect_container_ip", lambda: None) - monkeypatch.setattr("haxen_sdk.agent._detect_local_ip", lambda: "192.168.1.2") + monkeypatch.setattr("agentfield.agent._detect_container_ip", lambda: None) + monkeypatch.setattr("agentfield.agent._detect_local_ip", lambda: "192.168.1.2") url = _resolve_callback_url(None, port=3201) assert url == "http://192.168.1.2:3201" @@ -51,18 +51,18 @@ def test_resolve_callback_url_fallback_to_detected_ips(monkeypatch): def test_resolve_callback_url_final_fallback(monkeypatch): monkeypatch.delenv("AGENT_CALLBACK_URL", raising=False) - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) - monkeypatch.setattr("haxen_sdk.agent._detect_local_ip", lambda: None) - monkeypatch.setattr("haxen_sdk.agent.socket.gethostname", lambda: "") + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._detect_local_ip", lambda: None) + 
monkeypatch.setattr("agentfield.agent.socket.gethostname", lambda: "") url = _resolve_callback_url(None, port=8080) assert url in {"http://localhost:8080", "http://host.docker.internal:8080"} def test_build_callback_candidates_includes_defaults(monkeypatch): - monkeypatch.setattr("haxen_sdk.agent._is_running_in_container", lambda: False) - monkeypatch.setattr("haxen_sdk.agent._detect_local_ip", lambda: "192.168.1.50") - monkeypatch.setattr("haxen_sdk.agent.socket.gethostname", lambda: "my-host") + monkeypatch.setattr("agentfield.agent._is_running_in_container", lambda: False) + monkeypatch.setattr("agentfield.agent._detect_local_ip", lambda: "192.168.1.50") + monkeypatch.setattr("agentfield.agent.socket.gethostname", lambda: "my-host") candidates = _build_callback_candidates(None, 9000) diff --git a/sdk/python/tests/test_agent_integration.py b/sdk/python/tests/test_agent_integration.py index f9f13630..aa522041 100644 --- a/sdk/python/tests/test_agent_integration.py +++ b/sdk/python/tests/test_agent_integration.py @@ -4,14 +4,14 @@ import pytest from fastapi import APIRouter -from haxen_sdk.router import AgentRouter +from agentfield.router import AgentRouter from tests.helpers import create_test_agent @pytest.mark.asyncio async def test_agent_reasoner_routing_and_workflow(monkeypatch): - agent, haxen_client = create_test_agent( + agent, agentfield_client = create_test_agent( monkeypatch, callback_url="https://callback.example.com" ) @@ -38,9 +38,9 @@ async def status(): agent.include_router(router, prefix="/ops") - await agent.haxen_handler.register_with_haxen_server(port=9100) - assert haxen_client.register_calls - registration = haxen_client.register_calls[-1] + await agent.agentfield_handler.register_with_agentfield_server(port=9100) + assert agentfield_client.register_calls + registration = agentfield_client.register_calls[-1] assert registration["base_url"] == "https://callback.example.com:9100" assert registration["reasoners"][0]["id"] == "double" assert 
registration["skills"][0]["id"] == "annotate" @@ -165,7 +165,7 @@ async def test_callback_url_precedence_and_env(monkeypatch): explicit_agent, explicit_client = create_test_agent( monkeypatch, callback_url="https://explicit.example.com" ) - await explicit_agent.haxen_handler.register_with_haxen_server(port=9200) + await explicit_agent.agentfield_handler.register_with_agentfield_server(port=9200) assert explicit_agent.base_url == "https://explicit.example.com:9200" assert ( explicit_client.register_calls[-1]["base_url"] @@ -173,6 +173,6 @@ async def test_callback_url_precedence_and_env(monkeypatch): ) env_agent, env_client = create_test_agent(monkeypatch) - await env_agent.haxen_handler.register_with_haxen_server(port=9300) + await env_agent.agentfield_handler.register_with_agentfield_server(port=9300) assert env_agent.base_url == "https://env.example.com:9300" assert env_client.register_calls[-1]["base_url"] == "https://env.example.com:9300" diff --git a/sdk/python/tests/test_agent_networking.py b/sdk/python/tests/test_agent_networking.py index 1ebaa4ec..0c2a1605 100644 --- a/sdk/python/tests/test_agent_networking.py +++ b/sdk/python/tests/test_agent_networking.py @@ -2,8 +2,8 @@ import pytest -from haxen_sdk import agent as agent_mod -from haxen_sdk.agent import ( +from agentfield import agent as agent_mod +from agentfield.agent import ( ExecutionContext, _build_callback_candidates, _normalize_candidate, @@ -96,7 +96,7 @@ def test_build_callback_candidates_prefers_env(monkeypatch): monkeypatch.setattr(agent_mod, "_detect_local_ip", lambda: "10.0.0.5") monkeypatch.setattr(agent_mod.socket, "gethostname", lambda: "agent-host") monkeypatch.setenv("AGENT_CALLBACK_URL", "https://env.example") - monkeypatch.setenv("RAILWAY_SERVICE_NAME", "haxen") + monkeypatch.setenv("RAILWAY_SERVICE_NAME", "agentfield") monkeypatch.setenv("RAILWAY_ENVIRONMENT", "prod") candidates = _build_callback_candidates(None, 9090) diff --git a/sdk/python/tests/test_agent_registry.py 
b/sdk/python/tests/test_agent_registry.py index bb849496..3f5cb673 100644 --- a/sdk/python/tests/test_agent_registry.py +++ b/sdk/python/tests/test_agent_registry.py @@ -1,4 +1,4 @@ -from haxen_sdk.agent_registry import ( +from agentfield.agent_registry import ( set_current_agent, get_current_agent_instance, clear_current_agent, diff --git a/sdk/python/tests/test_agent_server.py b/sdk/python/tests/test_agent_server.py index 70916e32..441ab890 100644 --- a/sdk/python/tests/test_agent_server.py +++ b/sdk/python/tests/test_agent_server.py @@ -5,7 +5,7 @@ from types import SimpleNamespace from fastapi import FastAPI -from haxen_sdk.agent_server import AgentServer +from agentfield.agent_server import AgentServer def make_agent_app(): @@ -29,15 +29,15 @@ def make_agent_app(): }, )() app.dev_mode = False - app.haxen_server = "http://haxen" + app.agentfield_server = "http://agentfield" return app @pytest.mark.asyncio -async def test_setup_haxen_routes_health_endpoint(): +async def test_setup_agentfield_routes_health_endpoint(): app = make_agent_app() server = AgentServer(app) - server.setup_haxen_routes() + server.setup_agentfield_routes() async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://test" @@ -64,7 +64,7 @@ async def test_shutdown_endpoint_triggers_flags(): app = make_agent_app() app.dev_mode = True server = AgentServer(app) - server.setup_haxen_routes() + server.setup_agentfield_routes() async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://test" @@ -99,7 +99,7 @@ def num_threads(self): dummy_psutil = SimpleNamespace(Process=lambda: DummyProcess()) monkeypatch.setitem(sys.modules, "psutil", dummy_psutil) - server.setup_haxen_routes() + server.setup_agentfield_routes() async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://test" @@ -116,7 +116,7 @@ def num_threads(self): async def test_shutdown_immediate_path(monkeypatch): app = make_agent_app() server = 
AgentServer(app) - server.setup_haxen_routes() + server.setup_agentfield_routes() triggered = {} @@ -162,7 +162,7 @@ def get_all_status(self): manager = StubMCPManager() app.mcp_manager = manager server = AgentServer(app) - server.setup_haxen_routes() + server.setup_agentfield_routes() async with httpx.AsyncClient( transport=httpx.ASGITransport(app=app), base_url="http://test" diff --git a/sdk/python/tests/test_agent_utils.py b/sdk/python/tests/test_agent_utils.py index 9cb0501c..50b17bd0 100644 --- a/sdk/python/tests/test_agent_utils.py +++ b/sdk/python/tests/test_agent_utils.py @@ -1,7 +1,7 @@ import pytest from pydantic import BaseModel -from haxen_sdk.agent_utils import AgentUtils +from agentfield.agent_utils import AgentUtils def test_detect_input_type_and_helpers(tmp_path): diff --git a/sdk/python/tests/test_agent_workflow.py b/sdk/python/tests/test_agent_workflow.py index e1c40756..0ab839e8 100644 --- a/sdk/python/tests/test_agent_workflow.py +++ b/sdk/python/tests/test_agent_workflow.py @@ -1,8 +1,8 @@ import pytest -from haxen_sdk.agent_workflow import AgentWorkflow -from haxen_sdk.execution_context import ExecutionContext -from haxen_sdk.agent_registry import set_current_agent, clear_current_agent +from agentfield.agent_workflow import AgentWorkflow +from agentfield.execution_context import ExecutionContext +from agentfield.agent_registry import set_current_agent, clear_current_agent from tests.helpers import StubAgent diff --git a/sdk/python/tests/test_agent_workflow_registration.py b/sdk/python/tests/test_agent_workflow_registration.py index 9288b569..6f2b9d55 100644 --- a/sdk/python/tests/test_agent_workflow_registration.py +++ b/sdk/python/tests/test_agent_workflow_registration.py @@ -1,9 +1,9 @@ import pytest -from haxen_sdk.agent_workflow import AgentWorkflow -from haxen_sdk.agent_registry import set_current_agent, clear_current_agent -from haxen_sdk.decorators import _execute_with_tracking -from haxen_sdk.execution_context import ( +from 
agentfield.agent_workflow import AgentWorkflow +from agentfield.agent_registry import set_current_agent, clear_current_agent +from agentfield.decorators import _execute_with_tracking +from agentfield.execution_context import ( ExecutionContext, set_execution_context, reset_execution_context, @@ -38,7 +38,7 @@ async def _async_request(self, method, url, **kwargs): class DummyAgent: def __init__(self): self.node_id = "agent-node" - self.haxen_server = "http://haxen.local" + self.agentfield_server = "http://agentfield.local" self.client = DummyClient() self.dev_mode = False self._current_execution_context = None diff --git a/sdk/python/tests/test_ai_config.py b/sdk/python/tests/test_ai_config.py index b26fa55f..ae56aaf8 100644 --- a/sdk/python/tests/test_ai_config.py +++ b/sdk/python/tests/test_ai_config.py @@ -1,4 +1,4 @@ -from haxen_sdk.types import AIConfig +from agentfield.types import AIConfig def test_ai_config_defaults_and_to_dict(): diff --git a/sdk/python/tests/test_async_config.py b/sdk/python/tests/test_async_config.py index 93393746..a5919965 100644 --- a/sdk/python/tests/test_async_config.py +++ b/sdk/python/tests/test_async_config.py @@ -1,4 +1,4 @@ -from haxen_sdk.async_config import AsyncConfig +from agentfield.async_config import AsyncConfig def test_async_config_validate_defaults_ok(): @@ -34,12 +34,12 @@ def test_get_poll_interval_for_age(): def test_from_environment_overrides(monkeypatch): - monkeypatch.setenv("HAXEN_ASYNC_MAX_EXECUTION_TIMEOUT", "123") - monkeypatch.setenv("HAXEN_ASYNC_BATCH_SIZE", "7") - monkeypatch.setenv("HAXEN_ASYNC_ENABLE_RESULT_CACHING", "false") - monkeypatch.setenv("HAXEN_ASYNC_ENABLE_EVENT_STREAM", "true") - monkeypatch.setenv("HAXEN_ASYNC_EVENT_STREAM_PATH", "/stream") - monkeypatch.setenv("HAXEN_ASYNC_EVENT_STREAM_RETRY_BACKOFF", "4.5") + monkeypatch.setenv("AGENTFIELD_ASYNC_MAX_EXECUTION_TIMEOUT", "123") + monkeypatch.setenv("AGENTFIELD_ASYNC_BATCH_SIZE", "7") + 
monkeypatch.setenv("AGENTFIELD_ASYNC_ENABLE_RESULT_CACHING", "false") + monkeypatch.setenv("AGENTFIELD_ASYNC_ENABLE_EVENT_STREAM", "true") + monkeypatch.setenv("AGENTFIELD_ASYNC_EVENT_STREAM_PATH", "/stream") + monkeypatch.setenv("AGENTFIELD_ASYNC_EVENT_STREAM_RETRY_BACKOFF", "4.5") cfg = AsyncConfig.from_environment() assert cfg.max_execution_timeout == 123 diff --git a/sdk/python/tests/test_async_execution_manager_paths.py b/sdk/python/tests/test_async_execution_manager_paths.py index 6953e136..ba6ebfa0 100644 --- a/sdk/python/tests/test_async_execution_manager_paths.py +++ b/sdk/python/tests/test_async_execution_manager_paths.py @@ -5,9 +5,9 @@ import pytest -from haxen_sdk.async_config import AsyncConfig -from haxen_sdk.async_execution_manager import AsyncExecutionManager -from haxen_sdk.execution_state import ExecutionState, ExecutionStatus +from agentfield.async_config import AsyncConfig +from agentfield.async_execution_manager import AsyncExecutionManager +from agentfield.execution_state import ExecutionState, ExecutionStatus class _DummyResponse: diff --git a/sdk/python/tests/test_client.py b/sdk/python/tests/test_client.py index f3498b0e..902c6c66 100644 --- a/sdk/python/tests/test_client.py +++ b/sdk/python/tests/test_client.py @@ -6,8 +6,8 @@ import pytest import requests -from haxen_sdk.client import HaxenClient -from haxen_sdk.types import AgentStatus, HeartbeatData +from agentfield.client import AgentFieldClient +from agentfield.types import AgentStatus, HeartbeatData @pytest.fixture(autouse=True) @@ -64,7 +64,7 @@ async def aclose(self): Timeout=lambda *args, **kwargs: None, ) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setitem(sys.modules, "httpx", module) client_mod.httpx = module @@ -100,12 +100,12 @@ def fake_get(url, headers=None, timeout=None): } ) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setattr(client_mod.requests, "post", fake_post) 
monkeypatch.setattr(client_mod.requests, "get", fake_get) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") result = client.execute_sync("node.reasoner", {"payload": 1}) assert result["status"] == "succeeded" @@ -143,12 +143,12 @@ def fake_get(url, headers=None, timeout=None): } ) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setattr(client_mod.requests, "post", fake_post) monkeypatch.setattr(client_mod.requests, "get", fake_get) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") result = client.execute_sync( "node.reasoner", {"payload": 1}, @@ -182,7 +182,7 @@ def on_request(method, url, **kwargs): install_httpx_stub(monkeypatch, on_request=on_request) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") result = asyncio.run(client.execute("node.reasoner", {"payload": 1})) assert result["result"] == {"async": True} @@ -228,7 +228,7 @@ def fake_get(url, headers=None, timeout=None, **kwargs): } ) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod client_mod.httpx = None monkeypatch.setattr( @@ -246,7 +246,7 @@ def fake_session_request(self, method, url, **kwargs): monkeypatch.setattr(requests.Session, "request", fake_session_request) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") result = await client.execute("node.reasoner", {"payload": 1}) assert result["status"] == "succeeded" @@ -265,13 +265,13 @@ def on_request(method, url, **kwargs): install_httpx_stub(monkeypatch, on_request=on_request) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setattr( client_mod.requests, "post", lambda *args, **kwargs: DummyResponse({}, 200) ) - client = HaxenClient(base_url="http://example.com") + client = 
AgentFieldClient(base_url="http://example.com") heartbeat = HeartbeatData(status=AgentStatus.READY, mcp_servers=[], timestamp="now") assert await client.send_enhanced_heartbeat("node", heartbeat) is True @@ -290,11 +290,11 @@ def fake_post(url, json=None, headers=None, timeout=None): urls.append(url) return DummyResp() - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setattr(client_mod.requests, "post", fake_post) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") heartbeat = HeartbeatData(status=AgentStatus.READY, mcp_servers=[], timestamp="now") assert client.send_enhanced_heartbeat_sync("node", heartbeat) is True @@ -329,13 +329,13 @@ def fake_get(url, **kwargs): calls.setdefault("get", []).append(url) return DummyResp({"nodes": ["n1"]}) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setattr(client_mod.requests, "post", fake_post) monkeypatch.setattr(client_mod.requests, "put", fake_put) monkeypatch.setattr(client_mod.requests, "get", fake_get) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") assert client.register_node({"id": "n1"}) == {"ok": True} assert client.update_health("n1", {"status": "up"}) == {"status": "updated"} assert client.get_nodes() == {"nodes": ["n1"]} @@ -355,7 +355,7 @@ def on_request(method, url, **kwargs): install_httpx_stub(monkeypatch, on_request=on_request) - client = HaxenClient(base_url="http://example.com") + client = AgentFieldClient(base_url="http://example.com") ok, payload = await client.register_agent("node-1", [], [], base_url="http://agent") assert ok is True assert payload == {} diff --git a/sdk/python/tests/test_client_lifecycle.py b/sdk/python/tests/test_client_lifecycle.py index 989843e5..65d44f3f 100644 --- a/sdk/python/tests/test_client_lifecycle.py +++ b/sdk/python/tests/test_client_lifecycle.py @@ -2,8 +2,8 
@@ import sys import types -from haxen_sdk.client import HaxenClient -from haxen_sdk.types import AgentStatus, HeartbeatData +from agentfield.client import AgentFieldClient +from agentfield.types import AgentStatus, HeartbeatData class DummyResponse: @@ -28,11 +28,11 @@ def ok_post(url, json, headers, timeout): sent["calls"] += 1 return DummyResponse(200) - import haxen_sdk.client as client_mod + import agentfield.client as client_mod monkeypatch.setattr(client_mod.requests, "post", ok_post) - bc = HaxenClient(base_url="http://example") + bc = AgentFieldClient(base_url="http://example") hb = HeartbeatData(status=AgentStatus.READY, mcp_servers=[], timestamp="now") assert bc.send_enhanced_heartbeat_sync("node1", hb) is True @@ -44,13 +44,13 @@ def bad_post(url, json, headers, timeout): def test_notify_graceful_shutdown_sync(monkeypatch): - import haxen_sdk.client as client_mod + import agentfield.client as client_mod def ok_post(url, headers, timeout): return DummyResponse(200) monkeypatch.setattr(client_mod.requests, "post", ok_post) - bc = HaxenClient(base_url="http://example") + bc = AgentFieldClient(base_url="http://example") assert bc.notify_graceful_shutdown_sync("node1") is True def bad_post(url, headers, timeout): @@ -61,8 +61,8 @@ def bad_post(url, headers, timeout): def test_register_agent_with_status_async(monkeypatch): - # Provide a dummy httpx module that HaxenClient will use - from haxen_sdk import client as client_mod + # Provide a dummy httpx module that AgentFieldClient will use + from agentfield import client as client_mod class DummyAsyncClient: def __init__(self, *args, **kwargs): @@ -90,7 +90,7 @@ async def aclose(self): raising=False, ) - bc = HaxenClient(base_url="http://example") + bc = AgentFieldClient(base_url="http://example") async def run(): return await bc.register_agent_with_status( diff --git a/sdk/python/tests/test_client_unit.py b/sdk/python/tests/test_client_unit.py index e23c2c74..e300b377 100644 --- 
a/sdk/python/tests/test_client_unit.py +++ b/sdk/python/tests/test_client_unit.py @@ -2,7 +2,7 @@ import pytest -from haxen_sdk.client import HaxenClient +from agentfield.client import AgentFieldClient class DummyContext: @@ -22,7 +22,7 @@ def set_event_stream_headers(self, headers): def test_generate_id_prefix_and_uniqueness(): - client = HaxenClient() + client = AgentFieldClient() first = client._generate_id("exec") second = client._generate_id("exec") assert first.startswith("exec_") @@ -32,7 +32,7 @@ def test_generate_id_prefix_and_uniqueness(): def test_get_headers_with_context_merges_workflow_headers(): - client = HaxenClient() + client = AgentFieldClient() client._current_workflow_context = DummyContext({"X-Workflow-ID": "wf-1"}) combined = client._get_headers_with_context({"Authorization": "Bearer token"}) @@ -42,7 +42,7 @@ def test_get_headers_with_context_merges_workflow_headers(): def test_build_event_stream_headers_filters_keys(): - client = HaxenClient() + client = AgentFieldClient() headers = { "Authorization": "Bearer token", "X-Custom": "value", @@ -61,7 +61,7 @@ def test_build_event_stream_headers_filters_keys(): def test_maybe_update_event_stream_headers_uses_context_when_enabled(): - client = HaxenClient() + client = AgentFieldClient() client.async_config.enable_event_stream = True client._async_execution_manager = DummyManager() client._current_workflow_context = DummyContext({"X-Workflow-ID": "wf-ctx"}) @@ -73,7 +73,7 @@ def test_maybe_update_event_stream_headers_uses_context_when_enabled(): def test_maybe_update_event_stream_headers_prefers_source_headers(): - client = HaxenClient() + client = AgentFieldClient() client.async_config.enable_event_stream = True manager = DummyManager() client._async_execution_manager = manager @@ -92,7 +92,7 @@ def test_maybe_update_event_stream_headers_prefers_source_headers(): ], ) def test_maybe_update_event_stream_headers_without_manager(source_headers, expected): - client = HaxenClient() + client = 
AgentFieldClient() client.async_config.enable_event_stream = True client._current_workflow_context = DummyContext({"X-Workflow-ID": "wf-ctx"}) diff --git a/sdk/python/tests/test_connection_manager.py b/sdk/python/tests/test_connection_manager.py index b158f644..c58270cd 100644 --- a/sdk/python/tests/test_connection_manager.py +++ b/sdk/python/tests/test_connection_manager.py @@ -1,5 +1,5 @@ import asyncio -from haxen_sdk.connection_manager import ( +from agentfield.connection_manager import ( ConnectionManager, ConnectionConfig, ConnectionState, @@ -11,7 +11,7 @@ async def register_agent_with_status(self, **kwargs): return False, None # simulate failure so start enters reconnection -class FakeHaxenHandler: +class FakeAgentFieldHandler: async def send_enhanced_heartbeat(self): return True @@ -19,7 +19,7 @@ async def send_enhanced_heartbeat(self): class FakeAgent: def __init__(self): self.client = FakeClient() - self.haxen_handler = FakeHaxenHandler() + self.agentfield_handler = FakeAgentFieldHandler() self.node_id = "n" self.reasoners = [] self.skills = [] diff --git a/sdk/python/tests/test_decorators.py b/sdk/python/tests/test_decorators.py index bcd3cdfc..ffd59ef6 100644 --- a/sdk/python/tests/test_decorators.py +++ b/sdk/python/tests/test_decorators.py @@ -2,9 +2,9 @@ import pytest -from haxen_sdk.decorators import reasoner, _execute_with_tracking -from haxen_sdk.execution_context import ExecutionContext -from haxen_sdk.agent_registry import set_current_agent, clear_current_agent +from agentfield.decorators import reasoner, _execute_with_tracking +from agentfield.execution_context import ExecutionContext +from agentfield.agent_registry import set_current_agent, clear_current_agent from tests.helpers import StubAgent @@ -48,9 +48,9 @@ async def record_start(agent, ctx, payload): async def record_complete(agent, ctx, result, duration_ms, payload): captured.setdefault("complete", []).append((ctx, result)) - 
monkeypatch.setattr("haxen_sdk.decorators._send_workflow_start", record_start) + monkeypatch.setattr("agentfield.decorators._send_workflow_start", record_start) monkeypatch.setattr( - "haxen_sdk.decorators._send_workflow_completion", record_complete + "agentfield.decorators._send_workflow_completion", record_complete ) agent = StubAgent() @@ -92,9 +92,9 @@ async def record_error(agent, ctx, message, duration_ms, payload): calls.setdefault("error", []).append((ctx, message)) monkeypatch.setattr( - "haxen_sdk.decorators._send_workflow_start", lambda *a, **k: asyncio.sleep(0) + "agentfield.decorators._send_workflow_start", lambda *a, **k: asyncio.sleep(0) ) - monkeypatch.setattr("haxen_sdk.decorators._send_workflow_error", record_error) + monkeypatch.setattr("agentfield.decorators._send_workflow_error", record_error) agent = StubAgent() set_current_agent(agent) diff --git a/sdk/python/tests/test_did_manager.py b/sdk/python/tests/test_did_manager.py index 415aa0fb..d7c7ecac 100644 --- a/sdk/python/tests/test_did_manager.py +++ b/sdk/python/tests/test_did_manager.py @@ -1,6 +1,6 @@ import datetime -from haxen_sdk.did_manager import DIDManager, DIDIdentityPackage +from agentfield.did_manager import DIDManager, DIDIdentityPackage def make_package(): @@ -30,12 +30,12 @@ def make_package(): "component_type": "skill", } }, - "haxen_server_id": "haxen-1", + "agentfield_server_id": "agentfield-1", } def test_register_agent_success(monkeypatch): - manager = DIDManager("http://haxen", "node") + manager = DIDManager("http://agentfield", "node") class DummyResponse: status_code = 200 @@ -55,7 +55,7 @@ def json(): def test_register_agent_failure_status(monkeypatch): - manager = DIDManager("http://haxen", "node") + manager = DIDManager("http://agentfield", "node") class DummyResponse: status_code = 500 @@ -68,7 +68,7 @@ class DummyResponse: def test_create_execution_context(monkeypatch): - manager = DIDManager("http://haxen", "node") + manager = DIDManager("http://agentfield", 
"node") package = manager._parse_identity_package(make_package()) assert isinstance(package, DIDIdentityPackage) manager.identity_package = package @@ -88,11 +88,11 @@ def test_create_execution_context(monkeypatch): def test_create_execution_context_missing_identity(): - manager = DIDManager("http://haxen", "node") + manager = DIDManager("http://agentfield", "node") assert manager.create_execution_context("e", "w", "s", "a", "b") is None def test_get_identity_summary_disabled(): - manager = DIDManager("http://haxen", "node") + manager = DIDManager("http://agentfield", "node") summary = manager.get_identity_summary() assert summary["enabled"] is False diff --git a/sdk/python/tests/test_dynamic_skills.py b/sdk/python/tests/test_dynamic_skills.py index a35907d6..47d8762a 100644 --- a/sdk/python/tests/test_dynamic_skills.py +++ b/sdk/python/tests/test_dynamic_skills.py @@ -4,7 +4,7 @@ import pytest from fastapi import FastAPI -from haxen_sdk.dynamic_skills import DynamicMCPSkillManager +from agentfield.dynamic_skills import DynamicMCPSkillManager class StubMCPClient: diff --git a/sdk/python/tests/test_execution_context_core.py b/sdk/python/tests/test_execution_context_core.py index 5e58fd2f..70ffd062 100644 --- a/sdk/python/tests/test_execution_context_core.py +++ b/sdk/python/tests/test_execution_context_core.py @@ -1,6 +1,6 @@ import pytest -from haxen_sdk.execution_context import ( +from agentfield.execution_context import ( ExecutionContext, generate_execution_id, ) diff --git a/sdk/python/tests/test_execution_state.py b/sdk/python/tests/test_execution_state.py index d97e8cf3..e38535a0 100644 --- a/sdk/python/tests/test_execution_state.py +++ b/sdk/python/tests/test_execution_state.py @@ -1,5 +1,5 @@ import time -from haxen_sdk.execution_state import ( +from agentfield.execution_state import ( ExecutionBatch, ExecutionState, ExecutionStatus, diff --git a/sdk/python/tests/test_memory_client_core.py b/sdk/python/tests/test_memory_client_core.py index 
030e3bbf..6e20836b 100644 --- a/sdk/python/tests/test_memory_client_core.py +++ b/sdk/python/tests/test_memory_client_core.py @@ -8,7 +8,7 @@ import pytest import requests -from haxen_sdk.memory import ( +from agentfield.memory import ( GlobalMemoryClient, MemoryClient, MemoryInterface, @@ -31,14 +31,14 @@ def raise_for_status(self): @pytest.fixture(autouse=True) def mute_debug_logs(monkeypatch): - monkeypatch.setattr("haxen_sdk.logger.log_debug", lambda *args, **kwargs: None) + monkeypatch.setattr("agentfield.logger.log_debug", lambda *args, **kwargs: None) @pytest.fixture def memory_client(dummy_headers): context = SimpleNamespace(to_headers=lambda: dict(dummy_headers)) - haxen_client = SimpleNamespace(api_base="http://haxen.local/api/v1") - return MemoryClient(haxen_client, context) + agentfield_client = SimpleNamespace(api_base="http://agentfield.local/api/v1") + return MemoryClient(agentfield_client, context) @pytest.mark.unit @@ -275,11 +275,11 @@ def raise_for_status(self): return OkResponse() context = SimpleNamespace(to_headers=lambda: dict(dummy_headers)) - haxen_client = SimpleNamespace( - api_base="http://haxen.local/api/v1", + agentfield_client = SimpleNamespace( + api_base="http://agentfield.local/api/v1", _async_request=fake_async_request, ) - client = MemoryClient(haxen_client, context) + client = MemoryClient(agentfield_client, context) await client.set("key", {"value": 1}) diff --git a/sdk/python/tests/test_memory_events.py b/sdk/python/tests/test_memory_events.py index 3770c4be..7c10f369 100644 --- a/sdk/python/tests/test_memory_events.py +++ b/sdk/python/tests/test_memory_events.py @@ -6,8 +6,12 @@ import pytest -from haxen_sdk.memory_events import PatternMatcher, EventSubscription, MemoryEventClient -from haxen_sdk.types import MemoryChangeEvent +from agentfield.memory_events import ( + PatternMatcher, + EventSubscription, + MemoryEventClient, +) +from agentfield.types import MemoryChangeEvent def test_pattern_matcher_wildcards(): @@ -35,7 
+39,7 @@ def test_event_subscription_matches_scoped_event(): def test_memory_event_client_subscription_and_unsubscribe(monkeypatch): ctx = SimpleNamespace(to_headers=lambda: {"Authorization": "token"}) - client = MemoryEventClient("http://haxen", ctx) + client = MemoryEventClient("http://agentfield", ctx) callback_called = asyncio.Event() @@ -55,7 +59,7 @@ async def callback(event): @pytest.mark.asyncio async def test_memory_event_client_history(monkeypatch): ctx = SimpleNamespace(to_headers=lambda: {"Authorization": "token"}) - client = MemoryEventClient("http://haxen", ctx) + client = MemoryEventClient("http://agentfield", ctx) class DummyResponse: def __init__(self): @@ -97,7 +101,7 @@ async def get(self, url, params=None, headers=None, timeout=None): @pytest.mark.asyncio async def test_memory_event_client_connect_builds_ws_url(monkeypatch): ctx = SimpleNamespace(to_headers=lambda: {"Authorization": "token"}) - client = MemoryEventClient("http://haxen", ctx) + client = MemoryEventClient("http://agentfield", ctx) record = {} listener_called = {} @@ -114,7 +118,7 @@ async def fake_connect(url, additional_headers=None): async def fake_listen(self): listener_called["run"] = True - monkeypatch.setattr("haxen_sdk.memory_events.websockets.connect", fake_connect) + monkeypatch.setattr("agentfield.memory_events.websockets.connect", fake_connect) monkeypatch.setattr(MemoryEventClient, "_listen", fake_listen, raising=False) await client.connect( @@ -122,7 +126,7 @@ async def fake_listen(self): ) await asyncio.sleep(0) - assert record["url"].startswith("ws://haxen") + assert record["url"].startswith("ws://agentfield") assert "patterns=cart.*,order.*" in record["url"] assert "scope=session" in record["url"] assert "scope_id=abc" in record["url"] @@ -133,7 +137,7 @@ async def fake_listen(self): @pytest.mark.asyncio async def test_memory_event_client_listen_dispatches(monkeypatch): ctx = SimpleNamespace(to_headers=lambda: {}) - client = MemoryEventClient("http://haxen", ctx) + 
client = MemoryEventClient("http://agentfield", ctx) received = [] @@ -189,7 +193,7 @@ def capture_task(coro): @pytest.mark.asyncio async def test_memory_event_client_handle_reconnect(monkeypatch): ctx = SimpleNamespace(to_headers=lambda: {}) - client = MemoryEventClient("http://haxen", ctx) + client = MemoryEventClient("http://agentfield", ctx) client._max_reconnect_attempts = 2 sleeps = [] @@ -217,7 +221,7 @@ async def fake_connect(*args, **kwargs): def test_on_change_decorator_marks_wrapper(): ctx = SimpleNamespace(to_headers=lambda: {}) - client = MemoryEventClient("http://haxen", ctx) + client = MemoryEventClient("http://agentfield", ctx) client.websocket = SimpleNamespace(open=True) @client.on_change("foo.*") diff --git a/sdk/python/tests/test_memory_flow_core.py b/sdk/python/tests/test_memory_flow_core.py index aff4e64d..c64ff33d 100644 --- a/sdk/python/tests/test_memory_flow_core.py +++ b/sdk/python/tests/test_memory_flow_core.py @@ -5,7 +5,7 @@ import pytest import requests -from haxen_sdk.memory import ( +from agentfield.memory import ( GlobalMemoryClient, MemoryClient, MemoryInterface, @@ -88,11 +88,11 @@ async def get(self, url, params=None, headers=None, timeout=None): # type: igno monkeypatch.setattr(requests, "post", fake_post) monkeypatch.setattr(httpx, "AsyncClient", lambda *args, **kwargs: AsyncClientStub()) - monkeypatch.setattr("haxen_sdk.logger.log_debug", lambda *args, **kwargs: None) + monkeypatch.setattr("agentfield.logger.log_debug", lambda *args, **kwargs: None) context = SimpleNamespace(to_headers=lambda: dict(dummy_headers)) - haxen_client = SimpleNamespace(api_base="http://haxen.local/api/v1") - memory_client = MemoryClient(haxen_client, context) + agentfield_client = SimpleNamespace(api_base="http://agentfield.local/api/v1") + memory_client = MemoryClient(agentfield_client, context) interface = MemoryInterface(memory_client, SimpleNamespace()) # type: ignore[arg-type] # Default scope round-trip @@ -116,11 +116,11 @@ async def get(self, 
url, params=None, headers=None, timeout=None): # type: igno @pytest.mark.functional @pytest.mark.asyncio -async def test_memory_client_uses_haxen_async_request(dummy_headers): +async def test_memory_client_uses_agentfield_async_request(dummy_headers): calls: list[tuple[str, str, dict]] = [] - class DummyHaxenClient: - api_base = "http://haxen.local/api/v1" + class DummyAgentFieldClient: + api_base = "http://agentfield.local/api/v1" async def _async_request(self, method, url, **kwargs): calls.append((method, url, kwargs)) @@ -129,7 +129,7 @@ async def _async_request(self, method, url, **kwargs): return DummyAsyncResponse(200, {"ok": True}) context = SimpleNamespace(to_headers=lambda: dict(dummy_headers)) - memory_client = MemoryClient(DummyHaxenClient(), context) + memory_client = MemoryClient(DummyAgentFieldClient(), context) await memory_client.set("answer", 42) value = await memory_client.get("answer") diff --git a/sdk/python/tests/test_multimodal.py b/sdk/python/tests/test_multimodal.py index 27142b75..968f3924 100644 --- a/sdk/python/tests/test_multimodal.py +++ b/sdk/python/tests/test_multimodal.py @@ -1,4 +1,4 @@ -from haxen_sdk.multimodal import image_from_file, audio_from_file, file_from_path +from agentfield.multimodal import image_from_file, audio_from_file, file_from_path def test_image_from_file_and_audio_from_file(tmp_path): diff --git a/sdk/python/tests/test_multimodal_response.py b/sdk/python/tests/test_multimodal_response.py index 32758352..53e4fc87 100644 --- a/sdk/python/tests/test_multimodal_response.py +++ b/sdk/python/tests/test_multimodal_response.py @@ -1,5 +1,5 @@ import base64 -from haxen_sdk.multimodal_response import AudioOutput, ImageOutput, FileOutput +from agentfield.multimodal_response import AudioOutput, ImageOutput, FileOutput def test_audio_output_save_and_get_bytes(tmp_path): diff --git a/sdk/python/tests/test_pydantic_utils.py b/sdk/python/tests/test_pydantic_utils.py index 815c5ea5..af98bd1f 100644 --- 
a/sdk/python/tests/test_pydantic_utils.py +++ b/sdk/python/tests/test_pydantic_utils.py @@ -1,5 +1,5 @@ from pydantic import BaseModel -from haxen_sdk.pydantic_utils import ( +from agentfield.pydantic_utils import ( is_pydantic_model, is_optional_type, get_optional_inner_type, diff --git a/sdk/python/tests/test_rate_limiter_core.py b/sdk/python/tests/test_rate_limiter_core.py index 0490e13c..e77fa35d 100644 --- a/sdk/python/tests/test_rate_limiter_core.py +++ b/sdk/python/tests/test_rate_limiter_core.py @@ -4,7 +4,7 @@ import pytest -from haxen_sdk.rate_limiter import RateLimitError, StatelessRateLimiter +from agentfield.rate_limiter import RateLimitError, StatelessRateLimiter class DummyHTTPError(Exception): @@ -49,7 +49,7 @@ async def test_execute_with_retry_eventual_success(monkeypatch): async def fake_sleep(delay): attempts["sleeps"].append(delay) - monkeypatch.setattr("haxen_sdk.rate_limiter.asyncio.sleep", fake_sleep) + monkeypatch.setattr("agentfield.rate_limiter.asyncio.sleep", fake_sleep) async def flaky_call(): attempts["count"] += 1 @@ -73,7 +73,7 @@ async def test_execute_with_retry_gives_up(monkeypatch): async def fake_sleep(delay): pass - monkeypatch.setattr("haxen_sdk.rate_limiter.asyncio.sleep", fake_sleep) + monkeypatch.setattr("agentfield.rate_limiter.asyncio.sleep", fake_sleep) async def always_fail(): raise DummyHTTPError() @@ -168,7 +168,7 @@ async def test_circuit_breaker_blocks_and_recovers(monkeypatch): async def fake_sleep(delay): return None - monkeypatch.setattr("haxen_sdk.rate_limiter.asyncio.sleep", fake_sleep) + monkeypatch.setattr("agentfield.rate_limiter.asyncio.sleep", fake_sleep) class Clock: def __init__(self, value: float): @@ -181,7 +181,7 @@ def advance(self, seconds: float) -> None: self.value += seconds clock = Clock(100.0) - monkeypatch.setattr("haxen_sdk.rate_limiter.time.time", clock.time) + monkeypatch.setattr("agentfield.rate_limiter.time.time", clock.time) async def always_limit(): raise DummyHTTPError() diff --git 
a/sdk/python/tests/test_result_cache.py b/sdk/python/tests/test_result_cache.py index d2cf6139..be41544f 100644 --- a/sdk/python/tests/test_result_cache.py +++ b/sdk/python/tests/test_result_cache.py @@ -1,6 +1,6 @@ import time -from haxen_sdk.result_cache import ResultCache -from haxen_sdk.async_config import AsyncConfig +from agentfield.result_cache import ResultCache +from agentfield.async_config import AsyncConfig import asyncio diff --git a/sdk/python/tests/test_router.py b/sdk/python/tests/test_router.py index d5cb0b49..4028ac3a 100644 --- a/sdk/python/tests/test_router.py +++ b/sdk/python/tests/test_router.py @@ -1,6 +1,6 @@ import pytest -from haxen_sdk.router import AgentRouter +from agentfield.router import AgentRouter class DummyAgent: diff --git a/sdk/python/tests/test_status_utils.py b/sdk/python/tests/test_status_utils.py index ce14850d..fc286993 100644 --- a/sdk/python/tests/test_status_utils.py +++ b/sdk/python/tests/test_status_utils.py @@ -1,4 +1,4 @@ -from haxen_sdk.status import normalize_status, is_terminal, TERMINAL_STATUSES +from agentfield.status import normalize_status, is_terminal, TERMINAL_STATUSES def test_status_normalization_all_values(): diff --git a/sdk/python/tests/test_types.py b/sdk/python/tests/test_types.py index a690940b..d8ae0aba 100644 --- a/sdk/python/tests/test_types.py +++ b/sdk/python/tests/test_types.py @@ -1,4 +1,4 @@ -from haxen_sdk.types import ( +from agentfield.types import ( ExecutionHeaders, AgentStatus, HeartbeatData, diff --git a/sdk/python/tests/test_utils.py b/sdk/python/tests/test_utils.py index 7b0c133d..19836983 100644 --- a/sdk/python/tests/test_utils.py +++ b/sdk/python/tests/test_utils.py @@ -1,7 +1,7 @@ import socket import pytest -from haxen_sdk.utils import get_free_port +from agentfield.utils import get_free_port def test_get_free_port_iterates_until_success(monkeypatch): diff --git a/sdk/python/tests/test_vc_generator.py b/sdk/python/tests/test_vc_generator.py index 2ab99031..64ed60af 100644 --- 
a/sdk/python/tests/test_vc_generator.py +++ b/sdk/python/tests/test_vc_generator.py @@ -2,7 +2,7 @@ from datetime import datetime from types import SimpleNamespace -from haxen_sdk.vc_generator import VCGenerator +from agentfield.vc_generator import VCGenerator def make_execution_context(): @@ -18,7 +18,7 @@ def make_execution_context(): def test_generate_execution_vc_success(monkeypatch): - generator = VCGenerator("http://haxen") + generator = VCGenerator("http://agentfield") generator.set_enabled(True) payload = { @@ -41,7 +41,7 @@ def fake_post(url, json=None, timeout=None): assert url.endswith("/execution/vc") return SimpleNamespace(status_code=200, json=lambda: payload) - monkeypatch.setattr("haxen_sdk.vc_generator.requests.post", fake_post) + monkeypatch.setattr("agentfield.vc_generator.requests.post", fake_post) vc = generator.generate_execution_vc( make_execution_context(), {"x": 1}, {"y": 2}, status="succeeded" @@ -50,7 +50,7 @@ def fake_post(url, json=None, timeout=None): def test_generate_execution_vc_disabled(): - generator = VCGenerator("http://haxen") + generator = VCGenerator("http://agentfield") generator.set_enabled(False) assert ( generator.generate_execution_vc( @@ -61,18 +61,18 @@ def test_generate_execution_vc_disabled(): def test_verify_vc(monkeypatch): - generator = VCGenerator("http://haxen") + generator = VCGenerator("http://agentfield") def fake_post(url, json=None, timeout=None): return SimpleNamespace(status_code=200, json=lambda: {"valid": True}) - monkeypatch.setattr("haxen_sdk.vc_generator.requests.post", fake_post) + monkeypatch.setattr("agentfield.vc_generator.requests.post", fake_post) result = generator.verify_vc({"proof": {}}) assert result == {"valid": True} def test_create_workflow_vc(monkeypatch): - generator = VCGenerator("http://haxen") + generator = VCGenerator("http://agentfield") payload = { "workflow_id": "wf-1", "session_id": "sess-1", @@ -88,24 +88,24 @@ def test_create_workflow_vc(monkeypatch): def fake_post(url, 
json=None, timeout=None): return SimpleNamespace(status_code=200, json=lambda: payload) - monkeypatch.setattr("haxen_sdk.vc_generator.requests.post", fake_post) + monkeypatch.setattr("agentfield.vc_generator.requests.post", fake_post) vc = generator.create_workflow_vc("wf-1", "sess-1", ["vc-1"]) assert vc.workflow_vc_id == "wvc-1" def test_get_workflow_vc_chain(monkeypatch): - generator = VCGenerator("http://haxen") + generator = VCGenerator("http://agentfield") def fake_get(url, timeout=None): return SimpleNamespace(status_code=200, json=lambda: {"chain": ["vc-1"]}) - monkeypatch.setattr("haxen_sdk.vc_generator.requests.get", fake_get) + monkeypatch.setattr("agentfield.vc_generator.requests.get", fake_get) chain = generator.get_workflow_vc_chain("wf-1") assert chain == {"chain": ["vc-1"]} def test_serialize_data_for_json_base64(): - generator = VCGenerator("http://haxen") + generator = VCGenerator("http://agentfield") generator.set_enabled(True) encoded = generator._serialize_data_for_json({"a": 1}) decoded = base64.b64decode(encoded.encode()).decode()