diff --git a/.claude/mastra-llms.txt b/.claude/mastra-llms.txt new file mode 100644 index 000000000..8f298c4f4 --- /dev/null +++ b/.claude/mastra-llms.txt @@ -0,0 +1,380 @@ +# Mastra + +> Mastra is an open-source TypeScript agent framework designed to provide the essential primitives for building AI applications. It enables developers to create AI agents with memory and tool-calling capabilities, implement deterministic LLM workflows, and leverage RAG for knowledge integration. With features like model routing, workflow graphs, and automated evals, Mastra provides a complete toolkit for developing, testing, and deploying AI applications. + +This documentation covers everything from getting started to advanced features, APIs, and best practices for working with Mastra's agent-based architecture. + +The documentation is organized into key sections: +- **docs**: Core documentation covering concepts, features, and implementation details +- **examples**: Practical examples and use cases demonstrating Mastra's capabilities +- **showcase**: A showcase of applications built using Mastra + +Each section contains detailed markdown files that provide comprehensive information about Mastra's features and how to use them effectively. + + +## EN - docs +- [adding-voice](https://mastra.ai/en/docs/agents/adding-voice) +- [Using Agent Memory | Agents | Mastra Docs](https://mastra.ai/en/docs/agents/agent-memory): Documentation on how agents in Mastra use memory to store conversation history and contextual information. +- [Dynamic Agents](https://mastra.ai/en/docs/agents/dynamic-agents): Dynamically configure your agents instruction, model, tools, and memory using runtime context. +- [Agent Overview | Agent Documentation | Mastra](https://mastra.ai/en/docs/agents/overview): Overview of agents in Mastra, detailing their capabilities and how they interact with tools, workflows, and external systems. 
+- [Runtime context | Agents | Mastra Docs](https://mastra.ai/en/docs/agents/runtime-variables): Learn how to use Mastras dependency injection system to provide runtime configuration to agents and tools. +- [Using Tools with Agents | Agents | Mastra Docs](https://mastra.ai/en/docs/agents/using-tools-and-mcp): Learn how to create tools, add them to Mastra agents, and integrate tools from MCP servers. +- [Discord Community and Bot | Documentation | Mastra](https://mastra.ai/en/docs/community/discord): Information about the Mastra Discord community and MCP bot. +- [Licensing](https://mastra.ai/en/docs/community/licensing): Mastra License +- [Amazon EC2](https://mastra.ai/en/docs/deployment/cloud-providers/amazon-ec2): Deploy your Mastra applications to Amazon EC2. +- [AWS Lambda](https://mastra.ai/en/docs/deployment/cloud-providers/aws-lambda): Deploy your Mastra applications to AWS Lambda using Docker containers and the AWS Lambda Web Adapter. +- [Azure App Services](https://mastra.ai/en/docs/deployment/cloud-providers/azure-app-services): Deploy your Mastra applications to Azure App Services. +- [Digital Ocean](https://mastra.ai/en/docs/deployment/cloud-providers/digital-ocean): Deploy your Mastra applications to Digital Ocean. +- [Cloud Providers](https://mastra.ai/en/docs/deployment/cloud-providers): Deploy your Mastra applications to popular cloud providers. 
+- [Deployment Overview](https://mastra.ai/en/docs/deployment/overview): Learn about different deployment options for your Mastra applications +- [Deploy A Mastra Server](https://mastra.ai/en/docs/deployment/server-deployment): Deploy a Mastra server with middleware and other options +- [Cloudflare Deployer](https://mastra.ai/en/docs/deployment/serverless-platforms/cloudflare-deployer): Learn how to deploy a Mastra application to Cloudflare using the Mastra CloudflareDeployer +- [Serverless Deployment](https://mastra.ai/en/docs/deployment/serverless-platforms): Build and deploy Mastra applications using platform-specific deployers or standard HTTP servers +- [Netlify Deployer](https://mastra.ai/en/docs/deployment/serverless-platforms/netlify-deployer): Learn how to deploy a Mastra application to Netlify using the Mastra NetlifyDeployer +- [Vercel Deployer](https://mastra.ai/en/docs/deployment/serverless-platforms/vercel-deployer): Learn how to deploy a Mastra application to Vercel using the Mastra VercelDeployer +- [Deploying Mastra with a Web Framework](https://mastra.ai/en/docs/deployment/web-framework): Learn how Mastra can be deployed when integrated with a Web Framework +- [Create your own Eval](https://mastra.ai/en/docs/evals/custom-eval): Mastra allows you to create your own evals; here is how. +- [Overview](https://mastra.ai/en/docs/evals/overview): Understanding how to evaluate and measure AI agent quality using Mastra evals. +- [Running in CI](https://mastra.ai/en/docs/evals/running-in-ci): Learn how to run Mastra evals in your CI/CD pipeline to monitor agent quality over time. +- [Textual Evals](https://mastra.ai/en/docs/evals/textual-evals): Understand how Mastra uses LLM-as-judge methodology to evaluate text quality. 
+- [Using with Vercel AI SDK](https://mastra.ai/en/docs/frameworks/agentic-uis/ai-sdk): Learn how Mastra leverages the Vercel AI SDK library and how you can leverage it further with Mastra +- [Using with Assistant UI](https://mastra.ai/en/docs/frameworks/agentic-uis/assistant-ui): Learn how to integrate Assistant UI with Mastra +- [Using with CopilotKit](https://mastra.ai/en/docs/frameworks/agentic-uis/copilotkit): Learn how Mastra leverages the CopilotKits AGUI library and how you can leverage it to build user experiences +- [Using with OpenRouter](https://mastra.ai/en/docs/frameworks/agentic-uis/openrouter): Learn how to integrate OpenRouter with Mastra +- [Getting started with Mastra and Express | Mastra Guides](https://mastra.ai/en/docs/frameworks/servers/express): A step-by-step guide to integrating Mastra with an Express backend. +- [Getting Started with Mastra and Astro | Mastra Guides](https://mastra.ai/en/docs/frameworks/web-frameworks/astro): A step-by-step guide to integrating Mastra with Astro. +- [Getting Started with Mastra and Next.js | Mastra Guides](https://mastra.ai/en/docs/frameworks/web-frameworks/next-js): A step-by-step guide to integrating Mastra with Next.js. +- [Getting Started with Mastra and SvelteKit | Mastra Guides](https://mastra.ai/en/docs/frameworks/web-frameworks/sveltekit): A step-by-step guide to integrating Mastra with SvelteKit. +- [Getting Started with Mastra and Vite/React | Mastra Guides](https://mastra.ai/en/docs/frameworks/web-frameworks/vite-react): A step-by-step guide to integrating Mastra with Vite and React. +- [Installing Mastra | Getting Started | Mastra Docs](https://mastra.ai/en/docs/getting-started/installation): Guide on installing Mastra and setting up the necessary prerequisites for running it with various LLM providers. 
+- [Using with Cursor/Windsurf | Getting Started | Mastra Docs](https://mastra.ai/en/docs/getting-started/mcp-docs-server): Learn how to use the Mastra MCP documentation server in your IDE to turn it into an agentic Mastra expert. +- [model-capability](https://mastra.ai/en/docs/getting-started/model-capability) +- [Model Providers | Getting Started | Mastra Docs](https://mastra.ai/en/docs/getting-started/model-providers): Learn how to configure and use different model providers with Mastra. +- [Local Project Structure | Getting Started | Mastra Docs](https://mastra.ai/en/docs/getting-started/project-structure): Guide on organizing folders and files in Mastra, including best practices and recommended structures. +- [Introduction | Mastra Docs](https://mastra.ai/en/docs): Mastra is a TypeScript agent framework. It helps you build AI applications and features quickly. It gives you the set of primitives you need: workflows, agents, RAG, integrations, syncs and evals. +- [Understanding the Mastra Cloud Dashboard](https://mastra.ai/en/docs/mastra-cloud/dashboard): Details of each feature available in Mastra Cloud +- [Observability in Mastra Cloud](https://mastra.ai/en/docs/mastra-cloud/observability): Monitoring and debugging tools for Mastra Cloud deployments +- [Mastra Cloud](https://mastra.ai/en/docs/mastra-cloud/overview): Deployment and monitoring service for Mastra applications +- [Setting Up a Project](https://mastra.ai/en/docs/mastra-cloud/setting-up): Configuration steps for Mastra Cloud projects +- [memory-processors](https://mastra.ai/en/docs/memory/memory-processors) +- [overview](https://mastra.ai/en/docs/memory/overview) +- [semantic-recall](https://mastra.ai/en/docs/memory/semantic-recall) +- [working-memory](https://mastra.ai/en/docs/memory/working-memory) +- [complex-task-execution](https://mastra.ai/en/docs/networks-vnext/complex-task-execution) +- [Handling Complex LLM Operations | Networks | Mastra](https://mastra.ai/en/docs/networks-vnext/overview): 
Networks in Mastra help you execute individual or multiple Mastra primitives in a non-deterministic way using a single API. +- [single-task-execution](https://mastra.ai/en/docs/networks-vnext/single-task-execution) +- [Logging | Mastra Observability Documentation](https://mastra.ai/en/docs/observability/logging): Documentation on effective logging in Mastra, crucial for understanding application behavior and improving AI accuracy. +- [Next.js Tracing | Mastra Observability Documentation](https://mastra.ai/en/docs/observability/nextjs-tracing): Set up OpenTelemetry tracing for Next.js applications +- [Tracing | Mastra Observability Documentation](https://mastra.ai/en/docs/observability/tracing): Set up OpenTelemetry tracing for Mastra applications +- [Chunking and Embedding Documents | RAG | Mastra Docs](https://mastra.ai/en/docs/rag/chunking-and-embedding): Guide on chunking and embedding documents in Mastra for efficient processing and retrieval. +- [RAG (Retrieval-Augmented Generation) in Mastra | Mastra Docs](https://mastra.ai/en/docs/rag/overview): Overview of Retrieval-Augmented Generation (RAG) in Mastra, detailing its capabilities for enhancing LLM outputs with relevant context. +- [Retrieval, Semantic Search, Reranking | RAG | Mastra Docs](https://mastra.ai/en/docs/rag/retrieval): Guide on retrieval processes in Mastras RAG systems, including semantic search, filtering, and re-ranking. +- [Storing Embeddings in A Vector Database | Mastra Docs](https://mastra.ai/en/docs/rag/vector-databases): Guide on vector storage options in Mastra, including embedded and dedicated vector databases for similarity search. +- [Custom API Routes](https://mastra.ai/en/docs/server-db/custom-api-routes): Expose additional HTTP endpoints from your Mastra server. +- [Inspecting agents and workflows with mastra dev | Mastra Local Dev Docs](https://mastra.ai/en/docs/server-db/local-dev-playground): Documentation for the Mastra local development environment for Mastra applications. 
+- [MastraClient](https://mastra.ai/en/docs/server-db/mastra-client): Learn how to set up and use the Mastra Client SDK +- [Middleware](https://mastra.ai/en/docs/server-db/middleware): Apply custom middleware functions to intercept requests. +- [Create A Mastra Production Server](https://mastra.ai/en/docs/server-db/production-server): Learn how to configure and deploy a production-ready Mastra server with custom settings for APIs, CORS, and more +- [Storage in Mastra | Mastra Docs](https://mastra.ai/en/docs/server-db/storage): Overview of Mastra's storage system and data persistence capabilities. +- [Advanced Tool Usage | Tools & MCP | Mastra Docs](https://mastra.ai/en/docs/tools-mcp/advanced-usage): This page covers advanced features for Mastra tools, including abort signals and compatibility with the Vercel AI SDK tool format. +- [Dynamic Tool Context | Tools & MCP | Mastra Docs](https://mastra.ai/en/docs/tools-mcp/dynamic-context): Learn how to use Mastra's RuntimeContext to provide dynamic, request-specific configuration to tools. +- [MCP Overview | Tools & MCP | Mastra Docs](https://mastra.ai/en/docs/tools-mcp/mcp-overview): Learn about the Model Context Protocol (MCP), how to use third-party tools via MCPClient, connect to registries, and share your own tools using MCPServer. +- [Tools Overview | Tools & MCP | Mastra Docs](https://mastra.ai/en/docs/tools-mcp/overview): Understand what tools are in Mastra, how to add them to agents, and best practices for designing effective tools. +- [Voice in Mastra | Mastra Docs](https://mastra.ai/en/docs/voice/overview): Overview of voice capabilities in Mastra, including text-to-speech, speech-to-text, and real-time speech-to-speech interactions. +- [Speech-to-Speech Capabilities in Mastra | Mastra Docs](https://mastra.ai/en/docs/voice/speech-to-speech): Overview of speech-to-speech capabilities in Mastra, including real-time interactions and event-driven architecture. 
+- [Speech-to-Text (STT) in Mastra | Mastra Docs](https://mastra.ai/en/docs/voice/speech-to-text): Overview of Speech-to-Text capabilities in Mastra, including configuration, usage, and integration with voice providers. +- [Text-to-Speech (TTS) in Mastra | Mastra Docs](https://mastra.ai/en/docs/voice/text-to-speech): Overview of Text-to-Speech capabilities in Mastra, including configuration, usage, and integration with voice providers. +- [Branching, Merging, Conditions | Workflows | Mastra Docs](https://mastra.ai/en/docs/workflows/control-flow): Control flow in Mastra workflows allows you to manage branching, merging, and conditions to construct workflows that meet your logic requirements. +- [Inngest Workflows | Workflows | Mastra Docs](https://mastra.ai/en/docs/workflows/inngest-workflow): Inngest workflow allows you to run Mastra workflows with Inngest +- [Input Data Mapping with Workflow | Mastra Docs](https://mastra.ai/en/docs/workflows/input-data-mapping): Learn how to use workflow input mapping to create more dynamic data flows in your Mastra workflows. +- [Handling Complex LLM Operations | Workflows | Mastra](https://mastra.ai/en/docs/workflows/overview): Workflows in Mastra help you orchestrate complex sequences of operations with features like branching, parallel execution, resource suspension, and more. +- [Pausing Execution | Mastra Docs](https://mastra.ai/en/docs/workflows/pausing-execution): Pausing execution in Mastra workflows allows you to pause execution while waiting for external input or resources via .sleep(), .sleepUntil() and .waitForEvent(). +- [Suspend & Resume Workflows | Human-in-the-Loop | Mastra Docs](https://mastra.ai/en/docs/workflows/suspend-and-resume): Suspend and resume in Mastra workflows allows you to pause execution while waiting for external input or resources. 
+- [Using Workflows with Agents and Tools | Workflows | Mastra Docs](https://mastra.ai/en/docs/workflows/using-with-agents-and-tools): Steps in Mastra workflows provide a structured way to manage operations by defining inputs, outputs, and execution logic. +- [Branching, Merging, Conditions | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/control-flow): Control flow in Mastra legacy workflows allows you to manage branching, merging, and conditions to construct legacy workflows that meet your logic requirements. +- [Dynamic Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/dynamic-workflows): Learn how to create dynamic workflows within legacy workflow steps, allowing for flexible workflow creation based on runtime conditions. +- [Error Handling in Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/error-handling): Learn how to handle errors in Mastra legacy workflows using step retries, conditional branching, and monitoring. +- [nested-workflows](https://mastra.ai/en/docs/workflows-legacy/nested-workflows) +- [Handling Complex LLM Operations | Workflows (Legacy) | Mastra](https://mastra.ai/en/docs/workflows-legacy/overview): Workflows in Mastra help you orchestrate complex sequences of operations with features like branching, parallel execution, resource suspension, and more. +- [Runtime variables - dependency injection | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/runtime-variables): Learn how to use Mastras dependency injection system to provide runtime configuration to workflows and steps. +- [Creating Steps and Adding to Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/steps): Steps in Mastra workflows provide a structured way to manage operations by defining inputs, outputs, and execution logic. 
+- [Suspend & Resume Workflows (Legacy) | Human-in-the-Loop | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/suspend-and-resume): Suspend and resume in Mastra workflows allows you to pause execution while waiting for external input or resources. +- [Data Mapping with Workflow (Legacy) Variables | Mastra Docs](https://mastra.ai/en/docs/workflows-legacy/variables): Learn how to use workflow variables to map data between steps and create dynamic data flows in your Mastra workflows. + +## EN - examples +- [Example: Adding Voice Capabilities | Agents | Mastra](https://mastra.ai/en/examples/agents/adding-voice-capabilities): Example of adding voice capabilities to Mastra agents, enabling them to speak and listen using different voice providers. +- [Example: AI SDK v5 Integration | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/ai-sdk-v5-integration): Example of integrating Mastra agents with AI SDK v5 for streaming chat interfaces with memory and tool integration. +- [Example: Categorizing Birds | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/bird-checker): Example of using a Mastra AI Agent to determine if an image from Unsplash depicts a bird. +- [Example: Deploying an MCPServer | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/deploying-mcp-server): Example of setting up, building, and deploying a Mastra MCPServer using the stdio transport and publishing it to NPM. +- [Dynamic Agents Example | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/dynamic-agents): Learn how to create and configure dynamic agents using runtime context in Mastra. +- [Example: Hierarchical Multi-Agent System | Agents | Mastra](https://mastra.ai/en/examples/agents/hierarchical-multi-agent): Example of creating a hierarchical multi-agent system using Mastra, where agents interact through tool functions. 
+- [Example: Multi-Agent Workflow | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/multi-agent-workflow): Example of creating an agentic workflow in Mastra, where work product is passed between multiple agents. +- [Example: Agents with a System Prompt | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/system-prompt): Example of creating an AI agent in Mastra with a system prompt to define its personality and capabilities. +- [Example: Giving an Agent a Tool | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/using-a-tool): Example of creating an AI agent in Mastra that uses a dedicated tool to provide weather information. +- [Example: Workflow as Tools | Agents | Mastra Docs](https://mastra.ai/en/examples/agents/workflow-as-tools): Example of creating Agents in Mastra, demonstrating how to use workflows as tools. It shows how to suspend and resume workflows from an agent. +- [Auth Middleware](https://mastra.ai/en/examples/deployment/auth-middleware) +- [CORS Middleware](https://mastra.ai/en/examples/deployment/cors-middleware) +- [Custom API Route](https://mastra.ai/en/examples/deployment/custom-api-route) +- [Deploying a Mastra Server](https://mastra.ai/en/examples/deployment/deploying-mastra-server) +- [Deployment examples](https://mastra.ai/en/examples/deployment) +- [Logging Middleware](https://mastra.ai/en/examples/deployment/logging-middleware) +- [Example: Answer Relevancy | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/answer-relevancy): Example of using the Answer Relevancy metric to evaluate response relevancy to queries. +- [Example: Bias | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/bias): Example of using the Bias metric to evaluate responses for various forms of bias. +- [Example: Completeness | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/completeness): Example of using the Completeness metric to evaluate how thoroughly responses cover input elements. 
+- [Example: Content Similarity | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/content-similarity): Example of using the Content Similarity metric to evaluate text similarity between content. +- [Example: Context Position | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/context-position): Example of using the Context Position metric to evaluate sequential ordering in responses. +- [Example: Context Precision | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/context-precision): Example of using the Context Precision metric to evaluate how precisely context information is used. +- [Example: Context Relevancy | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/context-relevancy): Example of using the Context Relevancy metric to evaluate how relevant context information is to a query. +- [Example: Contextual Recall | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/contextual-recall): Example of using the Contextual Recall metric to evaluate how well responses incorporate context information. +- [Example: Custom Eval | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/custom-eval): Example of creating custom LLM-based evaluation metrics in Mastra. +- [Example: Faithfulness | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/faithfulness): Example of using the Faithfulness metric to evaluate how factually accurate responses are compared to context. +- [Example: Hallucination | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/hallucination): Example of using the Hallucination metric to evaluate factual contradictions in responses. +- [Example: Keyword Coverage | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/keyword-coverage): Example of using the Keyword Coverage metric to evaluate how well responses cover important keywords from input text. 
+- [Example: Prompt Alignment | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/prompt-alignment): Example of using the Prompt Alignment metric to evaluate instruction adherence in responses. +- [Example: Summarization | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/summarization): Example of using the Summarization metric to evaluate how well LLM-generated summaries capture content while maintaining factual accuracy. +- [Example: Textual Difference | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/textual-difference): Example of using the Textual Difference metric to evaluate similarity between text strings by analyzing sequence differences and changes. +- [Example: Tone Consistency | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/tone-consistency): Example of using the Tone Consistency metric to evaluate emotional tone patterns and sentiment consistency in text. +- [Example: Toxicity | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/toxicity): Example of using the Toxicity metric to evaluate responses for harmful content and toxic language. +- [Example: Word Inclusion | Evals | Mastra Docs](https://mastra.ai/en/examples/evals/word-inclusion): Example of creating a custom metric to evaluate word inclusion in output text. +- [Examples List: Workflows, Agents, RAG | Mastra Docs](https://mastra.ai/en/examples): Explore practical examples of AI development with Mastra, including text generation, RAG implementations, structured outputs, and multi-modal interactions. Learn how to build AI applications using OpenAI, Anthropic, and Google Gemini. 
+- [Memory Processors](https://mastra.ai/en/examples/memory/memory-processors): Example of using memory processors to filter and transform recalled messages +- [memory-with-libsql](https://mastra.ai/en/examples/memory/memory-with-libsql) +- [memory-with-mem0](https://mastra.ai/en/examples/memory/memory-with-mem0) +- [memory-with-pg](https://mastra.ai/en/examples/memory/memory-with-pg) +- [memory-with-upstash](https://mastra.ai/en/examples/memory/memory-with-upstash) +- [Streaming Working Memory (advanced)](https://mastra.ai/en/examples/memory/streaming-working-memory-advanced): Example of using working memory to maintain a todo list across conversations +- [Streaming Structured Working Memory](https://mastra.ai/en/examples/memory/streaming-working-memory-structured): Example of using structured working memory (schema) to maintain a todo list across conversations +- [Streaming Working Memory](https://mastra.ai/en/examples/memory/streaming-working-memory): Example of using working memory with an agent +- [AI SDK useChat Hook](https://mastra.ai/en/examples/memory/use-chat): Example showing how to integrate Mastra memory with the Vercel AI SDK useChat hook. +- [Example: Adjusting Chunk Delimiters | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/chunking/adjust-chunk-delimiters): Adjust chunk delimiters in Mastra to better match your content structure. +- [Example: Adjusting The Chunk Size | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/chunking/adjust-chunk-size): Adjust chunk size in Mastra to better match your content and memory requirements. +- [Example: Semantically Chunking HTML | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/chunking/chunk-html): Chunk HTML content in Mastra to semantically chunk the document. +- [Example: Semantically Chunking JSON | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/chunking/chunk-json): Chunk JSON data in Mastra to semantically chunk the document. 
+- [Example: Semantically Chunking Markdown | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/chunking/chunk-markdown): Example of using Mastra to chunk markdown documents for search or retrieval purposes. +- [Example: Semantically Chunking Text | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/chunking/chunk-text): Example of using Mastra to split large text documents into smaller chunks for processing. +- [Example: Embedding Chunk Arrays | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/embedding/embed-chunk-array): Example of using Mastra to generate embeddings for an array of text chunks for similarity search. +- [Example: Embedding Text Chunks | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/embedding/embed-text-chunk): Example of using Mastra to generate an embedding for a single text chunk for similarity search. +- [Example: Embedding Text with Cohere | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/embedding/embed-text-with-cohere): Example of using Mastra to generate embeddings using Coheres embedding model. +- [Example: Metadata Extraction | Retrieval | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/embedding/metadata-extraction): Example of extracting and utilizing metadata from documents in Mastra for enhanced document processing and retrieval. +- [Example: Hybrid Vector Search | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/query/hybrid-vector-search): Example of using metadata filters with PGVector to enhance vector search results in Mastra. +- [Example: Retrieving Top-K Results | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/query/retrieve-results): Example of using Mastra to query a vector database and retrieve semantically similar chunks. +- [Example: Re-ranking Results with Tools | Retrieval | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/rerank/rerank-rag): Example of implementing a RAG system with re-ranking in Mastra using OpenAI embeddings and PGVector for vector storage. 
+- [Example: Re-ranking Results | Retrieval | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/rerank/rerank): Example of implementing semantic re-ranking in Mastra using OpenAI embeddings and PGVector for vector storage. +- [Example: Reranking with Cohere | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/rerank/reranking-with-cohere): Example of using Mastra to improve document retrieval relevance with Cohere's reranking service. +- [Example: Reranking with ZeroEntropy | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/rerank/reranking-with-zeroentropy): Example of using Mastra to improve document retrieval relevance with ZeroEntropy's reranking service. +- [Example: Upsert Embeddings | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/upsert/upsert-embeddings): Examples of using Mastra to store embeddings in various vector databases for similarity search. +- [Example: Using the Vector Query Tool | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/usage/basic-rag): Example of implementing a basic RAG system in Mastra using OpenAI embeddings and PGVector for vector storage. +- [Example: Optimizing Information Density | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/usage/cleanup-rag): Example of implementing a RAG system in Mastra to optimize information density and deduplicate data using LLM-based processing. +- [Example: Chain of Thought Prompting | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/usage/cot-rag): Example of implementing a RAG system in Mastra with chain-of-thought reasoning using OpenAI and PGVector. +- [Example: Structured Reasoning with Workflows | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/usage/cot-workflow-rag): Example of implementing structured reasoning in a RAG system using Mastra's workflow capabilities. 
+- [Database-Specific Configurations | RAG | Mastra Examples](https://mastra.ai/en/examples/rag/usage/database-specific-config): Learn how to use database-specific configurations to optimize vector search performance and leverage unique features of different vector stores. +- [Example: Agent-Driven Metadata Filtering | Retrieval | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/usage/filter-rag): Example of using a Mastra agent in a RAG system to construct and apply metadata filters for document retrieval. +- [Example: A Complete Graph RAG System | RAG | Mastra Docs](https://mastra.ai/en/examples/rag/usage/graph-rag): Example of implementing a Graph RAG system in Mastra using OpenAI embeddings and PGVector for vector storage. +- [Speech to Speech](https://mastra.ai/en/examples/voice/speech-to-speech): Example of using Mastra to create a speech to speech application. +- [Example: Speech to Text | Voice | Mastra Docs](https://mastra.ai/en/examples/voice/speech-to-text): Example of using Mastra to create a speech to text application. +- [Example: Text to Speech | Voice | Mastra Docs](https://mastra.ai/en/examples/voice/text-to-speech): Example of using Mastra to create a text to speech application. +- [Turn Taking](https://mastra.ai/en/examples/voice/turn-taking): Example of using Mastra to create a multi-agent debate with turn-taking conversation flow. +- [Example: Using a Tool/Agent as a Step | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/agent-and-tool-interop): Example of using Mastra to integrate a tool or an agent as a step in a workflow. +- [Example: Array as Input (.foreach()) | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/array-as-input): Example of using Mastra to process an array using .foreach() in a workflow. +- [Example: Calling an Agent from a Workflow | Mastra Docs](https://mastra.ai/en/examples/workflows/calling-agent): Example of using Mastra to call an AI agent from within a workflow step. 
+- [Example: Conditional Branching | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/conditional-branching): Example of using Mastra to create conditional branches in workflows using the `branch` statement. +- [Example: Control Flow | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/control-flow): Example of using Mastra to create workflows with loops based on provided conditions. +- [Example: Human in the Loop | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/human-in-the-loop): Example of using Mastra to create workflows with human intervention points. +- [Inngest Workflow | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/inngest-workflow): Example of building an inngest workflow with Mastra +- [Example: Parallel Execution | Workflows | Mastra Docs](https://mastra.ai/en/examples/workflows/parallel-steps): Example of using Mastra to execute multiple independent tasks in parallel within a workflow. +- [Example: Branching Paths | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/branching-paths): Example of using Mastra to create legacy workflows with branching paths based on intermediate results. +- [Example: Calling an Agent from a Workflow (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/calling-agent): Example of using Mastra to call an AI agent from within a legacy workflow step. +- [Example: Conditional Branching (experimental) | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/conditional-branching): Example of using Mastra to create conditional branches in legacy workflows using if/else statements. +- [Example: Creating a Workflow | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/creating-a-workflow): Example of using Mastra to define and execute a simple workflow with a single step. 
+- [Example: Cyclical Dependencies | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/cyclical-dependencies): Example of using Mastra to create legacy workflows with cyclical dependencies and conditional loops. +- [Example: Human in the Loop | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/human-in-the-loop): Example of using Mastra to create legacy workflows with human intervention points. +- [Example: Parallel Execution | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/parallel-steps): Example of using Mastra to execute multiple independent tasks in parallel within a workflow. +- [Example: Sequential Steps | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/sequential-steps): Example of using Mastra to chain legacy workflow steps in a specific sequence, passing data between them. +- [Example: Suspend and Resume | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/suspend-and-resume): Example of using Mastra to suspend and resume legacy workflow steps during execution. +- [Example: Using a Tool as a Step | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/examples/workflows_legacy/using-a-tool-as-a-step): Example of using Mastra to integrate a custom tool as a step in a legacy workflow. +- [Data Mapping with Workflow Variables (Legacy) | Mastra Examples](https://mastra.ai/en/examples/workflows_legacy/workflow-variables): Learn how to use workflow variables to map data between steps in Mastra workflows. + +## EN - guides +- [Building an AI Recruiter | Mastra Workflows | Guides](https://mastra.ai/en/guides/guide/ai-recruiter): Guide on building a recruiter workflow in Mastra to gather and process candidate information using LLMs. 
+- [Building an AI Chef Assistant | Mastra Agent Guides](https://mastra.ai/en/guides/guide/chef-michel): Guide on creating a Chef Assistant agent in Mastra to help users cook meals with available ingredients. +- [MCP Server: Building a Notes MCP Server | Mastra Guide](https://mastra.ai/en/guides/guide/notes-mcp-server): A step-by-step guide to creating a fully-featured MCP (Model Context Protocol) server for managing notes using the Mastra framework. +- [Building a Research Paper Assistant | Mastra RAG Guides](https://mastra.ai/en/guides/guide/research-assistant): Guide on creating an AI research assistant that can analyze and answer questions about academic papers using RAG. +- [Building an AI Stock Agent | Mastra Agents | Guides](https://mastra.ai/en/guides/guide/stock-agent): Guide on creating a simple stock agent in Mastra to fetch the last day's closing stock price for a given symbol. +- [Overview](https://mastra.ai/en/guides): Guides on building with Mastra + +## EN - reference +- [Reference: Agent | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/agent): Documentation for the Agent class in Mastra, which provides the foundation for creating AI agents with various capabilities. +- [Reference: createTool() | Tools | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/createTool): Documentation for the createTool function in Mastra, which creates custom tools for agents and workflows. +- [Reference: Agent.generate() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/generate): Documentation for the `.generate()` method in Mastra agents, which produces text or structured responses. +- [Reference: getAgent() | Agent Config | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getAgent): API Reference for getAgent. 
+- [Reference: Agent.getInstructions() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getInstructions): Documentation for the `.getInstructions()` method in Mastra agents, which retrieves the instructions that guide the agent's behavior. +- [Reference: Agent.getMemory() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getMemory): Documentation for the `.getMemory()` method in Mastra agents, which retrieves the memory system associated with the agent. +- [Reference: Agent.getModel() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getModel): Documentation for the `.getModel()` method in Mastra agents, which retrieves the language model that powers the agent. +- [Reference: Agent.getTools() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getTools): Documentation for the `.getTools()` method in Mastra agents, which retrieves the tools that the agent can use. +- [Reference: Agent.getVoice() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getVoice): Documentation for the `.getVoice()` method in Mastra agents, which retrieves the voice provider for speech capabilities. +- [Reference: Agent.getWorkflows() | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/getWorkflows): Documentation for the `.getWorkflows()` method in Mastra agents, which retrieves the workflows that the agent can execute. +- [Reference: Agent.stream() | Streaming | Agents | Mastra Docs](https://mastra.ai/en/reference/agents/stream): Documentation for the `.stream()` method in Mastra agents, which enables real-time streaming of responses. +- [mastra build | Production Bundle | Mastra CLI](https://mastra.ai/en/reference/cli/build): Build your Mastra project for production deployment +- [create-mastra | Create Project | Mastra CLI](https://mastra.ai/en/reference/cli/create-mastra): Documentation for the create-mastra command, which creates a new Mastra project with interactive setup options. 
+- [mastra dev | Development Server | Mastra CLI](https://mastra.ai/en/reference/cli/dev): Documentation for the mastra dev command, which starts a development server for agents, tools, and workflows. +- [mastra init | Initialize Project | Mastra CLI](https://mastra.ai/en/reference/cli/init): Documentation for the mastra init command, which creates a new Mastra project with interactive setup options. +- [mastra lint | Validate Project | Mastra CLI](https://mastra.ai/en/reference/cli/lint): Lint your Mastra project +- [@mastra/mcp-docs-server](https://mastra.ai/en/reference/cli/mcp-docs-server): Serve Mastra docs, examples and blog posts over MCP +- [mastra start](https://mastra.ai/en/reference/cli/start): Start your built Mastra application +- [Mastra Client Agents API](https://mastra.ai/en/reference/client-js/agents): Learn how to interact with Mastra AI agents, including generating responses, streaming interactions, and managing agent tools using the client-js SDK. +- [Mastra Client Error Handling](https://mastra.ai/en/reference/client-js/error-handling): Learn about the built-in retry mechanism and error handling capabilities in the Mastra client-js SDK. +- [Mastra Client Logs API](https://mastra.ai/en/reference/client-js/logs): Learn how to access and query system logs and debugging information in Mastra using the client-js SDK. +- [Mastra Client Memory API](https://mastra.ai/en/reference/client-js/memory): Learn how to manage conversation threads and message history in Mastra using the client-js SDK. +- [Mastra Client Telemetry API](https://mastra.ai/en/reference/client-js/telemetry): Learn how to retrieve and analyze traces from your Mastra application for monitoring and debugging using the client-js SDK. +- [Mastra Client Tools API](https://mastra.ai/en/reference/client-js/tools): Learn how to interact with and execute tools available in the Mastra platform using the client-js SDK. 
+- [Mastra Client Vectors API](https://mastra.ai/en/reference/client-js/vectors): Learn how to work with vector embeddings for semantic search and similarity matching in Mastra using the client-js SDK. +- [Mastra Client Workflows (Legacy) API](https://mastra.ai/en/reference/client-js/workflows-legacy): Learn how to interact with and execute automated legacy workflows in Mastra using the client-js SDK. +- [Mastra Client Workflows API](https://mastra.ai/en/reference/client-js/workflows): Learn how to interact with and execute automated workflows in Mastra using the client-js SDK. +- [Mastra Core](https://mastra.ai/en/reference/core/mastra-class): Documentation for the Mastra Class, the core entry point for managing agents, workflows, MCP servers, and server endpoints. +- [Cloudflare Deployer](https://mastra.ai/en/reference/deployer/cloudflare): Documentation for the CloudflareDeployer class, which deploys Mastra applications to Cloudflare Workers. +- [Mastra Deployer](https://mastra.ai/en/reference/deployer/deployer): Documentation for the Deployer abstract class, which handles packaging and deployment of Mastra applications. +- [Netlify Deployer](https://mastra.ai/en/reference/deployer/netlify): Documentation for the NetlifyDeployer class, which deploys Mastra applications to Netlify Functions. +- [Vercel Deployer](https://mastra.ai/en/reference/deployer/vercel): Documentation for the VercelDeployer class, which deploys Mastra applications to Vercel. +- [Reference: Answer Relevancy | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/answer-relevancy): Documentation for the Answer Relevancy Metric in Mastra, which evaluates how well LLM outputs address the input query. +- [Reference: Bias | Output Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/bias): Documentation for the Bias Metric in Mastra, which evaluates LLM outputs for various forms of bias, including gender, political, racial/ethnic, or geographical bias. 
+- [Reference: Completeness | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/completeness): Documentation for the Completeness Metric in Mastra, which evaluates how thoroughly LLM outputs cover key elements present in the input. +- [Reference: Content Similarity | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/content-similarity): Documentation for the Content Similarity Metric in Mastra, which measures textual similarity between strings and provides a matching score. +- [Reference: Context Position | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/context-position): Documentation for the Context Position Metric in Mastra, which evaluates the ordering of context nodes based on their relevance to the query and output. +- [Reference: Context Precision | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/context-precision): Documentation for the Context Precision Metric in Mastra, which evaluates the relevance and precision of retrieved context nodes for generating expected outputs. +- [Reference: Context Relevancy | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/context-relevancy): Documentation for the Context Relevancy Metric, which evaluates the relevance of retrieved context in RAG pipelines. +- [Reference: Contextual Recall | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/contextual-recall): Documentation for the Contextual Recall Metric, which evaluates the completeness of LLM responses in incorporating relevant context. +- [Reference: Faithfulness | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/faithfulness): Documentation for the Faithfulness Metric in Mastra, which evaluates the factual accuracy of LLM outputs compared to the provided context. 
+- [Reference: Hallucination | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/hallucination): Documentation for the Hallucination Metric in Mastra, which evaluates the factual correctness of LLM outputs by identifying contradictions with provided context. +- [Reference: Keyword Coverage | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/keyword-coverage): Documentation for the Keyword Coverage Metric in Mastra, which evaluates how well LLM outputs cover important keywords from the input. +- [Reference: Prompt Alignment | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/prompt-alignment): Documentation for the Prompt Alignment Metric in Mastra, which evaluates how well LLM outputs adhere to given prompt instructions. +- [Reference: Summarization | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/summarization): Documentation for the Summarization Metric in Mastra, which evaluates the quality of LLM-generated summaries for content and factual accuracy. +- [Reference: Textual Difference | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/textual-difference): Documentation for the Textual Difference Metric in Mastra, which measures textual differences between strings using sequence matching. +- [Reference: Tone Consistency | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/tone-consistency): Documentation for the Tone Consistency Metric in Mastra, which evaluates emotional tone and sentiment consistency in text. +- [Reference: Toxicity | Metrics | Evals | Mastra Docs](https://mastra.ai/en/reference/evals/toxicity): Documentation for the Toxicity Metric in Mastra, which evaluates LLM outputs for racist, biased, or toxic elements. 
+- [API Reference](https://mastra.ai/en/reference): Mastra API Reference +- [Reference: .after() | Building Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/after): Documentation for the `after()` method in workflows (legacy), enabling branching and merging paths. +- [.afterEvent() Method | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/afterEvent): Reference for the afterEvent method in Mastra workflows that creates event-based suspension points. +- [Reference: Workflow.commit() | Running Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/commit): Documentation for the `.commit()` method in workflows, which re-initializes the workflow machine with the current step configuration. +- [Reference: Workflow.createRun() | Running Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/createRun): Documentation for the `.createRun()` method in workflows (legacy), which initializes a new workflow run instance. +- [Reference: Workflow.else() | Conditional Branching | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/else): Documentation for the `.else()` method in Mastra workflows, which creates an alternative branch when an if condition is false. +- [Event-Driven Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/events): Learn how to create event-driven workflows using afterEvent and resumeWithEvent methods in Mastra. +- [Reference: Workflow.execute() | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/execute): Documentation for the `.execute()` method in Mastra workflows, which runs workflow steps and returns results. +- [Reference: Workflow.if() | Conditional Branching | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/if): Documentation for the `.if()` method in Mastra workflows, which creates conditional branches based on specified conditions. 
+- [Reference: run.resume() | Running Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/resume): Documentation for the `.resume()` method in workflows, which continues execution of a suspended workflow step. +- [.resumeWithEvent() Method | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/resumeWithEvent): Reference for the resumeWithEvent method that resumes suspended workflows using event data. +- [Reference: Snapshots | Workflow State Persistence (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/snapshots): Technical reference on snapshots in Mastra - the serialized workflow state that enables suspend and resume functionality +- [Reference: start() | Running Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/start): Documentation for the `start()` method in workflows, which begins execution of a workflow run. +- [Reference: Step | Building Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/step-class): Documentation for the Step class, which defines individual units of work within a workflow. +- [Reference: StepCondition | Building Workflows (Legacy) | Mastra](https://mastra.ai/en/reference/legacyWorkflows/step-condition): Documentation for the step condition class in workflows, which determines whether a step should execute based on the output of previous steps or trigger data. +- [Reference: Workflow.step() | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/step-function): Documentation for the `.step()` method in workflows, which adds a new step to the workflow. +- [Reference: StepOptions | Building Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/step-options): Documentation for the step options in workflows, which control variable mapping, execution conditions, and other runtime behavior. 
+- [Step Retries | Error Handling | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/step-retries): Automatically retry failed steps in Mastra workflows with configurable retry policies. +- [Reference: suspend() | Control Flow | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/suspend): Documentation for the suspend function in Mastra workflows, which pauses execution until resumed. +- [Reference: Workflow.then() | Building Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/then): Documentation for the `.then()` method in workflows, which creates sequential dependencies between steps. +- [Reference: Workflow.until() | Looping in Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/until): Documentation for the `.until()` method in Mastra workflows, which repeats a step until a specified condition becomes true. +- [Reference: run.watch() | Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/watch): Documentation for the `.watch()` method in workflows, which monitors the status of a workflow run. +- [Reference: Workflow.while() | Looping in Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/while): Documentation for the `.while()` method in Mastra workflows, which repeats a step as long as a specified condition remains true. +- [Reference: Workflow Class | Building Workflows (Legacy) | Mastra Docs](https://mastra.ai/en/reference/legacyWorkflows/workflow): Documentation for the Workflow class in Mastra, which enables you to create state machines for complex sequences of operations with conditional branching and data validation. 
+- [Memory](https://mastra.ai/en/reference/memory/Memory) +- [createThread](https://mastra.ai/en/reference/memory/createThread) +- [getThreadById](https://mastra.ai/en/reference/memory/getThreadById) +- [getThreadsByResourceId](https://mastra.ai/en/reference/memory/getThreadsByResourceId) +- [query](https://mastra.ai/en/reference/memory/query) +- [AgentNetwork (Experimental)](https://mastra.ai/en/reference/networks/agent-network): Reference documentation for the AgentNetwork class +- [Reference: Logger Instance | Mastra Observability Docs](https://mastra.ai/en/reference/observability/logger): Documentation for Logger instances, which provide methods to record events at various severity levels. +- [Reference: OtelConfig | Mastra Observability Docs](https://mastra.ai/en/reference/observability/otel-config): Documentation for the OtelConfig object, which configures OpenTelemetry instrumentation, tracing, and exporting behavior. +- [Reference: Braintrust | Observability | Mastra Docs](https://mastra.ai/en/reference/observability/providers/braintrust): Documentation for integrating Braintrust with Mastra, an evaluation and monitoring platform for LLM applications. +- [Reference: Dash0 Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/dash0): Documentation for integrating Mastra with Dash0, an OpenTelemetry-native observability solution. +- [Reference: Provider List | Observability | Mastra Docs](https://mastra.ai/en/reference/observability/providers): Overview of observability providers supported by Mastra, including Dash0, SigNoz, Braintrust, Langfuse, and more. +- [Reference: Keywords AI Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/keywordsai): Documentation for integrating Keywords AI (an observability platform for LLM applications) with Mastra. 
+- [Reference: Laminar Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/laminar): Documentation for integrating Laminar with Mastra, a specialized observability platform for LLM applications. +- [Reference: Langfuse Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/langfuse): Documentation for integrating Langfuse with Mastra, an open-source observability platform for LLM applications. +- [Reference: LangSmith Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/langsmith): Documentation for integrating LangSmith with Mastra, a platform for debugging, testing, evaluating, and monitoring LLM applications. +- [Reference: LangWatch Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/langwatch): Documentation for integrating LangWatch with Mastra, a specialized observability platform for LLM applications. +- [Reference: New Relic Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/new-relic): Documentation for integrating New Relic with Mastra, a comprehensive observability platform supporting OpenTelemetry for full-stack monitoring. +- [Reference: SigNoz Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/signoz): Documentation for integrating SigNoz with Mastra, an open-source APM and observability platform providing full-stack monitoring through OpenTelemetry. +- [Reference: Traceloop Integration | Mastra Observability Docs](https://mastra.ai/en/reference/observability/providers/traceloop): Documentation for integrating Traceloop with Mastra, an OpenTelemetry-native observability platform for LLM applications. 
+- [Reference: Astra Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/astra): Documentation for the AstraVector class in Mastra, which provides vector search using DataStax Astra DB. +- [Reference: Chroma Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/chroma): Documentation for the ChromaVector class in Mastra, which provides vector search using ChromaDB. +- [Reference: .chunk() | Document Processing | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/chunk): Documentation for the chunk function in Mastra, which splits documents into smaller segments using various strategies. +- [Reference: Couchbase Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/couchbase): Documentation for the CouchbaseVector class in Mastra, which provides vector search using Couchbase Vector Search. +- [Reference: DatabaseConfig | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/database-config): API reference for database-specific configuration types used with vector query tools in Mastra RAG systems. +- [Reference: MDocument | Document Processing | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/document): Documentation for the MDocument class in Mastra, which handles document processing and chunking. +- [Reference: embed() | Document Embedding | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/embeddings): Documentation for embedding functionality in Mastra using the AI SDK. +- [Reference: ExtractParams | Document Processing | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/extract-params): Documentation for metadata extraction configuration in Mastra. +- [Reference: GraphRAG | Graph-based RAG | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/graph-rag): Documentation for the GraphRAG class in Mastra, which implements a graph-based approach to retrieval augmented generation. 
+- [Reference: Lance Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/lance): Documentation for the LanceVectorStore class in Mastra, which provides vector search using LanceDB, an embedded vector database based on the Lance columnar format. +- [Default Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/libsql): Documentation for the LibSQLVector class in Mastra, which provides vector search using LibSQL with vector extensions. +- [Reference: Metadata Filters | Metadata Filtering | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/metadata-filters): Documentation for metadata filtering capabilities in Mastra, which allow for precise querying of vector search results across different vector stores. +- [Reference: MongoDB Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/mongodb): Documentation for the MongoDBVector class in Mastra, which provides vector search using MongoDB Atlas and Atlas Vector Search. +- [Reference: OpenSearch Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/opensearch): Documentation for the OpenSearchVector class in Mastra, which provides vector search using OpenSearch. +- [Reference: PG Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/pg): Documentation for the PgVector class in Mastra, which provides vector search using PostgreSQL with pgvector extension. +- [Reference: Pinecone Vector Store | Vector DBs | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/pinecone): Documentation for the PineconeVector class in Mastra, which provides an interface to Pinecone's vector database. +- [Reference: Qdrant Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/qdrant): Documentation for integrating Qdrant with Mastra, a vector similarity search engine for managing vectors and payloads. 
+- [Reference: Rerank | Document Retrieval | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/rerank): Documentation for the rerank function in Mastra, which provides advanced reranking capabilities for vector search results. +- [Reference: Rerank | Document Retrieval | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/rerankWithScorer): Documentation for the rerank function in Mastra, which provides advanced reranking capabilities for vector search results. +- [Reference: Turbopuffer Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/turbopuffer): Documentation for integrating Turbopuffer with Mastra, a high-performance vector database for efficient similarity search. +- [Reference: Upstash Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/upstash): Documentation for the UpstashVector class in Mastra, which provides vector search using Upstash Vector. +- [Reference: Cloudflare Vector Store | Vector Databases | RAG | Mastra Docs](https://mastra.ai/en/reference/rag/vectorize): Documentation for the CloudflareVector class in Mastra, which provides vector search using Cloudflare Vectorize. +- [Cloudflare D1 Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/cloudflare-d1): Documentation for the Cloudflare D1 SQL storage implementation in Mastra. +- [Cloudflare Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/cloudflare): Documentation for the Cloudflare KV storage implementation in Mastra. +- [DynamoDB Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/dynamodb): Documentation for the DynamoDB storage implementation in Mastra, using a single-table design with ElectroDB. +- [LanceDB Storage](https://mastra.ai/en/reference/storage/lance): Documentation for the LanceDB storage implementation in Mastra. 
+- [LibSQL Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/libsql): Documentation for the LibSQL storage implementation in Mastra. +- [MSSQL Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/mssql): Documentation for the MSSQL storage implementation in Mastra. +- [PostgreSQL Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/postgresql): Documentation for the PostgreSQL storage implementation in Mastra. +- [Upstash Storage | Storage System | Mastra Core](https://mastra.ai/en/reference/storage/upstash): Documentation for the Upstash storage implementation in Mastra. +- [Reference: MastraMCPClient | Tool Discovery | Mastra Docs](https://mastra.ai/en/reference/tools/client): API Reference for MastraMCPClient - A client implementation for the Model Context Protocol. +- [Reference: createTool() | Tools | Mastra Docs](https://mastra.ai/en/reference/tools/create-tool): Documentation for the createTool function in Mastra, used to define custom tools for agents. +- [Reference: createDocumentChunkerTool() | Tools | Mastra Docs](https://mastra.ai/en/reference/tools/document-chunker-tool): Documentation for the Document Chunker Tool in Mastra, which splits documents into smaller chunks for efficient processing and retrieval. +- [Reference: createGraphRAGTool() | RAG | Mastra Tools Docs](https://mastra.ai/en/reference/tools/graph-rag-tool): Documentation for the Graph RAG Tool in Mastra, which enhances RAG by building a graph of semantic relationships between documents. +- [Reference: MCPClient | Tool Management | Mastra Docs](https://mastra.ai/en/reference/tools/mcp-client): API Reference for MCPClient - A class for managing multiple Model Context Protocol servers and their tools. 
+- [Reference: MCPServer | Exposing Mastra Tools via MCP | Mastra Docs](https://mastra.ai/en/reference/tools/mcp-server): API Reference for MCPServer - A class for exposing Mastra tools and capabilities as a Model Context Protocol server. +- [Reference: createVectorQueryTool() | RAG | Mastra Tools Docs](https://mastra.ai/en/reference/tools/vector-query-tool): Documentation for the Vector Query Tool in Mastra, which facilitates semantic search over vector stores with filtering and reranking capabilities. +- [Reference: Azure Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/azure): Documentation for the AzureVoice class, providing text-to-speech and speech-to-text capabilities using Azure Cognitive Services. +- [Reference: Cloudflare Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/cloudflare): Documentation for the CloudflareVoice class, providing text-to-speech capabilities using Cloudflare Workers AI. +- [Reference: CompositeVoice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/composite-voice): Documentation for the CompositeVoice class, which enables combining multiple voice providers for flexible text-to-speech and speech-to-text operations. +- [Reference: Deepgram Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/deepgram): Documentation for the Deepgram voice implementation, providing text-to-speech and speech-to-text capabilities with multiple voice models and languages. +- [Reference: ElevenLabs Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/elevenlabs): Documentation for the ElevenLabs voice implementation, offering high-quality text-to-speech capabilities with multiple voice models and natural-sounding synthesis. +- [Reference: Google Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/google): Documentation for the Google Voice implementation, providing text-to-speech and speech-to-text capabilities. 
+- [Reference: MastraVoice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/mastra-voice): Documentation for the MastraVoice abstract base class, which defines the core interface for all voice services in Mastra, including speech-to-speech capabilities. +- [Reference: Murf Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/murf): Documentation for the Murf voice implementation, providing text-to-speech capabilities. +- [Reference: OpenAI Realtime Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/openai-realtime): Documentation for the OpenAIRealtimeVoice class, providing real-time text-to-speech and speech-to-text capabilities via WebSockets. +- [Reference: OpenAI Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/openai): Documentation for the OpenAIVoice class, providing text-to-speech and speech-to-text capabilities. +- [Reference: PlayAI Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/playai): Documentation for the PlayAI voice implementation, providing text-to-speech capabilities. +- [Reference: Sarvam Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/sarvam): Documentation for the Sarvam class, providing text-to-speech and speech-to-text capabilities. +- [Reference: Speechify Voice | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/speechify): Documentation for the Speechify voice implementation, providing text-to-speech capabilities. +- [Reference: voice.addInstructions() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.addInstructions): Documentation for the addInstructions() method available in voice providers, which adds instructions to guide the voice model's behavior. 
+- [Reference: voice.addTools() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.addTools): Documentation for the addTools() method available in voice providers, which equips voice models with function calling capabilities. +- [Reference: voice.answer() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.answer): Documentation for the answer() method available in real-time voice providers, which triggers the voice provider to generate a response. +- [Reference: voice.close() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.close): Documentation for the close() method available in voice providers, which disconnects from real-time voice services. +- [Reference: voice.connect() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.connect): Documentation for the connect() method available in real-time voice providers, which establishes a connection for speech-to-speech communication. +- [Reference: Voice Events | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.events): Documentation for events emitted by voice providers, particularly for real-time voice interactions. +- [Reference: voice.getSpeakers() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.getSpeakers): Documentation for the getSpeakers() method available in voice providers, which retrieves available voice options. +- [Reference: voice.listen() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.listen): Documentation for the listen() method available in all Mastra voice providers, which converts speech to text. +- [Reference: voice.off() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.off): Documentation for the off() method available in voice providers, which removes event listeners for voice events. 
+- [Reference: voice.on() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.on): Documentation for the on() method available in voice providers, which registers event listeners for voice events. +- [Reference: voice.send() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.send): Documentation for the send() method available in real-time voice providers, which streams audio data for continuous processing. +- [Reference: voice.speak() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.speak): Documentation for the speak() method available in all Mastra voice providers, which converts text to speech. +- [Reference: voice.updateConfig() | Voice Providers | Mastra Docs](https://mastra.ai/en/reference/voice/voice.updateConfig): Documentation for the updateConfig() method available in voice providers, which updates the configuration of a voice provider at runtime. +- [Reference: Workflow.branch() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/branch): Documentation for the `.branch()` method in workflows, which creates conditional branches between steps. +- [Reference: Workflow.commit() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/commit): Documentation for the `.commit()` method in workflows, which finalizes the workflow and returns the final result. +- [Reference: Workflow.createRunAsync() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/create-run): Documentation for the `.createRunAsync()` method in workflows, which creates a new workflow run instance. +- [Reference: Workflow.dountil() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/dountil): Documentation for the `.dountil()` method in workflows, which creates a loop that executes a step until a condition is met. 
+- [Reference: Workflow.dowhile() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/dowhile): Documentation for the `.dowhile()` method in workflows, which creates a loop that executes a step while a condition is met. +- [Reference: Workflow.execute() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/execute): Documentation for the `.execute()` method in workflows, which executes a step with input data and returns the output. +- [Reference: Workflow.foreach() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/foreach): Documentation for the `.foreach()` method in workflows, which creates a loop that executes a step for each item in an array. +- [Reference: Workflow.map() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/map): Documentation for the `.map()` method in workflows, which maps output data from a previous step to the input of a subsequent step. +- [Reference: Workflow.parallel() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/parallel): Documentation for the `.parallel()` method in workflows, which executes multiple steps in parallel. +- [Reference: Workflow.resume() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/resume): Documentation for the `.resume()` method in workflows, which resumes a suspended workflow run with new data. +- [Reference: Workflow.sendEvent() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/sendEvent): Documentation for the `.sendEvent()` method in workflows, which resumes execution when an event is sent. +- [Reference: Workflow.sleep() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/sleep): Documentation for the `.sleep()` method in workflows, which pauses execution for a specified number of milliseconds. 
+- [Reference: Workflow.sleepUntil() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/sleepUntil): Documentation for the `.sleepUntil()` method in workflows, which pauses execution until a specified date. +- [Reference: Snapshots | Workflow State Persistence | Mastra Docs](https://mastra.ai/en/reference/workflows/snapshots): Technical reference on snapshots in Mastra - the serialized workflow state that enables suspend and resume functionality +- [Reference: Workflow.start() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/start): Documentation for the `.start()` method in workflows, which starts a workflow run with input data. +- [Reference: Step | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/step): Documentation for the Step class, which defines individual units of work within a workflow. +- [Reference: Workflow.stream() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/stream): Documentation for the `.stream()` method in workflows, which allows you to monitor the execution of a workflow run as a stream. +- [Reference: Workflow.then() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/then): Documentation for the `.then()` method in workflows, which creates sequential dependencies between steps. +- [Reference: Workflow.waitForEvent() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/waitForEvent): Documentation for the `.waitForEvent()` method in workflows, which pauses execution until an event is received. +- [Reference: Workflow.watch() | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/watch): Documentation for the `.watch()` method in workflows, which allows you to monitor the execution of a workflow run. 
+- [Reference: Workflow Class | Building Workflows | Mastra Docs](https://mastra.ai/en/reference/workflows/workflow): Documentation for the Workflow class in Mastra, which enables you to create state machines for complex sequences of operations with conditional branching and data validation. diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..37bc0b0d4 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,73 @@ +# Dependencies +node_modules +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Production builds (will be built inside container) +.next/ +dist/ +build/ + +# Environment and config files +.env +.env.local +.env.development.local +.env.test.local +.env.production.local + +# IDE and editor files +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS generated files +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +# Git +.git +.gitignore + +# Docker +Dockerfile +.dockerignore +docker-compose*.yml + +# Documentation +*.md +docs/ + +# Test files +coverage/ +.nyc_output +__tests__/ +**/*.test.ts +**/*.test.js +**/*.spec.ts +**/*.spec.js + +# Temporary files +tmp/ +temp/ + +# Logs +logs +*.log + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Claude/AI related +.claude/ +claude-code-prompts/ \ No newline at end of file diff --git a/.env.local b/.env.local new file mode 100644 index 000000000..d6e49e2c5 --- /dev/null +++ b/.env.local @@ -0,0 +1,7 @@ +VITE_CONVEX_URL=https://exuberant-albatross-496.convex.cloud +CONVEX_URL=https://exuberant-albatross-496.convex.cloud +VITE_WORKOS_CLIENT_ID=client_01K4C1TVA6CMQ3G32F1P301A9G +VITE_WORKOS_REDIRECT_URI=mcpjam://oauth/callback +CONVEX_HTTP_URL=https://exuberant-albatross-496.convex.site +ENVIRONMENT=local +VITE_DISABLE_POSTHOG_LOCAL=true \ No newline at end of file diff --git a/.env.production b/.env.production new file mode 100644 index 000000000..f09de7a64 --- /dev/null +++ b/.env.production @@ -0,0 +1,8 @@ +VITE_CONVEX_URL=https://outstanding-fennec-304.convex.cloud 
+CONVEX_URL=https://outstanding-fennec-304.convex.cloud +VITE_WORKOS_CLIENT_ID=client_01K4C1TVPBE7JTBFQJF9SDW9P9 +VITE_WORKOS_REDIRECT_URI=mcpjam://oauth/callback +CONVEX_HTTP_URL=https://outstanding-fennec-304.convex.site +ENVIRONMENT=production +PORT=6274 +VITE_DISABLE_POSTHOG_LOCAL=false \ No newline at end of file diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index f6a30e128..000000000 --- a/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -dist/**/* linguist-generated=true diff --git a/.github/workflows/docker-build-deploy.yml b/.github/workflows/docker-build-deploy.yml new file mode 100644 index 000000000..56551ca21 --- /dev/null +++ b/.github/workflows/docker-build-deploy.yml @@ -0,0 +1,53 @@ +name: Docker Release + +on: + workflow_dispatch: + push: + tags: ["v*"] + +env: + DOCKER_REPOSITORY: mcpjam/mcp-inspector + +jobs: + build-and-push: + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Extract metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.DOCKER_REPOSITORY }} + tags: | + type=ref,event=branch + type=sha + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=raw,value=latest,enable={{is_default_branch}} + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ./Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64 + build-args: | + BUILDKIT_INLINE_CACHE=1 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..69399ec14 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,70 @@ +name: Prettier and Build Check + +on: + pull_request: + branches: [main] + push: + branches: [main] + +jobs: + prettier-and-build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + + - name: Install dependencies + run: | + # Clean npm cache + npm cache clean --force + + # Remove lock files and node_modules to ensure fresh install + rm -rf package-lock.json node_modules + rm -rf client/package-lock.json client/node_modules + rm -rf server/package-lock.json server/node_modules + + # Install all dependencies (root + client + server) + npm install --legacy-peer-deps + cd client && npm install --legacy-peer-deps && cd .. + cd server && npm install --legacy-peer-deps && cd .. + + - name: Check Prettier formatting + run: | + if ! npx prettier --check .; then + echo "❌ Code is not properly formatted!" + echo "" + echo "To fix this, run:" + echo " npm run prettier-fix" + echo "" + echo "Then commit and push the changes." + exit 1 + else + echo "✅ All files are properly formatted!" + fi + + - name: Build project + run: | + echo "🏗️ Building project..." + npm run build + echo "✅ Build completed successfully!" + + - name: Test production start + run: | + echo "🧪 Testing production start..." + timeout 3s npm start || EXIT_CODE=$? 
+ if [ "${EXIT_CODE:-0}" -eq 124 ]; then + echo "✅ Production server started successfully (timed out as expected)" + exit 0 + elif [ "${EXIT_CODE:-0}" -eq 0 ]; then + echo "✅ Production server started successfully" + exit 0 + else + echo "❌ Production server failed to start with exit code $EXIT_CODE" + exit 1 + fi diff --git a/.github/workflows/mac-release.yml b/.github/workflows/mac-release.yml new file mode 100644 index 000000000..69beb725e --- /dev/null +++ b/.github/workflows/mac-release.yml @@ -0,0 +1,209 @@ +name: Mac Release + +on: + push: + tags: + - "v*" + workflow_dispatch: + inputs: + version: + description: "Version to release (e.g., v1.0.0)" + required: false + type: string + create_release: + description: "Create GitHub release?" + required: false + default: true + type: boolean + +env: + NODE_VERSION: "20" + +permissions: + contents: write + +jobs: + build-mac: + runs-on: macos-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + registry-url: "https://registry.npmjs.org" + + - name: Install dependencies + run: | + npm install --legacy-peer-deps + npm run install:deps + cd sdk && npm install --legacy-peer-deps + + - name: Setup Apple certificates and API key + run: | + # Create API key file + echo "${{ secrets.APPLE_API_KEY_CONTENT }}" | base64 -d > /tmp/AuthKey.p8 + + # Create certificates directory + mkdir -p ~/certificates + + # Decode and install Developer ID Application certificate + echo "${{ secrets.DEVELOPER_ID_APPLICATION_CERT }}" | base64 -d > ~/certificates/developer_id_application.p12 + + # Create temporary keychain + security create-keychain -p "temp_keychain_password" build.keychain + security default-keychain -s build.keychain + security unlock-keychain -p "temp_keychain_password" build.keychain + security set-keychain-settings -t 3600 -u build.keychain + + # Import certificate + security import 
~/certificates/developer_id_application.p12 -k build.keychain -P "${{ secrets.DEVELOPER_ID_APPLICATION_PASSWORD }}" -T /usr/bin/codesign + + # Enable codesign to access keychain + security set-key-partition-list -S apple-tool:,apple: -s -k "temp_keychain_password" build.keychain + + # Set identity for Electron Forge + echo "MAC_CODESIGN_IDENTITY=Developer ID Application: Marcelo Jimenez (TLBS4N7QZ3)" >> $GITHUB_ENV + echo "APPLE_API_KEY_FILE=/tmp/AuthKey.p8" >> $GITHUB_ENV + env: + APPLE_API_KEY_CONTENT: ${{ secrets.APPLE_API_KEY_CONTENT }} + DEVELOPER_ID_APPLICATION_CERT: ${{ secrets.DEVELOPER_ID_APPLICATION_CERT }} + DEVELOPER_ID_APPLICATION_PASSWORD: ${{ secrets.DEVELOPER_ID_APPLICATION_PASSWORD }} + + - name: Build and sign application + run: | + npm run build + + # Clean up any mounted volumes before making + sudo diskutil unmountDisk force /Volumes/"MCPJam Inspector" 2>/dev/null || true + hdiutil detach /Volumes/"MCPJam Inspector" -force 2>/dev/null || true + + npm run electron:make + + # Clean up any mounted volumes after making + sudo diskutil unmountDisk force /Volumes/"MCPJam Inspector" 2>/dev/null || true + hdiutil detach /Volumes/"MCPJam Inspector" -force 2>/dev/null || true + env: + NODE_OPTIONS: --max-old-space-size=8192 + APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }} + APPLE_API_ISSUER_ID: ${{ secrets.APPLE_API_ISSUER_ID }} + + - name: Notarize and staple DMG + run: | + # Find the DMG file + DMG_PATH=$(find out/make -name "*.dmg" | head -n1) + + if [ ! -f "$DMG_PATH" ]; then + echo "DMG file not found!" 
+ exit 1 + fi + + echo "Found DMG: $DMG_PATH" + + # Notarize the DMG + xcrun notarytool submit "$DMG_PATH" \ + --key /tmp/AuthKey.p8 \ + --key-id "${{ secrets.APPLE_API_KEY_ID }}" \ + --issuer "${{ secrets.APPLE_API_ISSUER_ID }}" \ + --wait + + # Staple the DMG + xcrun stapler staple "$DMG_PATH" + xcrun stapler validate "$DMG_PATH" + + echo "✅ DMG successfully notarized and stapled" + + # Rename DMG to consistent name + cp "$DMG_PATH" "MCPJam Inspector.dmg" + echo "DMG_PATH=MCPJam Inspector.dmg" >> $GITHUB_ENV + env: + APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }} + APPLE_API_ISSUER_ID: ${{ secrets.APPLE_API_ISSUER_ID }} + + - name: Notarize and staple ZIP + run: | + # Find the ZIP file + ZIP_PATH=$(find out/make -name "*.zip" | head -n1) + + if [ ! -f "$ZIP_PATH" ]; then + echo "ZIP file not found!" + exit 1 + fi + + echo "Found ZIP: $ZIP_PATH" + + # Notarize the ZIP + xcrun notarytool submit "$ZIP_PATH" \ + --key /tmp/AuthKey.p8 \ + --key-id "${{ secrets.APPLE_API_KEY_ID }}" \ + --issuer "${{ secrets.APPLE_API_ISSUER_ID }}" \ + --wait + + echo "ZIP_PATH=$ZIP_PATH" >> $GITHUB_ENV + env: + APPLE_API_KEY_ID: ${{ secrets.APPLE_API_KEY_ID }} + APPLE_API_ISSUER_ID: ${{ secrets.APPLE_API_ISSUER_ID }} + + - name: Create stapled app ZIP for auto-updates + run: | + # Extract the notarized ZIP to get the app + cd "$(dirname "$ZIP_PATH")" + unzip -q "$(basename "$ZIP_PATH")" -d notarized + + # Find and staple the app + APP_PATH=$(find notarized -name "*.app" | head -n1) + if [ ! -d "$APP_PATH" ]; then + echo "App not found in ZIP!" 
+ exit 1 + fi + + xcrun stapler staple "$APP_PATH" + xcrun stapler validate "$APP_PATH" + + # Create new ZIP with stapled app + STAPLED_ZIP="$GITHUB_WORKSPACE/MCPJam Inspector-stapled.zip" + ditto -c -k --keepParent "$APP_PATH" "$STAPLED_ZIP" + + echo "STAPLED_ZIP=$STAPLED_ZIP" >> $GITHUB_ENV + + - name: Clean up sensitive files + if: always() + run: | + rm -f /tmp/AuthKey.p8 + rm -rf ~/certificates + security delete-keychain build.keychain || true + + - name: Upload build artifacts (if not creating release) + if: github.event_name == 'workflow_dispatch' && !inputs.create_release + uses: actions/upload-artifact@v4 + with: + name: mcpjam-inspector-macos + path: | + ${{ env.DMG_PATH }} + ${{ env.ZIP_PATH }} + ${{ env.STAPLED_ZIP }} + retention-days: 30 + + - name: Create GitHub Release + if: github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.create_release) + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }} + name: Release ${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }} + draft: false + prerelease: false + files: | + ${{ env.DMG_PATH }} + ${{ env.ZIP_PATH }} + ${{ env.STAPLED_ZIP }} + body: | + ## Download + - **macOS DMG**: [MCPJam Inspector.dmg](${{ github.server_url }}/${{ github.repository }}/releases/download/${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }}/MCPJam%20Inspector.dmg) + - **macOS ZIP**: [MCPJam Inspector ZIP](${{ github.server_url }}/${{ github.repository }}/releases/download/${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }}/MCPJam%20Inspector-darwin-arm64-0.9.14.zip) + + Both files are notarized and stapled for seamless installation on macOS. 
+ + ## Auto-generated release + This release was automatically built and notarized by GitHub Actions. diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml deleted file mode 100644 index 4b2013773..000000000 --- a/.github/workflows/main.yml +++ /dev/null @@ -1,26 +0,0 @@ -on: - push: - branches: - - main - - pull_request: - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 - with: - node-version: 18 - cache: yarn - - - run: yarn install --immutable - - run: yarn build - - - name: Verify that `yarn build` did not change outputs - run: git diff --exit-code - - - run: yarn test - - run: yarn lint diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml new file mode 100644 index 000000000..c9e2a87c5 --- /dev/null +++ b/.github/workflows/npm-publish.yml @@ -0,0 +1,63 @@ +name: NPM release + +on: + push: + tags: + - "v*" + workflow_dispatch: + inputs: + version: + description: "Version to publish (e.g., v1.0.0)" + required: false + type: string + +env: + NODE_VERSION: "20" + +jobs: + publish-npm: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: ${{ env.NODE_VERSION }} + registry-url: "https://registry.npmjs.org" + + - name: Clear npm cache and rebuild native modules + run: | + npm cache clean --force + rm -rf ~/.npm + rm -rf node_modules package-lock.json + rm -rf client/node_modules client/package-lock.json + rm -rf server/node_modules server/package-lock.json + + - name: Install dependencies with fresh state + run: | + npm install --legacy-peer-deps --no-optional + npm run install:deps + + - name: Rebuild native dependencies + run: | + cd server && npm rebuild esbuild + cd ../client && npm rebuild + + - name: Build for npm + run: npm run build + + - name: Publish to npm + run: npm publish --access public + env: + NODE_AUTH_TOKEN: ${{ 
secrets.NPM_TOKEN }} + + - name: Create success summary + run: | + echo "## ✅ NPM Package Published Successfully!" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Package:** [@mcpjam/inspector](https://www.npmjs.com/package/@mcpjam/inspector)" >> $GITHUB_STEP_SUMMARY + echo "**Version:** ${{ github.ref_name || inputs.version }}" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Install with: \`npm install @mcpjam/inspector\`" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/update-docs.yml b/.github/workflows/update-docs.yml new file mode 100644 index 000000000..6e4047f85 --- /dev/null +++ b/.github/workflows/update-docs.yml @@ -0,0 +1,94 @@ +name: Auto-update Documentation + +on: + pull_request: + types: [closed] + +jobs: + update-docs: + # Only run if PR was actually merged (not just closed) + if: github.event.pull_request.merged == true + runs-on: ubuntu-latest + + steps: + - name: Get PR details + id: pr_details + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Get PR diff + PR_DIFF=$(gh api \ + -H "Accept: application/vnd.github.v3.diff" \ + /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}) + + # Get list of changed files + FILES_CHANGED=$(gh api \ + /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files \ + --jq '[.[] | .filename] | join(", ")') + + # Save to output (escape for JSON) + echo "pr_diff<> $GITHUB_OUTPUT + echo "$PR_DIFF" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + echo "files_changed=$FILES_CHANGED" >> $GITHUB_OUTPUT + + - name: Call Mintlify Agent API + env: + MINTLIFY_API_KEY: ${{ secrets.MINTLIFY_API_KEY }} + PROJECT_ID: ${{ secrets.MINTLIFY_PROJECT_ID }} + PR_BODY: ${{ github.event.pull_request.body }} + PR_DIFF: ${{ steps.pr_details.outputs.pr_diff }} + FILES_CHANGED: ${{ steps.pr_details.outputs.files_changed }} + run: | + # Escape and format the message content + MESSAGE=$(cat <> $env:GITHUB_ENV + } + shell: powershell + + 
- name: Upload build artifacts (if not creating release) + if: github.event_name == 'workflow_dispatch' && !inputs.create_release + uses: actions/upload-artifact@v4 + with: + name: mcpjam-inspector-windows + path: | + out\make\**\*Setup.exe + out\make\**\RELEASES + out\make\**\*.nupkg + retention-days: 30 + + - name: Create GitHub Release + if: github.event_name == 'push' || (github.event_name == 'workflow_dispatch' && inputs.create_release) + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }} + name: Release ${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }} + draft: false + prerelease: false + files: | + ${{ env.SETUP_PATH }} + out\make\**\RELEASES + out\make\**\*.nupkg + body: | + ## Download for Windows + - **Windows Installer**: [MCPJam Inspector Setup.exe](https://github.com/MCPJam/inspector/releases/download/${{ github.event_name == 'workflow_dispatch' && (inputs.version || 'v0.9.14-manual') || github.ref_name }}/MCPJam-Inspector-Setup.exe) + + The installer is code-signed and ready for installation on Windows. + + ## Auto-generated release + This release was automatically built and signed by GitHub Actions. 
diff --git a/.gitignore b/.gitignore index 6e28fb8bd..9e208cd79 100644 --- a/.gitignore +++ b/.gitignore @@ -1,131 +1,76 @@ -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* -lerna-debug.log* -.pnpm-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) -report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories +# dependencies +sdk/node_modules/ node_modules/ -jspm_packages/ - -# Snowpack dependency directory (https://snowpack.dev/) -web_modules/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Optional stylelint cache -.stylelintcache - -# Microbundle cache -.rpt2_cache/ -.rts2_cache_cjs/ -.rts2_cache_es/ -.rts2_cache_umd/ - -# Optional REPL history -.node_repl_history +/.pnp +.npm-cache/ +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/versions + +# testing +/coverage + + +# production +/build +/dist +cli/build/ +cli/dist/ +cli-reference/ +client/dist/ +server/build/ +client/node_modules/ +cli/node_modules/ +cli/package-lock.json +server/node_modules/ +mcp-chat-client/node_modules/ +.npm-cache/ +temp/ +internals/ + +# misc +.DS_Store +*.pem -# Output of 'npm pack' -*.tgz +# pnpm +pnpm-lock.yaml -# Yarn Integrity file -.yarn-integrity +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* -# dotenv 
environment variable files +# env files (can opt-in for committing if needed) .env -.env.development.local -.env.test.local -.env.production.local -.env.local - -# parcel-bundler cache (https://parceljs.org/) -.cache -.parcel-cache - -# Next.js build output -.next -out +.env.development +client/.env +client/.env.* -# Nuxt.js build / generate output -.nuxt +# next.js +/.next/ +/out/ +next-env.d.ts -# Gatsby files -.cache/ -# Comment in the public line in if your project uses Gatsby and not Next.js -# https://nextjs.org/blog/next-9-1#public-directory-support -# public - -# vuepress build output -.vuepress/dist - -# vuepress v2.x temp and cache directory -.temp -.cache - -# Docusaurus cache and generated files -.docusaurus - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ +# typescript +*.tsbuildinfo -# TernJS port file -.tern-port +# vite +.vite/ -# Stores VSCode versions used for testing VSCode extensions -.vscode-test +# convex (generated client code) +convex/ -# yarn v2 -.yarn/cache -.yarn/unplugged -.yarn/build-state.yml -.yarn/install-state.gz -.pnp.* +# Sentry Config File +.env.sentry-build-plugin -.DS_Store +# sdk +sdk/dist/ +sdk/node_modules/ +sdk/mcp-client-manager/node_modules/ +sdk/mcp-client-manager/dist/ \ No newline at end of file diff --git a/.npmignore b/.npmignore new file mode 100644 index 000000000..cd77cb364 --- /dev/null +++ b/.npmignore @@ -0,0 +1,29 @@ +.next/ +node_modules/ +client/node_modules/ +server/node_modules/ +.git/ +.gitignore +README.md +.env* +*.log +.DS_Store +*.tsbuildinfo +coverage/ +.nyc_output/ +.vscode/ +.idea/ +*.swp +*.swo +*~ +src/ +.claude/ +hackathon/ +claude-code-prompts/ +cli/ +forge.config.ts +vite.*.config.* +tsconfig.json +Dockerfile* +docker-compose* +SECURITY.md \ No newline at end of file diff --git a/.npmrc b/.npmrc new file mode 100644 index 000000000..521a9f7c0 --- /dev/null +++ b/.npmrc @@ -0,0 +1 @@ +legacy-peer-deps=true diff --git a/.nvmrc b/.nvmrc 
new file mode 100644 index 000000000..209e3ef4b --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +20 diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 000000000..92c583369 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,24 @@ +# Build output +dist +build +out + +# Dependencies +node_modules + +# Logs +*.log +npm-debug.log* + +# Environment files +.env +.env.local + +# Lock files +package-lock.json +yarn.lock +pnpm-lock.yaml + +# Coverage +coverage +.nyc_output diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 000000000..5150a5620 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,51 @@ +# Repository Guidelines + +## Project Structure & Module Organization + +- `client/` — Vite + React UI (TS/TSX). Outputs to `dist/client`. +- `server/` — Hono HTTP API (Node/TS). Outputs to `dist/server`. +- `src/` — Electron entry points (`main.ts`, `preload.ts`). +- `shared/` — Shared types/utilities consumed by client and server. +- `cli/` — Programmatic MCP testing CLI. +- `assets/`, `scripts/`, `bin/` — Icons, build helpers, startup scripts. +- `dist/`, `out/` — Build artifacts (do not edit). + +## Build, Test, and Development Commands + +- `npm run install:deps` — Install client and server dependencies. +- `npm run dev` — Run server and client in watch mode. +- `npm run build` — Build client and server bundles to `dist/`. +- `npm start` — Start production server via `bin/start.js` (auto‑picks a free port). Example: `PORT=6274 npm start`. +- `npm run test` — Run unit tests with Vitest. +- `npm run test:e2e` — Run Playwright end‑to‑end tests. +- Electron: `npm run electron:start`, `electron:make`, `electron:package`. +- Docker: `docker:build`, `docker:run`, `docker:up`, `docker:down`. + +## Coding Style & Naming Conventions + +- Language: TypeScript. Prefer named exports. 2‑space indentation. +- Run `npm run prettier-fix` before pushing. +- React: functional components, files `PascalCase.tsx` in `client/src/components/`. +- Modules/utilities: `kebab-case.ts`. 
Types/interfaces `PascalCase`. +- Imports: client supports `@/...` alias; server uses relative paths. + +## Testing Guidelines + +- Frameworks: Vitest (unit), Playwright (e2e). +- Co‑locate tests next to source: `*.test.ts`/`*.test.tsx`. +- Aim to cover data transforms, hooks, and server routes. +- Keep tests deterministic; avoid network calls unless mocked. + +## Commit & Pull Request Guidelines + +- Commits: concise, imperative subject; include scope when helpful. + - Examples: `server: add health endpoint`, `client: fix connection modal`, `build: update forge config`. +- PRs: include purpose, linked issues, clear testing notes, and screenshots for UI changes. +- Keep PRs focused and small; update docs when behavior or commands change. + +## Security & Configuration Tips + +- Do not commit secrets. Use environment variables locally and CI. +- Common env vars: `PORT`, `MCP_SERVER_COMMAND`, `MCP_SERVER_ARGS`. + - Example: `MCP_SERVER_COMMAND="npx" MCP_SERVER_ARGS='["my-mcp", "--flag"]' npm start`. +- Docker image exposes port 3001; configure `PORT` as needed. diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 000000000..981905ae4 --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,996 @@ +# MCPJam Inspector - Technical Architecture + +This document provides a deep technical dive into the MCPJam Inspector architecture for developers who want to understand the system internals. + +## Table of Contents + +- [System Overview](#system-overview) +- [Multi-Mode Architecture](#multi-mode-architecture) +- [Server Architecture](#server-architecture) +- [Electron Integration](#electron-integration) +- [MCP Client Management](#mcp-client-management) +- [State Management](#state-management) +- [Real-time Communication](#real-time-communication) +- [Authentication Flow](#authentication-flow) +- [Data Flow](#data-flow) + +## System Overview + +MCPJam Inspector is a sophisticated multi-platform application that can run as: + +1. 
A standalone web application (client + server) +2. An Electron desktop application (embedded server) +3. A Docker container + +### Tech Stack + +```mermaid +graph TB + subgraph "Frontend Stack" + React[React 19] + Vite[Vite] + TanStack[TanStack Query] + Zustand[Zustand] + Shadcn[Shadcn UI] + end + + subgraph "Backend Stack" + Hono[Hono.js] + MCP[@mastra/mcp] + Convex[Convex] + end + + subgraph "Desktop Stack" + Electron[Electron 37] + Forge[Electron Forge] + end + + subgraph "DevOps" + Docker[Docker] + GitHub[GitHub Actions] + Sentry[Sentry] + end +``` + +## Multi-Mode Architecture + +### Mode Detection Flow + +The application determines its runtime mode through a series of environment checks: + +```mermaid +flowchart TD + Boot([Application Boot]) + + Boot --> EnvCheck[Load Environment Variables] + EnvCheck --> ElectronCheck{ELECTRON_APP === 'true'?} + + ElectronCheck -->|Yes| PackagedCheck{IS_PACKAGED === 'true'?} + ElectronCheck -->|No| NodeEnvCheck{NODE_ENV?} + + PackagedCheck -->|Yes| EProd[Electron Production Mode
---
Server: 127.0.0.1:3000
Resources: process.resourcesPath
Client: Bundled in resources/client
Static: Serve from bundle] + + PackagedCheck -->|No| EDev[Electron Development Mode
---
Server: localhost:3000
Resources: app.getAppPath
Client: Vite dev server :8080
Static: Redirect to Vite] + + NodeEnvCheck -->|production| WProd[Web Production Mode
---
Server: 0.0.0.0:3001
Client: dist/client
Static: Serve bundled files
SPA: Fallback to index.html] + + NodeEnvCheck -->|development| WDev[Web Development Mode
---
Server: localhost:3000
Client: Vite dev server :8080
Static: API only, CORS enabled
SPA: No static serving] + + style EProd fill:#90EE90 + style EDev fill:#FFD700 + style WProd fill:#87CEEB + style WDev fill:#FFA07A +``` + +### File Serving Strategy + +Different modes have different file serving strategies: + +```mermaid +graph TB + Request[HTTP Request] + + Request --> Mode{Runtime Mode?} + + Mode -->|Web Dev| DevServer[API Only
No static files
CORS to :8080] + Mode -->|Web Prod| ProdServer[Serve from dist/client
SPA fallback
index.html injection] + Mode -->|Electron Dev| ElectronDevServer[Redirect to Vite
307 redirect
localhost:8080] + Mode -->|Electron Prod| ElectronProdServer[Serve from resources
Bundled client
No injection needed] + + DevServer --> Vite[Vite Dev Server
HMR enabled] + ProdServer --> Static[Static Files
Pre-built] + ElectronDevServer --> Vite + ElectronProdServer --> Bundled[Bundled Assets
In .app/resources] + + style DevServer fill:#fff3cd + style ProdServer fill:#d4edda + style ElectronDevServer fill:#e1f5ff + style ElectronProdServer fill:#90EE90 +``` + +## Server Architecture + +### Hono Application Factory + +The server uses a factory pattern (`server/app.ts`) to create the Hono app, allowing it to be used in multiple contexts (standalone server, Electron, Docker). + +```mermaid +flowchart LR + Factory[createHonoApp] + + Factory --> EnvLoad[Load .env
Based on mode] + EnvLoad --> Validate[Validate CONVEX_HTTP_URL] + Validate --> FixPath[Fix PATH for GUI apps
fixPath] + FixPath --> CreateApp[Create Hono instance] + + CreateApp --> MCPManager[Initialize MCPClientManager
+ RPC Logger] + CreateApp --> Middleware[Setup Middleware
CORS, Logger] + CreateApp --> Routes[Mount Routes
/api/mcp] + + MCPManager --> Bus[Wire to rpcLogBus
SSE events] + + Routes --> App[Return Hono App] + Middleware --> App + Bus --> App + + style Factory fill:#e1f5ff + style App fill:#90EE90 +``` + +### API Routes Structure + +```mermaid +graph TB + API[/api] + + API --> MCP[/mcp] + + MCP --> Servers[/servers] + MCP --> Health[/health] + + Servers --> List[GET / - List servers] + Servers --> Connect[POST /:id/connect] + Servers --> Disconnect[POST /:id/disconnect] + Servers --> Tools[/servers/:id/tools] + Servers --> Resources[/servers/:id/resources] + Servers --> Prompts[/servers/:id/prompts] + + Tools --> ListTools[GET / - List tools] + Tools --> CallTool[POST /:name/call] + + Resources --> ListResources[GET / - List resources] + Resources --> ReadResource[GET /:uri] + + Prompts --> ListPrompts[GET / - List prompts] + Prompts --> GetPrompt[POST /:name/get] + + style API fill:#e1f5ff + style MCP fill:#fff3cd + style Servers fill:#d4edda +``` + +### Request Lifecycle + +```mermaid +sequenceDiagram + participant Client as HTTP Client + participant Hono as Hono Server + participant Middleware as Middleware Stack + participant Route as Route Handler + participant Manager as MCPClientManager + participant RPC as rpcLogBus + participant MCP as MCP Server + + Client->>Hono: HTTP Request + Hono->>Middleware: Process request + Middleware->>Middleware: Logger middleware + Middleware->>Middleware: CORS middleware + Middleware->>Middleware: Inject mcpClientManager + Middleware->>Route: Forward to route + + Route->>Manager: Get server client + Manager->>RPC: Publish outgoing RPC + Manager->>MCP: Execute MCP call + MCP-->>Manager: Response + Manager->>RPC: Publish incoming RPC + Manager-->>Route: Return result + + Route->>Route: Format response + Route-->>Middleware: Return JSON + Middleware-->>Hono: Response + Hono-->>Client: HTTP Response + + Note over RPC: SSE subscribers receive
real-time logs +``` + +## Electron Integration + +### Process Architecture + +Electron uses a multi-process architecture: + +```mermaid +graph TB + Main[Main Process
src/main.ts] + + Main --> Server[Embedded Hono Server
127.0.0.1:3000] + Main --> Window[BrowserWindow] + Main --> IPC[IPC Handlers
src/ipc/*] + Main --> Protocol[Protocol Handler
mcpjam://] + + Window --> Renderer[Renderer Process
client/src/*] + Renderer --> Preload[Preload Script
src/preload.ts] + + Renderer -.HTTP.-> Server + Renderer -.IPC.-> IPC + Protocol -.Deep Links.-> Main + + style Main fill:#f8d7da + style Renderer fill:#fff3cd + style Server fill:#d4edda +``` + +### Startup Sequence + +```mermaid +sequenceDiagram + participant OS as Operating System + participant Main as Main Process + participant Server as Hono Server + participant Window as BrowserWindow + participant Renderer as Renderer Process + + OS->>Main: Launch app + Main->>Main: app.whenReady() + + Note over Main: Set environment variables:
ELECTRON_APP=true
IS_PACKAGED
ELECTRON_RESOURCES_PATH + + Main->>Server: startHonoServer() + Server->>Server: Find available port + Server->>Server: createHonoApp() + Server-->>Main: Return port (e.g., 3000) + + Main->>Window: createMainWindow(serverUrl) + Window->>Window: Create BrowserWindow + Window->>Renderer: Load URL + + alt Development Mode + Renderer->>Renderer: Load from Vite
MAIN_WINDOW_VITE_DEV_SERVER_URL + else Production Mode + Renderer->>Renderer: Load from server
http://127.0.0.1:3000 + end + + Main->>Main: registerListeners() + Main->>Main: createAppMenu() + + Renderer->>Renderer: React app boots + Renderer->>Server: Fetch data via HTTP +``` + +### OAuth Deep Linking + +The Electron app uses a custom protocol (`mcpjam://`) to handle OAuth callbacks: + +```mermaid +sequenceDiagram + participant User + participant App as Electron App + participant Main as Main Process + participant Browser as System Browser + participant OAuth as OAuth Provider
(WorkOS) + participant Renderer as Renderer Process + + User->>App: Click "Sign In" + App->>Renderer: Initiate OAuth + Renderer->>Browser: shell.openExternal(authUrl) + Browser->>OAuth: Navigate to OAuth page + + User->>Browser: Enter credentials + Browser->>OAuth: Submit credentials + OAuth-->>Browser: Redirect to mcpjam://oauth/callback?code=xxx&state=yyy + + Note over Browser,Main: OS intercepts mcpjam:// protocol + + Browser->>Main: open-url event + Main->>Main: Parse URL
Extract code & state + + Main->>Main: Build callback URL
/callback?code=xxx&state=yyy + + alt Window exists + Main->>Renderer: window.loadURL(callbackUrl) + else No window + Main->>Main: createMainWindow() + Main->>Renderer: window.loadURL(callbackUrl) + end + + Main->>Renderer: Send 'oauth-callback' IPC event + Renderer->>Renderer: useElectronOAuth hook processes + + Renderer->>Renderer: Navigate to /callback route + Renderer->>OAuth: Exchange code for tokens
(AuthKit handles this) + OAuth-->>Renderer: Return access token + + Renderer->>User: Sign in complete! +``` + +#### Key Implementation Details + +**Protocol Registration** (`src/main.ts:31-33`): + +```typescript +if (!app.isDefaultProtocolClient("mcpjam")) { + app.setAsDefaultProtocolClient("mcpjam"); +} +``` + +**Deep Link Handler** (`src/main.ts:273-313`): + +```typescript +app.on("open-url", (event, url) => { + event.preventDefault(); + + if (!url.startsWith("mcpjam://oauth/callback")) { + return; + } + + const parsed = new URL(url); + const code = parsed.searchParams.get("code") ?? ""; + const state = parsed.searchParams.get("state") ?? ""; + + // Build callback URL for renderer + const callbackUrl = new URL("/callback", baseUrl); + if (code) callbackUrl.searchParams.set("code", code); + if (state) callbackUrl.searchParams.set("state", state); + + // Load callback URL and emit IPC event + mainWindow.loadURL(callbackUrl.toString()); + mainWindow.webContents.send("oauth-callback", url); +}); +``` + +**React Hook** (`client/src/hooks/useElectronOAuth.ts`): + +```typescript +useEffect(() => { + if (!window.isElectron || !window.electronAPI?.oauth) { + return; + } + + const handleOAuthCallback = (url: string) => { + const urlObj = new URL(url); + const params = new URLSearchParams(urlObj.search); + const code = params.get("code"); + const state = params.get("state"); + + if (code) { + const callbackUrl = new URL("/callback", window.location.origin); + callbackUrl.searchParams.set("code", code); + if (state) callbackUrl.searchParams.set("state", state); + + // Redirect to AuthKit's callback handler + window.location.href = callbackUrl.toString(); + } + }; + + window.electronAPI.oauth.onCallback(handleOAuthCallback); + return () => window.electronAPI.oauth.removeCallback(); +}, []); +``` + +## MCP Client Management + +### MCPClientManager Architecture + +The MCPClientManager is the heart of MCP server integration: + +```mermaid +graph TB + Manager[MCPClientManager
sdk/src/index.ts] + + Manager --> Config[Server Configurations
STDIO, SSE, HTTP] + Manager --> Pool[Client Pool
Map serverId → Client] + Manager --> Logger[RPC Logger
Callback function] + + Pool --> STDIO[STDIO Client
Child process spawn] + Pool --> SSE[SSE Client
EventSource] + Pool --> HTTP[HTTP Client
Fetch API] + + STDIO --> Transport1[Transport Layer
stdin/stdout streams] + SSE --> Transport2[Transport Layer
Server-Sent Events] + HTTP --> Transport3[Transport Layer
HTTP/Streamable] + + Logger --> Bus[rpcLogBus
Publish-Subscribe] + Bus --> SSEStream[SSE /api/mcp/rpc-logs] + SSEStream --> Frontend[Frontend Subscribers
Real-time logs] + + style Manager fill:#e1f5ff + style Logger fill:#fff3cd + style Bus fill:#d4edda +``` + +### Client Lifecycle + +```mermaid +stateDiagram-v2 + [*] --> Disconnected + + Disconnected --> Connecting: connect() + Connecting --> Connected: Success + Connecting --> Failed: Error + + Connected --> Active: ready + Active --> Disconnecting: disconnect() + Active --> Failed: Transport error + + Disconnecting --> Disconnected: Cleanup complete + Failed --> Disconnected: Reset + + Disconnected --> [*] +``` + +### Transport Selection + +```mermaid +flowchart TD + Start([MCP Server Config]) + + Start --> Type{Transport Type?} + + Type -->|stdio| STDIO[STDIO Transport
---
• Spawn child process
• command + args
• env variables
• stdin/stdout pipes] + + Type -->|sse| SSE[SSE Transport
---
• EventSource connection
• url endpoint
• Auto-reconnect
• Message streaming] + + Type -->|http| HTTP[HTTP Transport
---
• Fetch-based
• Streamable responses
• Request/response
• Connection pooling] + + STDIO --> Protocol[MCP Protocol Layer
JSON-RPC 2.0] + SSE --> Protocol + HTTP --> Protocol + + Protocol --> Methods[MCP Methods
• initialize
• tools/list
• tools/call
• resources/list
• resources/read
• prompts/list
• prompts/get] + + style STDIO fill:#fff3cd + style SSE fill:#d4edda + style HTTP fill:#e1f5ff +``` + +## State Management + +### Frontend State Architecture + +```mermaid +graph TB + subgraph "React Query (Server State)" + Queries[TanStack Query] + Cache[Query Cache] + Mutations[Mutations] + end + + subgraph "Zustand (Client State)" + Preferences[Preferences Store] + UI[UI State Store] + end + + subgraph "Context (Scoped State)" + Auth[Auth Context
WorkOS AuthKit] + end + + Components[React Components] + + Components --> Queries + Components --> Mutations + Components --> Preferences + Components --> UI + Components --> Auth + + Queries --> Cache + Mutations --> Cache + + Cache --> Refetch[Auto-refetch] + Refetch --> API[API Calls] + + style Queries fill:#fff3cd + style Preferences fill:#d4edda + style Auth fill:#e1f5ff +``` + +### Data Synchronization + +```mermaid +sequenceDiagram + participant UI as React Component + participant Query as TanStack Query + participant Cache as Query Cache + participant API as API Endpoint + participant Server as MCP Server + + UI->>Query: useQuery('servers') + Query->>Cache: Check cache + + alt Cache hit & fresh + Cache-->>Query: Return cached data + Query-->>UI: Render with data + else Cache miss or stale + Query->>API: Fetch /api/mcp/servers + API->>Server: List MCP servers + Server-->>API: Server list + API-->>Query: JSON response + Query->>Cache: Update cache + Query-->>UI: Render with data + end + + Note over UI: User calls tool + + UI->>Query: useMutation('callTool') + Query->>API: POST /api/mcp/servers/:id/tools/:name/call + API->>Server: Execute tool + Server-->>API: Tool result + API-->>Query: JSON response + Query->>Cache: Invalidate related queries + Cache->>API: Auto-refetch + Query-->>UI: Update with result +``` + +## Real-time Communication + +### SSE Event Bus + +The application uses Server-Sent Events for real-time RPC logging: + +```mermaid +graph TB + subgraph "Backend" + Manager[MCPClientManager] + Logger[RPC Logger Callback] + Bus[rpcLogBus
Publish-Subscribe] + Route[/api/mcp/rpc-logs
SSE Endpoint] + end + + subgraph "Transport" + SSEStream[Server-Sent Events
event: message] + end + + subgraph "Frontend" + EventSource[EventSource API] + Subscribers[Log Subscribers
React components] + end + + Manager --> Logger + Logger --> Bus + Bus --> Route + Route --> SSEStream + SSEStream --> EventSource + EventSource --> Subscribers + + style Bus fill:#d4edda + style SSEStream fill:#fff3cd +``` + +### RPC Log Flow + +```mermaid +sequenceDiagram + participant Tool as Tool Call + participant Manager as MCPClientManager + participant Logger as rpcLogger + participant Bus as rpcLogBus + participant SSE as SSE Route + participant Client as Frontend + + Note over Client,SSE: Client establishes SSE connection + + Client->>SSE: GET /api/mcp/rpc-logs + SSE-->>Client: 200 OK, text/event-stream + + Note over Tool,Manager: User executes tool + + Tool->>Manager: callTool(params) + + Manager->>Logger: Log outgoing RPC + Logger->>Bus: publish({ direction: 'outgoing', message }) + Bus->>SSE: Emit event to all connections + SSE-->>Client: data: { direction: 'outgoing', ... } + + Manager->>Manager: Send to MCP server + + Note over Manager: MCP server responds + + Manager->>Logger: Log incoming RPC + Logger->>Bus: publish({ direction: 'incoming', message }) + Bus->>SSE: Emit event to all connections + SSE-->>Client: data: { direction: 'incoming', ... } + + Client->>Client: Display in logs panel +``` + +## Authentication Flow + +### WorkOS AuthKit Integration + +```mermaid +sequenceDiagram + participant User + participant App as React App + participant AuthKit as AuthKit Component + participant WorkOS as WorkOS API + participant Convex as Convex Backend + + User->>App: Navigate to app + App->>AuthKit: Render AuthKit provider + AuthKit->>AuthKit: Check localStorage
for session + + alt No session + AuthKit->>User: Show sign-in UI + User->>AuthKit: Click "Sign In" + + alt Web App + AuthKit->>WorkOS: Redirect to OAuth + WorkOS->>User: Show auth page + User->>WorkOS: Authenticate + WorkOS-->>AuthKit: Redirect with code + else Electron App + AuthKit->>Browser: Open in system browser + Browser->>WorkOS: OAuth flow + WorkOS-->>Browser: mcpjam://oauth/callback?code=xxx + Browser->>Main: Deep link + Main->>Renderer: Load /callback + Renderer->>AuthKit: Process callback + end + + AuthKit->>WorkOS: Exchange code for token + WorkOS-->>AuthKit: Access token + AuthKit->>AuthKit: Store in localStorage + end + + AuthKit->>Convex: Verify token + Convex-->>AuthKit: User info + AuthKit->>App: Provide auth context + App->>User: Show authenticated UI +``` + +## Data Flow + +### Complete Request-Response Cycle + +```mermaid +flowchart TD + User([User Action]) + + User --> UI[React Component
e.g., Tool Call Button] + UI --> Mutation[useMutation
TanStack Query] + Mutation --> API[POST /api/mcp/servers/:id/tools/:name/call] + + API --> Hono[Hono Route Handler] + Hono --> Middleware[Middleware Chain] + Middleware --> Context[Get mcpClientManager
from context] + + Context --> Manager[MCPClientManager.getClient] + Manager --> Client[MCP Client Instance] + + Client --> LogOut[rpcLogger
direction: outgoing] + LogOut --> Bus1[rpcLogBus.publish] + Bus1 --> SSE1[SSE /rpc-logs
Emit to subscribers] + + Client --> Transport[Transport Layer
STDIO/SSE/HTTP] + Transport --> MCPServer[MCP Server
External process] + + MCPServer --> Response[Tool Result] + Response --> Client2[MCP Client] + + Client2 --> LogIn[rpcLogger
direction: incoming] + LogIn --> Bus2[rpcLogBus.publish] + Bus2 --> SSE2[SSE /rpc-logs
Emit to subscribers] + + Client2 --> Format[Format Response] + Format --> JSON[JSON Response] + JSON --> React[React Query Cache] + React --> Update[Update UI] + Update --> User + + SSE1 -.Real-time logs.-> LogPanel[Logs Panel] + SSE2 -.Real-time logs.-> LogPanel + + style User fill:#e1f5ff + style Manager fill:#fff3cd + style Bus1 fill:#d4edda + style Bus2 fill:#d4edda +``` + +## Performance Considerations + +### Caching Strategy + +```mermaid +graph TB + Request[API Request] + + Request --> QueryCache{In Query Cache?} + + QueryCache -->|Yes + Fresh| Instant[Return Immediately
0ms] + QueryCache -->|Yes + Stale| Background[Return cached data
Refetch in background] + QueryCache -->|No| Fetch[Fetch from API] + + Background --> Update[Update on completion] + Fetch --> Cache[Cache result] + + Cache --> Invalidation{Auto-invalidation?} + Invalidation -->|Mutation| InvalidateRelated[Invalidate related queries] + Invalidation -->|Time| StaleTime[Mark stale after X seconds] + + style Instant fill:#90EE90 + style Background fill:#FFD700 + style Fetch fill:#FFA07A +``` + +### Connection Pooling + +```mermaid +graph LR + subgraph "MCPClientManager" + Pool[Client Pool
Map: serverId → Client] + end + + subgraph "Clients" + Client1[Server 1 Client
STDIO] + Client2[Server 2 Client
SSE] + Client3[Server 3 Client
HTTP] + end + + Request1[Tool Request
Server 1] + Request2[Resource Request
Server 2] + Request3[Prompt Request
Server 3] + + Request1 --> Pool + Request2 --> Pool + Request3 --> Pool + + Pool --> Client1 + Pool --> Client2 + Pool --> Client3 + + Client1 -.Reused connection.-> Pool + Client2 -.Reused connection.-> Pool + Client3 -.Reused connection.-> Pool + + style Pool fill:#d4edda +``` + +## Error Handling + +### Error Propagation + +```mermaid +flowchart TD + Error([Error Occurs]) + + Error --> Layer{Where?} + + Layer -->|MCP Server| MCPError[MCP Server Error
- Invalid parameters
- Server crash
- Timeout] + + Layer -->|Transport| TransportError[Transport Error
- Connection failed
- Network timeout
- Protocol error] + + Layer -->|Client Manager| ManagerError[Manager Error
- Server not found
- Client not connected
- Invalid config] + + Layer -->|API| APIError[API Error
- Invalid request
- Auth failure
- Rate limit] + + MCPError --> Log[Log to console] + TransportError --> Log + ManagerError --> Log + APIError --> Log + + Log --> Sentry[Report to Sentry
If production] + + Sentry --> Response[Format Error Response] + + Response --> Client[Return to client
with error details] + + Client --> UI[Display error in UI
Toast/Alert] + + style Error fill:#f8d7da + style Sentry fill:#e1f5ff +``` + +## Deployment Architecture + +### Build Artifacts + +```mermaid +graph TB + Source[Source Code] + + Source --> BuildClient[npm run build:client] + Source --> BuildServer[npm run build:server] + Source --> BuildSDK[npm run build:sdk] + + BuildClient --> ClientDist[dist/client/
- index.html
- assets/js/*.js
- assets/css/*.css] + + BuildServer --> ServerDist[dist/server/
- index.js
- app.js
- routes/**/*.js] + + BuildSDK --> SDKDist[sdk/dist/
- index.js
- types.d.ts] + + ClientDist --> WebDeploy[Web Deployment
Serve via Hono] + ServerDist --> WebDeploy + + ClientDist --> ElectronBuild[Electron Forge Build] + ServerDist --> ElectronBuild + SDKDist --> ElectronBuild + + ElectronBuild --> ElectronArtifacts[Electron Artifacts
- .app for macOS
- .exe for Windows
- .deb for Linux] + + ClientDist --> DockerBuild[Docker Build] + ServerDist --> DockerBuild + + DockerBuild --> DockerImage[Docker Image
mcpjam/inspector:latest] + + style ClientDist fill:#fff3cd + style ServerDist fill:#d4edda + style SDKDist fill:#e1f5ff +``` + +### Electron Packaging + +```mermaid +flowchart LR + Start([npm run electron:make]) + + Start --> Icons1[Generate Icons
Windows .ico] + Icons1 --> Icons2[Generate Icons
macOS .icns] + + Icons2 --> Forge[Electron Forge] + + Forge --> Package[Package Step
- Bundle app code
- Include resources
- Sign binaries] + + Package --> Make[Make Step
Platform-specific] + + Make --> MacOS[macOS
- DMG installer
- ZIP archive
- Code signing] + + Make --> Windows[Windows
- Squirrel installer
- NSIS installer
- Code signing] + + Make --> Linux[Linux
- DEB package
- RPM package
- AppImage] + + MacOS --> Output[/out directory] + Windows --> Output + Linux --> Output + + style Start fill:#e1f5ff + style Output fill:#90EE90 +``` + +## Security Architecture + +### Security Layers + +```mermaid +graph TB + subgraph "Frontend Security" + CSP[Content Security Policy] + XSS[XSS Protection] + HTTPS[HTTPS Only] + end + + subgraph "Transport Security" + CORS[CORS Configuration] + Auth[WorkOS Authentication] + RateLimit[Rate Limiting] + end + + subgraph "Backend Security" + Validation[Input Validation] + Sanitization[Data Sanitization] + Secrets[Secret Management] + end + + subgraph "Electron Security" + NoNodeInt[No Node Integration
in renderer] + ContextIso[Context Isolation] + Preload[Secure Preload Script] + end + + Request[User Request] + + Request --> CSP + Request --> XSS + Request --> HTTPS + + CSP --> CORS + XSS --> CORS + HTTPS --> CORS + + CORS --> Auth + Auth --> RateLimit + + RateLimit --> Validation + Validation --> Sanitization + Sanitization --> Secrets + + NoNodeInt --> Safe[Safe Execution] + ContextIso --> Safe + Preload --> Safe + + style Auth fill:#90EE90 + style Secrets fill:#f8d7da +``` + +## Monitoring & Observability + +### Telemetry Pipeline + +```mermaid +flowchart LR + App[Application Events] + + App --> Console[Console Logs
Development] + App --> Sentry[Sentry
Error Tracking] + App --> Metrics[Performance Metrics
TanStack Query DevTools] + + Console --> Dev[Developers
Local debugging] + + Sentry --> Dashboard[Sentry Dashboard
Production errors] + + Metrics --> Analysis[Performance Analysis
Query timing
Cache hit rates] + + Dashboard --> Alerts[Slack Alerts
Critical errors] + + style Sentry fill:#f8d7da + style Console fill:#fff3cd + style Metrics fill:#d4edda +``` + +--- + +## Quick Reference + +### Environment Variables + +| Variable | Purpose | Set By | +| ------------------------- | ------------------------------- | --------------------- | +| `CONVEX_HTTP_URL` | Convex backend URL | User (required) | +| `ELECTRON_APP` | Indicates Electron runtime | Electron main process | +| `IS_PACKAGED` | Indicates packaged Electron app | Electron main process | +| `ELECTRON_RESOURCES_PATH` | Path to Electron resources | Electron main process | +| `NODE_ENV` | Runtime environment | Build scripts / user | +| `PORT` | Server port | User (default: 3001) | +| `DEBUG_MCP_SELECTION` | Enable MCP debug logs | User (optional) | + +### Key File Locations + +| Path | Purpose | +| -------------------------------------- | --------------------------------------------- | +| `server/index.ts:180-192` | MCPClientManager initialization (npm package) | +| `server/app.ts:67-79` | MCPClientManager setup (Electron) | +| `src/main.ts:62-92` | Hono server startup in Electron | +| `src/main.ts:273-313` | OAuth deep link handler | +| `client/src/hooks/useElectronOAuth.ts` | React OAuth hook | +| `server/routes/mcp/index.ts` | MCP API routes | +| `server/services/rpc-log-bus.ts` | SSE event bus | +| `sdk/src/index.ts` | MCP SDK wrapper | + +### Common Patterns + +**Adding a new MCP endpoint:** + +1. Add route in `server/routes/mcp/` +2. Use `c.mcpClientManager` from context +3. Handle errors and log via `rpcLogBus` +4. Return JSON response + +**Adding a new React feature:** + +1. Create component in `client/src/components/` +2. Use TanStack Query for server state +3. Use Zustand for client state +4. Follow Shadcn UI patterns + +**Testing in Electron:** + +1. Run `npm run electron:dev` +2. Open DevTools with `Cmd+Option+I` (Mac) or `Ctrl+Shift+I` (Windows) +3. Check main process logs in terminal +4. 
Check renderer logs in DevTools + +--- + +For more information, see: + +- [CONTRIBUTING.md](./CONTRIBUTING.md) - Contribution guidelines +- [CLAUDE.md](./CLAUDE.md) - AI assistant instructions +- [README.md](./README.md) - User-facing documentation diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..00bdc5d75 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,266 @@ +# MCPJam Inspector Development Guidelines + +## Project Overview + +MCPJam Inspector is a developer tool for testing and debugging Model Context Protocol (MCP) servers. Built with Electron, it combines a Vite+React frontend and Hono.js backend to provide a comprehensive development environment for MCP server implementations. + +## Quick Start + +```bash +# Install dependencies +npm install + +# Start development +npm run dev + +# Build for production +npm run build + +# Start production +npm start +``` + +## Project Structure + +``` +/inspector + /client # Vite + React frontend + /src # UI components, hooks, stores + /public # Static assets + /server # Hono.js backend + /routes # API endpoints, MCP handlers + /utils # Server utilities + /src # Electron main process + /ipc # Inter-process communication + /shared # Common types, utilities +``` + +## Core Features + +1. **MCP Compliance Testing** + - Full spec validation for tools and resources + - Tool definition validation (name, description, parameters) + - Resource schema verification + - Input/output format checking + - Parameter type validation + - OAuth 2.0 authentication testing + - Token flow validation + - Scope verification + - Refresh token handling + - Error response testing + - Prompt and elicitation verification + - Context window validation + - Token limit compliance + - Response format checking + - Streaming response validation + - Real-time compliance checking + - Live validation feedback + - Error highlighting + - Fix suggestions + - Compliance reports + +2. 
**Transport Support** + - STDIO transport protocol + - Bidirectional streaming + - Process management + - Error handling + - Buffer management + - Server-Sent Events (SSE) + - Event stream handling + - Reconnection logic + - Message parsing + - Error recovery + - Streamable HTTP transport + - Chunked transfer encoding + - Connection pooling + - Request/response streaming + - Timeout handling + - Connection management + - Auto-reconnect + - Load balancing + - Circuit breaking + - Health checks + +3. **LLM Integration** + - OpenAI models support + - GPT-3.5/4 integration + - API key management + - Model selection + - Temperature control + - Anthropic Claude integration + - Claude 2/3 support + - Context handling + - Response streaming + - Error handling + - DeepSeek AI integration + - DeepSeek R1 Support + - Coding-optimized models + - Reasoning capabilities + - Context management + - Ollama model compatibility + - Local model support + - Custom model loading + - Inference optimization + - Resource management + - Response validation + - Format verification + - Token counting + - Content filtering + - Safety checks + +4. 
**Developer Tools** + - Comprehensive logging + - Request/response logs + - Error tracking + - Performance metrics + - Debug information + - Request/response tracing + - Timing analysis + - Headers inspection + - Payload examination + - Transport details + - Error reporting and analysis + - Stack traces + - Error categorization + - Resolution suggestions + - Error patterns + - Performance monitoring + - Response times + - Resource usage + - Throughput metrics + - Bottleneck detection + +## Project Structure + +``` +/inspector + /client # React + Vite frontend + /src # UI components, hooks, stores + /public # Static assets, images + /server # Hono.js backend + /routes # API endpoints, MCP handlers + /utils # Server utilities + /src # Electron main process + /ipc # Inter-process communication + /shared # Common types, utilities +``` + +## Development Setup + +### Quick Start + +```bash +# Install dependencies +npm install + +# Start development server +npm run dev + +# Build for production +npm run build + +# Start production server +npm start +``` + +### Docker Support + +```bash +# Run latest version +docker run -p 3001:3001 mcpjam/mcp-inspector:latest + +# Run in background +docker run -d -p 3001:3001 --name mcp-inspector mcpjam/mcp-inspector:latest +``` + +## Best Practices + +1. **Code Quality** + - Follow TypeScript best practices + - Strict type checking + - Interface definitions + - Generic constraints + - Type guards + - Maintain consistent code style + - ESLint configuration + - Prettier formatting + - Import ordering + - Component structure + - Write comprehensive tests + - Unit testing + - Integration testing + - E2E testing + - Performance testing + - Document API changes + - OpenAPI specs + - Breaking changes + - Migration guides + - Version history + +2. 
**MCP Development** + - Follow MCP specification + - Protocol versioning + - Message formats + - Error codes + - Extensions + - Implement proper error handling + - Error types + - Recovery strategies + - Fallback mechanisms + - Error reporting + - Validate server responses + - Schema validation + - Content verification + - Status codes + - Headers + - Monitor performance metrics + - Response times + - Resource usage + - Error rates + - Throughput + +3. **Security** + - Secure API key management + - Key rotation + - Access control + - Encryption + - Auditing + - Input validation + - Type checking + - Sanitization + - Size limits + - Format validation + - Rate limiting + - Request quotas + - Throttling + - Backoff strategies + - Burst handling + - Error handling + - Safe error messages + - Log sanitization + - Stack trace hiding + - Security headers + +4. **Documentation** + - Keep docs up-to-date + - API reference + - Setup guides + - Best practices + - Troubleshooting + - Include usage examples + - Code snippets + - Configuration samples + - Common patterns + - Edge cases + - Document breaking changes + - Version differences + - Migration steps + - Deprecation notices + - Compatibility notes + - Maintain changelog + - Version history + - Feature additions + - Bug fixes + - Performance improvements + +- @client/src/components/mcp-sidebar.tsx Can we temporarily disable the interceptor feature, and have the Run evals and eval results be tabs in its own section? diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..01a41698e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,126 @@ +# Contributing + +First off, thank you for considering contributing to MCPJam Inspector! It's people like you that make the open source community such a great place. + +## Finding an issue to work on + +1. You can find things to work on in our [issues tab](https://github.com/MCPJam/inspector/issues). +2. Look for issues labelled `good first issue` and `very easy`. 
These are great starter tasks that are low commitment +3. Once you find an issue you'd like to work on, comment on the issue and tag @matteo8p. Then assign yourself the issue. This helps avoid multiple contributors working on the same issue. + +## Getting Started + +Before you get started, please consider giving the project a star ⭐. It helps grow the project and gives your contributions more recognition. + +Also join our [Discord channel](https://discord.com/invite/JEnDtz8X6z). That's where the community and other open source contributors communicate. + +### Prerequisites + +Make sure to have the following: + +- [Node.js](https://nodejs.org/) (LTS version recommended) +- [npm](https://www.npmjs.com/) (comes with Node.js) + +### Fork, Clone, and Branch + +1. **Fork** the repository on GitHub. +2. **Clone** your fork locally: + ```bash + git clone https://github.com/YOUR_USERNAME/inspector.git + cd inspector + ``` +3. Create a new **branch** for your changes: + ```bash + git checkout -b my-feature-branch + ``` + +### Setup + +Install the dependencies for all workspaces: + +```bash +npm install +``` + +## Development + +To run the client and server in development mode with hot-reloading, use: + +```bash +npm run dev +``` + +This runs: + +- **Client**: Vite dev server on `:8080` +- **Server**: Hono dev server on `:3000` + +For Windows users, there's a specific script: + +```bash +npm run dev:windows +``` + +### Electron Development + +To run the Electron app in development mode: + +```bash +npm run electron:dev +``` + +This runs: + +- Electron main process +- Embedded Hono server +- Vite dev server for renderer + +### Building the Project + +To build all parts of the project (client, server, and SDK), run: + +```bash +npm run build +npm run start # starts the build +``` + +You can also build each part individually: + +- `npm run build:client` - Build React frontend +- `npm run build:server` - Build Hono backend +- `npm run build:sdk` - Build MCP SDK wrapper + +## 
Code Style + +We use [Prettier](https://prettier.io/) to maintain a consistent code style. Before you commit your changes, please format your code by running: + +```bash +npm run prettier-fix +``` + +## Commit Messages + +We follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification. This helps us automate changelog generation and keep the commit history clean and readable. + +Your commit messages should be structured as follows: + +``` +[optional scope]: + +[optional body] + +[optional footer(s)] +``` + +**Example:** +`feat(client): add new button to the main component` +`fix(server): resolve issue with API endpoint` + +## Getting Help + +- **GitHub Issues**: https://github.com/MCPJam/inspector/issues +- **Discord**: https://discord.com/invite/JEnDtz8X6z +- **Discussions**: Use GitHub Discussions for questions +- **Documentation**: https://docs.mcpjam.com + +Thank you for your contribution! diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..fa9a2fc48 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,103 @@ +# Use the existing mcpjam/mcp-inspector as base or build from scratch +# Multi-stage build for client and server + +# Stage 1: Dependencies base (shared) +FROM node:20-slim AS deps-base +WORKDIR /app + +# Copy package.json and package-lock.json files +COPY package.json package-lock.json ./ +COPY sdk/package.json sdk/package-lock.json ./sdk/ +COPY evals-cli/package.json evals-cli/package-lock.json ./evals-cli/ + +# Install dependencies using package-lock files for consistent versions +RUN npm ci --legacy-peer-deps +RUN npm --prefix sdk ci --legacy-peer-deps +RUN npm --prefix evals-cli ci --legacy-peer-deps + +# Stage 2: Build client +FROM deps-base AS client-builder +COPY shared/ ./shared/ +COPY client/ ./client/ +COPY tsconfig.json ./ +COPY vite.renderer.config.mts ./ +COPY vite.main.config.ts ./ +COPY vite.preload.config.ts ./ +COPY .env.production ./ +# Set environment variable for Docker platform detection 
+ENV VITE_DOCKER=true +RUN npm run build:client + +# Stage 3: Build SDK (required by server) +FROM deps-base AS sdk-builder +COPY sdk/ ./sdk/ +RUN npm --prefix sdk run build + +# Stage 4: Build server +FROM deps-base AS server-builder +COPY --from=sdk-builder /app/sdk/dist ./sdk/dist +COPY shared/ ./shared/ +COPY evals-cli/ ./evals-cli/ +COPY server/ ./server/ +COPY tsconfig.json ./ +RUN npm run build:server + +# Stage 5: Production image - extend existing or create new +FROM node:20-slim AS production + +# Build arguments for runtime configuration +ARG CONVEX_HTTP_URL +ENV CONVEX_HTTP_URL=${CONVEX_HTTP_URL} + +# Install dumb-init for proper signal handling +RUN apt-get update && apt-get install -y --no-install-recommends dumb-init && rm -rf /var/lib/apt/lists/* + +# Create app directory +WORKDIR /app + +# Copy built applications +COPY --from=client-builder /app/dist/client ./dist/client +COPY --from=server-builder /app/dist/server ./dist/server + +# Copy built SDK (required by server at runtime) +COPY --from=sdk-builder /app/sdk/dist ./sdk/dist +COPY --from=sdk-builder /app/sdk/package.json ./sdk/package.json +COPY --from=deps-base /app/sdk/node_modules ./sdk/node_modules + +# Copy public assets (logos, etc.) 
to be served at root level +COPY --from=client-builder /app/client/public ./public + +# Copy package.json and node_modules for runtime dependencies +COPY --from=deps-base /app/package.json ./package.json +COPY --from=deps-base /app/node_modules ./node_modules + +# Copy shared types +COPY shared/ ./shared/ + +# Copy any startup scripts +COPY bin/ ./bin/ + +# Create non-root user +RUN groupadd --gid 1001 nodejs && \ + useradd --uid 1001 --gid nodejs --shell /bin/bash --create-home mcpjam + +# Change ownership of the app directory +RUN chown -R mcpjam:nodejs /app +USER mcpjam + +# Expose port +EXPOSE 3001 + +# Set environment variables +ENV PORT=3001 +ENV NODE_ENV=production + +# Health check +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD node -e "require('http').request('http://localhost:3001/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).end()" + +# Use dumb-init to handle signals properly +ENTRYPOINT ["dumb-init", "--"] + +# Start the application with production environment +CMD ["sh", "-c", "NODE_ENV=production node dist/server/index.js"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..9990483e0 --- /dev/null +++ b/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2025-present MCPJam Inc + +Portions of this software are licensed as follows: + +* All content residing under the "evals-cli/" directory of this repository, if that directory exists, is licensed under the license defined in evals-cli/LICENSE. +* Content outside of the above mentioned directories or restrictions above is available under the "Apache License" license as defined below. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/README-Docker.md b/README-Docker.md new file mode 100644 index 000000000..e7bc59a60 --- /dev/null +++ b/README-Docker.md @@ -0,0 +1,189 @@ +# Docker Setup for MCP Inspector + +This guide explains how to use Docker with the MCP Inspector project, leveraging the existing [mcpjam/mcp-inspector](https://hub.docker.com/r/mcpjam/mcp-inspector) image on Docker Hub. + +## Quick Start + +### Using Pre-built Image from Docker Hub + +```bash +# Pull and run the latest version +docker run -p 3001:3001 mcpjam/mcp-inspector:latest + +# Or using docker-compose +docker-compose -f docker-compose.prod.yml up -d +``` + +### Building Locally + +```bash +# Build the production image +npm run docker:build + +# Build the development image +npm run docker:build:dev + +# Run locally built image +npm run docker:run +``` + +## Available Docker Commands + +| Command | Description | +| -------------------------- | ---------------------------------------------- | +| `npm run docker:build` | Build production Docker image | +| `npm run docker:build:dev` | Build development Docker image | +| `npm run docker:run` | Run production container | +| `npm run docker:run:dev` | Run development container | +| `npm run docker:up` | Start production services with docker-compose | +| `npm run docker:up:dev` | Start development services with docker-compose | +| `npm run docker:down` | Stop all services | +| `npm run docker:logs` | View container logs | +| `npm run docker:clean` | Clean up containers, volumes, and images | + +## Docker Compose Profiles + +### Development 
Profile + +```bash +# Start development environment with hot-reloading +docker run -e CONVEX_HTTP_URL=https://your-convex-url.convex.cloud -p 3001:3001 mcpjam/mcp-inspector:latest + +# View logs +docker-compose logs -f mcp-inspector-dev +``` + +### Production Profile + +```bash +# Start production environment +docker-compose --profile production up -d + +# Or use the dedicated production compose file +docker-compose -f docker-compose.prod.yml up -d +``` + +## Configuration + +### Environment Variables + +| Variable | Default | Description | +| ---------- | ------------ | ---------------- | +| `NODE_ENV` | `production` | Environment mode | +| `PORT` | `3001` | Server port | + +### Volumes + +- `mcp_data`: Persistent data storage +- Development mode mounts source code for hot-reloading + +## CI/CD Integration + +The project includes GitHub Actions workflows for: + +1. **Build and Deploy** (`.github/workflows/docker-build-deploy.yml`) + - Runs tests + - Builds Docker images + - Pushes to Docker Hub + - Deploys to staging/production + +2. 
**Security Scanning** (`.github/workflows/docker-security-scan.yml`) + - Daily vulnerability scans + - Trivy and Snyk integration + - SARIF reports to GitHub Security tab + +### Required Secrets + +Add these secrets to your GitHub repository: + +| Secret | Description | +| ----------------- | ------------------------- | +| `DOCKER_USERNAME` | Docker Hub username | +| `DOCKERHUB_TOKEN` | Docker Hub password/token | +| `SNYK_TOKEN` | Snyk API token (optional) | + +## Multi-Architecture Support + +The Docker images support multiple architectures: + +- `linux/amd64` (Intel/AMD processors) +- `linux/arm64` (ARM processors, including Apple Silicon) + +## Health Checks + +Both development and production containers include health checks: + +- Endpoint: `http://localhost:3001/health` +- Interval: 30 seconds +- Timeout: 3 seconds +- Retries: 3 + +## Security Features + +- Non-root user (`mcpjam:nodejs`) +- Minimal Debian-based (`node:20-slim`) base image +- Security scanning in CI/CD +- Proper signal handling with `dumb-init` + +## Troubleshooting + +### Common Issues + +1. **Port already in use** + + ```bash + # Check what's using port 3001 + lsof -i :3001 + + # Use different port + docker run -p 3002:3001 mcpjam/mcp-inspector:latest + ``` + +2. **Permission issues** + + ```bash + # Check container logs + docker logs <container_name> + + # Ensure proper ownership + docker exec -it <container_name> ls -la /app + ``` + +3. **Build failures** + + ```bash + # Clean Docker cache + docker system prune -a + + # Rebuild without cache + docker build --no-cache -t mcpjam/mcp-inspector:latest . + ``` + +### Debugging + +```bash +# Access container shell +docker exec -it <container_name> sh + +# View container logs +docker logs -f <container_name> + +# Inspect container +docker inspect <container_name> +``` + +## Production Deployment + +For production deployment, consider: + +1. **Use docker-compose.prod.yml** +2. **Set up reverse proxy (Nginx)** +3. **Configure SSL certificates** +4. **Set up monitoring and logging** +5. 
**Configure backup for persistent volumes** + +## Links + +- [Docker Hub Repository](https://hub.docker.com/r/mcpjam/mcp-inspector) +- [Project Repository](https://github.com/mcpjam/inspector) +- [MCP Jam Website](https://mcpjam.com) diff --git a/README.md b/README.md index a9371ba8c..6b1552021 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,199 @@ -# mcp-typescript -TypeScript implementation of the Model Context Protocol +
+ + + + + MCPJam Inspector V1 logo + + +
+ +www.mcpjam.com + +[![npm version](https://img.shields.io/npm/v/@mcpjam/inspector?style=for-the-badge&color=blue)](https://www.npmjs.com/package/@mcpjam/inspector) +[![License: Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg?style=for-the-badge)](https://opensource.org/licenses/Apache-2.0) +[![Discord](https://img.shields.io/badge/Discord-Join%20Server-5865F2.svg?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/JEnDtz8X6z) + +
+
+
+ +MCPJam inspector is the local-first development platform for MCP servers. Visually test your server's tools, resources, and prompts. Try your server against different models in the LLM playground. Now with support for OpenAI Apps SDK. + +## 📸 Screenshots + +MCPJam Inspector Demo + +
+LLM Playground + +LLM Chat Demo + +
+ +
+Evals and pen testing + +MCPJam Connection Demo + +
+ +
+Connect with OAuth + +MCPJam Connection Demo + +
 + +## 🚀 Quick Start + +Start up the MCPJam inspector: + +```bash +npx @mcpjam/inspector@latest +``` + +## Key Features + +| Feature | Description | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Protocol handshake testing** | Visually test your MCP server's tools, resources, prompts, elicitation, and OAuth 2. MCPJam is compliant with the latest MCP specs. | +| **All transports** | Connect to any MCP server. MCPJam inspector supports STDIO, SSE, and Streamable HTTP transports. | +| **LLM Playground** | Integrated chat playground with OpenAI, Anthropic Claude, Google Gemini, and Ollama model support. Test how your MCP server would behave against an LLM | +| **Test OAuth** | Test your server's OAuth and Dynamic Client Registration implementation. | +| **View JSON-RPC** | View every JSON-RPC message sent over network. Provides granular observability and debugging. | +| **MCP-UI and OpenAI Apps SDK** | Test your MCP server's implementation of MCP-UI or OpenAI Apps SDK | + +## 🎉 Now with OpenAI Apps SDK support! + +OpenAI Apps SDK Demo + +Developing with Apps SDK is pretty restricted right now as it requires ChatGPT developer mode access and an OpenAI partner to approve access. We wanted to make that more accessible for developers today by putting it in an open source project, give y’all a head start. + +Test your Apps SDK app with: + +- Tools tab. Deterministically call tools and view your UI +- LLM playground to see your Apps SDK UI in a chat environment + +The feature is in beta, and still needs polishing. Please report any bugs in the issues tab. We encourage the community to contribute! + +# Installation Guides + +We recommend starting MCPJam inspector via `npx`: + +```bash +npx @mcpjam/inspector@latest +``` + +or download the Mac / Windows desktop app [on our site](https://www.mcpjam.com/). 
+ +## 🐳 Docker + +Run MCPJam Inspector using Docker: + +```bash +# Run the latest version from Docker Hub +docker run -p 3001:3001 mcpjam/mcp-inspector:latest + +# Or run in the background +docker run -d -p 3001:3001 --name mcp-inspector mcpjam/mcp-inspector:latest +``` + +The application will be available at `http://localhost:3001`. + +## Open commands + +```bash +# Launch with custom port +npx @mcpjam/inspector@latest --port 4000 + +# Shortcut for starting MCPJam and an Ollama model +npx @mcpjam/inspector@latest --ollama llama3.2 + +# Local FastMCP STDIO example +npx @mcpjam/inspector@latest uv run fastmcp run /Users/matt8p/demo/src/server.py + +# Local Node example +npx @mcpjam/inspector@latest npx -y /Users/matt8p/demo-ts/dist/index.js +``` + +## Connecting to MCP servers + +### mcp.json + +You can import your `mcp.json` MCP server configs from Claude Desktop and Cursor with the command: + +``` +npx @mcpjam/inspector@latest --config mcp.json +``` + +### SSE / Streamable HTTP + +Spin up the MCPJam inspector + +``` +npx @mcpjam/inspector@latest +``` + +In the UI "MCP Servers" tab, click add server, select HTTP, then paste in your server URL. Support for OAuth 2.0 testing. + +## Requirements + +[![Node.js](https://img.shields.io/badge/Node.js-20+-green.svg?style=for-the-badge&logo=node.js)](https://nodejs.org/) +[![TypeScript](https://img.shields.io/badge/TypeScript-5+-blue.svg?style=for-the-badge&logo=typescript)](https://www.typescriptlang.org/) + +## 🛠️ Development + +### Local Development Setup + +```bash +# Clone the repository +git clone https://github.com/mcpjam/inspector.git +cd inspector + +# Install dependencies +npm install + +# Start development server +npm run dev +``` + +The development server will start at `http://localhost:6274` with hot reloading enabled. 
+ +### Build for Production + +```bash +# Build the application +npm run build + +# Start production server +npm run start +``` + +## 🤝 Contributing + +We welcome contributions to MCPJam Inspector V1! Please read our [Contributing Guide](https://docs.mcpjam.com/CONTRIBUTING) for development guidelines and best practices. + +## 📚 Resources + +- **💬 Discord**: [Join the MCPJam Community](https://discord.gg/JEnDtz8X6z) +- **📖 MCP Protocol**: [Model Context Protocol Documentation](https://modelcontextprotocol.io/) +- **🤖 AI SDK**: [Vercel AI SDK](https://sdk.vercel.ai/) +- **⚡ FastApps** [DooiLabs/FastApps](https://github.com/DooiLabs/FastApps) - The Python framework to build OpenAI Apps. +- **✖️ xMCP** [xMCP](https://xmcp.dev/) - The Typescript MCP framework. Ship on Vercel instantly. + +--- + +## 📄 License + +This project is licensed under the **Apache License 2.0** - see the [LICENSE](LICENSE) file for details. + +--- + +
+ +**MCPJam Inspector V1** • Built with Hono.js and ❤️ for the MCP community + +[🌐 Website](https://mcpjam.com) • [📖 Docs](https://modelcontextprotocol.io/) • [🐛 Issues](https://github.com/MCPJam/inspector/issues) + +
diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..02886d309 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +Thank you for helping us keep the MCPJam secure. Please notify us via email (mcpjams@gmail.com) or discord as soon as possible for us to put a fix. Thank you! diff --git a/assets/entitlements.mac.plist b/assets/entitlements.mac.plist new file mode 100644 index 000000000..a7a8d21e6 --- /dev/null +++ b/assets/entitlements.mac.plist @@ -0,0 +1,16 @@ + + + + + com.apple.security.app-sandbox + + com.apple.security.cs.allow-jit + + com.apple.security.cs.allow-unsigned-executable-memory + + com.apple.security.cs.disable-library-validation + + + + + diff --git a/assets/icon.icns b/assets/icon.icns new file mode 100644 index 000000000..6fe5876e9 Binary files /dev/null and b/assets/icon.icns differ diff --git a/assets/icon.ico b/assets/icon.ico new file mode 100644 index 000000000..aa513c7a3 Binary files /dev/null and b/assets/icon.ico differ diff --git a/bin/start.js b/bin/start.js new file mode 100755 index 000000000..360bb3625 --- /dev/null +++ b/bin/start.js @@ -0,0 +1,665 @@ +#!/usr/bin/env node + +import { resolve, dirname } from "path"; +import { spawn } from "child_process"; +import { fileURLToPath } from "url"; +import { createServer } from "net"; +import { execSync } from "child_process"; +import { existsSync, readFileSync } from "fs"; +import open from "open"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); + +const MCP_BANNER = ` +███╗ ███╗ ██████╗██████╗ ██╗ █████╗ ███╗ ███╗ +████╗ ████║██╔════╝██╔══██╗ ██║██╔══██╗████╗ ████║ +██╔████╔██║██║ ██████╔╝ ██║███████║██╔████╔██║ +██║╚██╔╝██║██║ ██╔═══╝██ ██║██╔══██║██║╚██╔╝██║ +██║ ╚═╝ ██║╚██████╗██║ ╚█████╔╝██║ ██║██║ ╚═╝ ██║ +╚═╝ ╚═╝ ╚═════╝╚═╝ ╚════╝ ╚═╝ ╚═╝╚═╝ ╚═╝ +`; + +// ANSI color codes +const colors = { + reset: "\x1b[0m", + bright: "\x1b[1m", + dim: "\x1b[2m", + red: "\x1b[31m", + green: "\x1b[32m", + yellow: "\x1b[33m", + blue: "\x1b[34m", 
+ magenta: "\x1b[35m", + cyan: "\x1b[36m", + white: "\x1b[37m", + bgRed: "\x1b[41m", + bgGreen: "\x1b[42m", + bgYellow: "\x1b[43m", + bgBlue: "\x1b[44m", + bgMagenta: "\x1b[45m", + bgCyan: "\x1b[46m", +}; + +// Utility functions for beautiful output +function log(message, color = colors.reset) { + console.log(`${color}${message}${colors.reset}`); +} + +function logSuccess(message) { + log(`✅ ${message}`, colors.green); +} + +function logInfo(message) { + log(`ℹ️ ${message}`, colors.blue); +} + +function logWarning(message) { + log(`⚠️ ${message}`, colors.yellow); +} + +function logError(message) { + log(`❌ ${message}`, colors.red); +} + +function logStep(step, message) { + log( + `\n${colors.cyan}${colors.bright}[${step}]${colors.reset} ${message}`, + colors.white, + ); +} + +function logProgress(message) { + log(`⏳ ${message}`, colors.magenta); +} + +function logDivider() { + log("─".repeat(80), colors.dim); +} + +function logBox(content, title = null) { + const lines = content.split("\n"); + const maxLength = Math.max(...lines.map((line) => line.length)); + const width = maxLength + 4; + + log("┌" + "─".repeat(width) + "┐", colors.cyan); + if (title) { + const titlePadding = Math.floor((width - title.length - 2) / 2); + log( + "│" + + " ".repeat(titlePadding) + + title + + " ".repeat(width - title.length - titlePadding) + + "│", + colors.cyan, + ); + log("├" + "─".repeat(width) + "┤", colors.cyan); + } + + lines.forEach((line) => { + const padding = width - line.length - 2; + log("│ " + line + " ".repeat(padding) + " │", colors.cyan); + }); + + log("└" + "─".repeat(width) + "┘", colors.cyan); +} + +function delay(ms) { + return new Promise((resolve) => setTimeout(resolve, ms, true)); +} + +function isPortAvailable(port) { + return new Promise((resolve) => { + const server = createServer(); + + server.listen(port, () => { + // Port is available, close the server and resolve true + server.close(() => { + resolve(true); + }); + }); + + server.on("error", () => { + 
// Port is not available + resolve(false); + }); + }); +} + +async function findAvailablePort(startPort = 3000, maxPort = 65535) { + logProgress(`Scanning for available ports starting from ${startPort}...`); + + for (let port = startPort; port <= maxPort; port++) { + if (await isPortAvailable(port)) { + return port; + } + + // Show progress every 10 ports to avoid spam + if (port % 10 === 0) { + logProgress(`Checked port ${port}, continuing search...`); + } + } + throw new Error( + `No available ports found between ${startPort} and ${maxPort}`, + ); +} + +function spawnPromise(command, args, options) { + return new Promise((resolve, reject) => { + const child = spawn(command, args, { + stdio: options.echoOutput ? "inherit" : "pipe", + ...options, + }); + + if (options.signal) { + options.signal.addEventListener("abort", () => { + child.kill("SIGTERM"); + }); + } + + child.on("close", (code) => { + if (code === 0) { + resolve(code); + } else { + reject(new Error(`Process exited with code ${code}`)); + } + }); + + child.on("error", reject); + }); +} + +async function checkOllamaInstalled() { + try { + await spawnPromise("ollama", ["--version"], { echoOutput: false }); + return true; + } catch (error) { + return false; + } +} + +function getTerminalCommand() { + const platform = process.platform; + + if (platform === "darwin") { + // macOS + return ["open", "-a", "Terminal"]; + } else if (platform === "win32") { + // Windows + return ["cmd", "/c", "start", "cmd", "/k"]; + } else { + // Linux and other Unix-like systems + // Try common terminal emulators in order of preference + const terminals = [ + "gnome-terminal", + "konsole", + "xterm", + "x-terminal-emulator", + ]; + for (const terminal of terminals) { + try { + execSync(`which ${terminal}`, { + stdio: "ignore", + }); + if (terminal === "gnome-terminal") { + return ["gnome-terminal", "--"]; + } else if (terminal === "konsole") { + return ["konsole", "-e"]; + } else { + return [terminal, "-e"]; + } + } catch (e) { 
+ // Terminal not found, try next + } + } + // Fallback + return ["xterm", "-e"]; + } +} + +async function openTerminalWithMultipleCommands(commands, title) { + const platform = process.platform; + const terminalCmd = getTerminalCommand(); + + if (platform === "darwin") { + // macOS: Chain commands with && separator + const chainedCommand = commands.join(" && "); + const script = `tell application "Terminal" + activate + do script "${chainedCommand}" + end tell`; + + await spawnPromise("osascript", ["-e", script], { echoOutput: false }); + } else if (platform === "win32") { + // Windows: Chain commands with && separator + const chainedCommand = commands.join(" && "); + const fullCommand = `${chainedCommand} && pause`; + await spawnPromise("cmd", ["/c", "start", "cmd", "/k", fullCommand], { + echoOutput: false, + }); + } else { + // Linux and other Unix-like systems: Chain commands with && separator + const chainedCommand = commands.join(" && "); + const fullCommand = `${chainedCommand}; read -p "Press Enter to close..."`; + await spawnPromise( + terminalCmd[0], + [...terminalCmd.slice(1), "bash", "-c", fullCommand], + { echoOutput: false }, + ); + } +} + +async function setupOllamaInSingleTerminal(model) { + logStep("Ollama", `Opening terminal to pull model ${model} and start server`); + logInfo("Both pull and serve commands will run in the same terminal window"); + + try { + const commands = [`ollama pull ${model}`, `ollama serve`]; + + await openTerminalWithMultipleCommands( + commands, + `Ollama: Pull ${model} & Serve`, + ); + logSuccess("Ollama pull and serve started in same terminal"); + logProgress( + "Waiting for model download to complete and server to start...", + ); + + // Wait a bit for the model pull to start + await delay(3000); + + // Check if model was pulled successfully and server is ready + let setupReady = false; + for (let i = 0; i < 60; i++) { + // Wait up to 10 minutes for pull + server start + try { + // First check if server is responding + 
await spawnPromise("ollama", ["list"], { echoOutput: false }); + + // Then check if our model is available + try { + await spawnPromise("ollama", ["show", model], { echoOutput: false }); + setupReady = true; + break; + } catch (e) { + // Model not ready yet, but server is responding + } + } catch (e) { + // Server not ready yet + } + + await delay(10000); // Wait 10 seconds between checks + if (i % 3 === 0) { + logProgress( + `Still waiting for model ${model} to be ready and server to start...`, + ); + } + } + + if (setupReady) { + logSuccess(`Model ${model} is ready and Ollama server is running`); + } else { + logWarning( + `Setup may still be in progress. Please check the terminal window.`, + ); + } + } catch (error) { + logError(`Failed to setup Ollama: ${error.message}`); + throw error; + } +} + +async function main() { + // Show MCP banner at startup + console.clear(); + log(MCP_BANNER, colors.cyan); + logDivider(); + + // Parse command line arguments + const args = process.argv.slice(2); + const envVars = {}; + let parsingFlags = true; + let ollamaModel = null; + let mcpServerCommand = null; + let mcpServerArgs = []; + let mcpConfigFile = null; + let mcpServerName = null; + let rebuildRequested = false; + + for (let i = 0; i < args.length; i++) { + const arg = args[i]; + + if (parsingFlags && arg === "--") { + parsingFlags = false; + continue; + } + + if (parsingFlags && arg === "--ollama" && i + 1 < args.length) { + ollamaModel = args[++i]; + continue; + } + + if (parsingFlags && arg === "--port" && i + 1 < args.length) { + const port = args[++i]; + envVars.PORT = port; + // Default: localhost in development, 127.0.0.1 in production + const defaultHost = + process.env.ENVIRONMENT === "dev" ? 
"localhost" : "127.0.0.1"; + const baseHost = process.env.HOST || defaultHost; + envVars.BASE_URL = `http://${baseHost}:${port}`; + continue; + } + + if (parsingFlags && arg === "--config" && i + 1 < args.length) { + mcpConfigFile = args[++i]; + continue; + } + + if (parsingFlags && arg === "--server" && i + 1 < args.length) { + mcpServerName = args[++i]; + continue; + } + + if (parsingFlags && (arg === "--rebuild" || arg === "--force-rebuild")) { + rebuildRequested = true; + continue; + } + + if (parsingFlags && arg === "-e" && i + 1 < args.length) { + const envVar = args[++i]; + const equalsIndex = envVar.indexOf("="); + + if (equalsIndex !== -1) { + const key = envVar.substring(0, equalsIndex); + const value = envVar.substring(equalsIndex + 1); + envVars[key] = value; + } else { + envVars[envVar] = ""; + } + continue; + } + + // If we encounter a non-flag argument, treat it as MCP server command + if (parsingFlags && !arg.startsWith("-")) { + mcpServerCommand = arg; + // Collect all remaining arguments as server arguments + mcpServerArgs = args.slice(i + 1); + break; + } + } + + // Allow environment variables to request rebuild as well + const truthyEnv = new Set(["1", "true", "yes", "on"]); + const forceRebuildEnv = (process.env.FORCE_REBUILD || "").toLowerCase(); + const rebuildEnv = (process.env.REBUILD || "").toLowerCase(); + if (truthyEnv.has(forceRebuildEnv) || truthyEnv.has(rebuildEnv)) { + rebuildRequested = true; + } + + // Handle MCP config file if provided + if (mcpConfigFile) { + logStep("MCP Server", `Configuring auto-connection to: ${mcpConfigFile}`); + + try { + const configPath = resolve(mcpConfigFile); + if (!existsSync(configPath)) { + logError(`MCP config file not found: ${configPath}`); + process.exit(1); + } + + const configContent = readFileSync(configPath, "utf-8"); + const configData = JSON.parse(configContent); + + if ( + !configData.mcpServers || + Object.keys(configData.mcpServers).length === 0 + ) { + logWarning("No MCP servers found 
in config file"); + } else { + // If --server flag is provided, validate it exists but don't filter config + if (mcpServerName) { + if (!configData.mcpServers[mcpServerName]) { + logError( + `Server '${mcpServerName}' not found in config file. Available servers: ${Object.keys(configData.mcpServers).join(", ")}`, + ); + process.exit(1); + } + logInfo(`Auto-connecting only to server: ${mcpServerName}`); + // Pass the server filter separately + envVars.MCP_AUTO_CONNECT_SERVER = mcpServerName; + } + + // Pass the full config (all servers will show in UI) + envVars.MCP_CONFIG_DATA = JSON.stringify(configData); + const serverCount = Object.keys(configData.mcpServers).length; + const serverNames = Object.keys(configData.mcpServers).join(", "); + logSuccess( + `MCP config loaded with ${serverCount} server(s) - showing all in UI`, + ); + logInfo(`Servers: ${serverNames}`); + if (mcpServerName) { + logInfo(`Will auto-connect only to: ${mcpServerName}`); + } else { + logInfo(`Will auto-connect to all servers`); + } + } + } catch (error) { + logError(`Failed to read MCP config file: ${error.message}`); + process.exit(1); + } + } else if (mcpServerCommand) { + // Handle single MCP server command if provided (legacy mode) + logStep( + "MCP Server", + `Configuring auto-connection to: ${mcpServerCommand} ${mcpServerArgs.join(" ")}`, + ); + + // Pass MCP server config via environment variables + envVars.MCP_SERVER_COMMAND = mcpServerCommand; + if (mcpServerArgs.length > 0) { + envVars.MCP_SERVER_ARGS = JSON.stringify(mcpServerArgs); + } + + logSuccess(`MCP server will auto-connect on startup`); + } + + // Handle Ollama setup if requested + if (ollamaModel) { + logStep("Setup", "Configuring Ollama integration"); + + const isOllamaInstalled = await checkOllamaInstalled(); + if (!isOllamaInstalled) { + logError("Ollama is not installed. 
Please install Ollama first:"); + logInfo( + "Visit https://ollama.ai/download to download and install Ollama", + ); + process.exit(1); + } + + logSuccess("Ollama is installed"); + + try { + await setupOllamaInSingleTerminal(ollamaModel); + + logDivider(); + logSuccess(`Ollama setup complete with model: ${ollamaModel}`); + logInfo("Ollama server is running and ready for MCP connections"); + logDivider(); + } catch (error) { + logError("Failed to setup Ollama"); + process.exit(1); + } + } + + const projectRoot = resolve(__dirname, ".."); + + // Apply parsed environment variables to process.env first + Object.assign(process.env, envVars); + + // Port configuration (fixed default to 3000) + const requestedPort = parseInt(process.env.PORT ?? "6274", 10); + let PORT; + + try { + // Check if user explicitly set a port via --port flag + const hasExplicitPort = envVars.PORT !== undefined; + + if (hasExplicitPort) { + logInfo(`Using explicitly requested port: ${requestedPort}`); + if (await isPortAvailable(requestedPort)) { + PORT = requestedPort.toString(); + logSuccess(`Port ${requestedPort} is available and ready`); + } else { + logError(`Explicitly requested port ${requestedPort} is not available`); + logInfo( + "Use a different port with --port or let the system find one automatically", + ); + throw new Error(`Port ${requestedPort} is already in use`); + } + } else { + // Fixed port policy: use default port 3000 and fail fast if unavailable + logInfo("No specific port requested, using fixed default port 6274"); + if (await isPortAvailable(requestedPort)) { + PORT = requestedPort.toString(); + logSuccess(`Default port ${requestedPort} is available`); + } else { + logError( + `Default port ${requestedPort} is already in use. 
Please free the port`, + ); + throw new Error(`Port ${requestedPort} is already in use`); + } + } + + // Update environment variables with the final port + envVars.PORT = PORT; + // Default: localhost in development, 127.0.0.1 in production + const defaultHost = + process.env.ENVIRONMENT === "dev" ? "localhost" : "127.0.0.1"; + const baseHost = process.env.HOST || defaultHost; + envVars.BASE_URL = `http://${baseHost}:${PORT}`; + Object.assign(process.env, envVars); + } catch (error) { + logError(`Port configuration failed: ${error.message}`); + throw error; + } + + const abort = new AbortController(); + + let cancelled = false; + process.on("SIGINT", () => { + cancelled = true; + abort.abort(); + logDivider(); + logWarning("Shutdown signal received..."); + logProgress("Stopping MCP Inspector server"); + logInfo("Cleaning up resources..."); + logSuccess("Server stopped gracefully"); + logDivider(); + }); + + try { + const distServerPath = resolve(projectRoot, "dist", "server", "index.js"); + + // Production start behavior: + // - Do NOT auto-build by default. + // - If --rebuild (or env) is passed, run a rebuild before starting. + // - If dist is missing and no rebuild requested, fail fast with guidance. + const distExists = existsSync(distServerPath); + + if (rebuildRequested) { + logStep("Build", "Rebuild requested; running production build"); + await spawnPromise("npm", ["run", "build"], { + env: process.env, + cwd: projectRoot, + signal: abort.signal, + echoOutput: false, + }); + logSuccess("Build completed successfully"); + await delay(500); + } else if (!distExists) { + logError( + `Production build not found at ${distServerPath}. 
Build artifacts are required to start.`, + ); + logInfo( + "Run this command with --rebuild or build in CI/CD before starting.", + ); + process.exit(1); + } else { + // Small delay to let logs flush before starting + await delay(500); + } + + // Spawn the server process but don't wait for it to exit + const serverProcess = spawn("node", [distServerPath], { + env: { + ...process.env, + NODE_ENV: "production", + PORT: PORT, + }, + cwd: projectRoot, + stdio: "inherit", + }); + + // Handle server process errors + serverProcess.on("error", (error) => { + if (!cancelled) { + logError(`Failed to start server: ${error.message}`); + process.exit(1); + } + }); + + // Handle abort signal + abort.signal.addEventListener("abort", () => { + serverProcess.kill("SIGTERM"); + }); + + // Wait a bit for the server to start up + await delay(2000); + + if (!cancelled) { + // Open the browser automatically + // Use BASE_URL if set, otherwise construct from HOST and PORT + // Default: localhost in development, 127.0.0.1 in production + const defaultHost = + process.env.ENVIRONMENT === "dev" ? "localhost" : "127.0.0.1"; + const host = process.env.HOST || defaultHost; + const url = process.env.BASE_URL || `http://${host}:${PORT}`; + + try { + await open(url); + logSuccess(`🌐 Browser opened at ${url}`); + } catch (error) { + logWarning( + `Could not open browser automatically. 
Please visit ${url} manually.`, + ); + } + } + + // Wait for the server process to exit + await new Promise((resolve, reject) => { + serverProcess.on("close", (code) => { + if (code === 0 || cancelled) { + resolve(code); + } else { + reject(new Error(`Server process exited with code ${code}`)); + } + }); + }); + } catch (e) { + if (!cancelled || process.env.DEBUG) { + logDivider(); + logError("Failed to start MCP Inspector"); + logError(`Error: ${e.message}`); + logDivider(); + throw e; + } + } + + return 0; +} + +main() + .then((_) => process.exit(0)) + .catch((e) => { + logError("Fatal error occurred"); + logError(e.stack || e.message); + process.exit(1); + }); diff --git a/claude-code-prompts/ELECTRON_DESIGN.md b/claude-code-prompts/ELECTRON_DESIGN.md new file mode 100644 index 000000000..02d925a55 --- /dev/null +++ b/claude-code-prompts/ELECTRON_DESIGN.md @@ -0,0 +1,304 @@ +# Design Document: Converting MCPJam Inspector to Electron App + +## Current Architecture Analysis + +**Current Setup:** + +- **Client**: React app using Vite dev server (port 8080) with Tailwind CSS, shadcn/ui components +- **Server**: Hono-based API server (port 3001) handling MCP connections and AI chat functionality +- **Build**: Client builds to `dist/client`, server builds to `dist/server` +- **Production**: Single server serves both API and static client files +- **Deployment**: NPX command (`npx @mcpjam/inspector`) for easy access + +**Key Components:** + +- MCP server connection management via WebSocket/IPC +- AI chat integration with multiple providers (OpenAI, Anthropic, Ollama) +- Resource/tool inspection interface +- Real-time logging and tracing + +## Proposed Electron Architecture + +### 1. 
Dual Deployment Strategy + +**Preserving NPX Usage:** + +```bash +npx @mcpjam/inspector # Current workflow remains unchanged +``` + +**Adding Electron Option:** + +- Native desktop app download and installation +- Same functionality with enhanced desktop UX +- Shared codebase for both deployment methods + +### 2. Main Process Design (Learnings from Summon) + +**Recommended Structure (Based on Electron Forge + Vite):** + +``` +mcpjam-inspector/ +├── forge.config.ts # Electron Forge configuration +├── vite.main.config.ts # Main process Vite config +├── vite.preload.config.ts # Preload Vite config +├── vite.renderer.config.mts # Renderer Vite config +├── src/ +│ ├── main.ts # Main process entry +│ ├── preload.ts # Preload script +│ ├── renderer.ts # Renderer entry +│ └── ipc/ # IPC handlers (organized by feature) +│ ├── listeners-register.ts +│ ├── mcp/ +│ ├── auth/ +│ └── window/ +├── client/ # Existing React app (minimal changes) +├── server/ # Existing Hono server (shared module) +└── package.json # Updated with Forge scripts +``` + +**Main Process Responsibilities (Inspired by Summon):** + +- Embed Hono server in main process with dynamic port allocation +- Handle single instance lock to prevent multiple app instances +- Manage MCP server lifecycle and file watchers +- Implement proper app cleanup on shutdown +- Handle OAuth protocol registration (if needed for AI providers) +- Native file dialogs and system integration + +### 3. Shared Server Module Strategy + +**Extract Hono App for Dual Usage:** + +```javascript +// server/app.ts - Shared module +export const createHonoApp = () => { + const app = new Hono(); + // ... 
existing routes and middleware + return app; +}; + +// bin/start.js - NPX entry (preserves current behavior) +import { createHonoApp } from "../server/app.js"; +const app = createHonoApp(); +serve({ fetch: app.fetch, port: 3001 }); + +// src/main.ts - Electron entry +import { createHonoApp } from "../server/app.js"; +const app = createHonoApp(); +const server = serve({ fetch: app.fetch, port: 0 }); // dynamic port +createWindow(`http://localhost:${server.address().port}`); +``` + +### 4. Technology Stack (Aligned with Summon) + +**Build System:** + +- **Electron Forge** instead of electron-vite for better ecosystem support +- **Separate Vite configs** for main, preload, and renderer processes +- **Auto-updater** with proper release management + +**Development Tools:** + +- **TypeScript** throughout the application +- **ESLint + Prettier** for code quality +- **Vitest** for unit testing +- **Playwright** for E2E testing + +### 5. Configuration Strategy + +**forge.config.ts:** + +```typescript +import type { ForgeConfig } from "@electron-forge/shared-types"; +import { VitePlugin } from "@electron-forge/plugin-vite"; +import { FusesPlugin } from "@electron-forge/plugin-fuses"; + +const config: ForgeConfig = { + packagerConfig: { + asar: true, + appBundleId: "com.mcpjam.inspector", + appCategoryType: "public.app-category.developer-tools", + icon: "assets/icon", // Platform-specific icons + }, + makers: [ + new MakerSquirrel({}), // Windows + new MakerZIP({}, ["darwin", "linux"]), + new MakerDMG({}), // macOS + new MakerDeb({}), // Linux + new MakerRpm({}), // Linux + ], + plugins: [ + new VitePlugin({ + build: [ + { + entry: "src/main.ts", + config: "vite.main.config.ts", + target: "main", + }, + { + entry: "src/preload.ts", + config: "vite.preload.config.ts", + target: "preload", + }, + ], + renderer: [ + { + name: "main_window", + config: "vite.renderer.config.mts", + }, + ], + }), + new FusesPlugin({ + // Security hardening + [FuseV1Options.RunAsNode]: false, + 
[FuseV1Options.EnableCookieEncryption]: true, + [FuseV1Options.OnlyLoadAppFromAsar]: true, + }), + ], +}; +``` + +### 6. IPC Architecture (Following Summon's Patterns) + +**Organized IPC Structure:** + +```typescript +// src/ipc/listeners-register.ts +export default function registerListeners(mainWindow: BrowserWindow) { + registerMcpListeners(mainWindow); + registerWindowListeners(mainWindow); + registerAuthListeners(mainWindow); +} + +// src/ipc/mcp/mcp-listeners.ts +export function registerMcpListeners(mainWindow: BrowserWindow) { + ipcMain.handle("mcp:connect", handleMcpConnect); + ipcMain.handle("mcp:disconnect", handleMcpDisconnect); + ipcMain.handle("mcp:list-servers", handleListServers); +} +``` + +**Preload Context Exposure:** + +```typescript +// src/preload.ts +import { contextBridge, ipcRenderer } from "electron"; + +const electronAPI = { + // MCP operations + mcp: { + connect: (config) => ipcRenderer.invoke("mcp:connect", config), + disconnect: (id) => ipcRenderer.invoke("mcp:disconnect", id), + listServers: () => ipcRenderer.invoke("mcp:list-servers"), + }, + // File operations + files: { + openDialog: () => ipcRenderer.invoke("dialog:open"), + saveDialog: (data) => ipcRenderer.invoke("dialog:save", data), + }, + // App metadata + app: { + getVersion: () => ipcRenderer.invoke("app:version"), + }, +}; + +contextBridge.exposeInMainWorld("electronAPI", electronAPI); +``` + +### 7. Migration Plan + +**Phase 1: Forge Setup & Basic Shell** + +1. Install Electron Forge and configure build system +2. Create separate Vite configs for main/preload/renderer +3. Extract shared Hono server module +4. Implement basic main process with embedded server +5. Ensure NPX command still works unchanged + +**Phase 2: Desktop Integration** + +1. Add native app menus and keyboard shortcuts +2. Implement file dialogs for MCP server configuration +3. Add proper window state management +4. Setup auto-updater infrastructure + +**Phase 3: Enhanced Features** + +1. 
System tray integration for background operation +2. Native notifications for server status +3. Protocol handler for `mcpjam://` URLs +4. Better error handling and crash reporting + +### 8. Dependencies Update + +**Key New Dependencies:** + +```json +{ + "devDependencies": { + "@electron-forge/cli": "^7.8.1", + "@electron-forge/maker-deb": "^7.8.1", + "@electron-forge/maker-dmg": "^7.8.1", + "@electron-forge/maker-rpm": "^7.8.1", + "@electron-forge/maker-squirrel": "^7.8.1", + "@electron-forge/maker-zip": "^7.8.1", + "@electron-forge/plugin-vite": "^7.8.1", + "@electron-forge/publisher-github": "^7.8.1", + "electron": "^35.2.1", + "vitest": "^3.2.4", + "@playwright/test": "^1.53.1" + }, + "dependencies": { + "electron-log": "^5.4.0", + "update-electron-app": "^3.1.1", + "fix-path": "^4.0.0" + } +} +``` + +**Updated Scripts:** + +```json +{ + "scripts": { + "start": "electron-forge start", + "package": "electron-forge package", + "make": "electron-forge make", + "publish": "electron-forge publish", + "dev:electron": "electron-forge start", + "dev:web": "concurrently \"npm run dev:server\" \"npm run dev:client\"", + "test": "vitest run", + "test:e2e": "playwright test" + } +} +``` + +### 9. Security & Production Considerations + +**Security Features (Following Summon's Approach):** + +- Context isolation enabled by default +- Node integration disabled in renderer +- Secure IPC patterns with proper validation +- ASAR packaging with integrity validation +- Proper fuse configuration for production hardening + +**Production Features:** + +- Auto-updater with GitHub releases +- Crash reporting and telemetry (optional) +- Code signing for macOS and Windows +- Proper app categorization for app stores + +### 10. Benefits of This Architecture + +1. **Dual Deployment**: Preserves NPX workflow while adding native app option +2. **Proven Patterns**: Leverages battle-tested approaches from Summon +3. **Minimal Migration**: Existing codebase requires minimal changes +4. 
**Professional Tooling**: Electron Forge provides production-ready build pipeline +5. **Native Experience**: File dialogs, app menus, system integration +6. **Maintainable**: Clear separation between web and desktop concerns +7. **Secure**: Modern Electron security practices from day one + +This architecture ensures developers can continue using `npx @mcpjam/inspector` while also offering a superior desktop experience for users who prefer native applications. diff --git a/claude-code-prompts/ELECTRON_README.md b/claude-code-prompts/ELECTRON_README.md new file mode 100644 index 000000000..7a398af93 --- /dev/null +++ b/claude-code-prompts/ELECTRON_README.md @@ -0,0 +1,200 @@ +# MCPJam Inspector - Electron Desktop App + +This document explains how to use MCPJam Inspector as a desktop Electron application. + +## Dual Deployment Options + +MCPJam Inspector now supports two deployment methods: + +### 1. NPX Command (Original Method) + +```bash +npx @mcpjam/inspector +``` + +This preserves the original workflow and functionality. + +### 2. Electron Desktop App (New) + +Download and install the native desktop application or run it in development mode. 
+ +## Development + +### Running in Development Mode + +```bash +# Start the Electron app in development mode +npm run electron:dev + +# Or use the alias +npm run electron:start +``` + +### Building the Client and Server + +```bash +# Build both client and server (needed for production Electron app) +npm run build + +# Or build separately +npm run build:client +npm run build:server +``` + +## Production Builds + +### Package the App + +```bash +npm run electron:package +``` + +### Create Distribution Files + +```bash +npm run electron:make +``` + +This will create platform-specific installers in the `out/` directory: + +- **Windows**: `.exe` installer +- **macOS**: `.dmg` disk image +- **Linux**: `.deb` and `.rpm` packages + +### Publishing (GitHub Releases) + +```bash +npm run electron:publish +``` + +## Architecture + +### Main Process (`src/main.ts`) + +- Embeds the Hono server with dynamic port allocation +- Manages the main application window +- Handles single instance locking +- Provides native OS integration (menus, file dialogs) + +### Preload Script (`src/preload.ts`) + +- Exposes secure IPC APIs to the renderer process +- Provides access to file system operations +- Enables native dialog integration + +### Renderer Process (React App) + +- Uses the existing React application from `client/` +- Can detect Electron environment via `window.isElectron` +- Access native features through `window.electronAPI` + +### Shared Server Module (`server/app.ts`) + +- Extracted Hono application used by both NPX and Electron +- Ensures consistent API behavior across deployment methods + +## Configuration + +### Electron Forge Config (`forge.config.ts`) + +- Configures build targets and makers +- Sets up security fuses +- Defines packaging options + +### Vite Configurations + +- `vite.main.config.ts` - Main process build +- `vite.preload.config.ts` - Preload script build +- `vite.renderer.config.mts` - Renderer process build + +## Features + +### Native Integration + +- 
**File Dialogs**: Open/save MCP server configurations +- **App Menus**: Standard application menus with keyboard shortcuts +- **Window Management**: Minimize, maximize, close operations +- **External Links**: Automatically open in system browser + +### Security + +- Context isolation enabled +- Node integration disabled in renderer +- Secure IPC communication patterns +- ASAR packaging with integrity validation + +### Cross-Platform Support + +- Windows (Squirrel installer) +- macOS (DMG disk image) +- Linux (DEB and RPM packages) + +## Usage Examples + +### Detecting Electron Environment + +```typescript +if (window.isElectron) { + // Running in Electron + const version = await window.electronAPI?.app.getVersion(); +} else { + // Running in browser/NPX mode +} +``` + +### Using Native File Dialogs + +```typescript +// Open file dialog +const files = await window.electronAPI?.files.openDialog({ + filters: [{ name: "JSON Files", extensions: ["json"] }], +}); + +// Save file dialog +const path = await window.electronAPI?.files.saveDialog(data); +``` + +### Window Operations + +```typescript +// Minimize window +window.electronAPI?.window.minimize(); + +// Maximize/restore window +window.electronAPI?.window.maximize(); + +// Close window +window.electronAPI?.window.close(); +``` + +## Development Tips + +1. **Hot Reload**: The development mode supports hot reload for both main and renderer processes +2. **DevTools**: Automatically opens in development mode +3. **Debugging**: Use `electron-log` for main process logging +4. 
**Testing**: Run `npm test` for unit tests, `npm run test:e2e` for end-to-end tests + +## Troubleshooting + +### Build Issues + +- Ensure all dependencies are installed: `npm install` +- Clean build directories: `rm -rf dist/ out/` +- Rebuild native modules: `npm rebuild` + +### Runtime Issues + +- Check electron-log output for main process errors +- Use browser DevTools for renderer process debugging +- Verify port availability for embedded server + +## Migration from NPX Only + +The Electron implementation is designed to be non-breaking: + +- All existing NPX functionality is preserved +- No changes required to the React application +- Server API remains identical +- Build processes are additive, not replacements + +Users can choose their preferred deployment method without any functionality differences. diff --git a/claude-code-prompts/api_key_section_09_22.md b/claude-code-prompts/api_key_section_09_22.md new file mode 100644 index 000000000..9a21927f0 --- /dev/null +++ b/claude-code-prompts/api_key_section_09_22.md @@ -0,0 +1,11 @@ +# Issue + +MCPJam API key section in @client/src/components/setting/AccountApiKeySection.tsx is broken. When there is no API key, it still looks like there's an API key, and that you can copy it. The API key can only be seen once, but the current experience is like you can always see it. + +# How to fix it. + +1. When there is no API key, prompt the user to generate an API key. Upon API key generation, show the API key and ask the user to copy it. The API key can only be seen once. +2. If an API key already exists, prompt the user to "re-generate". Create a warning modal that the old key will be replaced by the new key, and ask for approval. +3. Make it clear that API keys can only be seen once. Do not show the "\***\*\*\*\*\***\*\*\*\*\***\*\*\*\*\***" placeholder. + +Feel free to ask follow up clarification questions. 
diff --git a/claude-code-prompts/client_id_implementation_08_06.md b/claude-code-prompts/client_id_implementation_08_06.md new file mode 100644 index 000000000..db6499bef --- /dev/null +++ b/claude-code-prompts/client_id_implementation_08_06.md @@ -0,0 +1,15 @@ +# Issue + +Currently, I am unable to manually configure a client ID. The Client ID is used for Model Context Protocol (MCP) Dynamic Client Registration. Currently, the Client ID is randomly generated. + +# Instructions + +Investigate how Client IDs are currently created. Figure out a way to allow the user to manually configure the client ID. Propose how to do this before implementing it. + +## References + +https://modelcontextprotocol.io/specification/draft/basic/authorization#dynamic-client-registration +client/src/lib/mcp-oauth.ts +server/routes/mcp/oauth.ts +client/src/components/connection/AddServerModal.tsx +client/src/hooks/use-app-state.ts diff --git a/claude-code-prompts/edit_server_modal_07_29.md b/claude-code-prompts/edit_server_modal_07_29.md new file mode 100644 index 000000000..758a2cef1 --- /dev/null +++ b/claude-code-prompts/edit_server_modal_07_29.md @@ -0,0 +1,17 @@ +# Objective + +Create the ability to edit a server config. Editing a server config will open the edit server modal. The edit server modal will have all the fields of the server it's editing already filled out. Allow the user to edit and save the new configuration. + +## Create the EditServerModal component + +Create a component `EditServerModal` in `src/components/connection/EditServerModal.tsx`. This modal should have the same structure as `AddServerModal`, but it should be able to take in a server config and pre-fill the form. Allow the user to edit the form in the modal. + +On submit, delete the original server and connect to an MCP with the new config. + +## Create edit server entry point + +Create a button right below the "Reconnect" option in the component `ServerConnectionCard.tsx` called "edit". 
Clicking on the button will open up the `EditServerModal`. + +# Additional instructions + +Propose how to do this before implementing for approval diff --git a/claude-code-prompts/export_server_endpoint_09_08.md b/claude-code-prompts/export_server_endpoint_09_08.md new file mode 100644 index 000000000..88b3b7396 --- /dev/null +++ b/claude-code-prompts/export_server_endpoint_09_08.md @@ -0,0 +1,11 @@ +## Instructions + +Below is an initial proposal. Feel free to add improvements where you think there's opportunity. Propose how to do it before implementing it. Feel free to ask follow up questions. + +## Objective + +I want to create a way to export all of the MCP server's information as a JSON. I want to export all tools (tool name, description, params, etc), resources, prompts, as a JSON file. + +## How to do it + +Create an API endpoint /mcp/export/server that takes in a `serverId` and generates a JSON of all of the server's information. Have the endpoint live in /server/routes/mcp. diff --git a/claude-code-prompts/forward-tool-calls.md b/claude-code-prompts/forward-tool-calls.md new file mode 100644 index 000000000..1752d6aa3 --- /dev/null +++ b/claude-code-prompts/forward-tool-calls.md @@ -0,0 +1,14 @@ +## Problem + +I want to build an LLM chat client. I want the client to be run locally, and the backend where the LLM agent lives is on a server separately. + +Node.js client, Node.js server using Vercel ai-sdk in the backend. We also want to support MCP / tool calling. The MCPClient that makes the MCP server connections and calls the tools lives on the client. + +The client should send messages to the server, along with the MCP tool schema. The agent on the backend will request a tool call when there is one, and send it back to the client. The client will execute the tool call, and send it back to the agent to continue the conversation.
+ +We are using Mastra MCPClient and ai-sdk on the backend + +## Corner cases + +https://ai-sdk.dev/docs/ai-sdk-core/tools-and-tool-calling +https://mastra.ai/en/reference/tools/mcp-client diff --git a/claude-code-prompts/improve_system_prompt_ui_08_12.md b/claude-code-prompts/improve_system_prompt_ui_08_12.md new file mode 100644 index 000000000..8b292363c --- /dev/null +++ b/claude-code-prompts/improve_system_prompt_ui_08_12.md @@ -0,0 +1,15 @@ +# Issue + +The system prompt editor UI currently lives above the chat input. This takes up too much space. We want to move the system prompt input next to the model selector as a button. Clicking on the button will create a popup with a system prompt text input + +# How to fix + +1. Remove the System prompt UI. System prompt editor UI lives in client/src/components/ChatTab.tsx +2. Move the system prompt next to the model selector button + 2.1 Create a new component called `SystemPromptSelector` that has system prompt logic + 2.2 Have the SystemPromptSelector component in the client/src/components/chat/chat-input.tsx component right next to `ModelSelector` + 2.3 The system prompt should have a default value "You are a helpful assistant with access to MCP tools." + +# Acceptance criteria + +User can configure system prompt by clicking on the system prompt selector button and typing in a system prompt to save it. diff --git a/claude-code-prompts/matt_e2e_testing.md b/claude-code-prompts/matt_e2e_testing.md new file mode 100644 index 000000000..2d63a8acb --- /dev/null +++ b/claude-code-prompts/matt_e2e_testing.md @@ -0,0 +1,62 @@ +# E2E testing framework for MCP servers + +## Objective + +Companies like Asana, Paypal, Sentry, are hosting MCP servers in production. These companies need to know that their servers are up and running in production, and that the server is working for their customers' workflows. 
+ +The purpose of End to End (E2E) testing for MCP servers is to simulate customers' workflows and ensure they're returning the right results. The high level logic of an E2E test is as follows: + +1. Developer defines an E2E test + +``` +{ + servers: { + "asana": { + "command": "npx", + "args": ["mcp-remote", "https://mcp.asana.com/sse"] + }, + }, + test_cases: [ + { + query: "What Asana workspace am I in?", + expected: "The workspace 'MCPJam' is returned" + }, + { + query: "Create a task called 'Build E2E test'", + expected: "Task must be in the MCPJam workspace. The task 'Build E2E test' is created" + } + ] +} +``` + +2. Tests are run through an Agent. The agent connects to the MCP servers, runs through the test cases (in parallel preferably) and the tracing is outputted. +3. The trace is passed into an LLM as a judge. The judge agent will look at the trace to determine the performance and score of the E2E test. + +### Prompt discovery test + +The purpose of the prompt discovery test is to find out what prompts are breaking. We have an agent that looks at the tools of the MCP server and generates new queries. E2E tests will be run on these new queries. If they're breaking, then we know that workflow is broken. + +Prompt discovery tests are useful for discovering new workflows to test and make sure they're working. This test essentially is an edge case finder. + +### Benchmark test + +We want MCPJam customers to create a benchmark E2E test. Our customer would create a test definition (like example in step 1) with the most popular user queries. We would periodically run these tests to catch any regressions in the server. + +For example, the benchmark might be 70% of the tests pass. If that drops to 30%, then we know there's been a regression. + +## Product spec requirements + +### Benchmark test is in MCPJam + +- New tab in MCPJam inspector called "Benchmark E2E tests" +- User defines an E2E test in the UI.
User can create an E2E for any connected server in MCPJam. +- User can run the E2E test. Results and scores are shown. +- Display thinking and agent tracing in the UI. +- We'll have the base open source version, where you can run a benchmark on any server. We'll have paid cloud features where you can save your runs and see them over time. + +### Prompt discovery test + +- Requirement is that it can generate new prompts and run the E2E tests on each new prompt. +- Prompt discovery test will not be in MCPJam open source +- We'll build this privately, offer prompt discovery E2E as a service for enterprise. +- We'll manually test their MCP servers this way ourselves. diff --git a/claude-code-prompts/migration_08_03.md b/claude-code-prompts/migration_08_03.md new file mode 100644 index 000000000..68a09f832 --- /dev/null +++ b/claude-code-prompts/migration_08_03.md @@ -0,0 +1,437 @@ +# MCP Inspector Migration: Next.js to Hono + Vite + +## Executive Summary + +This document outlines the migration of MCP Inspector from Next.js to Hono + Vite architecture to achieve better performance, reduced bundle size, enhanced desktop compatibility, and simplified deployment for NPX distribution. + +## 1.
Why We're Making This Change + +### Current Pain Points with Next.js + +- **Bundle Size**: Next.js adds ~15MB+ overhead for a simple MCP inspector tool +- **Desktop Limitations**: Server-side rendering creates complexity for future Electron integration +- **Development Complexity**: Full-stack framework overkill for a lightweight inspector tool +- **NPX Distribution**: Heavy dependencies impact cold-start performance + +### Benefits of Hono + Vite Migration + +- **Lightweight**: 50KB runtime vs Next.js 15MB+ overhead +- **Desktop Ready**: Client-server separation enables easier Electron migration +- **Universal Compatibility**: Hono runs on Node.js, Bun, Deno, edge runtimes +- **Better DX**: TypeScript-first with excellent tooling +- **Performance**: 3x faster cold starts, better resource utilization +- **Future-Proof**: Clear path to desktop app with Tauri/Electron + +## 2. Current Architecture Analysis + +### Next.js Structure + +``` +src/ +├── app/ +│ ├── api/ +│ │ └── mcp/ +│ │ ├── chat/route.ts # SSE streaming chat +│ │ ├── connect/route.ts # MCP server validation +│ │ ├── prompts/ # MCP prompt management +│ │ ├── resources/ # MCP resource handling +│ │ └── tools/route.ts # MCP tool discovery +│ ├── page.tsx # Main app page +│ └── layout.tsx # Root layout +├── components/ # 70+ React components +├── lib/ # Utilities and types +└── hooks/ # Custom React hooks +``` + +### Key API Endpoints + +1. **POST /api/mcp/chat** - SSE streaming chat with MCP tool integration +2. **POST /api/mcp/connect** - Server connection validation +3. **GET /api/mcp/tools** - Tool discovery and listing +4. **GET /api/mcp/resources/list** - Resource enumeration +5. **POST /api/mcp/resources/read** - Resource content retrieval +6. 
**GET /api/mcp/prompts/list** - Prompt template listing + +### Dependencies Analysis + +- **Core MCP**: @mastra/core, @mastra/mcp +- **AI Providers**: @ai-sdk/anthropic, @ai-sdk/openai, ollama-ai-provider +- **UI Framework**: React 19, Tailwind CSS, Radix UI +- **State Management**: Zustand, @tanstack/react-query + +## 3. Target Architecture: Hono + Vite + +### New Structure (Single Repository, Unified Package) + +``` +mcpjam-inspector/ +├── server/ +│ ├── index.ts # Hono app entry point +│ ├── routes/ +│ │ ├── mcp/ +│ │ │ ├── chat.ts # Chat SSE endpoint +│ │ │ ├── connect.ts # Connection validation +│ │ │ ├── prompts.ts # Prompt management +│ │ │ ├── resources.ts # Resource handling +│ │ │ └── tools.ts # Tool discovery +│ │ └── static.ts # Static file serving +│ ├── middleware/ +│ │ ├── cors.ts # CORS configuration +│ │ ├── logger.ts # Request logging +│ │ └── error.ts # Error handling +│ └── utils/ +│ ├── mcp-client.ts # MCP client utilities +│ └── streaming.ts # SSE utilities +├── client/ +│ ├── src/ +│ │ ├── components/ # Existing React components +│ │ ├── lib/ # Client-side utilities +│ │ ├── hooks/ # React hooks +│ │ └── main.tsx # Vite entry point +│ ├── index.html # HTML template +│ └── vite.config.ts # Vite configuration +├── shared/ +│ └── types.ts # Shared TypeScript types +├── dist/ # Built assets (client + server) +├── bin/ +│ └── start.js # Updated NPX entry point +├── package.json # Single unified package +└── tsconfig.json # Shared TypeScript config +``` + +### Repository Strategy: Single Package Distribution + +**Why NOT a monorepo:** + +- **NPX Simplicity**: Single `npx @mcpjam/inspector` command +- **Atomic Updates**: Client/server versions always synchronized +- **Reduced Complexity**: One `package.json`, unified build process +- **Smaller Bundle**: No duplicate dependencies between packages +- **User Experience**: Simple installation, no package coordination + +**Package.json Structure:** + +```json +{ + "name": "@mcpjam/inspector", + 
"version": "0.8.2", + "bin": { "inspector-v1": "bin/start.js" }, + "files": ["bin", "dist", "package.json"], + "scripts": { + "dev": "concurrently \"npm run dev:server\" \"npm run dev:client\"", + "build": "npm run build:client && npm run build:server", + "dev:server": "tsx watch server/index.ts", + "dev:client": "vite client/ --port 5173", + "build:client": "vite build client/ --outDir ../dist/client", + "build:server": "tsup server/index.ts --outDir dist/server" + } +} +``` + +### Technology Stack + +- **Backend**: Hono + Node.js (with Bun upgrade path) +- **Frontend**: Vite + React + TypeScript +- **Styling**: Tailwind CSS (maintained) +- **State Management**: Zustand + TanStack Query (maintained) +- **UI Components**: Radix UI (maintained) +- **Distribution**: NPX with unified binary + +### API Mapping + +| Next.js Route | Hono Route | Notes | +| ---------------------- | ----------------------- | ------------------------ | +| `/api/mcp/chat` | `POST /api/mcp/chat` | SSE streaming maintained | +| `/api/mcp/connect` | `POST /api/mcp/connect` | Direct port | +| `/api/mcp/tools` | `GET /api/mcp/tools` | Direct port | +| `/api/mcp/resources/*` | `/api/mcp/resources/*` | Direct port | +| `/api/mcp/prompts/*` | `/api/mcp/prompts/*` | Direct port | + +## 4. Migration Plan + +_Note: This migration plan is designed for flexible, personal development pace. 
Complete phases as time permits._ + +### Phase 1: Foundation Setup + +**Goal**: Establish new project structure and basic configuration +**Time Commitment**: ~4-6 hours across multiple sessions + +- [ ] Create new project structure with Hono + Vite +- [ ] Set up TypeScript configuration for both client/server +- [ ] Configure Vite build with HMR for development +- [ ] Create Hono app with basic routing structure +- [ ] Set up shared types between client/server + +### Phase 2: Backend Migration - Simple Endpoints + +**Goal**: Port straightforward MCP endpoints +**Time Commitment**: ~6-8 hours across multiple sessions + +- [ ] Port MCP client utilities from Next.js to Hono +- [ ] Migrate `/api/mcp/connect` endpoint (simplest first) +- [ ] Migrate `/api/mcp/tools` endpoint +- [ ] Migrate `/api/mcp/resources/*` endpoints +- [ ] Migrate `/api/mcp/prompts/*` endpoints + +### Phase 3: Chat Streaming Migration (Most Complex) + +**Goal**: Port the complex chat streaming functionality +**Time Commitment**: ~8-12 hours (spread across sessions) + +- [ ] Port complex `/api/mcp/chat` SSE streaming logic +- [ ] Implement elicitation handling in Hono +- [ ] Test tool call streaming and real-time events +- [ ] Verify MCP client lifecycle management + +### Phase 4: Frontend Integration + +**Goal**: Move React app to Vite and connect to new backend +**Time Commitment**: ~6-8 hours across multiple sessions + +- [ ] Move React components to Vite structure +- [ ] Update API client calls to new Hono endpoints +- [ ] Configure Vite dev proxy to Hono server +- [ ] Update state management and data fetching + +### Phase 5: Static Assets & Production Build + +**Goal**: Handle static files and create production builds +**Time Commitment**: ~4-6 hours across multiple sessions + +- [ ] Configure Hono static file serving +- [ ] Set up client-side routing with React Router +- [ ] Handle SPA fallback for browser history +- [ ] Optimize asset bundling and code splitting + +### Phase 6: NPX 
Integration & Final Polish + +**Goal**: Update distribution and ensure everything works end-to-end +**Time Commitment**: ~4-6 hours across multiple sessions + +- [ ] Update `bin/start.js` to launch unified Hono server (serves both API + static) +- [ ] Configure production builds: `dist/server.js` + `dist/client/` assets +- [ ] Update package.json with unified build scripts and file includes +- [ ] Test NPX distribution with new single-package architecture +- [ ] Add graceful startup/shutdown handling +- [ ] Comprehensive testing of all MCP functionality +- [ ] Performance benchmarking vs Next.js version + +**Key NPX Changes:** + +```javascript +// bin/start.js - simplified approach +const server = spawn("node", ["dist/server.js"], { + env: { ...process.env, NODE_ENV: "production" }, +}); +// Hono server serves both API routes AND static client files +``` + +## 5. Technical Implementation Details + +### Hono Server Setup + +```typescript +// server/index.ts +import { Hono } from "hono"; +import { cors } from "hono/cors"; +import { logger } from "hono/logger"; +import { serveStatic } from "hono/node-server/serve-static"; + +import mcpRoutes from "./routes/mcp"; + +const app = new Hono(); + +// Middleware +app.use("*", logger()); +app.use("*", cors()); + +// API Routes +app.route("/api/mcp", mcpRoutes); + +// Static file serving +app.use("/*", serveStatic({ root: "./dist/client" })); + +// SPA fallback +app.get("*", serveStatic({ path: "./dist/client/index.html" })); + +export default app; +``` + +### MCP Chat Streaming in Hono + +```typescript +// server/routes/mcp/chat.ts +import { Hono } from "hono"; +import { streamSSE } from "hono/streaming"; + +const chat = new Hono(); + +chat.post("/", async (c) => { + const { serverConfigs, model, messages } = await c.req.json(); + + return streamSSE(c, async (stream) => { + // Port existing MCP client logic + const client = createMCPClientWithMultipleConnections(serverConfigs); + const tools = await client.getTools(); + + // 
Stream tool calls and results + const agent = new Agent({ model, tools }); + const result = await agent.stream(messages); + + for await (const chunk of result.textStream) { + await stream.writeSSE({ + data: JSON.stringify({ type: "text", content: chunk }), + }); + } + + await stream.writeSSE({ data: "[DONE]" }); + }); +}); + +export default chat; +``` + +### Vite Configuration + +```typescript +// client/vite.config.ts +import { defineConfig } from "vite"; +import react from "@vitejs/plugin-react"; +import path from "path"; + +export default defineConfig({ + plugins: [react()], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), + }, + }, + server: { + proxy: { + "/api": { + target: "http://localhost:3001", + changeOrigin: true, + }, + }, + }, + build: { + outDir: "dist", + sourcemap: true, + }, +}); +``` + +## 6. Risk Assessment & Mitigation + +### High Risk Areas + +1. **SSE Streaming Complexity** + - _Risk_: Chat streaming logic is complex with tool calls and elicitation + - _Mitigation_: Port incrementally, extensive testing, fallback mechanisms + +2. **MCP Client Lifecycle** + - _Risk_: Connection management and cleanup in Hono vs Next.js + - _Mitigation_: Create abstraction layer, monitor connection leaks + +3. **State Synchronization** + - _Risk_: Client-server state management with separate processes + - _Mitigation_: Keep existing React state management, update API layer only + +### Medium Risk Areas + +1. **Static Asset Loading** + - _Risk_: Different asset handling between Next.js and Vite + - _Mitigation_: Test thoroughly, use Vite asset optimization + +2. **NPX Distribution Changes** + - _Risk_: Breaking changes to user installation flow + - _Mitigation_: Maintain backward compatibility, clear migration docs + +### Low Risk Areas + +1. **UI Component Migration** - Direct port with minimal changes +2. **TypeScript Configuration** - Similar setup between frameworks +3. **Development Experience** - Improved with Vite HMR + +## 7. 
Success Metrics + +### Performance Targets + +- **Bundle Size**: < 5MB total (vs current ~20MB) +- **Cold Start**: < 2s (vs current ~5s) +- **Memory Usage**: < 100MB (vs current ~200MB) +- **Build Time**: < 30s (vs current ~60s) + +### Functional Requirements + +- [ ] All existing MCP functionality preserved +- [ ] Chat streaming performance maintained +- [ ] Tool call execution works identically +- [ ] Server connection management stable +- [ ] NPX installation seamless + +### Quality Gates + +- [ ] Zero regression in MCP protocol compatibility +- [ ] TypeScript coverage maintained (>90%) +- [ ] All existing tests passing +- [ ] Performance benchmarks met +- [ ] Security audit passed + +## 8. Future Considerations + +### Desktop App Migration Path + +With the new client-server separation: + +1. **Tauri Integration**: Replace Hono server with Tauri backend +2. **Electron Option**: Package as Electron app with embedded server +3. **Native Features**: File system access, native notifications + +### Runtime Flexibility + +- **Bun Migration**: Easy upgrade path from Node.js to Bun +- **Edge Deployment**: Hono supports Cloudflare Workers, Vercel Edge +- **Container Deployment**: Simplified Docker packaging + +### Extensibility + +- **Plugin System**: Hono middleware for custom MCP extensions +- **Theme Engine**: Easier customization with Vite builds +- **Multi-Protocol**: Foundation for other protocol inspectors + +## 9. Flexible Implementation Approach + +### Total Time Commitment + +**~32-46 hours** spread across 6 phases at your own pace + +### Phase Priority Recommendations + +1. **Start with Phase 1 & 2** - Gets you a working backend quickly +2. **Phase 3 is the hardest** - Tackle when you have longer uninterrupted time +3. 
**Phase 4-6 are incremental** - Can be done in smaller chunks + +### Suggested Session Breakdown + +- **Short sessions (1-2 hours)**: Foundation work, simple endpoint ports, config +- **Medium sessions (2-4 hours)**: Frontend integration, static assets +- **Long sessions (4+ hours)**: Chat streaming migration (Phase 3) + +### Milestone Checkpoints + +- **After Phase 2**: Working API server with basic MCP functionality +- **After Phase 4**: Full-stack app running in development +- **After Phase 6**: Production-ready NPX distribution + +## 10. Conclusion + +The migration from Next.js to Hono + Vite will deliver: + +- **75% reduction** in bundle size +- **60% improvement** in cold-start performance +- **Clear path** to desktop application +- **Enhanced developer experience** with better tooling +- **Future-proof architecture** with runtime flexibility + +This migration positions MCP Inspector as a lightweight, performant tool while maintaining all existing functionality and providing a foundation for future enhancements. diff --git a/client/CLAUDE.md b/client/CLAUDE.md new file mode 100644 index 000000000..a418de22c --- /dev/null +++ b/client/CLAUDE.md @@ -0,0 +1,245 @@ +# MCPJam Inspector Frontend Guidelines + +## Architecture Overview + +The frontend is built with modern web technologies: + +- Vite + React for fast development and optimized builds +- Tailwind CSS with Radix UI for consistent, accessible components +- Zustand for lightweight, flexible state management +- AI SDK integrations for LLM support + +## Core Features + +1. 
**LLM Playground** + - OpenAI models integration + - Model selection interface + - Temperature/top-p controls + - System message configuration + - Chat history management + - Anthropic Claude support + - Claude 2/3 model options + - Context window management + - Response streaming UI + - Error state handling + - DeepSeek AI integration + - DeepSeek R1 Support + - Coding-optimized models + - Reasoning capabilities + - Context management + - Ollama model compatibility + - Local model configuration + - Model download interface + - Parameter tuning + - Resource monitoring + - Real-time chat interface + - Message threading + - Code highlighting + - Markdown rendering + - File attachments + +2. **MCP Server Testing** + - Multiple server connections + - Connection manager UI + - Server health monitoring + - Configuration persistence + - Quick switch interface + - Configuration management + - Transport protocol selection + - Authentication setup + - Rate limit configuration + - Timeout settings + - Transport protocol selection + - STDIO connection UI + - SSE stream monitoring + - HTTP request builder + - WebSocket integration + - Real-time validation + - Schema validation UI + - Error highlighting + - Fix suggestions + - Test case management + +3. 
**Developer Tools** + - Request/response inspector + - JSON tree viewer + - Headers examination + - Timing analysis + - Search/filter tools + - Debug console integration + - Log level controls + - Filter configuration + - Stack trace viewer + - Console commands + - Performance monitoring + - Response time graphs + - Memory usage charts + - Network analysis + - Bottleneck detection + +## Project Structure + +``` +/client + /src + /app # Next.js App Router + /chat # LLM playground pages + /servers # Server management + /settings # Configuration pages + /components # React components + /chat # Chat interface components + /servers # Server components + /shared # Common UI components + /hooks # Custom React hooks + /llm # LLM integration hooks + /mcp # MCP protocol hooks + /transport # Transport layer hooks + /lib # Utility functions + /api # API client functions + /validation # Schema validators + /transform # Data transformers + /stores # Zustand state + /chat # Chat state management + /servers # Server configurations + /settings # App settings + /styles # Tailwind themes + /public # Static assets +``` + +## Component Guidelines + +1. **React Components** + - Use functional components + - React.FC typing + - Props interface definitions + - Children prop handling + - Event handler types + - Implement TypeScript types + - Strict prop types + - Event types + - State interfaces + - Utility types + - Follow React 19 patterns + - Use hooks pattern + - Suspense boundaries + - Error boundaries + - Concurrent features + - Maintain component isolation + - Props drilling prevention + - Context usage + - Component composition + - Render optimization + +2. 
**State Management** + - Zustand for global state + - Store creation + - Action definitions + - Selector optimization + - Middleware usage + - React hooks for local state + - useState patterns + - useReducer implementation + - Custom hook creation + - Effect cleanup + - MCP state synchronization + - Connection state + - Request tracking + - Response handling + - Error management + - AI model state handling + - Model selection state + - Generation parameters + - Stream management + - History persistence + +3. **UI/UX Design** + - Radix UI components + - Dialog implementation + - Dropdown menus + - Form controls + - Tooltips + - Custom Tailwind themes + - Color schemes + - Typography system + - Spacing scale + - Animation classes + - Responsive layouts + - Breakpoint system + - Grid layouts + - Flex containers + - Container queries + - Accessibility compliance + - ARIA attributes + - Keyboard navigation + - Focus management + - Screen reader support + +## LLM Integration + +1. **Model Support** + - OpenAI API integration + - API client setup + - Model configuration + - Response handling + - Error recovery + - Claude API implementation + - Authentication flow + - Request formatting + - Stream processing + - Rate limiting + - Ollama local models + - Local setup + - Model management + - Inference options + - Resource control + - Response streaming + - Token processing + - UI updates + - Cancel handling + - Error states + +2. **Chat Interface** + - Real-time messaging + - Message components + - Input handling + - Stream rendering + - History management + - Code highlighting + - Syntax detection + - Theme support + - Copy functionality + - Line numbers + - Message threading + - Thread components + - Collapse/expand + - Navigation + - Search + - Context management + - Window size tracking + - Token counting + - Context pruning + - State persistence + +## MCP Testing Features + +1. 
**Server Management** + - Multiple connections + - Connection list UI + - Status indicators + - Quick actions + - Group management + - Transport selection + - Protocol options + - Configuration forms + - Validation rules + - Default presets + - Authentication setup + - OAuth configuration + - Token management + - Scope selection + - Refresh handling + - Configuration persistence + - Local storage + - Export/import + - Sync options + - Backup/restore diff --git a/client/README.md b/client/README.md new file mode 100644 index 000000000..69c8b135c --- /dev/null +++ b/client/README.md @@ -0,0 +1,3 @@ +## Convex HTTP Actions + +Set `VITE_CONVEX_SITE_URL` to your Convex site URL (e.g. `https://happy-animal-123.convex.site`). diff --git a/client/index.html b/client/index.html new file mode 100644 index 000000000..945b709ce --- /dev/null +++ b/client/index.html @@ -0,0 +1,13 @@ + + + + + + + MCPJam Inspector + + +
+ + + diff --git a/client/public/apps_sdk_pizza.png b/client/public/apps_sdk_pizza.png new file mode 100644 index 000000000..66ac1bd2f Binary files /dev/null and b/client/public/apps_sdk_pizza.png differ diff --git a/client/public/bedrock_logo.png b/client/public/bedrock_logo.png new file mode 100644 index 000000000..b1326b8c6 Binary files /dev/null and b/client/public/bedrock_logo.png differ diff --git a/client/public/catalyst.png b/client/public/catalyst.png new file mode 100644 index 000000000..b32348ae3 Binary files /dev/null and b/client/public/catalyst.png differ diff --git a/client/public/claude_logo.png b/client/public/claude_logo.png new file mode 100644 index 000000000..931464775 Binary files /dev/null and b/client/public/claude_logo.png differ diff --git a/client/public/deepseek_logo.svg b/client/public/deepseek_logo.svg new file mode 100644 index 000000000..3fc230240 --- /dev/null +++ b/client/public/deepseek_logo.svg @@ -0,0 +1 @@ +DeepSeek \ No newline at end of file diff --git a/client/public/demo_1.png b/client/public/demo_1.png new file mode 100644 index 000000000..4cd3cf48c Binary files /dev/null and b/client/public/demo_1.png differ diff --git a/client/public/demo_2.png b/client/public/demo_2.png new file mode 100644 index 000000000..ad8b0f944 Binary files /dev/null and b/client/public/demo_2.png differ diff --git a/client/public/demo_3.png b/client/public/demo_3.png new file mode 100644 index 000000000..7346a13f2 Binary files /dev/null and b/client/public/demo_3.png differ diff --git a/client/public/demo_4.png b/client/public/demo_4.png new file mode 100644 index 000000000..3913e2021 Binary files /dev/null and b/client/public/demo_4.png differ diff --git a/client/public/file.svg b/client/public/file.svg new file mode 100644 index 000000000..004145cdd --- /dev/null +++ b/client/public/file.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/client/public/globe.svg b/client/public/globe.svg new file mode 100644 index 000000000..567f17b0d 
--- /dev/null +++ b/client/public/globe.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/client/public/google_logo.png b/client/public/google_logo.png new file mode 100644 index 000000000..c8743166e Binary files /dev/null and b/client/public/google_logo.png differ diff --git a/client/public/grok_dark.png b/client/public/grok_dark.png new file mode 100644 index 000000000..c97e78655 Binary files /dev/null and b/client/public/grok_dark.png differ diff --git a/client/public/grok_light.svg b/client/public/grok_light.svg new file mode 100644 index 000000000..d81f3fa7d --- /dev/null +++ b/client/public/grok_light.svg @@ -0,0 +1,29 @@ + + + + + + + + diff --git a/client/public/litellm_logo.png b/client/public/litellm_logo.png new file mode 100644 index 000000000..b0e01c9ea Binary files /dev/null and b/client/public/litellm_logo.png differ diff --git a/client/public/mcp.svg b/client/public/mcp.svg new file mode 100644 index 000000000..5cd83a8bf --- /dev/null +++ b/client/public/mcp.svg @@ -0,0 +1 @@ +ModelContextProtocol \ No newline at end of file diff --git a/client/public/mcp_jam.svg b/client/public/mcp_jam.svg new file mode 100644 index 000000000..8ae6e7770 --- /dev/null +++ b/client/public/mcp_jam.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/client/public/mcp_jam_dark.png b/client/public/mcp_jam_dark.png new file mode 100644 index 000000000..a3decf590 Binary files /dev/null and b/client/public/mcp_jam_dark.png differ diff --git a/client/public/mcp_jam_light.png b/client/public/mcp_jam_light.png new file mode 100644 index 000000000..a06eccdb2 Binary files /dev/null and b/client/public/mcp_jam_light.png differ diff --git a/client/public/meta_logo.svg b/client/public/meta_logo.svg new file mode 100644 index 000000000..2c8cc4872 --- /dev/null +++ b/client/public/meta_logo.svg @@ -0,0 +1,19 @@ + +Logo of Meta Platforms -- Graphic created by Detmar Owen + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/client/public/microsoft_sponsor.jpeg b/client/public/microsoft_sponsor.jpeg new file mode 100644 index 000000000..b937f1da1 Binary files /dev/null and b/client/public/microsoft_sponsor.jpeg differ diff --git a/client/public/mistral_logo.png b/client/public/mistral_logo.png new file mode 100644 index 000000000..b630ff80a Binary files /dev/null and b/client/public/mistral_logo.png differ diff --git a/client/public/moonshot_dark.png b/client/public/moonshot_dark.png new file mode 100644 index 000000000..ae3e05ae4 Binary files /dev/null and b/client/public/moonshot_dark.png differ diff --git a/client/public/moonshot_light.png b/client/public/moonshot_light.png new file mode 100644 index 000000000..01c7d5694 Binary files /dev/null and b/client/public/moonshot_light.png differ diff --git a/client/public/next.svg b/client/public/next.svg new file mode 100644 index 000000000..5174b28c5 --- /dev/null +++ b/client/public/next.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/client/public/ollama_dark.png b/client/public/ollama_dark.png new file mode 100644 index 000000000..1f7a4ddd4 Binary files /dev/null and b/client/public/ollama_dark.png differ diff --git a/client/public/ollama_logo.svg b/client/public/ollama_logo.svg new file mode 100644 index 000000000..d7780867b --- /dev/null +++ b/client/public/ollama_logo.svg @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/client/public/openai_logo.png b/client/public/openai_logo.png new file mode 100644 index 000000000..ca0ef35c0 Binary files /dev/null and b/client/public/openai_logo.png differ diff --git a/client/public/openrouter_logo.png b/client/public/openrouter_logo.png new file mode 100644 index 000000000..47963265d Binary files /dev/null and b/client/public/openrouter_logo.png differ diff --git a/client/public/vercel.svg b/client/public/vercel.svg new file mode 100644 index 000000000..770539603 --- /dev/null +++ b/client/public/vercel.svg @@ -0,0 +1 @@ + \ No newline at end of file diff 
--git a/client/public/window.svg b/client/public/window.svg new file mode 100644 index 000000000..b2b2a44f6 --- /dev/null +++ b/client/public/window.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/client/public/z-ai.png b/client/public/z-ai.png new file mode 100644 index 000000000..1d614c959 Binary files /dev/null and b/client/public/z-ai.png differ diff --git a/client/src/App.tsx b/client/src/App.tsx new file mode 100644 index 000000000..96385ae23 --- /dev/null +++ b/client/src/App.tsx @@ -0,0 +1,256 @@ +import { useEffect, useMemo, useState } from "react"; +import { ServersTab } from "./components/ServersTab"; +import { ToolsTab } from "./components/ToolsTab"; +import { ResourcesTab } from "./components/ResourcesTab"; +import { PromptsTab } from "./components/PromptsTab"; +import { ChatTab } from "./components/ChatTab"; +import { ChatTabV2 } from "./components/ChatTabV2"; +import { EvalsResultsTab } from "./components/EvalsResultsTab"; +import { EvalsRunTab } from "./components/EvalsRunTab"; +import { SettingsTab } from "./components/SettingsTab"; +import { TracingTab } from "./components/TracingTab"; +import { InterceptorTab } from "./components/InterceptorTab"; +import { AuthTab } from "./components/AuthTab"; +import { OAuthFlowTab } from "./components/OAuthFlowTab"; +import OAuthDebugCallback from "./components/OAuthDebugCallback"; +import { MCPSidebar } from "./components/mcp-sidebar"; +import { ActiveServerSelector } from "./components/ActiveServerSelector"; +import { + SidebarInset, + SidebarProvider, + SidebarTrigger, +} from "./components/ui/sidebar"; +import { useAppState } from "./hooks/use-app-state"; +import { PreferencesStoreProvider } from "./stores/preferences/preferences-provider"; +import { Toaster } from "./components/ui/sonner"; +import { useElectronOAuth } from "./hooks/useElectronOAuth"; +import { useEnsureDbUser } from "./hooks/useEnsureDbUser"; +import { usePostHog } from "posthog-js/react"; +import { usePostHogIdentify } from 
"./hooks/usePostHogIdentify"; + +// Import global styles +import "./index.css"; +import { AuthUpperArea } from "./components/auth/auth-upper-area"; +import { detectEnvironment, detectPlatform } from "./logs/PosthogUtils"; +import CompletingSignInLoading from "./components/CompletingSignInLoading"; +import LoadingScreen from "./components/LoadingScreen"; +import LoginPage from "./components/LoginPage"; +import { useLoginPage } from "./hooks/use-log-in-page"; +import { Header } from "./components/Header"; + +export default function App() { + const [activeTab, setActiveTab] = useState("servers"); + const posthog = usePostHog(); + const { shouldShowLoginPage, isAuthenticated, isAuthLoading } = + useLoginPage(); + + usePostHogIdentify(); + + useEffect(() => { + if (isAuthLoading) return; + posthog.capture("app_launched", { + platform: detectPlatform(), + environment: detectEnvironment(), + user_agent: navigator.userAgent, + is_authenticated: isAuthenticated, + }); + }, [isAuthLoading, isAuthenticated]); + + // Set up Electron OAuth callback handling + useElectronOAuth(); + // Ensure a `users` row exists after Convex auth + useEnsureDbUser(); + + const isDebugCallback = useMemo( + () => window.location.pathname.startsWith("/oauth/callback/debug"), + [], + ); + const isOAuthCallback = useMemo( + () => window.location.pathname === "/callback", + [], + ); + const isOAuthCallbackComplete = useMemo( + () => window.location.pathname.startsWith("/oauth/callback"), + [], + ); + + const { + appState, + isLoading, + connectedServerConfigs, + selectedMCPConfig, + handleConnect, + handleDisconnect, + handleReconnect, + handleUpdate, + handleRemoveServer, + setSelectedServer, + toggleServerSelection, + selectedMCPConfigsMap, + setSelectedMultipleServersToAllServers, + } = useAppState(); + // Sync tab with hash on mount and when hash changes + useEffect(() => { + const applyHash = () => { + const hash = (window.location.hash || "#servers").replace("#", ""); + setActiveTab(hash); + if 
(hash === "chat" || hash === "chat-v2") { + setSelectedMultipleServersToAllServers(); + } + }; + applyHash(); + window.addEventListener("hashchange", applyHash); + return () => window.removeEventListener("hashchange", applyHash); + }, [setSelectedMultipleServersToAllServers]); + + const handleNavigate = (section: string) => { + if (section === "chat" || section === "chat-v2") { + setSelectedMultipleServersToAllServers(); + } + window.location.hash = section; + setActiveTab(section); + }; + + if (isDebugCallback) { + return ; + } + + if (isOAuthCallback) { + // Handle the actual OAuth callback - AuthKit will process this automatically + // Show a loading screen while the OAuth flow completes + useEffect(() => { + // Fallback: redirect to home after 5 seconds if still stuck + const timeout = setTimeout(() => { + window.location.href = "/"; + }, 5000); + + return () => clearTimeout(timeout); + }, []); + + return ; + } + + if (isLoading) { + return ; + } + + const appContent = ( + + + +
+
+ {/* Active Server Selector - Only show on Tools, Resources, Prompts, Auth, OAuth Flow, and Interceptor pages */} + {(activeTab === "tools" || + activeTab === "resources" || + activeTab === "prompts" || + activeTab === "auth" || + activeTab === "oauth-flow" || + activeTab === "chat" || + activeTab === "chat-v2" || + activeTab === "interceptor") && ( + + )} + + {/* Content Areas */} + {activeTab === "servers" && ( + + )} + + {activeTab === "tools" && ( + + )} + {activeTab === "evals" && } + {activeTab === "eval-results" && } + {activeTab === "resources" && ( + + )} + + {activeTab === "prompts" && ( + + )} + + {activeTab === "auth" && ( + + )} + + {activeTab === "oauth-flow" && ( + + )} + + {activeTab === "chat" && ( + + )} + + {activeTab === "chat-v2" && ( + + )} + + {activeTab === "interceptor" && ( + + )} + + {activeTab === "tracing" && } + + {activeTab === "settings" && } +
+ + + ); + + return ( + + + {shouldShowLoginPage && !isOAuthCallbackComplete ? ( + + ) : ( + appContent + )} + + ); +} diff --git a/client/src/components/ActiveServerSelector.tsx b/client/src/components/ActiveServerSelector.tsx new file mode 100644 index 000000000..889ad9629 --- /dev/null +++ b/client/src/components/ActiveServerSelector.tsx @@ -0,0 +1,178 @@ +import { useState } from "react"; +import { ServerWithName } from "@/hooks/use-app-state"; +import { cn } from "@/lib/utils"; +import { AddServerModal } from "./connection/AddServerModal"; +import { ServerFormData } from "@/shared/types.js"; +import { Check } from "lucide-react"; +import { usePostHog } from "posthog-js/react"; +import { detectEnvironment, detectPlatform } from "@/logs/PosthogUtils"; +import { hasOAuthConfig } from "@/lib/mcp-oauth"; +interface ActiveServerSelectorProps { + connectedServerConfigs: Record; + selectedServer: string; + selectedMultipleServers: string[]; + isMultiSelectEnabled: boolean; + onServerChange: (server: string) => void; + onMultiServerToggle: (server: string) => void; + onConnect: (formData: ServerFormData) => void; + showOnlyOAuthServers?: boolean; // Only show servers that use OAuth +} + +function getStatusColor(status: string): string { + switch (status) { + case "connected": + return "bg-green-500 dark:bg-green-400"; + case "connecting": + return "bg-yellow-500 dark:bg-yellow-400 animate-pulse"; + case "failed": + return "bg-red-500 dark:bg-red-400"; + case "disconnected": + return "bg-muted-foreground"; + default: + return "bg-muted-foreground"; + } +} + +function getStatusText(status: string): string { + switch (status) { + case "connected": + return "Connected"; + case "connecting": + return "Connecting..."; + case "failed": + return "Failed"; + case "disconnected": + return "Disconnected"; + default: + return "Unknown"; + } +} + +export function ActiveServerSelector({ + connectedServerConfigs, + selectedServer, + selectedMultipleServers, + isMultiSelectEnabled, + 
onServerChange, + onMultiServerToggle, + onConnect, + showOnlyOAuthServers = false, +}: ActiveServerSelectorProps) { + const [isAddModalOpen, setIsAddModalOpen] = useState(false); + const posthog = usePostHog(); + + // Helper function to check if a server uses OAuth + const isOAuthServer = (server: ServerWithName): boolean => { + const isHttpServer = "url" in server.config; + if (!isHttpServer) return false; + + // Check if server has OAuth tokens, OAuth config in localStorage, or is in oauth-flow state + return !!( + server.oauthTokens || + hasOAuthConfig(server.name) || + server.connectionStatus === "oauth-flow" + ); + }; + + const servers = Object.entries(connectedServerConfigs).filter( + ([, server]) => { + if (server.enabled === false) return false; + + // If we only want OAuth servers, filter for those + if (showOnlyOAuthServers && !isOAuthServer(server)) return false; + + // For non-OAuth filtering, only show connected servers + if (!showOnlyOAuthServers && server.connectionStatus !== "connected") + return false; + + return true; + }, + ); + + return ( +
+
+ {servers.map(([name, serverConfig]) => { + const isSelected = isMultiSelectEnabled + ? selectedMultipleServers.includes(name) + : selectedServer === name; + + return ( + + ); + })} + + {/* Add Server Button */} + +
+ + setIsAddModalOpen(false)} + onSubmit={(formData) => { + posthog.capture("connecting_server", { + location: "active_server_selector", + platform: detectPlatform(), + environment: detectEnvironment(), + }); + onConnect(formData); + }} + /> +
+ ); +} diff --git a/client/src/components/AuthTab.tsx b/client/src/components/AuthTab.tsx new file mode 100644 index 000000000..a111a3759 --- /dev/null +++ b/client/src/components/AuthTab.tsx @@ -0,0 +1,668 @@ +import { useState, useCallback, useEffect, useMemo } from "react"; +import { Button } from "@/components/ui/button"; +import { AlertCircle, RefreshCw, Shield } from "lucide-react"; +import { EmptyState } from "./ui/empty-state"; +import { + AuthSettings, + DEFAULT_AUTH_SETTINGS, + StatusMessage, +} from "@/shared/types.js"; +import { Card, CardContent } from "./ui/card"; +import { + initiateOAuth, + refreshOAuthTokens, + getStoredTokens, + clearOAuthData, + MCPOAuthOptions, +} from "../lib/mcp-oauth"; +import { DebugMCPOAuthClientProvider } from "../lib/debug-oauth-provider"; +import { ServerWithName } from "../hooks/use-app-state"; +import { + OAuthFlowState, + EMPTY_OAUTH_FLOW_STATE, +} from "../lib/oauth-flow-types"; +import { OAuthFlowProgressSimple } from "./OAuthFlowProgressSimple"; +import { OAuthStateMachine } from "../lib/oauth-state-machine"; +import { MCPServerConfig } from "@/sdk"; + +interface StatusMessageProps { + message: StatusMessage; +} + +const StatusMessageComponent = ({ message }: StatusMessageProps) => { + let bgColor: string; + let textColor: string; + let borderColor: string; + + switch (message.type) { + case "error": + bgColor = "bg-red-50 dark:bg-red-950/50"; + textColor = "text-red-700 dark:text-red-400"; + borderColor = "border-red-200 dark:border-red-800"; + break; + case "success": + bgColor = "bg-green-50 dark:bg-green-950/50"; + textColor = "text-green-700 dark:text-green-400"; + borderColor = "border-green-200 dark:border-green-800"; + break; + case "info": + default: + bgColor = "bg-blue-50 dark:bg-blue-950/50"; + textColor = "text-blue-700 dark:text-blue-400"; + borderColor = "border-blue-200 dark:border-blue-800"; + break; + } + + return ( +
+
+ +

{message.message}

+
+
+ ); +}; + +interface AuthTabProps { + serverConfig?: MCPServerConfig; + serverEntry?: ServerWithName; + serverName?: string; +} + +export const AuthTab = ({ + serverConfig, + serverEntry, + serverName, +}: AuthTabProps) => { + const [authSettings, setAuthSettings] = useState( + DEFAULT_AUTH_SETTINGS, + ); + const [oauthFlowState, setOAuthFlowState] = useState( + EMPTY_OAUTH_FLOW_STATE, + ); + const [showGuidedFlow, setShowGuidedFlow] = useState(false); + + const updateAuthSettings = useCallback((updates: Partial) => { + setAuthSettings((prev) => ({ ...prev, ...updates })); + }, []); + + const updateOAuthFlowState = useCallback( + (updates: Partial) => { + setOAuthFlowState((prev) => ({ ...prev, ...updates })); + }, + [], + ); + + const resetOAuthFlow = useCallback(() => { + // Reset the guided flow state + setShowGuidedFlow(false); + updateOAuthFlowState(EMPTY_OAUTH_FLOW_STATE); + + // Clear any debug OAuth artifacts to avoid stale client info/scope + if (authSettings.serverUrl) { + try { + const provider = new DebugMCPOAuthClientProvider( + authSettings.serverUrl, + ); + provider.clear(); + } catch (e) { + console.warn("Failed to clear debug OAuth provider state:", e); + } + } + }, [authSettings.serverUrl, updateOAuthFlowState]); + + // Update auth settings when server config changes + useEffect(() => { + if (serverConfig && serverConfig.url && serverName) { + const serverUrl = serverConfig.url.toString(); + + // Check for existing tokens using the real OAuth system + const existingTokens = getStoredTokens(serverName); + + updateAuthSettings({ + serverUrl, + tokens: existingTokens, + error: null, + statusMessage: null, + }); + } else { + updateAuthSettings(DEFAULT_AUTH_SETTINGS); + } + }, [serverConfig, serverName, updateAuthSettings]); + + // Reset OAuth flow when component mounts or server changes + useEffect(() => { + // Reset the guided flow state when switching tabs or servers + resetOAuthFlow(); + }, [serverName, resetOAuthFlow]); + + const 
handleQuickRefresh = useCallback(async () => { + if (!serverConfig || !authSettings.serverUrl || !serverName) { + updateAuthSettings({ + statusMessage: { + type: "error", + message: "Please select a server before refreshing tokens", + }, + }); + return; + } + + updateAuthSettings({ + isAuthenticating: true, + error: null, + statusMessage: null, + }); + + try { + let result; + + if (authSettings.tokens) { + // If tokens exist, try to refresh them + result = await refreshOAuthTokens(serverName); + } else { + // If no tokens exist, initiate new OAuth flow + const oauthOptions: MCPOAuthOptions = { + serverName: serverName, + serverUrl: authSettings.serverUrl, + }; + result = await initiateOAuth(oauthOptions); + } + + if (result.success) { + // Check for updated tokens + const updatedTokens = getStoredTokens(serverName); + + updateAuthSettings({ + tokens: updatedTokens, + isAuthenticating: false, + statusMessage: { + type: "success", + message: authSettings.tokens + ? "Tokens refreshed successfully!" + : result.serverConfig + ? "OAuth authentication completed!" + : "OAuth flow initiated. You will be redirected to authorize access.", + }, + }); + + // If redirect is needed, the browser will redirect automatically + // Clear success message after 3 seconds + setTimeout(() => { + updateAuthSettings({ statusMessage: null }); + }, 3000); + } else { + updateAuthSettings({ + isAuthenticating: false, + error: result.error || "OAuth operation failed", + statusMessage: { + type: "error", + message: `Failed: ${result.error || "OAuth operation failed"}`, + }, + }); + } + } catch (error) { + updateAuthSettings({ + isAuthenticating: false, + error: error instanceof Error ? error.message : String(error), + statusMessage: { + type: "error", + message: `Failed: ${error instanceof Error ? 
error.message : String(error)}`, + }, + }); + } + }, [ + serverConfig, + authSettings.serverUrl, + authSettings.tokens, + serverName, + updateAuthSettings, + ]); + + const handleNewOAuth = useCallback(async () => { + if (!serverConfig || !authSettings.serverUrl || !serverName) { + updateAuthSettings({ + statusMessage: { + type: "error", + message: "Please select a server before starting OAuth", + }, + }); + return; + } + + updateAuthSettings({ + isAuthenticating: true, + error: null, + statusMessage: null, + }); + + try { + // Clear existing tokens first to force a fresh OAuth flow + clearOAuthData(serverName); + + // Always initiate new OAuth flow (fresh start) + const oauthOptions: MCPOAuthOptions = { + serverName: serverName, + serverUrl: authSettings.serverUrl, + }; + const result = await initiateOAuth(oauthOptions); + + if (result.success) { + // Check for updated tokens + const updatedTokens = getStoredTokens(serverName); + + updateAuthSettings({ + tokens: updatedTokens, + isAuthenticating: false, + statusMessage: { + type: "success", + message: result.serverConfig + ? "OAuth authentication completed!" + : "OAuth flow initiated. You will be redirected to authorize access.", + }, + }); + + // Clear success message after 3 seconds + setTimeout(() => { + updateAuthSettings({ statusMessage: null }); + }, 3000); + } else { + updateAuthSettings({ + isAuthenticating: false, + error: result.error || "OAuth authentication failed", + statusMessage: { + type: "error", + message: `Failed: ${result.error || "OAuth authentication failed"}`, + }, + }); + } + } catch (error) { + updateAuthSettings({ + isAuthenticating: false, + error: error instanceof Error ? error.message : String(error), + statusMessage: { + type: "error", + message: `Failed: ${error instanceof Error ? 
error.message : String(error)}`, + }, + }); + } + }, [serverConfig, authSettings.serverUrl, serverName, updateAuthSettings]); + + // Initialize OAuth state machine + const oauthStateMachine = useMemo(() => { + if (!serverConfig || !serverName || !authSettings.serverUrl) return null; + + const provider = new DebugMCPOAuthClientProvider(authSettings.serverUrl); + return new OAuthStateMachine({ + state: oauthFlowState, + serverUrl: authSettings.serverUrl, + serverName, + provider, + updateState: updateOAuthFlowState, + }); + }, [ + serverConfig, + serverName, + authSettings.serverUrl, + oauthFlowState, + updateOAuthFlowState, + ]); + + const startGuidedFlow = useCallback(() => { + // First reset any existing flow state + resetOAuthFlow(); + + // Then start the new guided flow + setShowGuidedFlow(true); + updateOAuthFlowState(EMPTY_OAUTH_FLOW_STATE); + if (oauthStateMachine) { + oauthStateMachine.proceedToNextStep(); + } + }, [oauthStateMachine, updateOAuthFlowState, resetOAuthFlow]); + + const proceedToNextStep = useCallback(async () => { + if (oauthStateMachine) { + await oauthStateMachine.proceedToNextStep(); + } + }, [oauthStateMachine]); + + const exitGuidedFlow = useCallback(() => { + setShowGuidedFlow(false); + updateOAuthFlowState(EMPTY_OAUTH_FLOW_STATE); + // Refresh tokens after guided flow completion + if (serverName) { + const updatedTokens = getStoredTokens(serverName); + updateAuthSettings({ tokens: updatedTokens }); + } + }, [serverName, updateAuthSettings, updateOAuthFlowState]); + + const handleClearTokens = useCallback(() => { + if (serverConfig && authSettings.serverUrl && serverName) { + // Use the real OAuth system to clear tokens + clearOAuthData(serverName); + + updateAuthSettings({ + tokens: null, + error: null, + statusMessage: { + type: "success", + message: "OAuth tokens cleared successfully", + }, + }); + + // Clear success message after 3 seconds + setTimeout(() => { + updateAuthSettings({ statusMessage: null }); + }, 3000); + } + }, 
[serverConfig, authSettings.serverUrl, serverName, updateAuthSettings]); + + // Check if server supports OAuth + // Only HTTP servers support OAuth (STDIO servers use process-based auth) + const isHttpServer = serverConfig && "url" in serverConfig; + const supportsOAuth = isHttpServer; + + // Check if OAuth is currently configured/in-use + const hasOAuthConfigured = + serverName && + (serverEntry?.oauthTokens || + getStoredTokens(serverName) || + serverEntry?.connectionStatus === "oauth-flow"); + + const contributionBanner = ( +
+
+ Help us improve this feature!{" "} + We're looking for contributors to polish up this feature. +
+ + Join our Discord + +
+ ); + + if (!serverConfig) { + return ( + + ); + } + + if (!supportsOAuth) { + return ( +
+
+ {/* Header */} +
+
+
+ +

+ Authentication +

+
+

+ Manage OAuth authentication for the selected server +

+
+
+ + {/* Content */} +
+
+ {contributionBanner} + {/* Server Info */} +
+

Selected Server

+
+
Name: {serverEntry?.name || "Unknown"}
+ {isHttpServer && ( +
URL: {(serverConfig as any).url.toString()}
+ )} + {!isHttpServer && ( +
Command: {(serverConfig as any).command}
+ )} +
+ Type: {isHttpServer ? "HTTP Server" : "STDIO Server"} +
+
+
+ + {/* No OAuth Support Message */} + + +
+
+ +
+
+

+ {!isHttpServer + ? "No OAuth Support" + : "No Authentication Required"} +

+

+ {!isHttpServer + ? "STDIO servers don't support OAuth authentication." + : `The HTTP server "${serverEntry?.name || "Unknown"}" is connected without OAuth authentication.`} +

+ {isHttpServer && ( +

+ If this server supports OAuth, you can reconnect it + with OAuth enabled from the Servers tab. +

+ )} +
+
+
+
+
+
+
+
+ ); + } + + return ( +
+
+ {/* Header */} +
+
+
+ +

+ Authentication +

+
+

+ Manage OAuth authentication for the selected server +

+
+
+ + {/* Content */} +
+
+ {contributionBanner} + {/* Server Info */} +
+

Selected Server

+
+
Name: {serverEntry?.name || "Unknown"}
+ {isHttpServer && ( +
URL: {(serverConfig as any).url.toString()}
+ )} +
Type: HTTP Server
+
+
+ + {/* OAuth Authentication */} +
+
+ +

OAuth Authentication

+
+

+ {hasOAuthConfigured + ? "Manage OAuth authentication for this server." + : "This server supports OAuth authentication. Use Quick OAuth to authenticate and get tokens."} +

+ + {authSettings.statusMessage && ( + + )} + + {authSettings.error && !authSettings.statusMessage && ( +
+
+ +

{authSettings.error}

+
+
+ )} + +
+ {authSettings.tokens && ( +
+

Current Tokens:

+
+
+

+ Access Token: +

+
+ {authSettings.tokens.access_token.substring(0, 40)}... +
+
+ {authSettings.tokens.refresh_token && ( +
+

+ Refresh Token: +

+
+ {authSettings.tokens.refresh_token.substring(0, 40)} + ... +
+
+ )} +
+ Type: {authSettings.tokens.token_type} + {authSettings.tokens.expires_in && ( + + Expires in: {authSettings.tokens.expires_in}s + + )} + {authSettings.tokens.scope && ( + Scope: {authSettings.tokens.scope} + )} +
+
+
+ )} + +
+ + + {authSettings.tokens && ( + + )} + + + + +
+ +

+ {!serverConfig + ? "Select a server to manage its OAuth authentication." + : authSettings.tokens + ? "Use Quick Refresh to renew existing tokens, or Quick OAuth to start a fresh authentication flow." + : "Use Quick OAuth to authenticate with the server and get tokens."} +

+
+ + {/* OAuth Flow Progress */} + {showGuidedFlow && authSettings.serverUrl && ( + + )} + + {/* Exit Guided Flow Button */} + {showGuidedFlow && ( +
+ +
+ )} +
+
+
+
+
+ ); +}; diff --git a/client/src/components/ChatTab.tsx b/client/src/components/ChatTab.tsx new file mode 100644 index 000000000..c04a5a819 --- /dev/null +++ b/client/src/components/ChatTab.tsx @@ -0,0 +1,617 @@ +import { useRef, useEffect, useState, type ReactNode } from "react"; +import { MessageCircle, PlusCircle, Settings, Sparkles } from "lucide-react"; +import { useChat } from "@/hooks/use-chat"; +import { Message } from "./chat/message"; +import { ChatInput } from "./chat/chat-input"; +import { ElicitationDialog } from "./ElicitationDialog"; +import { TooltipProvider } from "./ui/tooltip"; +import { motion, AnimatePresence } from "framer-motion"; +import { toast } from "sonner"; +import { getDefaultTemperatureForModel } from "@/lib/chat-utils"; +import { MCPServerConfig } from "@/sdk"; +import { useConvexAuth } from "convex/react"; +import { useAuth } from "@workos-inc/authkit-react"; +import type { ServerWithName } from "@/hooks/use-app-state"; +import { Button } from "@/components/ui/button"; +import { usePostHog } from "posthog-js/react"; +import { detectEnvironment, detectPlatform } from "@/logs/PosthogUtils"; +import { isMCPJamProvidedModel } from "@/shared/types"; +import { listTools } from "@/lib/mcp-tools-api"; +import { JsonRpcLoggerView } from "./logging/json-rpc-logger-view"; +import { + ResizablePanelGroup, + ResizablePanel, + ResizableHandle, +} from "./ui/resizable"; +interface ChatTabProps { + serverConfigs?: Record; + connectedServerConfigs?: Record; + systemPrompt?: string; +} + +export function ChatTab({ + serverConfigs, + connectedServerConfigs, + systemPrompt = "", +}: ChatTabProps) { + const messagesContainerRef = useRef(null); + const [isAtBottom, setIsAtBottom] = useState(true); + const { isAuthenticated } = useConvexAuth(); + const { signUp } = useAuth(); + const posthog = usePostHog(); + const [systemPromptState, setSystemPromptState] = useState( + systemPrompt || "You are a helpful assistant with access to MCP tools.", + ); + + 
const [temperatureState, setTemperatureState] = useState(1.0); + const [toolsMetadata, setToolsMetadata] = useState< + Record> + >({}); + const selectedServerNames = Object.keys(serverConfigs || {}); + const selectedConnectedNames = selectedServerNames.filter( + (name) => connectedServerConfigs?.[name]?.connectionStatus === "connected", + ); + const noServersConnected = selectedConnectedNames.length === 0; + + const { + messages, + isLoading, + error, + input, + setInput, + sendMessage, + stopGeneration, + regenerateMessage, + clearChat, + model, + availableModels, + setModel, + elicitationRequest, + elicitationLoading, + handleElicitationResponse, + } = useChat({ + systemPrompt: systemPromptState, + temperature: temperatureState, + selectedServers: selectedConnectedNames, + onError: (error) => { + toast.error(error); + }, + }); + const isUsingMcpjamProvidedModel = model + ? isMCPJamProvidedModel(model.id) + : false; + const showSignInPrompt = isUsingMcpjamProvidedModel && !isAuthenticated; + const signInPromptMessage = "Sign in to use MCPJam provided models"; + + useEffect(() => { + if (showSignInPrompt) { + setInput(""); + } + }, [showSignInPrompt, setInput]); + + // Restore model from localStorage on mount + useEffect(() => { + posthog.capture("chat_tab_viewed", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + }); + }, []); + useEffect(() => { + const savedModelId = localStorage.getItem("chat-selected-model"); + if (savedModelId && availableModels.length > 0) { + const savedModel = availableModels.find((m) => m.id === savedModelId); + if (savedModel && (!model || model.id !== savedModelId)) { + setModel(savedModel); + } + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [availableModels]); + + // Save model to localStorage when it changes + useEffect(() => { + if (model) { + localStorage.setItem("chat-selected-model", model.id); + } + }, [model]); + + // Update temperature when model changes + 
useEffect(() => { + if (model) { + setTemperatureState(getDefaultTemperatureForModel(model)); + } + }, [model]); + + // Fetch tools metadata when servers connect + useEffect(() => { + const fetchToolsMetadata = async () => { + const metadata: Record> = {}; + + for (const serverId of selectedConnectedNames) { + try { + const data = await listTools(serverId); + if (data.toolsMetadata) { + Object.assign(metadata, data.toolsMetadata); + } + } catch (err) { + console.error(`Failed to fetch tools for server ${serverId}:`, err); + } + } + + setToolsMetadata(metadata); + }; + + if (selectedConnectedNames && selectedConnectedNames.length > 0) { + fetchToolsMetadata(); + } else { + setToolsMetadata({}); + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [JSON.stringify(selectedConnectedNames)]); + + const hasMessages = messages.length > 0; + const isChatDisabled = showSignInPrompt || noServersConnected; + const disabledMessage = showSignInPrompt + ? "Sign in to use free chat" + : "Connect an MCP server to send your first message"; + const quickStartPrompts = [ + { + label: "Available tools", + value: "What tools are available?", + icon: Sparkles, + }, + ]; + // Auto-scroll to bottom when new messages arrive + useEffect(() => { + if (isAtBottom && messagesContainerRef.current) { + messagesContainerRef.current.scrollTop = + messagesContainerRef.current.scrollHeight; + } + }, [messages, isAtBottom]); + + // Check if user is at bottom + const handleScroll = () => { + if (!messagesContainerRef.current) return; + + const { scrollTop, scrollHeight, clientHeight } = + messagesContainerRef.current; + const threshold = 100; + const atBottom = scrollHeight - scrollTop - clientHeight < threshold; + + setIsAtBottom(atBottom); + }; + + const handleCopyMessage = (content: string) => { + navigator.clipboard.writeText(content); + }; + + const handleCallTool = async ( + toolName: string, + params: Record, + ) => { + try { + const response = await 
fetch("/api/mcp/tools/execute", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + toolName, + parameters: params, + // Pass serverId if only one server is connected + ...(selectedConnectedNames.length === 1 + ? { serverId: selectedConnectedNames[0] } + : {}), + }), + }); + const data = await response.json(); + return data.result; + } catch (error) { + throw error; + } + }; + + const handleSendFollowup = (message: string) => { + setInput(message); + // Automatically send the message + sendMessage(message); + }; + + const renderEmptyLayout = ( + content: ReactNode, + options: { placeholder: string; disabled: boolean }, + ) => ( +
+
+ + {content} + + + + { + posthog.capture("send_message", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + model_id: model?.id ?? null, + model_name: model?.name ?? null, + model_provider: model?.provider ?? null, + }); + sendMessage(input); + }} + onStop={stopGeneration} + disabled={availableModels.length === 0 || options.disabled} + isLoading={isLoading} + placeholder={options.placeholder} + className="border shadow-sm" + currentModel={model || null} + availableModels={availableModels} + onModelChange={setModel} + onClearChat={clearChat} + hasMessages={false} + systemPrompt={systemPromptState} + onSystemPromptChange={setSystemPromptState} + temperature={temperatureState} + onTemperatureChange={setTemperatureState} + isSendBlocked={showSignInPrompt} + /> + {availableModels.length === 0 && ( + + Configure API keys in Settings or start Ollama to enable chat + + )} + +
+ + {/* Elicitation Dialog */} + +
+ ); + + const renderSignUpForFreeModels = ( +
+ {/* Header */} +
+

+ Test your MCP server with frontier models for free +

+
+ + {/* Model Showcase */} +
+ {/* Claude */} +
+ Claude +
+ + {/* GPT */} +
+ OpenAI +
+ + {/* Gemini */} +
+ Google +
+ + {/* Meta */} +
+ Meta +
+ + {/* Grok/xAI */} +
+ Grok +
+ + {/* Amazon Bedrock */} +
+ Amazon Bedrock +
+
+ + {/* CTA */} +
+ +

+ or bring your own API key in{" "} + + settings + +

+
+
+ ); + + if (!hasMessages && isChatDisabled) { + const disabledContent = showSignInPrompt ? ( + renderSignUpForFreeModels + ) : ( +
+
+

+ You must connect to an MCP server +

+
+ +
+ ); + + return renderEmptyLayout(disabledContent, { + placeholder: disabledMessage, + disabled: true, + }); + } + + if (!hasMessages) { + const suggestionsContent = ( +
+
+

Test your servers

+

+ Start typing or choose a quick suggestion to begin. +

+
+
+ {quickStartPrompts.map(({ label, value, icon: Icon }) => ( + + ))} +
+
+ ); + + return renderEmptyLayout(suggestionsContent, { + placeholder: "Send a message...", + disabled: false, + }); + } + + // Active state - messages with bottom input + return ( + +
+ + {/* Main Chat Panel */} + +
+ {/* Messages Area - Scrollable with bottom padding for input */} +
+
+ + {messages.map((message, index) => ( + + {}} + onRegenerate={regenerateMessage} + onCopy={handleCopyMessage} + showActions={true} + serverConfigs={serverConfigs} + onCallTool={handleCallTool} + onSendFollowup={handleSendFollowup} + toolsMetadata={toolsMetadata} + serverId={ + selectedConnectedNames.length === 1 + ? selectedConnectedNames[0] + : undefined + } + /> + + ))} + {/* Thinking indicator */} + {isLoading && + messages.length > 0 && + messages[messages.length - 1].role === "user" && ( + +
+
+ +
+
+ + Thinking + +
+
+
+
+
+
+
+ + )} + +
+
+ + {/* Error Display */} + + {error && ( + +
+

{error}

+
+
+ )} +
+ + {/* Fixed Bottom Input */} +
+
+
+ { + posthog.capture("send_message", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + model_id: model?.id ?? null, + model_name: model?.name ?? null, + model_provider: model?.provider ?? null, + }); + sendMessage(message, attachments); + }} + onStop={stopGeneration} + disabled={ + availableModels.length === 0 || + noServersConnected || + showSignInPrompt + } + isLoading={isLoading} + placeholder={ + showSignInPrompt + ? signInPromptMessage + : "Send a message..." + } + className="border-2 shadow-sm" + currentModel={model} + availableModels={availableModels} + onModelChange={setModel} + onClearChat={clearChat} + hasMessages={hasMessages} + systemPrompt={systemPromptState} + onSystemPromptChange={setSystemPromptState} + temperature={temperatureState} + onTemperatureChange={setTemperatureState} + isSendBlocked={showSignInPrompt} + /> +
+
+
+ + {/* Elicitation Dialog */} + +
+ + + + + {/* JSON-RPC Logger Panel */} + +
+ +
+
+ +
+ + ); +} diff --git a/client/src/components/ChatTabV2.tsx b/client/src/components/ChatTabV2.tsx new file mode 100644 index 000000000..a30bd5efe --- /dev/null +++ b/client/src/components/ChatTabV2.tsx @@ -0,0 +1,543 @@ +import { + FormEvent, + useMemo, + useState, + useEffect, + useCallback, + useRef, +} from "react"; +import { useChat } from "@ai-sdk/react"; +import { + DefaultChatTransport, + lastAssistantMessageIsCompleteWithToolCalls, + generateId, +} from "ai"; +import { useAuth } from "@workos-inc/authkit-react"; +import { useConvexAuth } from "convex/react"; +import { ModelDefinition, isGPT5Model } from "@/shared/types"; +import { + ProviderTokens, + useAiProviderKeys, +} from "@/hooks/use-ai-provider-keys"; +import { JsonRpcLoggerView } from "./logging/json-rpc-logger-view"; +import { + ResizablePanelGroup, + ResizablePanel, + ResizableHandle, +} from "./ui/resizable"; +import { ElicitationDialog } from "@/components/ElicitationDialog"; +import type { DialogElicitation } from "@/components/ToolsTab"; +import { + detectOllamaModels, + detectOllamaToolCapableModels, +} from "@/lib/ollama-utils"; +import { + buildAvailableModels, + getDefaultModel, +} from "@/components/chat-v2/model-helpers"; +import { isMCPJamProvidedModel } from "@/shared/types"; +import { ChatInput } from "@/components/chat-v2/chat-input"; +import { Thread } from "@/components/chat-v2/thread"; +import { ServerWithName } from "@/hooks/use-app-state"; +import { getToolsMetadata, ToolServerMap } from "@/lib/mcp-tools-api"; +import { MCPJamFreeModelsPrompt } from "@/components/chat-v2/mcpjam-free-models-prompt"; +import { ConnectMcpServerCallout } from "@/components/chat-v2/connect-mcp-server-callout"; +import { usePostHog } from "posthog-js/react"; +import { detectEnvironment, detectPlatform } from "@/logs/PosthogUtils"; +import { ErrorBox } from "@/components/chat-v2/error"; +import { usePersistedModel } from "@/hooks/use-persisted-model"; + +const DEFAULT_SYSTEM_PROMPT = + "You are a 
helpful assistant with access to MCP tools."; + +const STARTER_PROMPTS: Array<{ label: string; text: string }> = [ + { + label: "Show me connected tools", + text: "List my connected MCP servers and their available tools.", + }, + { + label: "Suggest an automation", + text: "Suggest an automation I can build with my current MCP setup.", + }, + { + label: "Summarize recent activity", + text: "Summarize the most recent activity across my MCP servers.", + }, +]; + +interface ChatTabProps { + connectedServerConfigs: Record; + selectedServerNames: string[]; +} + +function formatErrorMessage(error: unknown): string | null { + if (!error) return null; + if (typeof error === "string") return error; + if (error instanceof Error) return error.message; + try { + return JSON.stringify(error); + } catch { + return String(error); + } +} + +export function ChatTabV2({ + connectedServerConfigs, + selectedServerNames, +}: ChatTabProps) { + const { getAccessToken, signUp } = useAuth(); + const { isAuthenticated } = useConvexAuth(); + const posthog = usePostHog(); + const { + hasToken, + getToken, + getLiteLLMBaseUrl, + getLiteLLMModelAlias, + getOpenRouterSelectedModels, + getOllamaBaseUrl, + getBedrockRegion, + getBedrockSecretKey, + } = useAiProviderKeys(); + + const [input, setInput] = useState(""); + const [ollamaModels, setOllamaModels] = useState([]); + const [isOllamaRunning, setIsOllamaRunning] = useState(false); + const [authHeaders, setAuthHeaders] = useState< + Record | undefined + >(undefined); + const [systemPrompt, setSystemPrompt] = useState(DEFAULT_SYSTEM_PROMPT); + const [temperature, setTemperature] = useState(0.7); + const [chatSessionId, setChatSessionId] = useState(generateId()); + const [toolsMetadata, setToolsMetadata] = useState< + Record> + >({}); + const [toolServerMap, setToolServerMap] = useState({}); + const availableModels = useMemo(() => { + return buildAvailableModels({ + hasToken, + getLiteLLMBaseUrl, + getLiteLLMModelAlias, + 
getOpenRouterSelectedModels, + isOllamaRunning, + ollamaModels, + }); + }, [ + hasToken, + getLiteLLMBaseUrl, + getLiteLLMModelAlias, + getOpenRouterSelectedModels, + isOllamaRunning, + ollamaModels, + ]); + const { selectedModelId, setSelectedModelId } = usePersistedModel(); + const selectedModel = useMemo(() => { + const fallback = getDefaultModel(availableModels); + if (!selectedModelId) return fallback; + const found = availableModels.find((m) => String(m.id) === selectedModelId); + return found ?? fallback; + }, [availableModels, selectedModelId]); + + const [elicitation, setElicitation] = useState( + null, + ); + const [elicitationLoading, setElicitationLoading] = useState(false); + + const selectedConnectedServerNames = useMemo( + () => + selectedServerNames.filter( + (name) => + connectedServerConfigs[name]?.connectionStatus === "connected", + ), + [selectedServerNames, connectedServerConfigs], + ); + const noServersConnected = selectedConnectedServerNames.length === 0; + + const transport = useMemo(() => { + const apiKey = getToken(selectedModel.provider as keyof ProviderTokens); + const isGpt5 = isGPT5Model(selectedModel.id); + + return new DefaultChatTransport({ + api: "/api/mcp/chat-v2", + body: { + model: selectedModel, + apiKey: apiKey, + ...(isGpt5 ? 
{} : { temperature }), + systemPrompt, + selectedServers: selectedConnectedServerNames, + ollamaBaseUrl: getOllamaBaseUrl(), + bedrockRegion: getBedrockRegion(), + bedrockSecretKey: getBedrockSecretKey(), + }, + headers: authHeaders, + }); + }, [ + selectedModel, + getToken, + authHeaders, + temperature, + systemPrompt, + selectedConnectedServerNames, + getOllamaBaseUrl, + getBedrockRegion, + getBedrockSecretKey, + ]); + + useEffect(() => { + posthog.capture("chat_tab_viewed", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + }); + }, []); + + useEffect(() => { + let active = true; + (async () => { + try { + const token = await getAccessToken?.(); + if (!active) return; + if (token) { + setAuthHeaders({ Authorization: `Bearer ${token}` }); + } else { + setAuthHeaders(undefined); + } + } catch { + if (!active) return; + setAuthHeaders(undefined); + } + resetChat(); + })(); + return () => { + active = false; + }; + }, [getAccessToken]); + + const isMcpJamModel = useMemo(() => { + return selectedModel?.id + ? isMCPJamProvidedModel(String(selectedModel.id)) + : false; + }, [selectedModel]); + + const { messages, sendMessage, stop, status, error, setMessages } = useChat({ + id: chatSessionId, + transport: transport!, + // Disable client auto-send for MCPJam-provided models; server handles tool loop + sendAutomaticallyWhen: isMcpJamModel + ? 
undefined + : lastAssistantMessageIsCompleteWithToolCalls, + }); + const resetChat = useCallback(() => { + setChatSessionId(generateId()); + setMessages([]); + setInput(""); + }, [setMessages]); + + useEffect(() => { + resetChat(); + }, [resetChat]); + + const previousSelectedServersRef = useRef( + selectedConnectedServerNames, + ); + + useEffect(() => { + const previousNames = previousSelectedServersRef.current; + const currentNames = selectedConnectedServerNames; + const hasChanged = + previousNames.length !== currentNames.length || + previousNames.some((name, index) => name !== currentNames[index]); + + if (hasChanged) { + resetChat(); + } + + previousSelectedServersRef.current = currentNames; + }, [selectedConnectedServerNames, resetChat]); + + useEffect(() => { + const checkOllama = async () => { + const { isRunning, availableModels } = + await detectOllamaModels(getOllamaBaseUrl()); + setIsOllamaRunning(isRunning); + + const toolCapable = isRunning + ? await detectOllamaToolCapableModels(getOllamaBaseUrl()) + : []; + const toolCapableSet = new Set(toolCapable); + const ollamaDefs: ModelDefinition[] = availableModels.map( + (modelName) => ({ + id: modelName, + name: modelName, + provider: "ollama" as const, + disabled: !toolCapableSet.has(modelName), + disabledReason: toolCapableSet.has(modelName) + ? 
undefined + : "Model does not support tool calling", + }), + ); + setOllamaModels(ollamaDefs); + }; + checkOllama(); + const interval = setInterval(checkOllama, 30000); + return () => clearInterval(interval); + }, [getOllamaBaseUrl]); + + // selectedModelId defaults via effectiveModel; no effect needed + + useEffect(() => { + const es = new EventSource("/api/mcp/elicitation/stream"); + es.onmessage = (ev) => { + try { + const data = JSON.parse(ev.data); + if (data?.type === "elicitation_request") { + setElicitation({ + requestId: data.requestId, + message: data.message, + schema: data.schema, + timestamp: data.timestamp || new Date().toISOString(), + }); + } else if (data?.type === "elicitation_complete") { + setElicitation((prev) => + prev?.requestId === data.requestId ? null : prev, + ); + } + } catch (error) { + console.warn("[ChatTabV2] Failed to parse elicitation event:", error); + } + }; + es.onerror = () => { + console.warn( + "[ChatTabV2] Elicitation SSE connection error, browser will retry", + ); + }; + return () => es.close(); + }, []); + + const handleElicitationResponse = async ( + action: "accept" | "decline" | "cancel", + parameters?: Record, + ) => { + if (!elicitation) return; + setElicitationLoading(true); + try { + await fetch("/api/mcp/elicitation/respond", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + requestId: elicitation.requestId, + action, + content: parameters, + }), + }); + setElicitation(null); + } finally { + setElicitationLoading(false); + } + }; + + useEffect(() => { + const fetchToolsMetadata = async () => { + const { metadata, toolServerMap } = await getToolsMetadata( + selectedConnectedServerNames, + ); + setToolsMetadata(metadata); + setToolServerMap(toolServerMap); + }; + fetchToolsMetadata(); + }, [selectedConnectedServerNames]); + + const disableForAuthentication = !isAuthenticated && isMcpJamModel; + const disableForServers = noServersConnected; + const isStreaming = status 
=== "streaming" || status === "submitted"; + const submitBlocked = disableForAuthentication || disableForServers; + const inputDisabled = status !== "ready" || submitBlocked; + + let placeholder = "Ask something…"; + if (disableForServers) { + placeholder = "Connect an MCP server to send your first message"; + } + if (disableForAuthentication) { + placeholder = "Sign in to use free chat"; + } + + const shouldShowUpsell = disableForAuthentication; + const shouldShowConnectCallout = disableForServers && !shouldShowUpsell; + const showDisabledCallout = + messages.length === 0 && (shouldShowUpsell || shouldShowConnectCallout); + + const errorMessage = formatErrorMessage(error); + + const handleSignUp = () => { + posthog.capture("sign_up_button_clicked", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + }); + signUp(); + }; + + const onSubmit = (event: FormEvent) => { + event.preventDefault(); + if ( + input.trim() && + status === "ready" && + !disableForAuthentication && + !disableForServers + ) { + posthog.capture("send_message", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + model_id: selectedModel?.id ?? null, + model_name: selectedModel?.name ?? null, + model_provider: selectedModel?.provider ?? null, + }); + sendMessage({ text: input }); + setInput(""); + } + }; + + const handleStarterPrompt = (prompt: string) => { + if (submitBlocked || inputDisabled) { + setInput(prompt); + return; + } + posthog.capture("send_message", { + location: "chat_tab", + platform: detectPlatform(), + environment: detectEnvironment(), + model_id: selectedModel?.id ?? null, + model_name: selectedModel?.name ?? null, + model_provider: selectedModel?.provider ?? 
null, + }); + sendMessage({ text: prompt }); + setInput(""); + }; + + const sharedChatInputProps = { + value: input, + onChange: setInput, + onSubmit, + stop, + disabled: inputDisabled, + isLoading: isStreaming, + placeholder, + currentModel: selectedModel, + availableModels, + onModelChange: (model: ModelDefinition) => { + setSelectedModelId(String(model.id)); + resetChat(); + }, + systemPrompt, + onSystemPromptChange: setSystemPrompt, + temperature, + onTemperatureChange: setTemperature, + onResetChat: resetChat, + submitDisabled: submitBlocked, + }; + + const showStarterPrompts = !showDisabledCallout && messages.length === 0; + + return ( +
+ + +
+ {messages.length === 0 ? ( +
+
+ {showDisabledCallout && ( +
+ {shouldShowUpsell ? ( + + ) : ( + + )} +
+ )} + +
+ {showStarterPrompts && ( +
+

+ Try one of these to get started +

+
+ {STARTER_PROMPTS.map((prompt) => ( + + ))} +
+
+ )} + + +
+
+
+ ) : ( + <> +
+
+ + sendMessage({ text }) + } + model={selectedModel} + isLoading={status === "submitted"} + toolsMetadata={toolsMetadata} + toolServerMap={toolServerMap} + /> +
+ {errorMessage && ( +
+ +
+ )} +
+ +
+
+ +
+
+ + )} + + +
+
+ + + + +
+ +
+
+
+
+ ); +} diff --git a/client/src/components/CompletingSignInLoading.tsx b/client/src/components/CompletingSignInLoading.tsx new file mode 100644 index 000000000..bceaa4574 --- /dev/null +++ b/client/src/components/CompletingSignInLoading.tsx @@ -0,0 +1,10 @@ +export default function CompletingSignInLoading() { + return ( +
+
+
+

Completing sign in...

+
+
+ ); +} diff --git a/client/src/components/ElicitationDialog.tsx b/client/src/components/ElicitationDialog.tsx new file mode 100644 index 000000000..eebdc840e --- /dev/null +++ b/client/src/components/ElicitationDialog.tsx @@ -0,0 +1,297 @@ +import React, { useState } from "react"; +import { Button } from "./ui/button"; +import { Input } from "./ui/input"; +import { Label } from "./ui/label"; +import { Textarea } from "./ui/textarea"; +import { Badge } from "./ui/badge"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "./ui/select"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, +} from "./ui/dialog"; +import { MessageSquare, X, Check, RefreshCw } from "lucide-react"; +import { DialogElicitation } from "./ToolsTab"; + +interface FormField { + name: string; + type: string; + description?: string; + required: boolean; + value: any; + enum?: string[]; + minimum?: number; + maximum?: number; + pattern?: string; +} + +interface ElicitationDialogProps { + elicitationRequest: DialogElicitation | null; + onResponse: ( + action: "accept" | "decline" | "cancel", + parameters?: Record, + ) => Promise; + loading?: boolean; +} + +export function ElicitationDialog({ + elicitationRequest, + onResponse, + loading = false, +}: ElicitationDialogProps) { + const [fields, setFields] = useState([]); + + // Generate form fields from schema when request changes + React.useEffect(() => { + if (elicitationRequest?.schema) { + generateFormFields(elicitationRequest.schema); + } else { + setFields([]); + } + }, [elicitationRequest]); + + const generateFormFields = (schema: any) => { + if (!schema || !schema.properties) { + setFields([]); + return; + } + + const formFields: FormField[] = []; + const required = schema.required || []; + + Object.entries(schema.properties).forEach(([key, prop]: [string, any]) => { + const fieldType = prop.enum ? 
"enum" : prop.type || "string"; + formFields.push({ + name: key, + type: fieldType, + description: prop.description, + required: required.includes(key), + value: getDefaultValue(fieldType, prop.enum), + enum: prop.enum, + minimum: prop.minimum, + maximum: prop.maximum, + pattern: prop.pattern, + }); + }); + + setFields(formFields); + }; + + const getDefaultValue = (type: string, enumValues?: string[]) => { + switch (type) { + case "enum": + return enumValues?.[0] || ""; + case "string": + return ""; + case "number": + case "integer": + return ""; + case "boolean": + return false; + case "array": + return []; + case "object": + return {}; + default: + return ""; + } + }; + + const updateFieldValue = (fieldName: string, value: any) => { + setFields((prev) => + prev.map((field) => + field.name === fieldName ? { ...field, value } : field, + ), + ); + }; + + const buildParameters = (): Record => { + const params: Record = {}; + fields.forEach((field) => { + if ( + field.value !== "" && + field.value !== null && + field.value !== undefined + ) { + let processedValue = field.value; + + if (field.type === "number" || field.type === "integer") { + processedValue = Number(field.value); + } else if (field.type === "boolean") { + processedValue = Boolean(field.value); + } else if (field.type === "array" || field.type === "object") { + try { + processedValue = JSON.parse(field.value); + } catch { + processedValue = field.value; + } + } + + params[field.name] = processedValue; + } + }); + return params; + }; + + const handleResponse = async (action: "accept" | "decline" | "cancel") => { + if (action === "accept") { + // Validate required fields + const missingFields = fields.filter( + (field) => field.required && (!field.value || field.value === ""), + ); + + if (missingFields.length > 0) { + // You could show validation errors here + return; + } + + const parameters = buildParameters(); + await onResponse(action, parameters); + } else { + await onResponse(action); + } + }; + + 
const renderField = (field: FormField) => { + if (field.type === "enum") { + return ( + + ); + } else if (field.type === "boolean") { + return ( +
+ updateFieldValue(field.name, e.target.checked)} + className="w-4 h-4 text-primary bg-background border-border rounded focus:ring-ring focus:ring-2" + /> + + {field.value ? "Enabled" : "Disabled"} + +
+ ); + } else if (field.type === "array" || field.type === "object") { + return ( +