diff --git a/package-lock.json b/package-lock.json index 36333dcead..c22d0b075a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -492,6 +492,27 @@ "hono": "^4" } }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -690,6 +711,10 @@ } } }, + "node_modules/@modelcontextprotocol/server-consolidated": { + "resolved": "src/consolidated", + "link": true + }, "node_modules/@modelcontextprotocol/server-everything": { "resolved": "src/everything", "link": true @@ -1877,6 +1902,7 @@ "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", "license": "MIT", + "peer": true, "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", @@ -3436,6 +3462,7 @@ "integrity": "sha512-j3lYzGC3P+B5Yfy/pfKNgVEg4+UtcIJcVRt2cDjIOmhLourAqPqf8P7acgxeiSgUB7E3p2P8/3gNIgDLpwzs4g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -3519,6 +3546,7 @@ "integrity": "sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "2.1.9", "@vitest/mocker": "2.1.9", @@ -3688,6 +3716,7 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } @@ -3736,6 +3765,51 @@ "typescript": "^5.6.2" } }, + "src/consolidated": { + "name": "@modelcontextprotocol/server-consolidated", + "version": "1.0.0", + "dependencies": { + "@modelcontextprotocol/sdk": "^1.25.2", + "chalk": "^5.3.0", + "cors": "^2.8.5", + "diff": "^8.0.3", + "express": "^5.2.1", + "glob": "^10.5.0", + "jszip": "^3.10.1", + "minimatch": "^10.0.1", + "yargs": "^17.7.2", + "zod": "^3.25.0", + "zod-to-json-schema": "^3.23.5" + }, + "bin": { + "mcp-server-consolidated": "dist/index.js" + }, + "devDependencies": { + "@types/cors": "^2.8.19", + "@types/diff": "^5.0.9", + "@types/express": "^5.0.6", + "@types/minimatch": "^5.1.2", + "@types/node": "^22", + "@types/yargs": "^17.0.32", + "shx": "^0.3.4", + "typescript": "^5.8.2" + } + }, + "src/consolidated/node_modules/minimatch": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz", + "integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + 
"url": "https://github.com/sponsors/isaacs" + } + }, "src/duckduckgo": { "name": "@modelcontextprotocol/server-duckduckgo", "version": "0.2.0", diff --git a/src/consolidated/index.js b/src/consolidated/index.js new file mode 100644 index 0000000000..f31987ee9c --- /dev/null +++ b/src/consolidated/index.js @@ -0,0 +1,715 @@ +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import { z } from "zod"; +import fs from "fs/promises"; +import { createReadStream } from "fs"; +import path from "path"; +import os from 'os'; +import { fileURLToPath } from 'url'; +import chalk from 'chalk'; +import { randomBytes } from 'crypto'; +import { createTwoFilesPatch } from 'diff'; +import { minimatch } from 'minimatch'; +import { exec } from 'child_process'; +import { promisify } from 'util'; +const execAsync = promisify(exec); +// --- Path Utilities --- +function convertToWindowsPath(p) { + if (p.startsWith('/mnt/')) + return p; + if (p.match(/^\/[a-zA-Z]\//) && process.platform === 'win32') { + const driveLetter = p.charAt(1).toUpperCase(); + const pathPart = p.slice(2).replace(/\//g, '\\'); + return `${driveLetter}:${pathPart}`; + } + if (p.match(/^[a-zA-Z]:/)) + return p.replace(/\//g, '\\'); + return p; +} +function normalizePath(p) { + p = p.trim().replace(/^["']|["']$/g, ''); + const isUnixPath = p.startsWith('/') && (p.match(/^\/mnt\/[a-z]\//i) || + (process.platform !== 'win32') || + (process.platform === 'win32' && !p.match(/^\/[a-zA-Z]\//))); + if (isUnixPath) + return p.replace(/\/+/g, '/').replace(/(? { + if (typeof dir !== 'string' || !dir) + return false; + if (dir.includes('\x00')) + return false; + let normalizedDir; + try { + normalizedDir = path.resolve(path.normalize(dir)); + } + catch { + return false; + } + if (!path.isAbsolute(normalizedDir)) + throw new Error('Allowed directories must be absolute paths after normalization'); + if (normalizedPath === normalizedDir) + return true; + if (normalizedDir === path.sep) + return normalizedPath.startsWith(path.sep); + if (path.sep === '\\' && normalizedDir.match(/^[A-Za-z]:\\?$/)) { + const dirDrive = normalizedDir.charAt(0).toLowerCase(); + const pathDrive = normalizedPath.charAt(0).toLowerCase(); + return pathDrive === dirDrive && normalizedPath.startsWith(normalizedDir.replace(/\\?$/, '\\')); + } + return normalizedPath.startsWith(normalizedDir + path.sep); + }); +} +class KnowledgeGraphManager { + memoryFilePath; + constructor(memoryFilePath) { + this.memoryFilePath = memoryFilePath; + } + async loadGraph() { + try { + const data = await fs.readFile(this.memoryFilePath, "utf-8"); + const lines = data.split("\n").filter(line => line.trim() !== ""); + return lines.reduce((graph, line) => { + const item = JSON.parse(line); + if (item.type === "entity") { + graph.entities.push({ + name: item.name, + entityType: item.entityType, + observations: item.observations + }); + } + if (item.type === "relation") { + graph.relations.push({ + from: item.from, + to: item.to, + relationType: item.relationType + }); + } + return graph; + }, { entities: [], relations: [] }); + } + catch (error) { + if (error instanceof Error && error.code === "ENOENT") { + return { entities: [], relations: [] }; + } + throw error; + } + } + async saveGraph(graph) { + const lines = [ + ...graph.entities.map(e => JSON.stringify({ + type: "entity", + name: e.name, + entityType: e.entityType, + observations: e.observations + })), + ...graph.relations.map(r => JSON.stringify({ + 
type: "relation", + from: r.from, + to: r.to, + relationType: r.relationType + })), + ]; + await fs.writeFile(this.memoryFilePath, lines.join("\n")); + } + async createEntities(entities) { + const graph = await this.loadGraph(); + const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name)); + graph.entities.push(...newEntities); + await this.saveGraph(graph); + return newEntities; + } + async createRelations(relations) { + const graph = await this.loadGraph(); + const newRelations = relations.filter(r => !graph.relations.some(existingRelation => existingRelation.from === r.from && + existingRelation.to === r.to && + existingRelation.relationType === r.relationType)); + graph.relations.push(...newRelations); + await this.saveGraph(graph); + return newRelations; + } + async addObservations(observations) { + const graph = await this.loadGraph(); + const results = observations.map(o => { + const entity = graph.entities.find(e => e.name === o.entityName); + if (!entity) { + throw new Error(`Entity with name ${o.entityName} not found`); + } + const newObservations = o.contents.filter(content => !entity.observations.includes(content)); + entity.observations.push(...newObservations); + return { entityName: o.entityName, addedObservations: newObservations }; + }); + await this.saveGraph(graph); + return results; + } + async deleteEntities(entityNames) { + const graph = await this.loadGraph(); + graph.entities = graph.entities.filter(e => !entityNames.includes(e.name)); + graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to)); + await this.saveGraph(graph); + } + async deleteObservations(deletions) { + const graph = await this.loadGraph(); + deletions.forEach(d => { + const entity = graph.entities.find(e => e.name === d.entityName); + if (entity) { + entity.observations = entity.observations.filter(o => !d.observations.includes(o)); + } + }); + await this.saveGraph(graph); + } + async deleteRelations(relations) { + const graph = await this.loadGraph(); + graph.relations = graph.relations.filter(r => !relations.some(delRelation => r.from === delRelation.from && + r.to === delRelation.to && + r.relationType === delRelation.relationType)); + await this.saveGraph(graph); + } + async readGraph() { + return this.loadGraph(); + } + async searchNodes(query) { + const graph = await this.loadGraph(); + const filteredEntities = graph.entities.filter(e => e.name.toLowerCase().includes(query.toLowerCase()) || + e.entityType.toLowerCase().includes(query.toLowerCase()) || + e.observations.some(o => o.toLowerCase().includes(query.toLowerCase()))); + const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); + const filteredRelations = graph.relations.filter(r => filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to)); + return { entities: filteredEntities, relations: filteredRelations }; + } + async openNodes(names) { + const graph = await this.loadGraph(); + const filteredEntities = graph.entities.filter(e => names.includes(e.name)); + const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); + const filteredRelations = graph.relations.filter(r => filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to)); + return { entities: filteredEntities, relations: filteredRelations }; + } +} +class SequentialThinkingServer { + thoughtHistory = []; + branches = {}; + disableThoughtLogging; + constructor() { + this.disableThoughtLogging = (process.env.DISABLE_THOUGHT_LOGGING || 
"").toLowerCase() === "true"; + } + formatThought(thoughtData) { + const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData; + let prefix = ''; + let context = ''; + if (isRevision) { + prefix = chalk.yellow('šŸ”„ Revision'); + context = ` (revising thought ${revisesThought})`; + } + else if (branchFromThought) { + prefix = chalk.green('🌿 Branch'); + context = ` (from thought ${branchFromThought}, ID: ${branchId})`; + } + else { + prefix = chalk.blue('šŸ’­ Thought'); + context = ''; + } + const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`; + const border = '─'.repeat(Math.max(header.length, thought.length) + 4); + return `\nā”Œ${border}┐\n│ ${header} │\nā”œ${border}┤\n│ ${thought.padEnd(border.length - 2)} │\nā””${border}ā”˜`; + } + processThought(input) { + if (input.thoughtNumber > input.totalThoughts) { + input.totalThoughts = input.thoughtNumber; + } + this.thoughtHistory.push(input); + if (input.branchFromThought && input.branchId) { + if (!this.branches[input.branchId]) { + this.branches[input.branchId] = []; + } + this.branches[input.branchId].push(input); + } + if (!this.disableThoughtLogging) { + console.error(this.formatThought(input)); + } + return { + thoughtNumber: input.thoughtNumber, + totalThoughts: input.totalThoughts, + nextThoughtNeeded: input.nextThoughtNeeded, + branches: Object.keys(this.branches), + thoughtHistoryLength: this.thoughtHistory.length + }; + } +} +// --- Filesystem Logic --- +let allowedDirectories = []; +function setAllowedDirectories(directories) { + allowedDirectories = [...directories]; +} +async function validatePath(requestedPath) { + const expandedPath = expandHome(requestedPath); + const absolute = path.isAbsolute(expandedPath) + ? 
path.resolve(expandedPath) + : path.resolve(process.cwd(), expandedPath); + const normalizedRequested = normalizePath(absolute); + if (!isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories)) { + throw new Error(`Access denied - path outside allowed directories: ${absolute}`); + } + try { + const realPath = await fs.realpath(absolute); + const normalizedReal = normalizePath(realPath); + if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) { + throw new Error(`Access denied - symlink target outside allowed directories: ${realPath}`); + } + return realPath; + } + catch (error) { + if (error.code === 'ENOENT') { + const parentDir = path.dirname(absolute); + try { + const realParentPath = await fs.realpath(parentDir); + const normalizedParent = normalizePath(realParentPath); + if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) { + throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath}`); + } + return absolute; + } + catch { + throw new Error(`Parent directory does not exist: ${parentDir}`); + } + } + throw error; + } +} +async function readFileContent(filePath) { + return await fs.readFile(filePath, 'utf-8'); +} +async function writeFileContent(filePath, content) { + try { + await fs.writeFile(filePath, content, { encoding: "utf-8", flag: 'wx' }); + } + catch (error) { + if (error.code === 'EEXIST') { + const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`; + try { + await fs.writeFile(tempPath, content, 'utf-8'); + await fs.rename(tempPath, filePath); + } + catch (renameError) { + try { + await fs.unlink(tempPath); + } + catch { } + throw renameError; + } + } + else { + throw error; + } + } +} +async function tailFile(filePath, lines) { + const content = await fs.readFile(filePath, 'utf-8'); + const allLines = content.split('\n'); + return allLines.slice(-lines).join('\n'); +} +async function headFile(filePath, lines) { + const content = await fs.readFile(filePath, 'utf-8'); + const allLines = content.split('\n'); + return allLines.slice(0, lines).join('\n'); +} +async function applyFileEdits(filePath, edits, dryRun) { + const content = await fs.readFile(filePath, 'utf-8'); + let newContent = content; + for (const edit of edits) { + if (!newContent.includes(edit.oldText)) { + throw new Error(`Could not find exact match for edit: ${edit.oldText}`); + } + newContent = newContent.replace(edit.oldText, edit.newText); + } + const diff = createTwoFilesPatch(filePath, filePath, content, newContent); + if (!dryRun) { + await fs.writeFile(filePath, newContent, 'utf-8'); + } + return diff; +} +async function readFileAsBase64Stream(filePath) { + return new Promise((resolve, reject) => { + const stream = createReadStream(filePath); + const chunks = []; + stream.on('data', (chunk) => { + chunks.push(chunk); + }); + stream.on('end', () => { + const finalBuffer = Buffer.concat(chunks); + resolve(finalBuffer.toString('base64')); + }); + stream.on('error', (err) => reject(err)); + }); +} +function formatSize(bytes) { + const units = ['B', 'KB', 'MB', 'GB', 'TB']; + let size = bytes; + let unitIndex = 0; + while (size >= 1024 && unitIndex < units.length - 1) { + size /= 1024; + unitIndex++; + } + return `${size.toFixed(1)} ${units[unitIndex]}`; +} +// --- Main Server Initialization --- +const server = new McpServer({ + name: "consolidated-mcp-server", + version: "1.0.0", +}); +// Initialize Managers +const memoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.jsonl'); 
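+// Knowledge-graph state is persisted as JSONL beside this script: one JSON object per line, tagged "entity" or "relation" (see loadGraph/saveGraph above).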
+const knowledgeGraphManager = new KnowledgeGraphManager(memoryPath); +const thinkingServer = new SequentialThinkingServer(); +// Register Memory Tools +const EntitySchema = z.object({ + name: z.string().describe("The name of the entity"), + entityType: z.string().describe("The type of the entity"), + observations: z.array(z.string()).describe("An array of observation contents associated with the entity") +}); +const RelationSchema = z.object({ + from: z.string().describe("The name of the entity where the relation starts"), + to: z.string().describe("The name of the entity where the relation ends"), + relationType: z.string().describe("The type of the relation") +}); +server.registerTool("create_entities", { inputSchema: z.object({ entities: z.array(EntitySchema) }) }, async (args) => { + const result = await knowledgeGraphManager.createEntities(args.entities); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); +server.registerTool("create_relations", { inputSchema: z.object({ relations: z.array(RelationSchema) }) }, async (args) => { + const result = await knowledgeGraphManager.createRelations(args.relations); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); +server.registerTool("add_observations", { + inputSchema: z.object({ + observations: z.array(z.object({ + entityName: z.string(), + contents: z.array(z.string()) + })) + }) +}, async (args) => { + const result = await knowledgeGraphManager.addObservations(args.observations); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); +server.registerTool("delete_entities", { inputSchema: z.object({ entityNames: z.array(z.string()) }) }, async (args) => { + await knowledgeGraphManager.deleteEntities(args.entityNames); + return { content: [{ type: "text", text: "Entities deleted successfully" }] }; +}); +server.registerTool("delete_observations", { + inputSchema: z.object({ + deletions: z.array(z.object({ + entityName: z.string(), + observations: z.array(z.string()) + })) + }) +}, async (args) => { + await knowledgeGraphManager.deleteObservations(args.deletions); + return { content: [{ type: "text", text: "Observations deleted successfully" }] }; +}); +server.registerTool("delete_relations", { inputSchema: z.object({ relations: z.array(RelationSchema) }) }, async (args) => { + await knowledgeGraphManager.deleteRelations(args.relations); + return { content: [{ type: "text", text: "Relations deleted successfully" }] }; +}); +server.registerTool("read_graph", { inputSchema: z.object({}) }, async () => { + const graph = await knowledgeGraphManager.readGraph(); + return { content: [{ type: "text", text: JSON.stringify(graph, null, 2) }] }; +}); +server.registerTool("search_nodes", { inputSchema: z.object({ query: z.string() }) }, async (args) => { + const graph = await knowledgeGraphManager.searchNodes(args.query); + return { content: [{ type: "text", text: JSON.stringify(graph, null, 2) }] }; +}); +server.registerTool("open_nodes", { inputSchema: z.object({ names: z.array(z.string()) }) }, async (args) => { + const graph = await knowledgeGraphManager.openNodes(args.names); + return { content: [{ type: "text", text: JSON.stringify(graph, null, 2) }] }; +}); +// Register Sequential Thinking Tool +server.registerTool("sequentialthinking", { + inputSchema: z.object({ + thought: z.string(), + nextThoughtNeeded: z.boolean(), + thoughtNumber: z.number().int().min(1), + totalThoughts: z.number().int().min(1), + isRevision: z.boolean().optional(), + 
revisesThought: z.number().int().min(1).optional(), + branchFromThought: z.number().int().min(1).optional(), + branchId: z.string().optional(), + needsMoreThoughts: z.boolean().optional() + }) +}, async (args) => { + const result = thinkingServer.processThought(args); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); +// Register Filesystem Tools +server.registerTool("read_text_file", { + inputSchema: z.object({ + path: z.string(), + tail: z.number().optional(), + head: z.number().optional() + }) +}, async (args) => { + const validPath = await validatePath(args.path); + let content; + if (args.tail) { + content = await tailFile(validPath, args.tail); + } + else if (args.head) { + content = await headFile(validPath, args.head); + } + else { + content = await readFileContent(validPath); + } + return { content: [{ type: "text", text: content }] }; +}); +server.registerTool("read_media_file", { inputSchema: z.object({ path: z.string() }) }, async (args) => { + const validPath = await validatePath(args.path); + const extension = path.extname(validPath).toLowerCase(); + const mimeTypes = { + ".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", + ".webp": "image/webp", ".bmp": "image/bmp", ".svg": "image/svg+xml", ".mp3": "audio/mpeg", + ".wav": "audio/wav", ".ogg": "audio/ogg", ".flac": "audio/flac", + }; + const mimeType = mimeTypes[extension] || "application/octet-stream"; + const data = await readFileAsBase64Stream(validPath); + return { + content: [{ + type: mimeType.startsWith("image/") ? "image" : mimeType.startsWith("audio/") ? "audio" : "blob", + data, + mimeType + }] + }; +}); +server.registerTool("read_multiple_files", { inputSchema: z.object({ paths: z.array(z.string()) }) }, async (args) => { + const results = await Promise.all(args.paths.map(async (p) => { + try { + const validPath = await validatePath(p); + const content = await readFileContent(validPath); + return `${p}:\n${content}\n`; + } + catch (e) { + return `${p}: Error - ${e}\n`; + } + })); + return { content: [{ type: "text", text: results.join("\n---\n") }] }; +}); +server.registerTool("write_file", { inputSchema: z.object({ path: z.string(), content: z.string() }) }, async (args) => { + const validPath = await validatePath(args.path); + await writeFileContent(validPath, args.content); + return { content: [{ type: "text", text: `Successfully wrote to ${args.path}` }] }; +}); +server.registerTool("edit_file", { + inputSchema: z.object({ + path: z.string(), + edits: z.array(z.object({ oldText: z.string(), newText: z.string() })), + dryRun: z.boolean().default(false) + }) +}, async (args) => { + const validPath = await validatePath(args.path); + const diff = await applyFileEdits(validPath, args.edits, args.dryRun); + return { content: [{ type: "text", text: diff }] }; +}); +server.registerTool("create_directory", { inputSchema: z.object({ path: z.string() }) }, async (args) => { + const validPath = await validatePath(args.path); + await fs.mkdir(validPath, { recursive: true }); + return { content: [{ type: "text", text: `Successfully created directory ${args.path}` }] }; +}); +server.registerTool("list_directory", { inputSchema: z.object({ path: z.string() }) }, async (args) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const formatted = entries + .map((entry) => `${entry.isDirectory() ? 
"[DIR]" : "[FILE]"} ${entry.name}`) + .join("\n"); + return { content: [{ type: "text", text: formatted }] }; +}); +server.registerTool("list_directory_with_sizes", { + inputSchema: z.object({ + path: z.string(), + sortBy: z.enum(['name', 'size']).optional().default('name') + }) +}, async (args) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const detailed = await Promise.all(entries.map(async (e) => { + const stats = await fs.stat(path.join(validPath, e.name)); + return { name: e.name, isDir: e.isDirectory(), size: stats.size }; + })); + if (args.sortBy === 'size') + detailed.sort((a, b) => b.size - a.size); + else + detailed.sort((a, b) => a.name.localeCompare(b.name)); + const formatted = detailed.map(e => `${e.isDir ? "[DIR]" : "[FILE]"} ${e.name.padEnd(30)} ${e.isDir ? "" : formatSize(e.size).padStart(10)}`).join("\n"); + return { content: [{ type: "text", text: formatted }] }; +}); +server.registerTool("directory_tree", { + inputSchema: z.object({ + path: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }) +}, async (args) => { + const rootPath = await validatePath(args.path); + async function buildTree(currentPath) { + const entries = await fs.readdir(currentPath, { withFileTypes: true }); + const result = []; + for (const entry of entries) { + const relPath = path.relative(rootPath, path.join(currentPath, entry.name)); + if (args.excludePatterns.some((p) => minimatch(relPath, p))) + continue; + const node = { name: entry.name, type: entry.isDirectory() ? 'directory' : 'file' }; + if (entry.isDirectory()) + node.children = await buildTree(path.join(currentPath, entry.name)); + result.push(node); + } + return result; + } + const tree = await buildTree(rootPath); + return { content: [{ type: "text", text: JSON.stringify(tree, null, 2) }] }; +}); +server.registerTool("move_file", { inputSchema: z.object({ source: z.string(), destination: z.string() }) }, async (args) => { + const validSource = await validatePath(args.source); + const validDest = await validatePath(args.destination); + await fs.rename(validSource, validDest); + return { content: [{ type: "text", text: `Successfully moved ${args.source} to ${args.destination}` }] }; +}); +server.registerTool("search_files", { + inputSchema: z.object({ + path: z.string(), + pattern: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }) +}, async (args) => { + const rootPath = await validatePath(args.path); + const results = []; + async function search(currentPath) { + const entries = await fs.readdir(currentPath, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(currentPath, entry.name); + const relPath = path.relative(rootPath, fullPath); + if (args.excludePatterns.some((p) => minimatch(relPath, p))) + continue; + if (minimatch(relPath, args.pattern)) + results.push(fullPath); + if (entry.isDirectory()) + await search(fullPath); + } + } + await search(rootPath); + return { content: [{ type: "text", text: results.join("\n") || "No matches found" }] }; +}); +server.registerTool("get_file_info", { inputSchema: z.object({ path: z.string() }) }, async (args) => { + const validPath = await validatePath(args.path); + const stats = await fs.stat(validPath); + const info = { + size: stats.size, + created: stats.birthtime, + modified: stats.mtime, + isDirectory: stats.isDirectory(), + isFile: stats.isFile(), + permissions: stats.mode.toString(8) + }; + return { content: [{ 
type: "text", text: JSON.stringify(info, null, 2) }] }; +}); +server.registerTool("list_allowed_directories", { inputSchema: z.object({}) }, async () => { + return { content: [{ type: "text", text: `Allowed directories:\n${allowedDirectories.join('\n')}` }] }; +}); +// --- Time Tools --- +server.registerTool("get_current_time", { inputSchema: z.object({ timezone: z.string() }) }, async (args) => { + const now = new Date(); + const timeStr = now.toLocaleString("en-US", { timeZone: args.timezone }); + return { content: [{ type: "text", text: `Current time in ${args.timezone}: ${timeStr}` }] }; +}); +// --- Fetch Tools --- +server.registerTool("fetch", { inputSchema: z.object({ url: z.string() }) }, async (args) => { + try { + const response = await fetch(args.url); + const text = await response.text(); + return { content: [{ type: "text", text: text.slice(0, 5000) }] }; + } + catch (error) { + return { content: [{ type: "text", text: `Error fetching ${args.url}: ${error}` }] }; + } +}); +// --- Git Tools --- +server.registerTool("git_status", { inputSchema: z.object({ repo_path: z.string() }) }, async (args) => { + try { + const { stdout } = await execAsync(`git -C "${args.repo_path}" status`); + return { content: [{ type: "text", text: stdout }] }; + } + catch (error) { + return { content: [{ type: "text", text: `Error running git status: ${error}` }] }; + } +}); +server.registerTool("git_log", { inputSchema: z.object({ repo_path: z.string(), max_count: z.number().optional().default(10) }) }, async (args) => { + try { + const { stdout } = await execAsync(`git -C "${args.repo_path}" log -n ${args.max_count}`); + return { content: [{ type: "text", text: stdout }] }; + } + catch (error) { + return { content: [{ type: "text", text: `Error running git log: ${error}` }] }; + } +}); +// --- Everything Tools --- +server.registerTool("echo", { inputSchema: z.object({ message: z.string() }) }, async (args) => { + return { content: [{ type: "text", text: `Echo: ${args.message}` }] }; +}); +async function main() { + const args = process.argv.slice(2); + const initialAllowedDirs = await Promise.all(args.map(async (dir) => { + const expanded = expandHome(dir); + const absolute = path.resolve(expanded); + try { + const resolved = await fs.realpath(absolute); + return normalizePath(resolved); + } + catch { + return normalizePath(absolute); + } + })); + setAllowedDirectories(initialAllowedDirs); + const transport = new StdioServerTransport(); + await server.connect(transport); + console.error("Consolidated MCP Server running on stdio"); +} +main().catch((error) => { + console.error("Fatal error:", error); + process.exit(1); +}); diff --git a/src/consolidated/index.ts b/src/consolidated/index.ts new file mode 100644 index 0000000000..097659dbc7 --- /dev/null +++ b/src/consolidated/index.ts @@ -0,0 +1,906 @@ +import process from "process"; +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import { + RootsListChangedNotificationSchema, + type Root, +} from "@modelcontextprotocol/sdk/types.js"; +import { z } from "zod"; +import fs from "fs/promises"; +import { createReadStream, Dirent } from "fs"; +import path from "path"; +import os from 'os'; +import { fileURLToPath } from 'url'; +import chalk from 'chalk'; +import { randomBytes } from 'crypto'; +import { createTwoFilesPatch } from 'diff'; +import { minimatch } from 'minimatch'; +import { exec } from 'child_process'; +import { promisify } from 'util'; + 
+const execAsync = promisify(exec); + +// --- Path Utilities --- +function convertToWindowsPath(p: string): string { + if (p.startsWith('/mnt/')) return p; + if (p.match(/^\/[a-zA-Z]\//) && process.platform === 'win32') { + const driveLetter = p.charAt(1).toUpperCase(); + const pathPart = p.slice(2).replace(/\//g, '\\'); + return `${driveLetter}:${pathPart}`; + } + if (p.match(/^[a-zA-Z]:/)) return p.replace(/\//g, '\\'); + return p; +} + +function normalizePath(p: string): string { + p = p.trim().replace(/^["']|["']$/g, ''); + const isUnixPath = p.startsWith('/') && ( + p.match(/^\/mnt\/[a-z]\//i) || + (process.platform !== 'win32') || + (process.platform === 'win32' && !p.match(/^\/[a-zA-Z]\//)) + ); + if (isUnixPath) return p.replace(/\/+/g, '/').replace(/(?<!^)\/+$/, ''); + return convertToWindowsPath(p); +} + +function expandHome(filepath: string): string { + if (filepath.startsWith('~/') || filepath === '~') { + return path.join(os.homedir(), filepath.slice(1)); + } + return filepath; +} + +function isPathWithinAllowedDirectories(normalizedPath: string, allowedDirs: string[]): boolean { + return allowedDirs.some((dir) => { + if (typeof dir !== 'string' || !dir) return false; + if (dir.includes('\x00')) return false; + let normalizedDir: string; + try { + normalizedDir = path.resolve(path.normalize(dir)); + } catch { + return false; + } + if (!path.isAbsolute(normalizedDir)) throw new Error('Allowed directories must be absolute paths after normalization'); + if (normalizedPath === normalizedDir) return true; + if (normalizedDir === path.sep) return normalizedPath.startsWith(path.sep); + if (path.sep === '\\' && normalizedDir.match(/^[A-Za-z]:\\?$/)) { + const dirDrive = normalizedDir.charAt(0).toLowerCase(); + const pathDrive = normalizedPath.charAt(0).toLowerCase(); + return pathDrive === dirDrive && normalizedPath.startsWith(normalizedDir.replace(/\\?$/, '\\')); + } + return normalizedPath.startsWith(normalizedDir + path.sep); + }); +} + +// --- Memory Server Logic --- +export interface Entity { + name: string; + entityType: string; + observations: string[]; +} + +export interface Relation { + from: string; + to: string; + relationType: string; +} + +export interface KnowledgeGraph { + entities: Entity[]; + relations: Relation[]; +} + +class KnowledgeGraphManager { + constructor(private memoryFilePath: string) {} + + private async loadGraph(): Promise<KnowledgeGraph> { + try { + const data = await fs.readFile(this.memoryFilePath, "utf-8"); + const lines = data.split("\n").filter(line => line.trim() !== ""); + return lines.reduce((graph: KnowledgeGraph, line) => { + const item = JSON.parse(line); + if (item.type === "entity") { + graph.entities.push({ + name: item.name, + entityType: item.entityType, + observations: item.observations + }); + } + if (item.type === "relation") { + graph.relations.push({ + from: item.from, + to: item.to, + relationType: item.relationType + }); + } + return graph; + }, { entities: [], relations: [] }); + } catch (error) { + if (error instanceof Error && (error as any).code === "ENOENT") { + return { entities: [], relations: [] }; + } + throw error; + } + } + + private async saveGraph(graph: KnowledgeGraph): Promise<void> { + const lines = [ + ...graph.entities.map(e => JSON.stringify({ + type: "entity", + name: e.name, + entityType: e.entityType, + observations: e.observations + })), + ...graph.relations.map(r => JSON.stringify({ + type: "relation", + from: r.from, + to: r.to, + relationType: r.relationType + })), + ]; + await fs.writeFile(this.memoryFilePath, lines.join("\n")); + } + + async createEntities(entities: Entity[]): Promise<Entity[]> { + const graph = await this.loadGraph(); + const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name)); + graph.entities.push(...newEntities); + await this.saveGraph(graph); + return newEntities; + } + + async createRelations(relations: Relation[]): Promise<Relation[]>
{ + const graph = await this.loadGraph(); + const newRelations = relations.filter(r => !graph.relations.some(existingRelation => + existingRelation.from === r.from && + existingRelation.to === r.to && + existingRelation.relationType === r.relationType + )); + graph.relations.push(...newRelations); + await this.saveGraph(graph); + return newRelations; + } + + async addObservations(observations: { entityName: string; contents: string[] }[]): Promise<{ entityName: string; addedObservations: string[] }[]> { + const graph = await this.loadGraph(); + const results = observations.map(o => { + const entity = graph.entities.find(e => e.name === o.entityName); + if (!entity) { + throw new Error(`Entity with name ${o.entityName} not found`); + } + const newObservations = o.contents.filter(content => !entity.observations.includes(content)); + entity.observations.push(...newObservations); + return { entityName: o.entityName, addedObservations: newObservations }; + }); + await this.saveGraph(graph); + return results; + } + + async deleteEntities(entityNames: string[]): Promise<void> { + const graph = await this.loadGraph(); + graph.entities = graph.entities.filter(e => !entityNames.includes(e.name)); + graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to)); + await this.saveGraph(graph); + } + + async deleteObservations(deletions: { entityName: string; observations: string[] }[]): Promise<void> { + const graph = await this.loadGraph(); + deletions.forEach(d => { + const entity = graph.entities.find(e => e.name === d.entityName); + if (entity) { + entity.observations = entity.observations.filter(o => !d.observations.includes(o)); + } + }); + await this.saveGraph(graph); + } + + async deleteRelations(relations: Relation[]): Promise<void> { + const graph = await this.loadGraph(); + graph.relations = graph.relations.filter(r => !relations.some(delRelation => + r.from === delRelation.from && + r.to === delRelation.to && + r.relationType === delRelation.relationType + )); + await this.saveGraph(graph); + } + + async readGraph(): Promise<KnowledgeGraph> { + return this.loadGraph(); + } + + async searchNodes(query: string): Promise<KnowledgeGraph> { + const graph = await this.loadGraph(); + const filteredEntities = graph.entities.filter(e => + e.name.toLowerCase().includes(query.toLowerCase()) || + e.entityType.toLowerCase().includes(query.toLowerCase()) || + e.observations.some(o => o.toLowerCase().includes(query.toLowerCase())) + ); + const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); + const filteredRelations = graph.relations.filter(r => + filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to) + ); + return { entities: filteredEntities, relations: filteredRelations }; + } + + async openNodes(names: string[]): Promise<KnowledgeGraph> { + const graph = await this.loadGraph(); + const filteredEntities = graph.entities.filter(e => names.includes(e.name)); + const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); + const filteredRelations = graph.relations.filter(r => + filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to) + ); + return { entities: filteredEntities, relations: filteredRelations }; + } +} + +// --- Sequential Thinking Logic --- +export interface ThoughtData { + thought: string; + thoughtNumber: number; + totalThoughts: number; + isRevision?: boolean; + revisesThought?: number; + branchFromThought?: number; + branchId?: string; + needsMoreThoughts?: boolean; + nextThoughtNeeded: boolean; +} + +class SequentialThinkingServer { + private thoughtHistory:
ThoughtData[] = []; + private branches: Record<string, ThoughtData[]> = {}; + private disableThoughtLogging: boolean; + + constructor() { + this.disableThoughtLogging = (process.env.DISABLE_THOUGHT_LOGGING || "").toLowerCase() === "true"; + } + + private formatThought(thoughtData: ThoughtData): string { + const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData; + let prefix = ''; + let context = ''; + if (isRevision) { + prefix = chalk.yellow('šŸ”„ Revision'); + context = ` (revising thought ${revisesThought})`; + } else if (branchFromThought) { + prefix = chalk.green('🌿 Branch'); + context = ` (from thought ${branchFromThought}, ID: ${branchId})`; + } else { + prefix = chalk.blue('šŸ’­ Thought'); + context = ''; + } + const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`; + const border = '─'.repeat(Math.max(header.length, thought.length) + 4); + return `\nā”Œ${border}┐\n│ ${header} │\nā”œ${border}┤\n│ ${thought.padEnd(border.length - 2)} │\nā””${border}ā”˜`; + } + + public processThought(input: ThoughtData) { + if (input.thoughtNumber > input.totalThoughts) { + input.totalThoughts = input.thoughtNumber; + } + this.thoughtHistory.push(input); + if (input.branchFromThought && input.branchId) { + if (!this.branches[input.branchId]) { + this.branches[input.branchId] = []; + } + this.branches[input.branchId].push(input); + } + if (!this.disableThoughtLogging) { + console.error(this.formatThought(input)); + } + return { + thoughtNumber: input.thoughtNumber, + totalThoughts: input.totalThoughts, + nextThoughtNeeded: input.nextThoughtNeeded, + branches: Object.keys(this.branches), + thoughtHistoryLength: this.thoughtHistory.length + }; + } +} + +// --- Filesystem Logic --- +let allowedDirectories: string[] = []; + +function setAllowedDirectories(directories: string[]): void { + allowedDirectories = [...directories]; +} + +async function validatePath(requestedPath: string): Promise<string> { + const expandedPath = expandHome(requestedPath); + const absolute = path.isAbsolute(expandedPath) + ?
path.resolve(expandedPath) + : path.resolve(process.cwd(), expandedPath); + const normalizedRequested = normalizePath(absolute); + if (!isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories)) { + throw new Error(`Access denied - path outside allowed directories: ${absolute}`); + } + try { + const realPath = await fs.realpath(absolute); + const normalizedReal = normalizePath(realPath); + if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) { + throw new Error(`Access denied - symlink target outside allowed directories: ${realPath}`); + } + return realPath; + } catch (error) { + if ((error as any).code === 'ENOENT') { + const parentDir = path.dirname(absolute); + try { + const realParentPath = await fs.realpath(parentDir); + const normalizedParent = normalizePath(realParentPath); + if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) { + throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath}`); + } + return absolute; + } catch { + throw new Error(`Parent directory does not exist: ${parentDir}`); + } + } + throw error; + } +} + +async function readFileContent(filePath: string): Promise<string> { + return await fs.readFile(filePath, 'utf-8'); +} + +async function writeFileContent(filePath: string, content: string): Promise<void> { + try { + await fs.writeFile(filePath, content, { encoding: "utf-8", flag: 'wx' }); + } catch (error) { + if ((error as any).code === 'EEXIST') { + const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`; + try { + await fs.writeFile(tempPath, content, 'utf-8'); + await fs.rename(tempPath, filePath); + } catch (renameError) { + try { await fs.unlink(tempPath); } catch {} + throw renameError; + } + } else { + throw error; + } + } +} + +function normalizeLineEndings(text: string): string { + return text.replace(/\r\n/g, '\n'); +} + +async function tailFile(filePath: string, numLines: number): Promise<string> { + const CHUNK_SIZE = 1024; + const stats = await fs.stat(filePath); + const fileSize = stats.size; + if (fileSize === 0) return ''; + const fileHandle = await fs.open(filePath, 'r'); + try { + const lines: string[] = []; + let position = fileSize; + let chunk = Buffer.alloc(CHUNK_SIZE); + let linesFound = 0; + let remainingText = ''; + while (position > 0 && linesFound < numLines) { + const size = Math.min(CHUNK_SIZE, position); + position -= size; + const { bytesRead } = await fileHandle.read(chunk, 0, size, position); + if (!bytesRead) break; + const readData = chunk.slice(0, bytesRead).toString('utf-8'); + const chunkText = readData + remainingText; + const chunkLines = normalizeLineEndings(chunkText).split('\n'); + if (position > 0) { + remainingText = chunkLines[0]; + chunkLines.shift(); + } + for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) { + lines.unshift(chunkLines[i]); + linesFound++; + } + } + return lines.join('\n'); + } finally { + await fileHandle.close(); + } +} + +async function headFile(filePath: string, numLines: number): Promise<string> { + const fileHandle = await fs.open(filePath, 'r'); + try { + const lines: string[] = []; + let buffer = ''; + let bytesRead = 0; + const chunk = Buffer.alloc(1024); + while (lines.length < numLines) { + const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead); + if (result.bytesRead === 0) break; + bytesRead += result.bytesRead; + buffer += chunk.slice(0, result.bytesRead).toString('utf-8'); + const newLineIndex = buffer.lastIndexOf('\n'); + if (newLineIndex !== -1) { + const
completeLines = buffer.slice(0, newLineIndex).split('\n'); + buffer = buffer.slice(newLineIndex + 1); + for (const line of completeLines) { + lines.push(line); + if (lines.length >= numLines) break; + } + } + } + if (buffer.length > 0 && lines.length < numLines) lines.push(buffer); + return lines.join('\n'); + } finally { + await fileHandle.close(); + } +} + +async function applyFileEdits(filePath: string, edits: { oldText: string; newText: string }[], dryRun: boolean): Promise<string> { + const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8')); + let modifiedContent = content; + for (const edit of edits) { + const normalizedOld = normalizeLineEndings(edit.oldText); + const normalizedNew = normalizeLineEndings(edit.newText); + if (modifiedContent.includes(normalizedOld)) { + modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew); + continue; + } + const oldLines = normalizedOld.split('\n'); + const contentLines = modifiedContent.split('\n'); + let matchFound = false; + for (let i = 0; i <= contentLines.length - oldLines.length; i++) { + const potentialMatch = contentLines.slice(i, i + oldLines.length); + const isMatch = oldLines.every((oldLine, j) => oldLine.trim() === potentialMatch[j].trim()); + if (isMatch) { + const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''; + const newLines = normalizedNew.split('\n').map((line, j) => { + if (j === 0) return originalIndent + line.trimStart(); + const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''; + const newIndent = line.match(/^\s*/)?.[0] || ''; + if (oldIndent && newIndent) { + const relativeIndent = newIndent.length - oldIndent.length; + return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart(); + } + return line; + }); + contentLines.splice(i, oldLines.length, ...newLines); + modifiedContent = contentLines.join('\n'); + matchFound = true; + break; + } + } + if (!matchFound) throw new Error(`Could not find exact match for edit:\n${edit.oldText}`); + } + const diff = createTwoFilesPatch(filePath, filePath, content, modifiedContent); + if (!dryRun) { + const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`; + await fs.writeFile(tempPath, modifiedContent, 'utf-8'); + await fs.rename(tempPath, filePath); + } + return diff; +} + +async function readFileAsBase64Stream(filePath: string): Promise<string> { + return new Promise((resolve, reject) => { + const stream = createReadStream(filePath); + const chunks: Buffer[] = []; + stream.on('data', (chunk) => chunks.push(chunk as Buffer)); + stream.on('end', () => resolve(Buffer.concat(chunks).toString('base64'))); + stream.on('error', (err) => reject(err)); + }); +} + +function formatSize(bytes: number): string { + const units = ['B', 'KB', 'MB', 'GB', 'TB']; + let size = bytes; + let unitIndex = 0; + while (size >= 1024 && unitIndex < units.length - 1) { + size /= 1024; + unitIndex++; + } + return `${size.toFixed(1)} ${units[unitIndex]}`; +} + +// --- Main Server Initialization --- +const server = new McpServer({ + name: "consolidated-mcp-server", + version: "1.0.0", +}); + +// Initialize Managers +const memoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.jsonl'); +const knowledgeGraphManager = new KnowledgeGraphManager(memoryPath); +const thinkingServer = new SequentialThinkingServer(); + +// Register Memory Tools +const EntitySchema = z.object({ + name: z.string().describe("The name of the entity"), + entityType: z.string().describe("The type of the entity"), + observations:
z.array(z.string()).describe("An array of observation contents associated with the entity") +}); + +const RelationSchema = z.object({ + from: z.string().describe("The name of the entity where the relation starts"), + to: z.string().describe("The name of the entity where the relation ends"), + relationType: z.string().describe("The type of the relation") +}); + +server.registerTool("create_entities", { inputSchema: z.object({ entities: z.array(EntitySchema) }) }, async (args: any) => { + const result = await knowledgeGraphManager.createEntities(args.entities); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); + +server.registerTool("create_relations", { inputSchema: z.object({ relations: z.array(RelationSchema) }) }, async (args: any) => { + const result = await knowledgeGraphManager.createRelations(args.relations); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); + +server.registerTool("add_observations", { + inputSchema: z.object({ + observations: z.array(z.object({ + entityName: z.string(), + contents: z.array(z.string()) + })) + }) +}, async (args: any) => { + const result = await knowledgeGraphManager.addObservations(args.observations); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); + +server.registerTool("delete_entities", { inputSchema: z.object({ entityNames: z.array(z.string()) }) }, async (args: any) => { + await knowledgeGraphManager.deleteEntities(args.entityNames); + return { content: [{ type: "text", text: "Entities deleted successfully" }] }; +}); + +server.registerTool("delete_observations", { + inputSchema: z.object({ + deletions: z.array(z.object({ + entityName: z.string(), + observations: z.array(z.string()) + })) + }) +}, async (args: any) => { + await knowledgeGraphManager.deleteObservations(args.deletions); + return { content: [{ type: "text", text: "Observations deleted successfully" }] }; +}); + +server.registerTool("delete_relations", { inputSchema: z.object({ relations: z.array(RelationSchema) }) }, async (args: any) => { + await knowledgeGraphManager.deleteRelations(args.relations); + return { content: [{ type: "text", text: "Relations deleted successfully" }] }; +}); + +server.registerTool("read_graph", { inputSchema: z.object({}) }, async () => { + const graph = await knowledgeGraphManager.readGraph(); + return { content: [{ type: "text", text: JSON.stringify(graph, null, 2) }] }; +}); + +server.registerTool("search_nodes", { inputSchema: z.object({ query: z.string() }) }, async (args: any) => { + const graph = await knowledgeGraphManager.searchNodes(args.query); + return { content: [{ type: "text", text: JSON.stringify(graph, null, 2) }] }; +}); + +server.registerTool("open_nodes", { inputSchema: z.object({ names: z.array(z.string()) }) }, async (args: any) => { + const graph = await knowledgeGraphManager.openNodes(args.names); + return { content: [{ type: "text", text: JSON.stringify(graph, null, 2) }] }; +}); + +// Register Sequential Thinking Tool +server.registerTool("sequentialthinking", { + inputSchema: z.object({ + thought: z.string(), + nextThoughtNeeded: z.boolean(), + thoughtNumber: z.number().int().min(1), + totalThoughts: z.number().int().min(1), + isRevision: z.boolean().optional(), + revisesThought: z.number().int().min(1).optional(), + branchFromThought: z.number().int().min(1).optional(), + branchId: z.string().optional(), + needsMoreThoughts: z.boolean().optional() + }) +}, async (args: any) => { + const result = 
thinkingServer.processThought(args); + return { content: [{ type: "text", text: JSON.stringify(result, null, 2) }] }; +}); + +// Register Filesystem Tools +server.registerTool("read_text_file", { + inputSchema: z.object({ + path: z.string(), + tail: z.number().optional(), + head: z.number().optional() + }) +}, async (args: any) => { + const validPath = await validatePath(args.path); + let content: string; + if (args.tail) { + content = await tailFile(validPath, args.tail); + } else if (args.head) { + content = await headFile(validPath, args.head); + } else { + content = await readFileContent(validPath); + } + return { content: [{ type: "text", text: content }] }; +}); + +server.registerTool("read_media_file", { inputSchema: z.object({ path: z.string() }) }, async (args: any) => { + const validPath = await validatePath(args.path); + const extension = path.extname(validPath).toLowerCase(); + const mimeTypes: Record<string, string> = { + ".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", + ".webp": "image/webp", ".bmp": "image/bmp", ".svg": "image/svg+xml", ".mp3": "audio/mpeg", + ".wav": "audio/wav", ".ogg": "audio/ogg", ".flac": "audio/flac", + }; + const mimeType = mimeTypes[extension] || "application/octet-stream"; + const data = await readFileAsBase64Stream(validPath); + return { + content: [{ + type: mimeType.startsWith("image/") ? "image" : mimeType.startsWith("audio/") ? "audio" : "blob", + data, + mimeType + } as any] + }; +}); + +server.registerTool("read_multiple_files", { inputSchema: z.object({ paths: z.array(z.string()) }) }, async (args: any) => { + const results = await Promise.all(args.paths.map(async (p: string) => { + try { + const validPath = await validatePath(p); + const content = await readFileContent(validPath); + return `${p}:\n${content}\n`; + } catch (e) { + return `${p}: Error - ${e}\n`; + } + })); + return { content: [{ type: "text", text: results.join("\n---\n") }] }; +}); + +server.registerTool("write_file", { inputSchema: z.object({ path: z.string(), content: z.string() }) }, async (args: any) => { + const validPath = await validatePath(args.path); + await writeFileContent(validPath, args.content); + return { content: [{ type: "text", text: `Successfully wrote to ${args.path}` }] }; +}); + +server.registerTool("edit_file", { + inputSchema: z.object({ + path: z.string(), + edits: z.array(z.object({ oldText: z.string(), newText: z.string() })), + dryRun: z.boolean().default(false) + }) +}, async (args: any) => { + const validPath = await validatePath(args.path); + const diff = await applyFileEdits(validPath, args.edits, args.dryRun); + return { content: [{ type: "text", text: diff }] }; +}); + +server.registerTool("create_directory", { inputSchema: z.object({ path: z.string() }) }, async (args: any) => { + const validPath = await validatePath(args.path); + await fs.mkdir(validPath, { recursive: true }); + return { content: [{ type: "text", text: `Successfully created directory ${args.path}` }] }; +}); + +server.registerTool("list_directory", { inputSchema: z.object({ path: z.string() }) }, async (args: any) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const formatted = entries + .map((entry: Dirent) => `${entry.isDirectory() ?
"[DIR]" : "[FILE]"} ${entry.name}`) + .join("\n"); + return { content: [{ type: "text", text: formatted }] }; +}); + +server.registerTool("list_directory_with_sizes", { + inputSchema: z.object({ + path: z.string(), + sortBy: z.enum(['name', 'size']).optional().default('name') + }) +}, async (args: any) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const detailed = await Promise.all(entries.map(async (e: Dirent) => { + const stats = await fs.stat(path.join(validPath, e.name)); + return { name: e.name, isDir: e.isDirectory(), size: stats.size }; + })); + if (args.sortBy === 'size') detailed.sort((a, b) => b.size - a.size); + else detailed.sort((a, b) => a.name.localeCompare(b.name)); + const formatted = detailed.map(e => `${e.isDir ? "[DIR]" : "[FILE]"} ${e.name.padEnd(30)} ${e.isDir ? "" : formatSize(e.size).padStart(10)}`).join("\n"); + return { content: [{ type: "text", text: formatted }] }; +}); + +server.registerTool("directory_tree", { + inputSchema: z.object({ + path: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }) +}, async (args: any) => { + const rootPath = await validatePath(args.path); + async function buildTree(currentPath: string): Promise { + const entries = await fs.readdir(currentPath, { withFileTypes: true }); + const result: any[] = []; + for (const entry of entries as Dirent[]) { + const relPath = path.relative(rootPath, path.join(currentPath, entry.name)); + if (args.excludePatterns.some((p: string) => minimatch(relPath, p, { dot: true }))) continue; + const node: any = { name: entry.name, type: entry.isDirectory() ? 'directory' : 'file' }; + if (entry.isDirectory()) node.children = await buildTree(path.join(currentPath, entry.name)); + result.push(node); + } + return result; + } + const tree = await buildTree(rootPath); + return { content: [{ type: "text", text: JSON.stringify(tree, null, 2) }] }; +}); + +server.registerTool("move_file", { inputSchema: z.object({ source: z.string(), destination: z.string() }) }, async (args: any) => { + const validSource = await validatePath(args.source); + const validDest = await validatePath(args.destination); + await fs.rename(validSource, validDest); + return { content: [{ type: "text", text: `Successfully moved ${args.source} to ${args.destination}` }] }; +}); + +server.registerTool("search_files", { + inputSchema: z.object({ + path: z.string(), + pattern: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }) +}, async (args: any) => { + const rootPath = await validatePath(args.path); + const results: string[] = []; + async function search(currentPath: string) { + const entries = await fs.readdir(currentPath, { withFileTypes: true }); + for (const entry of entries as Dirent[]) { + const fullPath = path.join(currentPath, entry.name); + const relPath = path.relative(rootPath, fullPath); + if (args.excludePatterns.some((p: string) => minimatch(relPath, p, { dot: true }))) continue; + if (minimatch(relPath, args.pattern, { dot: true })) results.push(fullPath); + if (entry.isDirectory()) await search(fullPath); + } + } + await search(rootPath); + return { content: [{ type: "text", text: results.join("\n") || "No matches found" }] }; +}); + +server.registerTool("get_file_info", { inputSchema: z.object({ path: z.string() }) }, async (args: any) => { + const validPath = await validatePath(args.path); + const stats = await fs.stat(validPath); + const info = { + size: stats.size, + created: 
stats.birthtime, + modified: stats.mtime, + isDirectory: stats.isDirectory(), + isFile: stats.isFile(), + permissions: stats.mode.toString(8).slice(-3) + }; + return { content: [{ type: "text", text: JSON.stringify(info, null, 2) }] }; +}); + +server.registerTool("list_allowed_directories", { inputSchema: z.object({}) }, async () => { + return { content: [{ type: "text", text: `Allowed directories:\n${allowedDirectories.join('\n')}` }] }; +}); + +// --- Time Tools --- +server.registerTool("get_current_time", { inputSchema: z.object({ timezone: z.string() }) }, async (args: any) => { + const now = new Date(); + const timeStr = now.toLocaleString("en-US", { timeZone: args.timezone }); + return { content: [{ type: "text", text: `Current time in ${args.timezone}: ${timeStr}` }] }; +}); + +// --- Fetch Tools --- +server.registerTool("fetch", { inputSchema: z.object({ url: z.string() }) }, async (args: any) => { + try { + const response = await fetch(args.url); + const text = await response.text(); + return { content: [{ type: "text", text: text.slice(0, 5000) }] }; + } catch (error) { + return { content: [{ type: "text", text: `Error fetching ${args.url}: ${error}` }] }; + } +}); + +// --- Git Tools --- +server.registerTool("git_status", { inputSchema: z.object({ repo_path: z.string() }) }, async (args: any) => { + try { + const { stdout } = await execAsync(`git -C "${args.repo_path}" status`); + return { content: [{ type: "text", text: stdout }] }; + } catch (error) { + return { content: [{ type: "text", text: `Error running git status: ${error}` }] }; + } +}); + +server.registerTool("git_log", { inputSchema: z.object({ repo_path: z.string(), max_count: z.number().optional().default(10) }) }, async (args: any) => { + try { + const { stdout } = await execAsync(`git -C "${args.repo_path}" log -n ${args.max_count}`); + return { content: [{ type: "text", text: stdout }] }; + } catch (error) { + return { content: [{ type: "text", text: `Error running git log: ${error}` }] }; + } +}); + +// --- Everything Tools --- +server.registerTool("echo", { inputSchema: z.object({ message: z.string() }) }, async (args: any) => { + return { content: [{ type: "text", text: `Echo: ${args.message}` }] }; +}); + +// --- Roots Protocol Support --- +async function updateAllowedDirectoriesFromRoots(requestedRoots: Root[]) { + const validatedRootDirs = await Promise.all(requestedRoots.map(async (root) => { + const rootPath = fileURLToPath(root.uri); + const expanded = expandHome(rootPath); + const absolute = path.resolve(expanded); + try { + const resolved = await fs.realpath(absolute); + return normalizePath(resolved); + } catch { + return normalizePath(absolute); + } + })); + if (validatedRootDirs.length > 0) { + allowedDirectories = [...validatedRootDirs]; + console.error(`Updated allowed directories from MCP roots: ${validatedRootDirs.length} valid directories`); + } +} + +server.server.setNotificationHandler(RootsListChangedNotificationSchema, async () => { + try { + const response = await server.server.listRoots(); + if (response && 'roots' in response) { + await updateAllowedDirectoriesFromRoots(response.roots); + } + } catch (error) { + console.error("Failed to request roots from client:", error); + } +}); + +server.server.oninitialized = async () => { + const clientCapabilities = server.server.getClientCapabilities(); + if (clientCapabilities?.roots) { + try { + const response = await server.server.listRoots(); + if (response && 'roots' in response) { + await updateAllowedDirectoriesFromRoots(response.roots); + 
} + } catch (error) { + console.error("Failed to request initial roots from client:", error); + } + } +}; + +async function main() { + const args = process.argv.slice(2); + const initialAllowedDirs = await Promise.all(args.map(async (dir) => { + const expanded = expandHome(dir); + const absolute = path.resolve(expanded); + try { + const resolved = await fs.realpath(absolute); + return normalizePath(resolved); + } catch { + return normalizePath(absolute); + } + })); + setAllowedDirectories(initialAllowedDirs); + + const transport = new StdioServerTransport(); + await server.connect(transport); + console.error("Consolidated MCP Server running on stdio"); +} + +main().catch((error) => { + console.error("Fatal error:", error); + process.exit(1); +}); diff --git a/src/consolidated/package.json b/src/consolidated/package.json new file mode 100644 index 0000000000..636cc913cf --- /dev/null +++ b/src/consolidated/package.json @@ -0,0 +1,36 @@ +{ + "name": "@modelcontextprotocol/server-consolidated", + "version": "1.0.0", + "description": "Consolidated MCP server combining multiple functionalities", + "type": "module", + "bin": { + "mcp-server-consolidated": "dist/index.js" + }, + "scripts": { + "build": "tsc && shx chmod +x dist/*.js", + "start": "node dist/index.js" + }, + "dependencies": { + "@modelcontextprotocol/sdk": "^1.25.2", + "diff": "^8.0.3", + "glob": "^10.5.0", + "minimatch": "^10.0.1", + "zod": "^3.25.0", + "zod-to-json-schema": "^3.23.5", + "chalk": "^5.3.0", + "yargs": "^17.7.2", + "cors": "^2.8.5", + "express": "^5.2.1", + "jszip": "^3.10.1" + }, + "devDependencies": { + "@types/node": "^22", + "@types/diff": "^5.0.9", + "@types/minimatch": "^5.1.2", + "@types/yargs": "^17.0.32", + "@types/cors": "^2.8.19", + "@types/express": "^5.0.6", + "shx": "^0.3.4", + "typescript": "^5.8.2" + } +} diff --git a/src/consolidated/tsconfig.json b/src/consolidated/tsconfig.json new file mode 100644 index 0000000000..d69877d79f --- /dev/null +++ b/src/consolidated/tsconfig.json @@ -0,0 +1,16 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "Node16", + "moduleResolution": "Node16", + "outDir": "./dist", + "rootDir": "./", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true + }, + "include": ["index.ts"], + "exclude": ["node_modules"] +} diff --git a/src/everything/index.js b/src/everything/index.js new file mode 100644 index 0000000000..b7c4ec0df9 --- /dev/null +++ b/src/everything/index.js @@ -0,0 +1,41 @@ +#!/usr/bin/env node +// Parse command line arguments first +const args = process.argv.slice(2); +const scriptName = args[0] || "stdio"; +async function run() { + try { + // Dynamically import only the requested module to prevent all modules from initializing + switch (scriptName) { + case "stdio": + // Import and run the default server + await import("./transports/stdio.js"); + break; + case "sse": + // Import and run the SSE server + await import("./transports/sse.js"); + break; + case "streamableHttp": + // Import and run the streamable HTTP server + await import("./transports/streamableHttp.js"); + break; + default: + console.error(`-`.repeat(53)); + console.error(` Everything Server Launcher`); + console.error(` Usage: node ./index.js [stdio|sse|streamableHttp]`); + console.error(` Default transport: stdio`); + console.error(`-`.repeat(53)); + console.error(`Unknown transport: ${scriptName}`); + console.log("Available transports:"); + console.log("- stdio"); + console.log("- sse"); + 
console.log("- streamableHttp"); + process.exit(1); + } + } + catch (error) { + console.error("Error running script:", error); + process.exit(1); + } +} +await run(); +export {}; diff --git a/src/everything/prompts/args.js b/src/everything/prompts/args.js new file mode 100644 index 0000000000..68719dab6d --- /dev/null +++ b/src/everything/prompts/args.js @@ -0,0 +1,34 @@ +import { z } from "zod"; +/** + * Register a prompt with arguments + * - Two arguments, one required and one optional + * - Combines argument values in the returned prompt + * + * @param server + */ +export const registerArgumentsPrompt = (server) => { + // Prompt arguments + const promptArgsSchema = { + city: z.string().describe("Name of the city"), + state: z.string().describe("Name of the state").optional(), + }; + // Register the prompt + server.registerPrompt("args-prompt", { + title: "Arguments Prompt", + description: "A prompt with two arguments, one required and one optional", + argsSchema: promptArgsSchema, + }, (args) => { + const location = `${args?.city}${args?.state ? `, ${args?.state}` : ""}`; + return { + messages: [ + { + role: "user", + content: { + type: "text", + text: `What's weather in ${location}?`, + }, + }, + ], + }; + }); +}; diff --git a/src/everything/prompts/completions.js b/src/everything/prompts/completions.js new file mode 100644 index 0000000000..e6068041e8 --- /dev/null +++ b/src/everything/prompts/completions.js @@ -0,0 +1,52 @@ +import { z } from "zod"; +import { completable } from "@modelcontextprotocol/sdk/server/completable.js"; +/** + * Register a prompt with completable arguments + * - Two required arguments, both with completion handlers + * - First argument value will be included in context for second argument + * - Allows second argument to depend on the first argument value + * + * @param server + */ +export const registerPromptWithCompletions = (server) => { + // Prompt arguments + const promptArgsSchema = { + department: completable(z.string().describe("Choose the department."), (value) => { + return ["Engineering", "Sales", "Marketing", "Support"].filter((d) => d.startsWith(value)); + }), + name: completable(z + .string() + .describe("Choose a team member to lead the selected department."), (value, context) => { + const department = context?.arguments?.["department"]; + if (department === "Engineering") { + return ["Alice", "Bob", "Charlie"].filter((n) => n.startsWith(value)); + } + else if (department === "Sales") { + return ["David", "Eve", "Frank"].filter((n) => n.startsWith(value)); + } + else if (department === "Marketing") { + return ["Grace", "Henry", "Iris"].filter((n) => n.startsWith(value)); + } + else if (department === "Support") { + return ["John", "Kim", "Lee"].filter((n) => n.startsWith(value)); + } + return []; + }), + }; + // Register the prompt + server.registerPrompt("completable-prompt", { + title: "Team Management", + description: "First argument choice narrows values for second argument.", + argsSchema: promptArgsSchema, + }, ({ department, name }) => ({ + messages: [ + { + role: "user", + content: { + type: "text", + text: `Please promote ${name} to the head of the ${department} team.`, + }, + }, + ], + })); +}; diff --git a/src/everything/prompts/index.js b/src/everything/prompts/index.js new file mode 100644 index 0000000000..988fa84053 --- /dev/null +++ b/src/everything/prompts/index.js @@ -0,0 +1,15 @@ +import { registerSimplePrompt } from "./simple.js"; +import { registerArgumentsPrompt } from "./args.js"; +import { registerPromptWithCompletions } 
from "./completions.js"; +import { registerEmbeddedResourcePrompt } from "./resource.js"; +/** + * Register the prompts with the MCP server. + * + * @param server + */ +export const registerPrompts = (server) => { + registerSimplePrompt(server); + registerArgumentsPrompt(server); + registerPromptWithCompletions(server); + registerEmbeddedResourcePrompt(server); +}; diff --git a/src/everything/prompts/resource.js b/src/everything/prompts/resource.js new file mode 100644 index 0000000000..6a5d759c77 --- /dev/null +++ b/src/everything/prompts/resource.js @@ -0,0 +1,60 @@ +import { resourceTypeCompleter, resourceIdForPromptCompleter, } from "../resources/templates.js"; +import { textResource, textResourceUri, blobResourceUri, blobResource, RESOURCE_TYPE_BLOB, RESOURCE_TYPE_TEXT, RESOURCE_TYPES, } from "../resources/templates.js"; +/** + * Register a prompt with an embedded resource reference + * - Takes a resource type and id + * - Returns the corresponding dynamically created resource + * + * @param server + */ +export const registerEmbeddedResourcePrompt = (server) => { + // Prompt arguments + const promptArgsSchema = { + resourceType: resourceTypeCompleter, + resourceId: resourceIdForPromptCompleter, + }; + // Register the prompt + server.registerPrompt("resource-prompt", { + title: "Resource Prompt", + description: "A prompt that includes an embedded resource reference", + argsSchema: promptArgsSchema, + }, (args) => { + // Validate resource type argument + const resourceType = args.resourceType; + if (!RESOURCE_TYPES.includes(resourceType)) { + throw new Error(`Invalid resourceType: ${args?.resourceType}. Must be ${RESOURCE_TYPE_TEXT} or ${RESOURCE_TYPE_BLOB}.`); + } + // Validate resourceId argument + const resourceId = Number(args?.resourceId); + if (!Number.isFinite(resourceId) || + !Number.isInteger(resourceId) || + resourceId < 1) { + throw new Error(`Invalid resourceId: ${args?.resourceId}. Must be a finite positive integer.`); + } + // Get resource based on the resource type + const uri = resourceType === RESOURCE_TYPE_TEXT + ? textResourceUri(resourceId) + : blobResourceUri(resourceId); + const resource = resourceType === RESOURCE_TYPE_TEXT + ? textResource(uri, resourceId) + : blobResource(uri, resourceId); + return { + messages: [ + { + role: "user", + content: { + type: "text", + text: `This prompt includes the ${resourceType} resource with id: ${resourceId}. 
Please analyze the following resource:`, + }, + }, + { + role: "user", + content: { + type: "resource", + resource: resource, + }, + }, + ], + }; + }); +}; diff --git a/src/everything/prompts/simple.js b/src/everything/prompts/simple.js new file mode 100644 index 0000000000..4d05f25dce --- /dev/null +++ b/src/everything/prompts/simple.js @@ -0,0 +1,23 @@ +/** + * Register a simple prompt with no arguments + * - Returns the fixed text of the prompt with no modifications + * + * @param server + */ +export const registerSimplePrompt = (server) => { + // Register the prompt + server.registerPrompt("simple-prompt", { + title: "Simple Prompt", + description: "A prompt with no arguments", + }, () => ({ + messages: [ + { + role: "user", + content: { + type: "text", + text: "This is a simple prompt without arguments.", + }, + }, + ], + })); +}; diff --git a/src/everything/resources/files.js b/src/everything/resources/files.js new file mode 100644 index 0000000000..8dabdc7ca4 --- /dev/null +++ b/src/everything/resources/files.js @@ -0,0 +1,83 @@ +import { dirname, join } from "path"; +import { fileURLToPath } from "url"; +import { readdirSync, readFileSync, statSync } from "fs"; +/** + * Register static file resources + * - Each file in src/everything/docs is exposed as an individual static resource + * - URIs follow the pattern: "demo://resource/static/document/{name}" + * - Markdown (.md) files are served as mime type "text/markdown" + * - Text (.txt) files are served as mime type "text/plain" + * - JSON (.json) files are served as mime type "application/json" + * + * @param server + */ +export const registerFileResources = (server) => { + // Read the entries in the docs directory + const __filename = fileURLToPath(import.meta.url); + const __dirname = dirname(__filename); + const docsDir = join(__dirname, "..", "docs"); + let entries = []; + try { + entries = readdirSync(docsDir); + } + catch (e) { + // If docs/ folder is missing or unreadable, just skip registration + return; + } + // Register each file as a static resource + for (const name of entries) { + // Only process files, not directories + const fullPath = join(docsDir, name); + try { + const st = statSync(fullPath); + if (!st.isFile()) + continue; + } + catch { + continue; + } + // Prepare file resource info + const uri = `demo://resource/static/document/${encodeURIComponent(name)}`; + const mimeType = getMimeType(name); + const description = `Static document file exposed from /docs: ${name}`; + // Register file resource + server.registerResource(name, uri, { mimeType, description }, async (uri) => { + const text = readFileSafe(fullPath); + return { + contents: [ + { + uri: uri.toString(), + mimeType, + text, + }, + ], + }; + }); + } +}; +/** + * Get the mimetype based on filename + * @param fileName + */ +function getMimeType(fileName) { + const lower = fileName.toLowerCase(); + if (lower.endsWith(".md") || lower.endsWith(".markdown")) + return "text/markdown"; + if (lower.endsWith(".txt")) + return "text/plain"; + if (lower.endsWith(".json")) + return "application/json"; + return "text/plain"; +} +/** + * Read a file or return an error message if it fails + * @param path + */ +function readFileSafe(path) { + try { + return readFileSync(path, "utf-8"); + } + catch (e) { + return `Error reading file: ${path}. 
${e}`; + } +} diff --git a/src/everything/resources/index.js b/src/everything/resources/index.js new file mode 100644 index 0000000000..e71968026b --- /dev/null +++ b/src/everything/resources/index.js @@ -0,0 +1,33 @@ +import { registerResourceTemplates } from "./templates.js"; +import { registerFileResources } from "./files.js"; +import { fileURLToPath } from "url"; +import { dirname, join } from "path"; +import { readFileSync } from "fs"; +/** + * Register the resources with the MCP server. + * @param server + */ +export const registerResources = (server) => { + registerResourceTemplates(server); + registerFileResources(server); +}; +/** + * Reads the server instructions from the corresponding markdown file. + * Attempts to load the content of the file located in the `docs` directory. + * If the file cannot be loaded, an error message is returned instead. + * + * @return {string} The content of the server instructions file, or an error message if reading fails. + */ +export function readInstructions() { + const __filename = fileURLToPath(import.meta.url); + const __dirname = dirname(__filename); + const filePath = join(__dirname, "..", "docs", "instructions.md"); + let instructions; + try { + instructions = readFileSync(filePath, "utf-8"); + } + catch (e) { + instructions = "Server instructions not loaded: " + e; + } + return instructions; +} diff --git a/src/everything/resources/session.js b/src/everything/resources/session.js new file mode 100644 index 0000000000..3a822f514e --- /dev/null +++ b/src/everything/resources/session.js @@ -0,0 +1,44 @@ +/** + * Generates a session-scoped resource URI string based on the provided resource name. + * + * @param {string} name - The name of the resource to create a URI for. + * @returns {string} The formatted session resource URI. + */ +export const getSessionResourceURI = (name) => { + return `demo://resource/session/${name}`; +}; +/** + * Registers a session-scoped resource with the provided server and returns a resource link. + * + * The registered resource is available during the life of the session only; it is not otherwise persisted. + * + * @param {McpServer} server - The server instance responsible for handling the resource registration. + * @param {Resource} resource - The resource object containing metadata such as URI, name, description, and mimeType. + * @param {"text"|"blob"} type + * @param payload + * @returns {ResourceLink} An object representing the resource link, with associated metadata. + */ +export const registerSessionResource = (server, resource, type, payload) => { + // Destructure resource + const { uri, name, mimeType, description, title, annotations, icons, _meta } = resource; + // Prepare the resource content to return + // See https://modelcontextprotocol.io/specification/2025-11-25/server/resources#resource-contents + const resourceContent = type === "text" + ? 
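+ // (per the resource-contents shape linked above: text contents carry a `text` + // field, while blob contents carry base64 data in `blob`)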
{ + uri: uri.toString(), + mimeType, + text: payload, + } + : { + uri: uri.toString(), + mimeType, + blob: payload, + }; + // Register file resource + server.registerResource(name, uri, { mimeType, description, title, annotations, icons, _meta }, async (uri) => { + return { + contents: [resourceContent], + }; + }); + return { type: "resource_link", ...resource }; +}; diff --git a/src/everything/resources/subscriptions.js b/src/everything/resources/subscriptions.js new file mode 100644 index 0000000000..892ea3c373 --- /dev/null +++ b/src/everything/resources/subscriptions.js @@ -0,0 +1,125 @@ +import { SubscribeRequestSchema, UnsubscribeRequestSchema, } from "@modelcontextprotocol/sdk/types.js"; +// Track subscriber session id lists by URI +const subscriptions = new Map(); +// Interval to send notifications to subscribers +const subsUpdateIntervals = new Map(); +/** + * Sets up the subscription and unsubscription handlers for the provided server. + * + * The function defines two request handlers: + * 1. A `Subscribe` handler that allows clients to subscribe to specific resource URIs. + * 2. An `Unsubscribe` handler that allows clients to unsubscribe from specific resource URIs. + * + * The `Subscribe` handler performs the following actions: + * - Extracts the URI and session ID from the request. + * - Logs a message acknowledging the subscription request. + * - Updates the internal tracking of subscribers for the given URI. + * + * The `Unsubscribe` handler performs the following actions: + * - Extracts the URI and session ID from the request. + * - Logs a message acknowledging the unsubscription request. + * - Removes the subscriber for the specified URI. + * + * @param {McpServer} server - The server instance to which subscription handlers will be attached. + */ +export const setSubscriptionHandlers = (server) => { + // Set the subscription handler + server.server.setRequestHandler(SubscribeRequestSchema, async (request, extra) => { + // Get the URI to subscribe to + const { uri } = request.params; + // Get the session id (can be undefined for stdio) + const sessionId = extra.sessionId; + // Acknowledge the subscribe request + await server.sendLoggingMessage({ + level: "info", + data: `Received Subscribe Resource request for URI: ${uri} ${sessionId ? `from session ${sessionId}` : ""}`, + }, sessionId); + // Get the subscribers for this URI + const subscribers = subscriptions.has(uri) + ? subscriptions.get(uri) + : new Set(); + subscribers.add(sessionId); + subscriptions.set(uri, subscribers); + return {}; + }); + // Set the unsubscription handler + server.server.setRequestHandler(UnsubscribeRequestSchema, async (request, extra) => { + // Get the URI to unsubscribe from + const { uri } = request.params; + // Get the session id (can be undefined for stdio) + const sessionId = extra.sessionId; + // Acknowledge the unsubscribe request + await server.sendLoggingMessage({ + level: "info", + data: `Received Unsubscribe Resource request: ${uri} ${sessionId ? `from session ${sessionId}` : ""}`, + }, sessionId); + // Remove the subscriber + if (subscriptions.has(uri)) { + const subscribers = subscriptions.get(uri); + if (subscribers.has(sessionId)) + subscribers.delete(sessionId); + } + return {}; + }); +}; +/** + * Sends simulated resource update notifications to the subscribed client. + * + * This function iterates through all resource URIs stored in the subscriptions + * and checks if the specified session ID is subscribed to them. If so, it sends + * a notification through the provided server. 
If the session ID is no longer valid + * (disconnected), it removes the session ID from the list of subscribers. + * + * @param {McpServer} server - The server instance used to send notifications. + * @param {string | undefined} sessionId - The session ID of the client to check for subscriptions. + * @returns {Promise} Resolves once all applicable notifications are sent. + */ +const sendSimulatedResourceUpdates = async (server, sessionId) => { + // Search all URIs for ones this client is subscribed to + for (const uri of subscriptions.keys()) { + const subscribers = subscriptions.get(uri); + // If this client is subscribed, send the notification + if (subscribers.has(sessionId)) { + try { + await server.server.notification({ + method: "notifications/resources/updated", + params: { uri }, + }); + } + catch { + subscribers.delete(sessionId); // subscriber has disconnected + } + } + } +}; +/** + * Starts the process of simulating resource updates and sending server notifications + * to the client for the resources they are subscribed to. If the update interval is + * already active, invoking this function will not start another interval. + * + * @param server + * @param sessionId + */ +export const beginSimulatedResourceUpdates = (server, sessionId) => { + if (!subsUpdateIntervals.has(sessionId)) { + // Send once immediately + sendSimulatedResourceUpdates(server, sessionId); + // Set the interval to send later resource update notifications to this client + subsUpdateIntervals.set(sessionId, setInterval(() => sendSimulatedResourceUpdates(server, sessionId), 5000)); + } +}; +/** + * Stops simulated resource updates for a given session. + * + * This function halts any active intervals associated with the provided session ID + * and removes the session's corresponding entries from resource management collections. + * Session ID can be undefined for stdio. + * + * @param {string} [sessionId] + */ +export const stopSimulatedResourceUpdates = (sessionId) => { + // Remove active intervals + if (subsUpdateIntervals.has(sessionId)) { + const subsUpdateInterval = subsUpdateIntervals.get(sessionId); + clearInterval(subsUpdateInterval); + subsUpdateIntervals.delete(sessionId); + } +}; diff --git a/src/everything/resources/templates.js b/src/everything/resources/templates.js new file mode 100644 index 0000000000..76b931dc59 --- /dev/null +++ b/src/everything/resources/templates.js @@ -0,0 +1,171 @@ +import { z } from "zod"; +import { ResourceTemplate, } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { completable } from "@modelcontextprotocol/sdk/server/completable.js"; +// Resource types +export const RESOURCE_TYPE_TEXT = "Text"; +export const RESOURCE_TYPE_BLOB = "Blob"; +export const RESOURCE_TYPES = [ + RESOURCE_TYPE_TEXT, + RESOURCE_TYPE_BLOB, +]; +/** + * A completer function for resource types. + * + * This variable provides functionality to perform autocompletion for the resource types based on user input. + * It uses a schema description to validate the input and filters through a predefined list of resource types + * to return suggestions that start with the given input. + * + * The input value is expected to be a string representing the type of resource to fetch. + * The completion logic matches the input against available resource types. + */ +export const resourceTypeCompleter = completable(z.string().describe("Type of resource to fetch"), (value) => { + return RESOURCE_TYPES.filter((t) => t.startsWith(value)); +}); +/** + * A completer function for resource IDs as strings. 
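+ * (For example, the input "3" completes to ["3"], while "abc" yields [].)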
+ * + * The `resourceIdCompleter` accepts a string input representing the ID of a text resource + * and validates whether the provided value corresponds to an integer resource ID. + * + * NOTE: Currently, prompt arguments can only be strings since type is not a field of `PromptArgument`. + * Consequently, we must define it as a string and convert the argument to number before using it + * https://modelcontextprotocol.io/specification/2025-11-25/schema#promptargument + * + * If the value is a valid integer, it returns the value within an array. + * Otherwise, it returns an empty array. + * + * The input string is first transformed into a number and checked to ensure it is an integer. + * This helps validate and suggest appropriate resource IDs. + */ +export const resourceIdForPromptCompleter = completable(z.string().describe("ID of the text resource to fetch"), (value) => { + const resourceId = Number(value); + return Number.isInteger(resourceId) && resourceId > 0 ? [value] : []; +}); +/** + * A callback function that acts as a completer for resource ID values, validating and returning + * the input value as part of a resource template. + * + * @typedef {CompleteResourceTemplateCallback} + * @param {string} value - The input string value to be evaluated as a resource ID. + * @returns {string[]} Returns an array containing the input value if it represents a positive + * integer resource ID, otherwise returns an empty array. + */ +export const resourceIdForResourceTemplateCompleter = (value) => { + const resourceId = Number(value); + return Number.isInteger(resourceId) && resourceId > 0 ? [value] : []; +}; +const uriBase = "demo://resource/dynamic"; +const textUriBase = `${uriBase}/text`; +const blobUriBase = `${uriBase}/blob`; +const textUriTemplate = `${textUriBase}/{resourceId}`; +const blobUriTemplate = `${blobUriBase}/{resourceId}`; +/** + * Create a dynamic text resource + * - Exposed for use by embedded resource prompt example + * @param uri + * @param resourceId + */ +export const textResource = (uri, resourceId) => { + const timestamp = new Date().toLocaleTimeString(); + return { + uri: uri.toString(), + mimeType: "text/plain", + text: `Resource ${resourceId}: This is a plaintext resource created at ${timestamp}`, + }; +}; +/** + * Create a dynamic blob resource + * - Exposed for use by embedded resource prompt example + * @param uri + * @param resourceId + */ +export const blobResource = (uri, resourceId) => { + const timestamp = new Date().toLocaleTimeString(); + const resourceText = Buffer.from(`Resource ${resourceId}: This is a base64 blob created at ${timestamp}`).toString("base64"); + return { + uri: uri.toString(), + mimeType: "application/octet-stream", + blob: resourceText, + }; +}; +/** + * Create a dynamic text resource URI + * - Exposed for use by embedded resource prompt example + * @param resourceId + */ +export const textResourceUri = (resourceId) => new URL(`${textUriBase}/${resourceId}`); +/** + * Create a dynamic blob resource URI + * - Exposed for use by embedded resource prompt example + * @param resourceId + */ +export const blobResourceUri = (resourceId) => new URL(`${blobUriBase}/${resourceId}`); +/** + * Parses the resource identifier from the provided URI and validates it + * against the given variables. Throws an error if the URI corresponds + * to an unknown resource or if the resource identifier is invalid. + * + * @param {URL} uri - The URI of the resource to be parsed. 
+ * @param {Record} variables - A record containing context-specific variables that include the resourceId. + * @returns {number} The parsed and validated resource identifier as an integer. + * @throws {Error} Throws an error if the URI matches neither supported base URI or if the resourceId is invalid. + */ +const parseResourceId = (uri, variables) => { + const uriError = `Unknown resource: ${uri.toString()}`; + if (!uri.toString().startsWith(textUriBase) && + !uri.toString().startsWith(blobUriBase)) { + throw new Error(uriError); + } + else { + const idxStr = String(variables.resourceId ?? ""); + const idx = Number(idxStr); + if (Number.isFinite(idx) && Number.isInteger(idx) && idx > 0) { + return idx; + } + else { + throw new Error(uriError); + } + } +}; +/** + * Register resource templates with the MCP server. + * - Text and blob resources, dynamically generated from the URI {resourceId} variable + * - Any finite positive integer is acceptable for the resourceId variable + * - List resources method will not return these resources + * - These are only accessible via template URIs + * - Both blob and text resources: + * - have content that is dynamically generated, including a timestamp + * - have different template URIs + * - Blob: "demo://resource/dynamic/blob/{resourceId}" + * - Text: "demo://resource/dynamic/text/{resourceId}" + * + * @param server + */ +export const registerResourceTemplates = (server) => { + // Register the text resource template + server.registerResource("Dynamic Text Resource", new ResourceTemplate(textUriTemplate, { + list: undefined, + complete: { resourceId: resourceIdForResourceTemplateCompleter }, + }), { + mimeType: "text/plain", + description: "Plaintext dynamic resource fabricated from the {resourceId} variable, which must be an integer.", + }, async (uri, variables) => { + const resourceId = parseResourceId(uri, variables); + return { + contents: [textResource(uri, resourceId)], + }; + }); + // Register the blob resource template + server.registerResource("Dynamic Blob Resource", new ResourceTemplate(blobUriTemplate, { + list: undefined, + complete: { resourceId: resourceIdForResourceTemplateCompleter }, + }), { + mimeType: "application/octet-stream", + description: "Binary (base64) dynamic resource fabricated from the {resourceId} variable, which must be an integer.", + }, async (uri, variables) => { + const resourceId = parseResourceId(uri, variables); + return { + contents: [blobResource(uri, resourceId)], + }; + }); +}; diff --git a/src/everything/server/index.js b/src/everything/server/index.js new file mode 100644 index 0000000000..cf59e5c282 --- /dev/null +++ b/src/everything/server/index.js @@ -0,0 +1,93 @@ +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { InMemoryTaskStore, InMemoryTaskMessageQueue, } from "@modelcontextprotocol/sdk/experimental/tasks"; +import { setSubscriptionHandlers, stopSimulatedResourceUpdates, } from "../resources/subscriptions.js"; +import { registerConditionalTools, registerTools } from "../tools/index.js"; +import { registerResources, readInstructions } from "../resources/index.js"; +import { registerPrompts } from "../prompts/index.js"; +import { stopSimulatedLogging } from "./logging.js"; +import { syncRoots } from "./roots.js"; +/** + * Server Factory + * + * This function initializes a `McpServer` with specific capabilities and instructions, + * registers tools, resources, and prompts, and configures resource subscription handlers. 
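+ * + * @example + * // hypothetical hookup; transport construction is outside this factory + * const { server, cleanup } = createServer(); + * await server.connect(transport); + * // ...and when the session ends: + * cleanup(sessionId);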
+ * + * @returns {ServerFactoryResponse} An object containing the server instance, and a `cleanup` + * function for handling server-side cleanup when a session ends. + * + * Properties of the returned object: + * - `server` {Object}: The initialized server instance. + * - `cleanup` {Function}: Function to perform cleanup operations for a closing session. + */ +export const createServer = () => { + // Read the server instructions + const instructions = readInstructions(); + // Create task store and message queue for task support + const taskStore = new InMemoryTaskStore(); + const taskMessageQueue = new InMemoryTaskMessageQueue(); + let initializeTimeout = null; + // Create the server + const server = new McpServer({ + name: "mcp-servers/everything", + title: "Everything Reference Server", + version: "2.0.0", + }, { + capabilities: { + tools: { + listChanged: true, + }, + prompts: { + listChanged: true, + }, + resources: { + subscribe: true, + listChanged: true, + }, + logging: {}, + tasks: { + list: {}, + cancel: {}, + requests: { + tools: { + call: {}, + }, + }, + }, + }, + instructions, + taskStore, + taskMessageQueue, + }); + // Register the tools + registerTools(server); + // Register the resources + registerResources(server); + // Register the prompts + registerPrompts(server); + // Set resource subscription handlers + setSubscriptionHandlers(server); + // Perform post-initialization operations + server.server.oninitialized = async () => { + // Register conditional tools now that client capabilities are known. + // This finishes before the `notifications/initialized` handler finishes. + registerConditionalTools(server); + // Sync roots if the client supports them. + // This is delayed until after the `notifications/initialized` handler finishes, + // otherwise, the request gets lost. + const sessionId = server.server.transport?.sessionId; + initializeTimeout = setTimeout(() => syncRoots(server, sessionId), 350); + }; + // Return the ServerFactoryResponse + return { + server, + cleanup: (sessionId) => { + // Stop any simulated logging or resource updates that may have been initiated. + stopSimulatedLogging(sessionId); + stopSimulatedResourceUpdates(sessionId); + // Clean up task store timers + taskStore.cleanup(); + if (initializeTimeout) + clearTimeout(initializeTimeout); + }, + }; +}; diff --git a/src/everything/server/logging.js b/src/everything/server/logging.js new file mode 100644 index 0000000000..611a98f979 --- /dev/null +++ b/src/everything/server/logging.js @@ -0,0 +1,64 @@ +// Map session ID to the interval for sending logging messages to the client +const logsUpdateIntervals = new Map(); +/** + * Initiates a simulated logging process by sending random log messages to the client at a + * fixed interval. Each log message contains a random logging level and optional session ID. + * + * @param {McpServer} server - The server instance responsible for handling the logging messages. + * @param {string | undefined} sessionId - An optional identifier for the session. If provided, + * the session ID will be appended to log messages. + */ +export const beginSimulatedLogging = (server, sessionId) => { + const maybeAppendSessionId = sessionId ? 
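+    // (stdio transports have no session id, so no suffix is appended for them)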
` - SessionId ${sessionId}` : ""; + const messages = [ + { level: "debug", data: `Debug-level message${maybeAppendSessionId}` }, + { level: "info", data: `Info-level message${maybeAppendSessionId}` }, + { level: "notice", data: `Notice-level message${maybeAppendSessionId}` }, + { + level: "warning", + data: `Warning-level message${maybeAppendSessionId}`, + }, + { level: "error", data: `Error-level message${maybeAppendSessionId}` }, + { + level: "critical", + data: `Critical-level message${maybeAppendSessionId}`, + }, + { level: "alert", data: `Alert-level message${maybeAppendSessionId}` }, + { + level: "emergency", + data: `Emergency-level message${maybeAppendSessionId}`, + }, + ]; + /** + * Send a simulated logging message to the client + */ + const sendSimulatedLoggingMessage = async (sessionId) => { + // By using the `sendLoggingMessage` function to send the message, we + // ensure that the client's chosen logging level will be respected + await server.sendLoggingMessage(messages[Math.floor(Math.random() * messages.length)], sessionId); + }; + // Set the interval to send later logging messages to this client + if (!logsUpdateIntervals.has(sessionId)) { + // Send once immediately + sendSimulatedLoggingMessage(sessionId); + // Send a randomly-leveled log message every 5 seconds + logsUpdateIntervals.set(sessionId, setInterval(() => sendSimulatedLoggingMessage(sessionId), 5000)); + } +}; +/** + * Stops the simulated logging process for a given session. + * + * This function halts the periodic logging updates associated with the specified + * session ID by clearing the interval and removing the session's tracking + * reference. Session ID can be undefined for stdio. + * + * @param {string} [sessionId] - The optional unique identifier of the session. + */ +export const stopSimulatedLogging = (sessionId) => { + // Remove active intervals + if (logsUpdateIntervals.has(sessionId)) { + const logsUpdateInterval = logsUpdateIntervals.get(sessionId); + clearInterval(logsUpdateInterval); + logsUpdateIntervals.delete(sessionId); + } +}; diff --git a/src/everything/server/roots.js b/src/everything/server/roots.js new file mode 100644 index 0000000000..f73d40356c --- /dev/null +++ b/src/everything/server/roots.js @@ -0,0 +1,65 @@ +import { RootsListChangedNotificationSchema, } from "@modelcontextprotocol/sdk/types.js"; +// Track roots by session id +export const roots = new Map(); +/** + * Get the latest client roots list for the session. + * + * - Request and cache the roots list for the session if it has not been fetched before. + * - Return the cached roots list for the session if it exists. + * + * When requesting the roots list for a session, it also sets up a `roots/list_changed` + * notification handler. This ensures that updates are automatically fetched and handled + * in real-time. + * + * This function is idempotent. It should only request roots from the client once per session, + * returning the cached version thereafter. + * + * @param {McpServer} server - An instance of the MCP server used to communicate with the client. + * @param {string} [sessionId] - An optional session id used to associate the roots list with a specific client session. + * + * @throws {Error} In case of a failure to request the roots from the client, an error log message is sent. 
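+ * + * @example + * // e.g. from a tool handler; sessionId may be undefined on stdio + * const currentRoots = await syncRoots(server, extra.sessionId);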
+ */ +export const syncRoots = async (server, sessionId) => { + const clientCapabilities = server.server.getClientCapabilities() || {}; + const clientSupportsRoots = clientCapabilities?.roots !== undefined; + // Fetch the roots list for this client + if (clientSupportsRoots) { + // Function to request the updated roots list from the client + const requestRoots = async () => { + try { + // Request the updated roots list from the client + const response = await server.server.listRoots(); + if (response && "roots" in response) { + // Store the roots list for this client + roots.set(sessionId, response.roots); + // Notify the client of roots received + await server.sendLoggingMessage({ + level: "info", + logger: "everything-server", + data: `Roots updated: ${response?.roots?.length} root(s) received from client`, + }, sessionId); + } + else { + await server.sendLoggingMessage({ + level: "info", + logger: "everything-server", + data: "Client returned no roots set", + }, sessionId); + } + } + catch (error) { + console.error(`Failed to request roots from client ${sessionId}: ${error instanceof Error ? error.message : String(error)}`); + } + }; + // If the roots have not been synced for this client, + // set notification handler and request initial roots + if (!roots.has(sessionId)) { + // Set the list changed notification handler + server.server.setNotificationHandler(RootsListChangedNotificationSchema, requestRoots); + // Request the initial roots list immediately + await requestRoots(); + } + // Return the roots list for this client + return roots.get(sessionId); + } +}; diff --git a/src/everything/tools/echo.js b/src/everything/tools/echo.js new file mode 100644 index 0000000000..1017ed4211 --- /dev/null +++ b/src/everything/tools/echo.js @@ -0,0 +1,29 @@ +import { z } from "zod"; +// Tool input schema +export const EchoSchema = z.object({ + message: z.string().describe("Message to echo"), +}); +// Tool configuration +const name = "echo"; +const config = { + title: "Echo Tool", + description: "Echoes back the input string", + inputSchema: EchoSchema, +}; +/** + * Registers the 'echo' tool. + * + * The registered tool validates input arguments using the EchoSchema and + * returns a response that echoes the message provided in the arguments. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. 
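+ * + * @example + * // a tools/call with { message: "hello" } yields (shape illustrative): + * // { content: [{ type: "text", text: "Echo: hello" }] }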
+ * @returns {void} + */ +export const registerEchoTool = (server) => { + server.registerTool(name, config, async (args) => { + const validatedArgs = EchoSchema.parse(args); + return { + content: [{ type: "text", text: `Echo: ${validatedArgs.message}` }], + }; + }); +}; diff --git a/src/everything/tools/get-annotated-message.js b/src/everything/tools/get-annotated-message.js new file mode 100644 index 0000000000..1c06b66837 --- /dev/null +++ b/src/everything/tools/get-annotated-message.js @@ -0,0 +1,81 @@ +import { z } from "zod"; +import { MCP_TINY_IMAGE } from "./get-tiny-image.js"; +// Tool input schema +const GetAnnotatedMessageSchema = z.object({ + messageType: z + .enum(["error", "success", "debug"]) + .describe("Type of message to demonstrate different annotation patterns"), + includeImage: z + .boolean() + .default(false) + .describe("Whether to include an example image"), +}); +// Tool configuration +const name = "get-annotated-message"; +const config = { + title: "Get Annotated Message Tool", + description: "Demonstrates how annotations can be used to provide metadata about content.", + inputSchema: GetAnnotatedMessageSchema, +}; +/** + * Registers the 'get-annotated-message' tool. + * + * The registered tool generates and sends messages with specific types, such as error, + * success, or debug, carrying associated annotations like priority level and intended + * audience. + * + * The response will have annotations and optionally contain an annotated image. + * + * @function + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerGetAnnotatedMessageTool = (server) => { + server.registerTool(name, config, async (args) => { + const { messageType, includeImage } = GetAnnotatedMessageSchema.parse(args); + const content = []; + // Main message with different priorities/audiences based on type + if (messageType === "error") { + content.push({ + type: "text", + text: "Error: Operation failed", + annotations: { + priority: 1.0, // Errors are highest priority + audience: ["user", "assistant"], // Both need to know about errors + }, + }); + } + else if (messageType === "success") { + content.push({ + type: "text", + text: "Operation completed successfully", + annotations: { + priority: 0.7, // Success messages are important but not critical + audience: ["user"], // Success mainly for user consumption + }, + }); + } + else if (messageType === "debug") { + content.push({ + type: "text", + text: "Debug: Cache hit ratio 0.95, latency 150ms", + annotations: { + priority: 0.3, // Debug info is low priority + audience: ["assistant"], // Technical details for assistant + }, + }); + } + // Optional image with its own annotations + if (includeImage) { + content.push({ + type: "image", + data: MCP_TINY_IMAGE, + mimeType: "image/png", + annotations: { + priority: 0.5, + audience: ["user"], // Images primarily for user visualization + }, + }); + } + return { content }; + }); +}; diff --git a/src/everything/tools/get-env.js b/src/everything/tools/get-env.js new file mode 100644 index 0000000000..1ae47281a4 --- /dev/null +++ b/src/everything/tools/get-env.js @@ -0,0 +1,28 @@ +// Tool configuration +const name = "get-env"; +const config = { + title: "Print Environment Tool", + description: "Returns all environment variables, helpful for debugging MCP server configuration", + inputSchema: {}, +}; +/** + * Registers the 'get-env' tool. 
+ * + * The registered tool retrieves and returns the environment variables + * of the current process as a JSON-formatted string encapsulated in a text response. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + * @returns {void} + */ +export const registerGetEnvTool = (server) => { + server.registerTool(name, config, async (args) => { + return { + content: [ + { + type: "text", + text: JSON.stringify(process.env, null, 2), + }, + ], + }; + }); +}; diff --git a/src/everything/tools/get-resource-links.js b/src/everything/tools/get-resource-links.js new file mode 100644 index 0000000000..7e8a8970a2 --- /dev/null +++ b/src/everything/tools/get-resource-links.js @@ -0,0 +1,62 @@ +import { z } from "zod"; +import { textResource, textResourceUri, blobResourceUri, blobResource, } from "../resources/templates.js"; +// Tool input schema +const GetResourceLinksSchema = z.object({ + count: z + .number() + .min(1) + .max(10) + .default(3) + .describe("Number of resource links to return (1-10)"), +}); +// Tool configuration +const name = "get-resource-links"; +const config = { + title: "Get Resource Links Tool", + description: "Returns up to ten resource links that reference different types of resources", + inputSchema: GetResourceLinksSchema, +}; +/** + * Registers the 'get-resource-links' tool. + * + * The registered tool retrieves a specified number of resource links and their metadata. + * Resource links are dynamically generated as either text or binary blob resources, + * based on their ID being even or odd. + * + * The response contains a "text" introductory block and multiple "resource_link" blocks. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerGetResourceLinksTool = (server) => { + server.registerTool(name, config, async (args) => { + const { count } = GetResourceLinksSchema.parse(args); + // Add intro text content block + const content = []; + content.push({ + type: "text", + text: `Here are ${count} resource links to resources available in this server:`, + }); + // Create resource link content blocks + for (let resourceId = 1; resourceId <= count; resourceId++) { + // Get resource uri for text or blob resource based on even/odd resourceId + const isEven = resourceId % 2 === 0; + const uri = isEven + ? textResourceUri(resourceId) + : blobResourceUri(resourceId); + // Get resource based on the resource type + const resource = isEven + ? textResource(uri, resourceId) + : blobResource(uri, resourceId); + content.push({ + type: "resource_link", + uri: resource.uri, + name: `${isEven ? "Text" : "Blob"} Resource ${resourceId}`, + description: `Resource ${resourceId}: ${resource.mimeType === "text/plain" + ? 
"plaintext resource" + : "binary blob resource"}`, + mimeType: resource.mimeType, + }); + } + return { content }; + }); +}; diff --git a/src/everything/tools/get-resource-reference.js b/src/everything/tools/get-resource-reference.js new file mode 100644 index 0000000000..f5d42e442a --- /dev/null +++ b/src/everything/tools/get-resource-reference.js @@ -0,0 +1,74 @@ +import { z } from "zod"; +import { textResource, textResourceUri, blobResourceUri, blobResource, RESOURCE_TYPE_BLOB, RESOURCE_TYPE_TEXT, RESOURCE_TYPES, } from "../resources/templates.js"; +// Tool input schema +const GetResourceReferenceSchema = z.object({ + resourceType: z + .enum([RESOURCE_TYPE_TEXT, RESOURCE_TYPE_BLOB]) + .default(RESOURCE_TYPE_TEXT), + resourceId: z + .number() + .default(1) + .describe("ID of the text resource to fetch"), +}); +// Tool configuration +const name = "get-resource-reference"; +const config = { + title: "Get Resource Reference Tool", + description: "Returns a resource reference that can be used by MCP clients", + inputSchema: GetResourceReferenceSchema, +}; +/** + * Registers the 'get-resource-reference' tool. + * + * The registered tool validates and processes arguments for retrieving a resource + * reference. Supported resource types include predefined `RESOURCE_TYPE_TEXT` and + * `RESOURCE_TYPE_BLOB`. The retrieved resource's reference will include the resource + * ID, type, and its associated URI. + * + * The tool performs the following operations: + * 1. Validates the `resourceType` argument to ensure it matches a supported type. + * 2. Validates the `resourceId` argument to ensure it is a finite positive integer. + * 3. Constructs a URI for the resource based on its type (text or blob). + * 4. Retrieves the resource and returns it in a content block. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerGetResourceReferenceTool = (server) => { + server.registerTool(name, config, async (args) => { + // Validate resource type argument + const { resourceType } = args; + if (!RESOURCE_TYPES.includes(resourceType)) { + throw new Error(`Invalid resourceType: ${args?.resourceType}. Must be ${RESOURCE_TYPE_TEXT} or ${RESOURCE_TYPE_BLOB}.`); + } + // Validate resourceId argument + const resourceId = Number(args?.resourceId); + if (!Number.isFinite(resourceId) || + !Number.isInteger(resourceId) || + resourceId < 1) { + throw new Error(`Invalid resourceId: ${args?.resourceId}. Must be a finite positive integer.`); + } + // Get resource based on the resource type + const uri = resourceType === RESOURCE_TYPE_TEXT + ? textResourceUri(resourceId) + : blobResourceUri(resourceId); + const resource = resourceType === RESOURCE_TYPE_TEXT + ? textResource(uri, resourceId) + : blobResource(uri, resourceId); + return { + content: [ + { + type: "text", + text: `Returning resource reference for Resource ${resourceId}:`, + }, + { + type: "resource", + resource: resource, + }, + { + type: "text", + text: `You can access this resource using the URI: ${resource.uri}`, + }, + ], + }; + }); +}; diff --git a/src/everything/tools/get-roots-list.js b/src/everything/tools/get-roots-list.js new file mode 100644 index 0000000000..83d2d729af --- /dev/null +++ b/src/everything/tools/get-roots-list.js @@ -0,0 +1,71 @@ +import { syncRoots } from "../server/roots.js"; +// Tool configuration +const name = "get-roots-list"; +const config = { + title: "Get Roots List Tool", + description: "Lists the current MCP roots provided by the client. 
Demonstrates the roots protocol capability even though this server doesn't access files.", + inputSchema: {}, +}; +/** + * Registers the 'get-roots-list' tool. + * + * If the client does not support the roots capability, the tool is not registered. + * + * The registered tool interacts with the MCP roots capability, which enables the server to access + * information about the client's workspace directories or file system roots. + * + * When supported, the server automatically retrieves and formats the current list of roots from the + * client upon connection and whenever the client sends a `roots/list_changed` notification. + * + * Therefore, this tool displays the roots that the server currently knows about for the connected + * client. If for some reason the server never got the initial roots list, the tool will request the + * list from the client again. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerGetRootsListTool = (server) => { + // Does client support roots? + const clientCapabilities = server.server.getClientCapabilities() || {}; + const clientSupportsRoots = clientCapabilities.roots !== undefined; + // If so, register tool + if (clientSupportsRoots) { + server.registerTool(name, config, async (args, extra) => { + // Fetch the current roots list from the client if needed + const currentRoots = await syncRoots(server, extra.sessionId); + // Respond if client supports roots but doesn't have any configured + if (clientSupportsRoots && + (!currentRoots || currentRoots.length === 0)) { + return { + content: [ + { + type: "text", + text: "The client supports roots but no roots are currently configured.\n\n" + + "This could mean:\n" + + "1. The client hasn't provided any roots yet\n" + + "2. The client provided an empty roots list\n" + + "3. The roots configuration is still being loaded", + }, + ], + }; + } + // Create formatted response if there is a list of roots + const rootsList = currentRoots + ? currentRoots + .map((root, index) => { + return `${index + 1}. ${root.name || "Unnamed Root"}\n URI: ${root.uri}`; + }) + .join("\n\n") + : "No roots found"; + return { + content: [ + { + type: "text", + text: `Current MCP Roots (${currentRoots.length} total):\n\n${rootsList}\n\n` + + "Note: This server demonstrates the roots protocol capability but doesn't actually access files. 
" + + "The roots are provided by the MCP client and can be used by servers that need file system access.", + }, + ], + }; + }); + } +}; diff --git a/src/everything/tools/get-structured-content.js b/src/everything/tools/get-structured-content.js new file mode 100644 index 0000000000..4848eefbe6 --- /dev/null +++ b/src/everything/tools/get-structured-content.js @@ -0,0 +1,72 @@ +import { z } from "zod"; +// Tool input schema +const GetStructuredContentInputSchema = { + location: z + .enum(["New York", "Chicago", "Los Angeles"]) + .describe("Choose city"), +}; +// Tool output schema +const GetStructuredContentOutputSchema = z.object({ + temperature: z.number().describe("Temperature in celsius"), + conditions: z.string().describe("Weather conditions description"), + humidity: z.number().describe("Humidity percentage"), +}); +// Tool configuration +const name = "get-structured-content"; +const config = { + title: "Get Structured Content Tool", + description: "Returns structured content along with an output schema for client data validation", + inputSchema: GetStructuredContentInputSchema, + outputSchema: GetStructuredContentOutputSchema, +}; +/** + * Registers the 'get-structured-content' tool. + * + * The registered tool processes incoming arguments using a predefined input schema, + * generates structured content with weather information including temperature, + * conditions, and humidity, and returns both backward-compatible content blocks + * and structured content in the response. + * + * The response contains: + * - `content`: An array of content blocks, presented as JSON stringified objects. + * - `structuredContent`: A JSON structured representation of the weather data. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerGetStructuredContentTool = (server) => { + server.registerTool(name, config, async (args) => { + // Get simulated weather for the chosen city + let weather; + switch (args.location) { + case "New York": + weather = { + temperature: 33, + conditions: "Cloudy", + humidity: 82, + }; + break; + case "Chicago": + weather = { + temperature: 36, + conditions: "Light rain / drizzle", + humidity: 82, + }; + break; + case "Los Angeles": + weather = { + temperature: 73, + conditions: "Sunny / Clear", + humidity: 48, + }; + break; + } + const backwardCompatibleContentBlock = { + type: "text", + text: JSON.stringify(weather), + }; + return { + content: [backwardCompatibleContentBlock], + structuredContent: weather, + }; + }); +}; diff --git a/src/everything/tools/get-sum.js b/src/everything/tools/get-sum.js new file mode 100644 index 0000000000..f8962468b6 --- /dev/null +++ b/src/everything/tools/get-sum.js @@ -0,0 +1,40 @@ +import { z } from "zod"; +// Tool input schema +const GetSumSchema = z.object({ + a: z.number().describe("First number"), + b: z.number().describe("Second number"), +}); +// Tool configuration +const name = "get-sum"; +const config = { + title: "Get Sum Tool", + description: "Returns the sum of two numbers", + inputSchema: GetSumSchema, +}; +/** + * Registers the 'get-sum' tool. + ** + * The registered tool processes input arguments, validates them using a predefined schema, + * calculates the sum of two numeric values, and returns the result in a content block. + * + * Expects input arguments to conform to a specific schema that includes two numeric properties, `a` and `b`. + * Validation is performed to ensure the input adheres to the expected structure before calculating the sum. 
+ * + * The result is returned as a Promise resolving to an object containing the computed sum in a text format. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerGetSumTool = (server) => { + server.registerTool(name, config, async (args) => { + const validatedArgs = GetSumSchema.parse(args); + const sum = validatedArgs.a + validatedArgs.b; + return { + content: [ + { + type: "text", + text: `The sum of ${validatedArgs.a} and ${validatedArgs.b} is ${sum}.`, + }, + ], + }; + }); +}; diff --git a/src/everything/tools/get-tiny-image.js b/src/everything/tools/get-tiny-image.js new file mode 100644 index 0000000000..dbdfcc4337 --- /dev/null +++ b/src/everything/tools/get-tiny-image.js @@ -0,0 +1,41 @@ +// A tiny encoded MCP logo image +export const MCP_TINY_IMAGE = "iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAKsGlDQ1BJQ0MgUHJvZmlsZQAASImVlwdUU+kSgOfe9JDQEiIgJfQmSCeAlBBaAAXpYCMkAUKJMRBU7MriClZURLCs6KqIgo0idizYFsWC3QVZBNR1sWDDlXeBQ9jdd9575805c+a7c+efmf+e/z9nLgCdKZDJMlF1gCxpjjwyyI8dn5DIJvUABRiY0kBdIMyWcSMiwgCTUft3+dgGyJC9YzuU69/f/1fREImzhQBIBMbJomxhFsbHMe0TyuQ5ALg9mN9kbo5siK9gzJRjDWL8ZIhTR7hviJOHGY8fjomO5GGsDUCmCQTyVACaKeZn5wpTsTw0f4ztpSKJFGPsGbyzsmaLMMbqgiUWI8N4KD8n+S95Uv+WM1mZUyBIVfLIXoaF7C/JlmUK5v+fn+N/S1amYrSGOaa0NHlwJGaxvpAHGbNDlSxNnhI+yhLRcPwwpymCY0ZZmM1LHGWRwD9UuTZzStgop0gC+co8OfzoURZnB0SNsnx2pLJWipzHHWWBfKyuIiNG6U8T85X589Ki40Y5VxI7ZZSzM6JCx2J4Sr9cEansXywN8hurG6jce1b2X/Yr4SvX5qRFByv3LhjrXyzljuXMjlf2JhL7B4zFxCjjZTl+ylqyzAhlvDgzSOnPzo1Srs3BDuTY2gjlN0wXhESMMoRBELAhBjIhB+QggECQgBTEOeJ5Q2cUeLNl8+WS1LQcNhe7ZWI2Xyq0m8B2tHd0Bhi6syNH4j1r+C4irGtjvhWVAF4nBgcHT475Qm4BHEkCoNaO+SxnAKh3A1w5JVTIc0d8Q9cJCEAFNWCCDhiACViCLTiCK3iCLwRACIRDNCTATBBCGmRhnc+FhbAMCqAI1sNmKIOdsBv2wyE4CvVwCs7DZbgOt+AePIZ26IJX0AcfYQBBEBJCRxiIDmKImCE2iCPCQbyRACQMiUQSkCQkFZEiCmQhsgIpQoqRMmQXUokcQU4g55GrSCvyEOlAepF3yFcUh9JQJqqPmqMTUQ7KRUPRaHQGmorOQfPQfHQtWopWoAfROvQ8eh29h7ajr9B+HOBUcCycEc4Wx8HxcOG4RFwKTo5bjCvEleAqcNW4Rlwz7g6uHfca9wVPxDPwbLwt3hMfjI/BC/Fz8Ivxq/Fl+P34OvxF/B18B74P/51AJ+gRbAgeBD4hnpBKmEsoIJQQ9hJqCZcI9whdhI9EIpFFtCC6EYOJCcR04gLiauJ2Yg3xHLGV2EnsJ5FIOiQbkhcpnCQg5ZAKSFtJB0lnSbdJXaTPZBWyIdmRHEhOJEvJy8kl5APkM+Tb5G7yAEWdYkbxoIRTRJT5lHWUPZRGyk1KF2WAqkG1oHpRo6np1GXUUmo19RL1CfW9ioqKsYq7ylQVicpSlVKVwypXVDpUvtA0adY0Hm06TUFbS9tHO0d7SHtPp9PN6b70RHoOfS29kn6B/oz+WZWhaqfKVxWpLlEtV61Tva36Ro2iZqbGVZuplqdWonZM7abaa3WKurk6T12gvli9XP2E+n31fg2GhoNGuEaWxmqNAxpXNXo0SZrmmgGaIs18zd2aFzQ7GTiGCYPHEDJWMPYwLjG6mESmBZPPTGcWMQ8xW5h9WppazlqxWvO0yrVOa7WzcCxzFp+VyVrHOspqY30dpz+OO048btW46nG3x33SHq/tqy3WLtSu0b6n/VWHrROgk6GzQade56kuXtdad6ruXN0dupd0X49njvccLxxfOP7o+Ed6qJ61XqTeAr3dejf0+vUN9IP0Zfpb9S/ovzZgGfgapBtsMjhj0GvIMPQ2lBhuMjxr+JKtxeayM9ml7IvsPiM9o2AjhdEuoxajAWML4xjj5cY1xk9NqCYckxSTTSZNJn2mhqaTTReaVpk+MqOYcczSzLaYNZt9MrcwjzNfaV5v3mOhbcG3yLOosnhiSbf0sZxjWWF514poxbHKsNpudcsatXaxTrMut75pg9q42khsttu0TiBMcJ8gnVAx4b4tzZZrm2tbZdthx7ILs1tuV2/3ZqLpxMSJGyY2T/xu72Kfab/H/rGDpkOIw3KHRod3jtaOQsdyx7tOdKdApyVODU5vnW2cxc47nB+4MFwmu6x0aXL509XNVe5a7drrZuqW5LbN7T6HyYngrOZccSe4+7kvcT/l/sXD1SPH46jHH562nhmeBzx7JllMEk/aM6nTy9hL4LXLq92b7Z3k/ZN3u4+Rj8Cnwue5r4mvyHevbzfXipvOPch942fvJ/er9fvE8+At4p3zx/kH+Rf6twRoBsQElAU8CzQOTA2sCuwLcglaEHQumBAcGrwh+D5fny/kV/L7QtxCFoVcDKWFRoWWhT4Psw6ThzVORieHTN44+ckUsynSKfXhEM4P3xj+NMIiYk7EyanEqRFTy6e+iHSIXBjZHMWImhV1IOpjtF/0uujHMZYxipimWLXY6bGVsZ/i/OOK49rjJ8Yvir+eoJsgSWhIJCXGJu5N7J8WMG3ztK7pLtMLprfNsJgxb8bVmbozM2eenqU2SzDrWBIhKS7pQNI3QbigQtCfzE/eltwn5Am3CF+JfEWbRL1iL3GxuDvFK6U4pSfVK3Vjam+aT1pJ2msJT1ImeZsenL4z/VNGeMa+jMHMuMyaLHJWUtYJqaY0Q3pxtsHsebNbZTayAln7HI85m+f0yUPle7OR7BnZDTlMbDi6obBU
/KDoyPXOLc/9PDd27rF5GvOk827Mt56/an53XmDezwvwC4QLmhYaLVy2sGMRd9Guxcji5MVNS0yW5C/pWhq0dP8y6rKMZb8st19evPzDirgVjfn6+UvzO38I+qGqQLVAXnB/pefKnT/if5T82LLKadXWVd8LRYXXiuyLSoq+rRauvrbGYU3pmsG1KWtb1rmu27GeuF66vm2Dz4b9xRrFecWdGydvrNvE3lS46cPmWZuvljiX7NxC3aLY0l4aVtqw1XTr+q3fytLK7pX7ldds09u2atun7aLtt3f47qjeqb+zaOfXnyQ/PdgVtKuuwryiZDdxd+7uF3ti9zT/zPm5cq/u3qK9f+6T7mvfH7n/YqVbZeUBvQPrqtAqRVXvwekHbx3yP9RQbVu9q4ZVU3QYDisOvzySdKTtaOjRpmOcY9XHzY5vq2XUFtYhdfPr+urT6tsbEhpaT4ScaGr0bKw9aXdy3ymjU+WntU6vO0M9k39m8Gze2f5zsnOvz6ee72ya1fT4QvyFuxenXmy5FHrpyuXAyxeauc1nr3hdOXXV4+qJa5xr9dddr9fdcLlR+4vLL7Utri11N91uNtzyv9XYOqn1zG2f2+fv+N+5fJd/9/q9Kfda22LaHtyffr/9gehBz8PMh28f5T4aeLz0CeFJ4VP1pyXP9J5V/Gr1a027a/vpDv+OG8+jnj/uFHa++i37t29d+S/oL0q6Dbsrexx7TvUG9t56Oe1l1yvZq4HXBb9r/L7tjeWb43/4/nGjL76v66387eC71e913u/74PyhqT+i/9nHrI8Dnwo/63ze/4Xzpflr3NfugbnfSN9K/7T6s/F76Pcng1mDgzKBXDA8CuAwRVNSAN7tA6AnADCwGYI6bWSmHhZk5D9gmOA/8cjcPSyuANWYGRqNeOcADmNqvhRAzRdgaCyK9gXUyUmpo/Pv8Kw+JAbYv8K0HECi2x6tebQU/iEjc/xf+v6nBWXWv9l/AV0EC6JTIblRAAAAeGVYSWZNTQAqAAAACAAFARIAAwAAAAEAAQAAARoABQAAAAEAAABKARsABQAAAAEAAABSASgAAwAAAAEAAgAAh2kABAAAAAEAAABaAAAAAAAAAJAAAAABAAAAkAAAAAEAAqACAAQAAAABAAAAFKADAAQAAAABAAAAFAAAAAAXNii1AAAACXBIWXMAABYlAAAWJQFJUiTwAAAB82lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOllSZXNvbHV0aW9uPjE0NDwvdGlmZjpZUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHRpZmY6WFJlc29sdXRpb24+MTQ0PC90aWZmOlhSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KReh49gAAAjRJREFUOBGFlD2vMUEUx2clvoNCcW8hCqFAo1dKhEQpvsF9KrWEBh/ALbQ0KkInBI3SWyGPCCJEQliXgsTLefaca/bBWjvJzs6cOf/fnDkzOQJIjWm06/XKBEGgD8c6nU5VIWgBtQDPZPWtJE8O63a7LBgMMo/Hw0ql0jPjcY4RvmqXy4XMjUYDUwLtdhtmsxnYbDbI5/O0djqdFFKmsEiGZ9jP9gem0yn0ej2Yz+fg9XpfycimAD7DttstQTDKfr8Po9GIIg6Hw1Cr1RTgB+A72GAwgMPhQLBMJgNSXsFqtUI2myUo18pA6QJogefsPrLBX4QdCVatViklw+EQRFGEj88P2O12pEUGATmsXq+TaLPZ0AXgMRF2vMEqlQoJTSYTpNNpApvNZliv1/+BHDaZTAi2Wq1A3Ig0xmMej7+RcZjdbodUKkWAaDQK+GHjHPnImB88JrZIJAKFQgH2+z2BOczhcMiwRCIBgUAA+NN5BP6mj2DYff35gk6nA61WCzBn2JxO5wPM7/fLz4vD0E+OECfn8xl/0Gw2KbLxeAyLxQIsFgt8p75pDSO7h/HbpUWpewCike9WLpfB7XaDy+WCYrFI/slk8i0MnRRAUt46hPMI4vE4+Hw+ec7t9/44VgWigEeby+UgFArJWjUYOqhWG6x50rpcSfR6PVUfNOgEVRlTX0HhrZBKz4MZjUYWi8VoA+lc9H/VaRZYjBKrtXR8tlwumcFgeMWRbZpA9ORQWfVm8A/FsrLaxebd5wAAAABJRU5ErkJggg=="; +// Tool configuration +const name = "get-tiny-image"; +const config = { + title: "Get Tiny Image Tool", + description: "Returns a tiny MCP logo image.", + inputSchema: {}, +}; +/** + * Registers the "get-tiny-image" tool. + * + * The registered tool returns a response containing a small image alongside some + * descriptive text. + * + * The response structure includes textual content before and after the image. + * The image is served as a PNG data type and represents the default MCP tiny image. + * + * @param server - The McpServer instance where the tool will be registered. 
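+ *
+ * @example
+ * // Hypothetical client-side call (assumes an MCP client instance named `client`):
+ * // const result = await client.callTool({ name: "get-tiny-image", arguments: {} });
+ * // result.content holds [text, image (base64 PNG), text], as built below.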
+ */ +export const registerGetTinyImageTool = (server) => { + server.registerTool(name, config, async (args) => { + return { + content: [ + { + type: "text", + text: "Here's the image you requested:", + }, + { + type: "image", + data: MCP_TINY_IMAGE, + mimeType: "image/png", + }, + { + type: "text", + text: "The image above is the MCP logo.", + }, + ], + }; + }); +}; diff --git a/src/everything/tools/gzip-file-as-resource.js b/src/everything/tools/gzip-file-as-resource.js new file mode 100644 index 0000000000..57a46b8c04 --- /dev/null +++ b/src/everything/tools/gzip-file-as-resource.js @@ -0,0 +1,182 @@ +import { z } from "zod"; +import { gzipSync } from "node:zlib"; +import { getSessionResourceURI, registerSessionResource, } from "../resources/session.js"; +// Maximum input file size - 10 MB default +const GZIP_MAX_FETCH_SIZE = Number(process.env.GZIP_MAX_FETCH_SIZE ?? String(10 * 1024 * 1024)); +// Maximum fetch time - 30 seconds default. +const GZIP_MAX_FETCH_TIME_MILLIS = Number(process.env.GZIP_MAX_FETCH_TIME_MILLIS ?? String(30 * 1000)); +// Comma-separated list of allowed domains. Empty means all domains are allowed. +const GZIP_ALLOWED_DOMAINS = (process.env.GZIP_ALLOWED_DOMAINS ?? "") + .split(",") + .map((d) => d.trim().toLowerCase()) + .filter((d) => d.length > 0); +// Tool input schema +const GZipFileAsResourceSchema = z.object({ + name: z.string().describe("Name of the output file").default("README.md.gz"), + data: z + .string() + .url() + .describe("URL or data URI of the file content to compress") + .default("https://raw.githubusercontent.com/modelcontextprotocol/servers/refs/heads/main/README.md"), + outputType: z + .enum(["resourceLink", "resource"]) + .default("resourceLink") + .describe("How the resulting gzipped file should be returned. 'resourceLink' returns a link to a resource that can be read later, 'resource' returns a full resource object."), +}); +// Tool configuration +const name = "gzip-file-as-resource"; +const config = { + title: "GZip File as Resource Tool", + description: "Compresses a single file using gzip compression. Depending upon the selected output type, returns either the compressed data as a gzipped resource or a resource link, allowing it to be downloaded in a subsequent request during the current session.", + inputSchema: GZipFileAsResourceSchema, +}; +/** + * Registers the `gzip-file-as-resource` tool. + * + * The registered tool compresses input data using gzip, and makes the resulting file accessible + * as a resource for the duration of the session. + * + * The tool supports two output types: + * - "resource": Returns the resource directly, including its URI, MIME type, and base64-encoded content. + * - "resourceLink": Returns a link to access the resource later. + * + * If an unrecognized `outputType` is provided, the tool throws an error. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + * @throws {Error} Throws an error if an unknown output type is specified. 
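+ *
+ * @example
+ * // Sketch of a client-side call (hypothetical `client`; argument values are
+ * // illustrative, and the defaults come from GZipFileAsResourceSchema above):
+ * // await client.callTool({
+ * //   name: "gzip-file-as-resource",
+ * //   arguments: { name: "README.md.gz", data: "https://example.com/README.md", outputType: "resourceLink" },
+ * // });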
+ */ +export const registerGZipFileAsResourceTool = (server) => { + server.registerTool(name, config, async (args) => { + const { name, data: dataUri, outputType, } = GZipFileAsResourceSchema.parse(args); + // Validate data uri + const url = validateDataURI(dataUri); + // Fetch the data + const response = await fetchSafely(url, { + maxBytes: GZIP_MAX_FETCH_SIZE, + timeoutMillis: GZIP_MAX_FETCH_TIME_MILLIS, + }); + // Compress the data using gzip + const inputBuffer = Buffer.from(response); + const compressedBuffer = gzipSync(inputBuffer); + // Create resource + const uri = getSessionResourceURI(name); + const blob = compressedBuffer.toString("base64"); + const mimeType = "application/gzip"; + const resource = { uri, name, mimeType }; + // Register resource, get resource link in return + const resourceLink = registerSessionResource(server, resource, "blob", blob); + // Return the resource or a resource link that can be used to access this resource later + if (outputType === "resource") { + return { + content: [ + { + type: "resource", + resource: { uri, mimeType, blob }, + }, + ], + }; + } + else if (outputType === "resourceLink") { + return { + content: [resourceLink], + }; + } + else { + throw new Error(`Unknown outputType: ${outputType}`); + } + }); +}; +/** + * Validates a given data URI to ensure it follows the appropriate protocols and rules. + * + * @param {string} dataUri - The data URI to validate. Must be an HTTP, HTTPS, or data protocol URL. If a domain is provided, it must match the allowed domains list if applicable. + * @return {URL} The validated and parsed URL object. + * @throws {Error} If the data URI does not use a supported protocol or does not meet allowed domains criteria. + */ +function validateDataURI(dataUri) { + // Validate Inputs + const url = new URL(dataUri); + try { + if (url.protocol !== "http:" && + url.protocol !== "https:" && + url.protocol !== "data:") { + throw new Error(`Unsupported URL protocol for ${dataUri}. Only http, https, and data URLs are supported.`); + } + if (GZIP_ALLOWED_DOMAINS.length > 0 && + (url.protocol === "http:" || url.protocol === "https:")) { + const domain = url.hostname; + const domainAllowed = GZIP_ALLOWED_DOMAINS.some((allowedDomain) => { + return domain === allowedDomain || domain.endsWith(`.${allowedDomain}`); + }); + if (!domainAllowed) { + throw new Error(`Domain ${domain} is not in the allowed domains list.`); + } + } + } + catch (error) { + throw new Error(`Error processing file ${dataUri}: ${error instanceof Error ? error.message : String(error)}`); + } + return url; +} +/** + * Fetches data safely from a given URL while ensuring constraints on maximum byte size and timeout duration. + * + * @param {URL} url The URL to fetch data from. + * @param {Object} options An object containing options for the fetch operation. + * @param {number} options.maxBytes The maximum allowed size (in bytes) of the response. If the response exceeds this size, the operation will be aborted. + * @param {number} options.timeoutMillis The timeout duration (in milliseconds) for the fetch operation. If the fetch takes longer, it will be aborted. + * @return {Promise} A promise that resolves with the response as an ArrayBuffer if successful. + * @throws {Error} Throws an error if the response size exceeds the defined limit, the fetch times out, or the response is otherwise invalid. 
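+ *
+ * @example
+ * // Illustrative limits only: cap the download at 1 MiB and 5 seconds.
+ * // const data = await fetchSafely(new URL("https://example.com/file.txt"), {
+ * //   maxBytes: 1024 * 1024,
+ * //   timeoutMillis: 5000,
+ * // });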
+ */ +async function fetchSafely(url, { maxBytes, timeoutMillis }) { + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(`Fetching ${url} took more than ${timeoutMillis} ms and was aborted.`), timeoutMillis); + try { + // Fetch the data + const response = await fetch(url, { signal: controller.signal }); + if (!response.body) { + throw new Error("No response body"); + } + // Note: we can't trust the Content-Length header: a malicious or clumsy server could return much more data than advertised. + // We check it here for early bail-out, but we still need to monitor actual bytes read below. + const contentLengthHeader = response.headers.get("content-length"); + if (contentLengthHeader != null) { + const contentLength = parseInt(contentLengthHeader, 10); + if (contentLength > maxBytes) { + throw new Error(`Content-Length for ${url} exceeds max of ${maxBytes}: ${contentLength}`); + } + } + // Read the fetched data from the response body + const reader = response.body.getReader(); + const chunks = []; + let totalSize = 0; + // Read chunks until done + try { + while (true) { + const { done, value } = await reader.read(); + if (done) + break; + totalSize += value.length; + if (totalSize > maxBytes) { + reader.cancel(); + throw new Error(`Response from ${url} exceeds ${maxBytes} bytes`); + } + chunks.push(value); + } + } + finally { + reader.releaseLock(); + } + // Combine chunks into a single buffer + const buffer = new Uint8Array(totalSize); + let offset = 0; + for (const chunk of chunks) { + buffer.set(chunk, offset); + offset += chunk.length; + } + return buffer.buffer; + } + finally { + clearTimeout(timeout); + } +} diff --git a/src/everything/tools/index.js b/src/everything/tools/index.js new file mode 100644 index 0000000000..2f951d3b0f --- /dev/null +++ b/src/everything/tools/index.js @@ -0,0 +1,50 @@ +import { registerGetAnnotatedMessageTool } from "./get-annotated-message.js"; +import { registerEchoTool } from "./echo.js"; +import { registerGetEnvTool } from "./get-env.js"; +import { registerGetResourceLinksTool } from "./get-resource-links.js"; +import { registerGetResourceReferenceTool } from "./get-resource-reference.js"; +import { registerGetRootsListTool } from "./get-roots-list.js"; +import { registerGetStructuredContentTool } from "./get-structured-content.js"; +import { registerGetSumTool } from "./get-sum.js"; +import { registerGetTinyImageTool } from "./get-tiny-image.js"; +import { registerGZipFileAsResourceTool } from "./gzip-file-as-resource.js"; +import { registerToggleSimulatedLoggingTool } from "./toggle-simulated-logging.js"; +import { registerToggleSubscriberUpdatesTool } from "./toggle-subscriber-updates.js"; +import { registerTriggerElicitationRequestTool } from "./trigger-elicitation-request.js"; +import { registerTriggerLongRunningOperationTool } from "./trigger-long-running-operation.js"; +import { registerTriggerSamplingRequestTool } from "./trigger-sampling-request.js"; +import { registerTriggerSamplingRequestAsyncTool } from "./trigger-sampling-request-async.js"; +import { registerTriggerElicitationRequestAsyncTool } from "./trigger-elicitation-request-async.js"; +import { registerSimulateResearchQueryTool } from "./simulate-research-query.js"; +/** + * Register the tools with the MCP server. 
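+ *
+ * Tools registered here are unconditional; tools that depend on client capabilities
+ * go through `registerConditionalTools` below, after initialization.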
+ * @param server + */ +export const registerTools = (server) => { + registerEchoTool(server); + registerGetAnnotatedMessageTool(server); + registerGetEnvTool(server); + registerGetResourceLinksTool(server); + registerGetResourceReferenceTool(server); + registerGetStructuredContentTool(server); + registerGetSumTool(server); + registerGetTinyImageTool(server); + registerGZipFileAsResourceTool(server); + registerToggleSimulatedLoggingTool(server); + registerToggleSubscriberUpdatesTool(server); + registerTriggerLongRunningOperationTool(server); +}; +/** + * Register the tools that are conditional upon client capabilities. + * These must be registered conditionally, after initialization. + */ +export const registerConditionalTools = (server) => { + registerGetRootsListTool(server); + registerTriggerElicitationRequestTool(server); + registerTriggerSamplingRequestTool(server); + // Task-based research tool (uses experimental tasks API) + registerSimulateResearchQueryTool(server); + // Bidirectional task tools - server sends requests that client executes as tasks + registerTriggerSamplingRequestAsyncTool(server); + registerTriggerElicitationRequestAsyncTool(server); +}; diff --git a/src/everything/tools/simulate-research-query.js b/src/everything/tools/simulate-research-query.js new file mode 100644 index 0000000000..363099f9fb --- /dev/null +++ b/src/everything/tools/simulate-research-query.js @@ -0,0 +1,249 @@ +import { z } from "zod"; +import { ElicitResultSchema, } from "@modelcontextprotocol/sdk/types.js"; +// Tool input schema +const SimulateResearchQuerySchema = z.object({ + topic: z.string().describe("The research topic to investigate"), + ambiguous: z + .boolean() + .default(false) + .describe("Simulate an ambiguous query that requires clarification (triggers input_required status)"), +}); +// Research stages +const STAGES = [ + "Gathering sources", + "Analyzing content", + "Synthesizing findings", + "Generating report", +]; +// Duration per stage in milliseconds +const STAGE_DURATION = 1000; +// Map to store research state per task +const researchStates = new Map(); +/** + * Runs the background research process. + * Updates task status as it progresses through stages. + * If clarification is needed, attempts elicitation via sendRequest. + * + * Note: Elicitation only works on STDIO transport. On HTTP transport, + * sendRequest will fail and the task will use a default interpretation. + * Full HTTP support requires SDK PR #1210's elicitInputStream API. + */ +async function runResearchProcess(taskId, args, taskStore, +// eslint-disable-next-line @typescript-eslint/no-explicit-any +sendRequest) { + const state = researchStates.get(taskId); + if (!state) + return; + // Process each stage + for (let i = state.currentStage; i < STAGES.length; i++) { + state.currentStage = i; + // Check if task was cancelled externally + if (state.completed) + return; + // Update status message for current stage + await taskStore.updateTaskStatus(taskId, "working", `${STAGES[i]}...`); + // At synthesis stage (index 2), check if clarification is needed + if (i === 2 && state.ambiguous && !state.clarification) { + // Update status to show we're requesting input (spec SHOULD) + await taskStore.updateTaskStatus(taskId, "input_required", `Found multiple interpretations for "${state.topic}". 
Requesting clarification...`); + try { + // Try elicitation via sendRequest (works on STDIO, fails on HTTP) + const elicitResult = await sendRequest({ + method: "elicitation/create", + params: { + message: `The research query "${state.topic}" could have multiple interpretations. Please clarify what you're looking for:`, + requestedSchema: { + type: "object", + properties: { + interpretation: { + type: "string", + title: "Clarification", + description: "Which interpretation of the topic do you mean?", + oneOf: getInterpretationsForTopic(state.topic), + }, + }, + required: ["interpretation"], + }, + }, + }, ElicitResultSchema); + // Process elicitation response + if (elicitResult.action === "accept" && elicitResult.content) { + state.clarification = + elicitResult.content + .interpretation || "User accepted without selection"; + } + else if (elicitResult.action === "decline") { + state.clarification = "User declined - using default interpretation"; + } + else { + state.clarification = "User cancelled - using default interpretation"; + } + } + catch (error) { + // Elicitation failed (likely HTTP transport without streaming support) + // Use default interpretation and continue - task should still complete + console.warn(`Elicitation failed for task ${taskId} (HTTP transport?):`, error instanceof Error ? error.message : String(error)); + state.clarification = + "technical (default - elicitation unavailable on HTTP)"; + } + // Resume with working status (spec SHOULD) + await taskStore.updateTaskStatus(taskId, "working", `Continuing with interpretation: "${state.clarification}"...`); + // Continue processing (no return - just keep going through the loop) + } + // Simulate work for this stage + await new Promise((resolve) => setTimeout(resolve, STAGE_DURATION)); + } + // All stages complete - generate result + state.completed = true; + const result = generateResearchReport(state); + state.result = result; + await taskStore.storeTaskResult(taskId, "completed", result); +} +/** + * Generates the final research report with educational content about tasks. + */ +function generateResearchReport(state) { + const topic = state.clarification + ? `${state.topic} (${state.clarification})` + : state.topic; + const report = `# Research Report: ${topic} + +## Research Parameters +- **Topic**: ${state.topic} +${state.clarification ? `- **Clarification**: ${state.clarification}` : ""} + +## Synthesis +This research query was processed through ${STAGES.length} stages: +${STAGES.map((s, i) => `- Stage ${i + 1}: ${s} āœ“`).join("\n")} + +--- + +## About This Demo (SEP-1686: Tasks) + +This tool demonstrates MCP's task-based execution pattern for long-running operations: + +**Task Lifecycle Demonstrated:** +1. \`tools/call\` with \`task\` parameter → Server returns \`CreateTaskResult\` (not the final result) +2. Client polls \`tasks/get\` → Server returns current status and \`statusMessage\` +3. Status progressed: \`working\` → ${state.clarification ? `\`input_required\` → \`working\` → ` : ""}\`completed\` +4. Client calls \`tasks/result\` → Server returns this final result + +${state.clarification + ? `**Elicitation Flow:** +When the query was ambiguous, the server sent an \`elicitation/create\` request +to the client. The task status changed to \`input_required\` while awaiting user input. +${state.clarification.includes("unavailable on HTTP") + ? ` +**Note:** Elicitation was skipped because this server is running over HTTP transport. +The current SDK's \`sendRequest\` only works over STDIO. 
Full HTTP elicitation support +requires SDK PR #1210's streaming \`elicitInputStream\` API. +` + : `After receiving clarification ("${state.clarification}"), the task resumed processing and completed.`} +` + : ""} +**Key Concepts:** +- Tasks enable "call now, fetch later" patterns +- \`statusMessage\` provides human-readable progress updates +- Tasks have TTL (time-to-live) for automatic cleanup +- \`pollInterval\` suggests how often to check status +- Elicitation requests can be sent directly during task execution + +*This is a simulated research report from the Everything MCP Server.* +`; + return { + content: [ + { + type: "text", + text: report, + }, + ], + }; +} +/** + * Registers the 'simulate-research-query' tool as a task-based tool. + * + * This tool demonstrates the MCP Tasks feature (SEP-1686) with a real-world scenario: + * a research tool that gathers and synthesizes information from multiple sources. + * If the query is ambiguous, it pauses to ask for clarification before completing. + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerSimulateResearchQueryTool = (server) => { + // Check if client supports elicitation (needed for input_required flow) + const clientCapabilities = server.server.getClientCapabilities() || {}; + const clientSupportsElicitation = clientCapabilities.elicitation !== undefined; + server.experimental.tasks.registerToolTask("simulate-research-query", { + title: "Simulate Research Query", + description: "Simulates a deep research operation that gathers, analyzes, and synthesizes information. " + + "Demonstrates MCP task-based operations with progress through multiple stages. " + + "If 'ambiguous' is true and client supports elicitation, sends an elicitation request for clarification.", + inputSchema: SimulateResearchQuerySchema, + execution: { taskSupport: "required" }, + }, { + /** + * Creates a new research task and starts background processing. + */ + createTask: async (args, extra) => { + const validatedArgs = SimulateResearchQuerySchema.parse(args); + // Create the task in the store + const task = await extra.taskStore.createTask({ + ttl: 300000, // 5 minutes + pollInterval: 1000, + }); + // Initialize research state + const state = { + topic: validatedArgs.topic, + ambiguous: validatedArgs.ambiguous && clientSupportsElicitation, + currentStage: 0, + completed: false, + }; + researchStates.set(task.taskId, state); + // Start background research (don't await - runs asynchronously) + // Pass sendRequest for elicitation (works on STDIO, gracefully degrades on HTTP) + runResearchProcess(task.taskId, validatedArgs, extra.taskStore, extra.sendRequest).catch((error) => { + console.error(`Research task ${task.taskId} failed:`, error); + extra.taskStore + .updateTaskStatus(task.taskId, "failed", String(error)) + .catch(console.error); + }); + return { task }; + }, + /** + * Returns the current status of the research task. + */ + getTask: async (args, extra) => { + return await extra.taskStore.getTask(extra.taskId); + }, + /** + * Returns the task result. + * Elicitation is now handled directly in the background process. + */ + getTaskResult: async (args, extra) => { + // Return the stored result + const result = await extra.taskStore.getTaskResult(extra.taskId); + // Clean up state + researchStates.delete(extra.taskId); + return result; + }, + }); +}; +/** + * Returns contextual interpretation options based on the topic. 
+ */
+function getInterpretationsForTopic(topic) {
+    const lowerTopic = topic.toLowerCase();
+    // Example: contextual interpretations for "python"
+    if (lowerTopic.includes("python")) {
+        return [
+            { const: "programming", title: "Python programming language" },
+            { const: "snake", title: "Python snake species" },
+            { const: "comedy", title: "Monty Python comedy group" },
+        ];
+    }
+    // Default generic interpretations
+    return [
+        { const: "technical", title: "Technical/scientific perspective" },
+        { const: "historical", title: "Historical perspective" },
+        { const: "current", title: "Current events/news perspective" },
+    ];
+}
diff --git a/src/everything/tools/toggle-simulated-logging.js b/src/everything/tools/toggle-simulated-logging.js
new file mode 100644
index 0000000000..972d4385d8
--- /dev/null
+++ b/src/everything/tools/toggle-simulated-logging.js
@@ -0,0 +1,41 @@
+import { beginSimulatedLogging, stopSimulatedLogging, } from "../server/logging.js";
+// Tool configuration
+const name = "toggle-simulated-logging";
+const config = {
+    title: "Toggle Simulated Logging",
+    description: "Toggles simulated, random-leveled logging on or off.",
+    inputSchema: {},
+};
+// Track enabled clients by session id
+const clients = new Set();
+/**
+ * Registers the `toggle-simulated-logging` tool.
+ *
+ * The registered tool enables or disables the sending of periodic, random-leveled
+ * logging messages to the connected client.
+ *
+ * When invoked, it either starts or stops simulated logging based on the session's
+ * current state. If logging for the specified session is active, it will be stopped;
+ * if it is inactive, logging will be started.
+ *
+ * @param {McpServer} server - The McpServer instance where the tool will be registered.
+ */
+export const registerToggleSimulatedLoggingTool = (server) => {
+    server.registerTool(name, config, async (_args, extra) => {
+        const sessionId = extra?.sessionId;
+        let response;
+        if (clients.has(sessionId)) {
+            stopSimulatedLogging(sessionId);
+            clients.delete(sessionId);
+            response = `Stopped simulated logging for session ${sessionId}`;
+        }
+        else {
+            beginSimulatedLogging(server, sessionId);
+            clients.add(sessionId);
+            response = `Started simulated, random-leveled logging for session ${sessionId} at a 5 second pace. Client's selected logging level will be respected. If an interval elapses and the message to be sent is below the selected level, it will not be sent. Thus at higher chosen logging levels, messages should arrive further apart.`;
+        }
+        return {
+            content: [{ type: "text", text: `${response}` }],
+        };
+    });
+};
diff --git a/src/everything/tools/toggle-subscriber-updates.js b/src/everything/tools/toggle-subscriber-updates.js
new file mode 100644
index 0000000000..e2c8abd808
--- /dev/null
+++ b/src/everything/tools/toggle-subscriber-updates.js
@@ -0,0 +1,44 @@
+import { beginSimulatedResourceUpdates, stopSimulatedResourceUpdates, } from "../resources/subscriptions.js";
+// Tool configuration
+const name = "toggle-subscriber-updates";
+const config = {
+    title: "Toggle Subscriber Updates",
+    description: "Toggles simulated resource subscription updates on or off.",
+    inputSchema: {},
+};
+// Track enabled clients by session id
+const clients = new Set();
+/**
+ * Registers the `toggle-subscriber-updates` tool.
+ *
+ * The registered tool enables or disables the sending of periodic, simulated resource
+ * update messages to the connected client for any subscriptions they have made.
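+ * (Per the MCP spec, clients subscribe with `resources/subscribe`; the simulated
+ * updates then arrive as `notifications/resources/updated` notifications.)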
+ *
+ * When invoked, it either starts or stops simulated resource updates based on the session's
+ * current state. If simulated updates for the specified session are active, they will be
+ * stopped; if they are inactive, simulated updates will be started.
+ *
+ * The response provides feedback indicating whether simulated updates were started or stopped,
+ * including the session ID.
+ *
+ * @param {McpServer} server - The McpServer instance where the tool will be registered.
+ */
+export const registerToggleSubscriberUpdatesTool = (server) => {
+    server.registerTool(name, config, async (_args, extra) => {
+        const sessionId = extra?.sessionId;
+        let response;
+        if (clients.has(sessionId)) {
+            stopSimulatedResourceUpdates(sessionId);
+            clients.delete(sessionId);
+            response = `Stopped simulated resource updates for session ${sessionId}`;
+        }
+        else {
+            beginSimulatedResourceUpdates(server, sessionId);
+            clients.add(sessionId);
+            response = `Started simulated resource update notifications for session ${sessionId} at a 5 second pace. Client will receive updates for any resources it is subscribed to.`;
+        }
+        return {
+            content: [{ type: "text", text: `${response}` }],
+        };
+    });
+};
diff --git a/src/everything/tools/trigger-elicitation-request-async.js b/src/everything/tools/trigger-elicitation-request-async.js
new file mode 100644
index 0000000000..03e67ae307
--- /dev/null
+++ b/src/everything/tools/trigger-elicitation-request-async.js
@@ -0,0 +1,202 @@
+import { z } from "zod";
+// Tool configuration
+const name = "trigger-elicitation-request-async";
+const config = {
+    title: "Trigger Async Elicitation Request Tool",
+    description: "Trigger an async elicitation request that the CLIENT executes as a background task. " +
+        "Demonstrates bidirectional MCP tasks where the server sends an elicitation request and " +
+        "the client handles user input asynchronously, allowing the server to poll for completion.",
+    inputSchema: {},
+};
+// Poll interval in milliseconds
+const POLL_INTERVAL = 1000;
+// Maximum poll attempts before timeout (10 minutes for user input)
+const MAX_POLL_ATTEMPTS = 600;
+/**
+ * Registers the 'trigger-elicitation-request-async' tool.
+ *
+ * This tool demonstrates bidirectional MCP tasks for elicitation:
+ * - Server sends elicitation request to client with task metadata
+ * - Client creates a task and returns CreateTaskResult
+ * - Client prompts user for input (task status: input_required)
+ * - Server polls client's tasks/get endpoint for status
+ * - Server fetches final result from client's tasks/result endpoint
+ *
+ * @param {McpServer} server - The McpServer instance where the tool will be registered.
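+ *
+ * @example
+ * // The two response shapes the client may return (values are illustrative):
+ * // task-based:  { task: { taskId: "abc123", status: "working", pollInterval: 1000 } }
+ * // synchronous: { action: "accept", content: { name: "Ada Lovelace" } }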
+ */ +export const registerTriggerElicitationRequestAsyncTool = (server) => { + // Check client capabilities + const clientCapabilities = server.server.getClientCapabilities() || {}; + // Client must support elicitation AND tasks.requests.elicitation + const clientSupportsElicitation = clientCapabilities.elicitation !== undefined; + const clientTasksCapability = clientCapabilities.tasks; + const clientSupportsAsyncElicitation = clientTasksCapability?.requests?.elicitation?.create !== undefined; + if (clientSupportsElicitation && clientSupportsAsyncElicitation) { + server.registerTool(name, config, async (args, extra) => { + // Create the elicitation request WITH task metadata + // Using z.any() schema to avoid complex type matching with _meta + const request = { + method: "elicitation/create", + params: { + task: { + ttl: 600000, // 10 minutes (user input may take a while) + }, + message: "Please provide inputs for the following fields (async task demo):", + requestedSchema: { + type: "object", + properties: { + name: { + title: "Your Name", + type: "string", + description: "Your full name", + }, + favoriteColor: { + title: "Favorite Color", + type: "string", + description: "What is your favorite color?", + enum: ["Red", "Blue", "Green", "Yellow", "Purple"], + }, + agreeToTerms: { + title: "Terms Agreement", + type: "boolean", + description: "Do you agree to the terms and conditions?", + }, + }, + required: ["name"], + }, + }, + }; + // Send the elicitation request + // Client may return either: + // - ElicitResult (synchronous execution) + // - CreateTaskResult (task-based execution with { task } object) + const elicitResponse = await extra.sendRequest(request, z.union([ + // CreateTaskResult - client created a task + z.object({ + task: z.object({ + taskId: z.string(), + status: z.string(), + pollInterval: z.number().optional(), + statusMessage: z.string().optional(), + }), + }), + // ElicitResult - synchronous execution + z.object({ + action: z.string(), + content: z.any().optional(), + }), + ])); + // Check if client returned CreateTaskResult (has task object) + const isTaskResult = "task" in elicitResponse && elicitResponse.task; + if (!isTaskResult) { + // Client executed synchronously - return the direct response + return { + content: [ + { + type: "text", + text: `[SYNC] Client executed synchronously:\n${JSON.stringify(elicitResponse, null, 2)}`, + }, + ], + }; + } + const taskId = elicitResponse.task.taskId; + const statusMessages = []; + statusMessages.push(`Task created: ${taskId}`); + // Poll for task completion + let attempts = 0; + let taskStatus = elicitResponse.task.status; + let taskStatusMessage; + while (taskStatus !== "completed" && + taskStatus !== "failed" && + taskStatus !== "cancelled" && + attempts < MAX_POLL_ATTEMPTS) { + // Wait before polling + await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL)); + attempts++; + // Get task status from client + const pollResult = await extra.sendRequest({ + method: "tasks/get", + params: { taskId }, + }, z + .object({ + status: z.string(), + statusMessage: z.string().optional(), + }) + .passthrough()); + taskStatus = pollResult.status; + taskStatusMessage = pollResult.statusMessage; + // Only log status changes or every 10 polls to avoid spam + if (attempts === 1 || + attempts % 10 === 0 || + taskStatus !== "input_required") { + statusMessages.push(`Poll ${attempts}: ${taskStatus}${taskStatusMessage ? 
` - ${taskStatusMessage}` : ""}`);
+                }
+            }
+            // Check for timeout
+            if (attempts >= MAX_POLL_ATTEMPTS) {
+                return {
+                    content: [
+                        {
+                            type: "text",
+                            text: `[TIMEOUT] Task timed out after ${MAX_POLL_ATTEMPTS} poll attempts\n\nProgress:\n${statusMessages.join("\n")}`,
+                        },
+                    ],
+                };
+            }
+            // Check for failure/cancellation
+            if (taskStatus === "failed" || taskStatus === "cancelled") {
+                return {
+                    content: [
+                        {
+                            type: "text",
+                            text: `[${taskStatus.toUpperCase()}] ${taskStatusMessage || "No message"}\n\nProgress:\n${statusMessages.join("\n")}`,
+                        },
+                    ],
+                };
+            }
+            // Fetch the final result
+            const result = await extra.sendRequest({
+                method: "tasks/result",
+                params: { taskId },
+            }, z.any());
+            // Format the elicitation result
+            const content = [];
+            if (result.action === "accept" && result.content) {
+                content.push({
+                    type: "text",
+                    text: `[COMPLETED] User provided the requested information!`,
+                });
+                const userData = result.content;
+                const lines = [];
+                if (userData.name)
+                    lines.push(`- Name: ${userData.name}`);
+                if (userData.favoriteColor)
+                    lines.push(`- Favorite Color: ${userData.favoriteColor}`);
+                if (userData.agreeToTerms !== undefined)
+                    lines.push(`- Agreed to terms: ${userData.agreeToTerms}`);
+                content.push({
+                    type: "text",
+                    text: `User inputs:\n${lines.join("\n")}`,
+                });
+            }
+            else if (result.action === "decline") {
+                content.push({
+                    type: "text",
+                    text: `[DECLINED] User declined to provide the requested information.`,
+                });
+            }
+            else if (result.action === "cancel") {
+                content.push({
+                    type: "text",
+                    text: `[CANCELLED] User cancelled the elicitation dialog.`,
+                });
+            }
+            // Include progress and raw result for debugging
+            content.push({
+                type: "text",
+                text: `\nProgress:\n${statusMessages.join("\n")}\n\nRaw result: ${JSON.stringify(result, null, 2)}`,
+            });
+            return { content };
+        });
+    }
+};
diff --git a/src/everything/tools/trigger-elicitation-request.js b/src/everything/tools/trigger-elicitation-request.js
new file mode 100644
index 0000000000..29227ab70f
--- /dev/null
+++ b/src/everything/tools/trigger-elicitation-request.js
@@ -0,0 +1,210 @@
+import { ElicitResultSchema, } from "@modelcontextprotocol/sdk/types.js";
+// Tool configuration
+const name = "trigger-elicitation-request";
+const config = {
+    title: "Trigger Elicitation Request Tool",
+    description: "Trigger a Request from the Server for User Elicitation",
+    inputSchema: {},
+};
+/**
+ * Registers the 'trigger-elicitation-request' tool.
+ *
+ * If the client does not support the elicitation capability, the tool is not registered.
+ *
+ * The registered tool sends an elicitation request for the user to provide information
+ * based on a pre-defined schema of fields including text inputs, booleans, numbers,
+ * email, dates, enums of various types, etc. It handles multiple possible outcomes
+ * from the user's response, such as acceptance with content, decline, or cancellation
+ * of the dialog, and parses and validates the elicitation response at runtime.
+ *
+ * The elicitation dialog response is returned, formatted into a structured result,
+ * which contains both user-submitted input data (if provided) and debugging information,
+ * including raw results.
+ *
+ * @param {McpServer} server - The McpServer instance where the tool will be registered.
+ */
+export const registerTriggerElicitationRequestTool = (server) => {
+    // Does the client support elicitation?
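+    // (Capabilities are declared by the client during initialization; e.g. a client
+    // that can show elicitation dialogs advertises `capabilities: { elicitation: {} }`.)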
+ const clientCapabilities = server.server.getClientCapabilities() || {}; + const clientSupportsElicitation = clientCapabilities.elicitation !== undefined; + // If so, register tool + if (clientSupportsElicitation) { + server.registerTool(name, config, async (args, extra) => { + const elicitationResult = await extra.sendRequest({ + method: "elicitation/create", + params: { + message: "Please provide inputs for the following fields:", + requestedSchema: { + type: "object", + properties: { + name: { + title: "String", + type: "string", + description: "Your full, legal name", + }, + check: { + title: "Boolean", + type: "boolean", + description: "Agree to the terms and conditions", + }, + firstLine: { + title: "String with default", + type: "string", + description: "Favorite first line of a story", + default: "It was a dark and stormy night.", + }, + email: { + title: "String with email format", + type: "string", + format: "email", + description: "Your email address (will be verified, and never shared with anyone else)", + }, + homepage: { + type: "string", + format: "uri", + title: "String with uri format", + description: "Portfolio / personal website", + }, + birthdate: { + title: "String with date format", + type: "string", + format: "date", + description: "Your date of birth", + }, + integer: { + title: "Integer", + type: "integer", + description: "Your favorite integer (do not give us your phone number, pin, or other sensitive info)", + minimum: 1, + maximum: 100, + default: 42, + }, + number: { + title: "Number in range 1-1000", + type: "number", + description: "Favorite number (there are no wrong answers)", + minimum: 0, + maximum: 1000, + default: 3.14, + }, + untitledSingleSelectEnum: { + type: "string", + title: "Untitled Single Select Enum", + description: "Choose your favorite friend", + enum: [ + "Monica", + "Rachel", + "Joey", + "Chandler", + "Ross", + "Phoebe", + ], + default: "Monica", + }, + untitledMultipleSelectEnum: { + type: "array", + title: "Untitled Multiple Select Enum", + description: "Choose your favorite instruments", + minItems: 1, + maxItems: 3, + items: { + type: "string", + enum: ["Guitar", "Piano", "Violin", "Drums", "Bass"], + }, + default: ["Guitar"], + }, + titledSingleSelectEnum: { + type: "string", + title: "Titled Single Select Enum", + description: "Choose your favorite hero", + oneOf: [ + { const: "hero-1", title: "Superman" }, + { const: "hero-2", title: "Green Lantern" }, + { const: "hero-3", title: "Wonder Woman" }, + ], + default: "hero-1", + }, + titledMultipleSelectEnum: { + type: "array", + title: "Titled Multiple Select Enum", + description: "Choose your favorite types of fish", + minItems: 1, + maxItems: 3, + items: { + anyOf: [ + { const: "fish-1", title: "Tuna" }, + { const: "fish-2", title: "Salmon" }, + { const: "fish-3", title: "Trout" }, + ], + }, + default: ["fish-1"], + }, + legacyTitledEnum: { + type: "string", + title: "Legacy Titled Single Select Enum", + description: "Choose your favorite type of pet", + enum: ["pet-1", "pet-2", "pet-3", "pet-4", "pet-5"], + enumNames: ["Cats", "Dogs", "Birds", "Fish", "Reptiles"], + default: "pet-1", + }, + }, + required: ["name"], + }, + }, + }, ElicitResultSchema, { timeout: 10 * 60 * 1000 /* 10 minutes */ }); + // Handle different response actions + const content = []; + if (elicitationResult.action === "accept" && + elicitationResult.content) { + content.push({ + type: "text", + text: `āœ… User provided the requested information!`, + }); + // Only access elicitationResult.content when action is 
accept
+                const userData = elicitationResult.content;
+                const lines = [];
+                if (userData.name)
+                    lines.push(`- Name: ${userData.name}`);
+                if (userData.check !== undefined)
+                    lines.push(`- Agreed to terms: ${userData.check}`);
+                if (userData.color)
+                    lines.push(`- Favorite Color: ${userData.color}`);
+                if (userData.email)
+                    lines.push(`- Email: ${userData.email}`);
+                if (userData.homepage)
+                    lines.push(`- Homepage: ${userData.homepage}`);
+                if (userData.birthdate)
+                    lines.push(`- Birthdate: ${userData.birthdate}`);
+                if (userData.integer !== undefined)
+                    lines.push(`- Favorite Integer: ${userData.integer}`);
+                if (userData.number !== undefined)
+                    lines.push(`- Favorite Number: ${userData.number}`);
+                if (userData.petType)
+                    lines.push(`- Pet Type: ${userData.petType}`);
+                content.push({
+                    type: "text",
+                    text: `User inputs:\n${lines.join("\n")}`,
+                });
+            }
+            else if (elicitationResult.action === "decline") {
+                content.push({
+                    type: "text",
+                    text: `āŒ User declined to provide the requested information.`,
+                });
+            }
+            else if (elicitationResult.action === "cancel") {
+                content.push({
+                    type: "text",
+                    text: `āš ļø User cancelled the elicitation dialog.`,
+                });
+            }
+            // Include raw result for debugging
+            content.push({
+                type: "text",
+                text: `\nRaw result: ${JSON.stringify(elicitationResult, null, 2)}`,
+            });
+            return { content };
+        });
+    }
+};
diff --git a/src/everything/tools/trigger-long-running-operation.js b/src/everything/tools/trigger-long-running-operation.js
new file mode 100644
index 0000000000..459b9d1754
--- /dev/null
+++ b/src/everything/tools/trigger-long-running-operation.js
@@ -0,0 +1,59 @@
+import { z } from "zod";
+// Tool input schema
+const TriggerLongRunningOperationSchema = z.object({
+    duration: z
+        .number()
+        .default(10)
+        .describe("Duration of the operation in seconds"),
+    steps: z.number().default(5).describe("Number of steps in the operation"),
+});
+// Tool configuration
+const name = "trigger-long-running-operation";
+const config = {
+    title: "Trigger Long Running Operation Tool",
+    description: "Demonstrates a long running operation with progress updates.",
+    inputSchema: TriggerLongRunningOperationSchema,
+};
+/**
+ * Registers the 'trigger-long-running-operation' tool.
+ *
+ * The registered tool starts a long-running operation defined by a specific duration and
+ * number of steps.
+ *
+ * Progress notifications are sent back to the client at each step if a `progressToken`
+ * is provided in the metadata.
+ *
+ * At the end of the operation, the tool returns a message indicating the completion of the
+ * operation, including the total duration and steps.
+ *
+ * @param {McpServer} server - The McpServer instance where the tool will be registered.
+ */
+export const registerTriggerLongRunningOperationTool = (server) => {
+    server.registerTool(name, config, async (args, extra) => {
+        const validatedArgs = TriggerLongRunningOperationSchema.parse(args);
+        const { duration, steps } = validatedArgs;
+        const stepDuration = duration / steps;
+        const progressToken = extra._meta?.progressToken;
+        for (let i = 1; i < steps + 1; i++) {
+            await new Promise((resolve) => setTimeout(resolve, stepDuration * 1000));
+            if (progressToken !== undefined) {
+                await server.server.notification({
+                    method: "notifications/progress",
+                    params: {
+                        progress: i,
+                        total: steps,
+                        progressToken,
+                    },
+                }, { relatedRequestId: extra.requestId });
+            }
+        }
+        return {
+            content: [
+                {
+                    type: "text",
+                    text: `Long running operation completed.
Duration: ${duration} seconds, Steps: ${steps}.`, + }, + ], + }; + }); +}; diff --git a/src/everything/tools/trigger-sampling-request-async.js b/src/everything/tools/trigger-sampling-request-async.js new file mode 100644 index 0000000000..be2cba5140 --- /dev/null +++ b/src/everything/tools/trigger-sampling-request-async.js @@ -0,0 +1,168 @@ +import { z } from "zod"; +// Tool input schema +const TriggerSamplingRequestAsyncSchema = z.object({ + prompt: z.string().describe("The prompt to send to the LLM"), + maxTokens: z + .number() + .default(100) + .describe("Maximum number of tokens to generate"), +}); +// Tool configuration +const name = "trigger-sampling-request-async"; +const config = { + title: "Trigger Async Sampling Request Tool", + description: "Trigger an async sampling request that the CLIENT executes as a background task. " + + "Demonstrates bidirectional MCP tasks where the server sends a request and the client " + + "executes it asynchronously, allowing the server to poll for progress and results.", + inputSchema: TriggerSamplingRequestAsyncSchema, +}; +// Poll interval in milliseconds +const POLL_INTERVAL = 1000; +// Maximum poll attempts before timeout +const MAX_POLL_ATTEMPTS = 60; +/** + * Registers the 'trigger-sampling-request-async' tool. + * + * This tool demonstrates bidirectional MCP tasks: + * - Server sends sampling request to client with task metadata + * - Client creates a task and returns CreateTaskResult + * - Server polls client's tasks/get endpoint for status + * - Server fetches final result from client's tasks/result endpoint + * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerTriggerSamplingRequestAsyncTool = (server) => { + // Check client capabilities + const clientCapabilities = server.server.getClientCapabilities() || {}; + // Client must support sampling AND tasks.requests.sampling + const clientSupportsSampling = clientCapabilities.sampling !== undefined; + const clientTasksCapability = clientCapabilities.tasks; + const clientSupportsAsyncSampling = clientTasksCapability?.requests?.sampling?.createMessage !== undefined; + if (clientSupportsSampling && clientSupportsAsyncSampling) { + server.registerTool(name, config, async (args, extra) => { + const validatedArgs = TriggerSamplingRequestAsyncSchema.parse(args); + const { prompt, maxTokens } = validatedArgs; + // Create the sampling request WITH task metadata + // The params.task field signals to the client that this should be executed as a task + const request = { + method: "sampling/createMessage", + params: { + task: { + ttl: 300000, // 5 minutes + }, + messages: [ + { + role: "user", + content: { + type: "text", + text: `Resource ${name} context: ${prompt}`, + }, + }, + ], + systemPrompt: "You are a helpful test server.", + maxTokens, + temperature: 0.7, + }, + }; + // Send the sampling request + // Client may return either: + // - CreateMessageResult (synchronous execution) + // - CreateTaskResult (task-based execution with { task } object) + const samplingResponse = await extra.sendRequest(request, z.union([ + // CreateTaskResult - client created a task + z.object({ + task: z.object({ + taskId: z.string(), + status: z.string(), + pollInterval: z.number().optional(), + statusMessage: z.string().optional(), + }), + }), + // CreateMessageResult - synchronous execution + z.object({ + role: z.string(), + content: z.any(), + model: z.string(), + stopReason: z.string().optional(), + }), + ])); + // Check if client returned 
CreateTaskResult (has task object) + const isTaskResult = "task" in samplingResponse && samplingResponse.task; + if (!isTaskResult) { + // Client executed synchronously - return the direct response + return { + content: [ + { + type: "text", + text: `[SYNC] Client executed synchronously:\n${JSON.stringify(samplingResponse, null, 2)}`, + }, + ], + }; + } + const taskId = samplingResponse.task.taskId; + const statusMessages = []; + statusMessages.push(`Task created: ${taskId}`); + // Poll for task completion + let attempts = 0; + let taskStatus = samplingResponse.task.status; + let taskStatusMessage; + while (taskStatus !== "completed" && + taskStatus !== "failed" && + taskStatus !== "cancelled" && + attempts < MAX_POLL_ATTEMPTS) { + // Wait before polling + await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL)); + attempts++; + // Get task status from client + const pollResult = await extra.sendRequest({ + method: "tasks/get", + params: { taskId }, + }, z + .object({ + status: z.string(), + statusMessage: z.string().optional(), + }) + .passthrough()); + taskStatus = pollResult.status; + taskStatusMessage = pollResult.statusMessage; + statusMessages.push(`Poll ${attempts}: ${taskStatus}${taskStatusMessage ? ` - ${taskStatusMessage}` : ""}`); + } + // Check for timeout + if (attempts >= MAX_POLL_ATTEMPTS) { + return { + content: [ + { + type: "text", + text: `[TIMEOUT] Task timed out after ${MAX_POLL_ATTEMPTS} poll attempts\n\nProgress:\n${statusMessages.join("\n")}`, + }, + ], + }; + } + // Check for failure/cancellation + if (taskStatus === "failed" || taskStatus === "cancelled") { + return { + content: [ + { + type: "text", + text: `[${taskStatus.toUpperCase()}] ${taskStatusMessage || "No message"}\n\nProgress:\n${statusMessages.join("\n")}`, + }, + ], + }; + } + // Fetch the final result + const result = await extra.sendRequest({ + method: "tasks/result", + params: { taskId }, + }, z.any()); + // Return the result with status history + return { + content: [ + { + type: "text", + text: `[COMPLETED] Async sampling completed!\n\n**Progress:**\n${statusMessages.join("\n")}\n\n**Result:**\n${JSON.stringify(result, null, 2)}`, + }, + ], + }; + }); + } +}; diff --git a/src/everything/tools/trigger-sampling-request.js b/src/everything/tools/trigger-sampling-request.js new file mode 100644 index 0000000000..7c2e684d0a --- /dev/null +++ b/src/everything/tools/trigger-sampling-request.js @@ -0,0 +1,71 @@ +import { CreateMessageResultSchema, } from "@modelcontextprotocol/sdk/types.js"; +import { z } from "zod"; +// Tool input schema +const TriggerSamplingRequestSchema = z.object({ + prompt: z.string().describe("The prompt to send to the LLM"), + maxTokens: z + .number() + .default(100) + .describe("Maximum number of tokens to generate"), +}); +// Tool configuration +const name = "trigger-sampling-request"; +const config = { + title: "Trigger Sampling Request Tool", + description: "Trigger a Request from the Server for LLM Sampling", + inputSchema: TriggerSamplingRequestSchema, +}; +/** + * Registers the 'trigger-sampling-request' tool. + * + * If the client does not support the sampling capability, the tool is not registered. + * + * The registered tool performs the following operations: + * - Validates incoming arguments using `TriggerSamplingRequestSchema`. + * - Constructs a `sampling/createMessage` request object using provided prompt and maximum tokens. + * - Sends the request to the server for sampling. + * - Formats and returns the sampling result content to the client. 
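+ *
+ * A conforming client answers `sampling/createMessage` with a `CreateMessageResult`
+ * (role, content, model, and an optional stopReason), which this tool echoes back as JSON.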
+ * + * @param {McpServer} server - The McpServer instance where the tool will be registered. + */ +export const registerTriggerSamplingRequestTool = (server) => { + // Does the client support sampling? + const clientCapabilities = server.server.getClientCapabilities() || {}; + const clientSupportsSampling = clientCapabilities.sampling !== undefined; + // If so, register tool + if (clientSupportsSampling) { + server.registerTool(name, config, async (args, extra) => { + const validatedArgs = TriggerSamplingRequestSchema.parse(args); + const { prompt, maxTokens } = validatedArgs; + // Create the sampling request + const request = { + method: "sampling/createMessage", + params: { + messages: [ + { + role: "user", + content: { + type: "text", + text: `Resource ${name} context: ${prompt}`, + }, + }, + ], + systemPrompt: "You are a helpful test server.", + maxTokens, + temperature: 0.7, + }, + }; + // Send the sampling request to the client + const result = await extra.sendRequest(request, CreateMessageResultSchema); + // Return the result to the client + return { + content: [ + { + type: "text", + text: `LLM sampling result: \n${JSON.stringify(result, null, 2)}`, + }, + ], + }; + }); + } +}; diff --git a/src/everything/transports/sse.js b/src/everything/transports/sse.js new file mode 100644 index 0000000000..7443210130 --- /dev/null +++ b/src/everything/transports/sse.js @@ -0,0 +1,61 @@ +import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js"; +import express from "express"; +import { createServer } from "../server/index.js"; +import cors from "cors"; +console.error("Starting SSE server..."); +// Express app with permissive CORS for testing with Inspector direct connect mode +const app = express(); +app.use(cors({ + origin: "*", // use "*" with caution in production + methods: "GET,POST", + preflightContinue: false, + optionsSuccessStatus: 204, +})); +// Map sessionId to transport for each client +const transports = new Map(); +// Handle GET requests for new SSE streams +app.get("/sse", async (req, res) => { + let transport; + const { server, cleanup } = createServer(); + // Session Id should not exist for GET /sse requests + if (req?.query?.sessionId) { + const sessionId = req?.query?.sessionId; + transport = transports.get(sessionId); + console.error("Client Reconnecting? 
This shouldn't happen; when client has a sessionId, GET /sse should not be called again.", transport?.sessionId);
+    }
+    else {
+        // Create and store transport for the new session
+        transport = new SSEServerTransport("/message", res);
+        transports.set(transport.sessionId, transport);
+        // Connect server to transport
+        await server.connect(transport);
+        const sessionId = transport.sessionId;
+        console.error("Client Connected: ", sessionId);
+        // Handle close of connection
+        server.server.onclose = async () => {
+            const sessionId = transport.sessionId;
+            console.error("Client Disconnected: ", sessionId);
+            transports.delete(sessionId);
+            cleanup(sessionId);
+        };
+    }
+});
+// Handle POST requests for client messages
+app.post("/message", async (req, res) => {
+    // Session Id should exist for POST /message requests
+    const sessionId = req?.query?.sessionId;
+    // Get the transport for this session and use it to handle the request
+    const transport = transports.get(sessionId);
+    if (transport) {
+        console.error("Client Message from", sessionId);
+        await transport.handlePostMessage(req, res);
+    }
+    else {
+        console.error(`No transport found for sessionId ${sessionId}`);
+        // Respond so the client request does not hang with no transport
+        res.status(404).send(`No transport found for sessionId ${sessionId}`);
+    }
+});
+// Start the express server
+const PORT = process.env.PORT || 3001;
+app.listen(PORT, () => {
+    console.error(`Server is running on port ${PORT}`);
+});
diff --git a/src/everything/transports/stdio.js b/src/everything/transports/stdio.js
new file mode 100644
index 0000000000..38a01223fe
--- /dev/null
+++ b/src/everything/transports/stdio.js
@@ -0,0 +1,27 @@
+#!/usr/bin/env node
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { createServer } from "../server/index.js";
+console.error("Starting default (STDIO) server...");
+/**
+ * The main method:
+ * - Initializes the StdioServerTransport and sets up the server.
+ * - Handles cleanup on process exit.
+ *
+ * @return {Promise} A promise that resolves when the main function has executed and the process exits.
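+ *
+ * @example
+ * // Run directly for local testing (stdio carries the protocol, so logs go to stderr):
+ * //   node src/everything/transports/stdio.js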
+ */ +async function main() { + const transport = new StdioServerTransport(); + const { server, cleanup } = createServer(); + // Connect transport to server + await server.connect(transport); + // Cleanup on exit + process.on("SIGINT", async () => { + await server.close(); + cleanup(); + process.exit(0); + }); +} +main().catch((error) => { + console.error("Server error:", error); + process.exit(1); +}); diff --git a/src/everything/transports/streamableHttp.js b/src/everything/transports/streamableHttp.js new file mode 100644 index 0000000000..fbec4f250c --- /dev/null +++ b/src/everything/transports/streamableHttp.js @@ -0,0 +1,206 @@ +import { StreamableHTTPServerTransport, } from "@modelcontextprotocol/sdk/server/streamableHttp.js"; +import express from "express"; +import { createServer } from "../server/index.js"; +import { randomUUID } from "node:crypto"; +import cors from "cors"; +// Simple in-memory event store for SSE resumability +class InMemoryEventStore { + events = new Map(); + async storeEvent(streamId, message) { + const eventId = randomUUID(); + this.events.set(eventId, { streamId, message }); + return eventId; + } + async replayEventsAfter(lastEventId, { send }) { + const entries = Array.from(this.events.entries()); + const startIndex = entries.findIndex(([id]) => id === lastEventId); + if (startIndex === -1) + return lastEventId; + let lastId = lastEventId; + for (let i = startIndex + 1; i < entries.length; i++) { + const [eventId, { message }] = entries[i]; + await send(eventId, message); + lastId = eventId; + } + return lastId; + } +} +console.log("Starting Streamable HTTP server..."); +// Express app with permissive CORS for testing with Inspector direct connect mode +const app = express(); +app.use(cors({ + origin: "*", // use "*" with caution in production + methods: "GET,POST,DELETE", + preflightContinue: false, + optionsSuccessStatus: 204, + exposedHeaders: ["mcp-session-id", "last-event-id", "mcp-protocol-version"], +})); +// Map sessionId to server transport for each client +const transports = new Map(); +// Handle POST requests for client messages +app.post("/mcp", async (req, res) => { + console.log("Received MCP POST request"); + try { + // Check for existing session ID + const sessionId = req.headers["mcp-session-id"]; + let transport; + if (sessionId && transports.has(sessionId)) { + // Reuse existing transport + transport = transports.get(sessionId); + } + else if (!sessionId) { + const { server, cleanup } = createServer(); + // New initialization request + const eventStore = new InMemoryEventStore(); + transport = new StreamableHTTPServerTransport({ + sessionIdGenerator: () => randomUUID(), + eventStore, // Enable resumability + onsessioninitialized: (sessionId) => { + // Store the transport by session ID when a session is initialized + // This avoids race conditions where requests might come in before the session is stored + console.log(`Session initialized with ID: ${sessionId}`); + transports.set(sessionId, transport); + }, + }); + // Set up onclose handler to clean up transport when closed + server.server.onclose = async () => { + const sid = transport.sessionId; + if (sid && transports.has(sid)) { + console.log(`Transport closed for session ${sid}, removing from transports map`); + transports.delete(sid); + cleanup(sid); + } + }; + // Connect the transport to the MCP server BEFORE handling the request + // so responses can flow back through the same transport + await server.connect(transport); + await transport.handleRequest(req, res); + return; + } + 
else { + // Invalid request - a session ID was provided but no matching transport exists + res.status(400).json({ + jsonrpc: "2.0", + error: { + code: -32000, + message: "Bad Request: No valid session ID provided", + }, + id: req?.body?.id, + }); + return; + } + // Handle the request with existing transport - no need to reconnect + // The existing transport is already connected to the server + await transport.handleRequest(req, res); + } + catch (error) { + console.error("Error handling MCP request:", error); + if (!res.headersSent) { + res.status(500).json({ + jsonrpc: "2.0", + error: { + code: -32603, + message: "Internal server error", + }, + id: req?.body?.id, + }); + return; + } + } +}); +// Handle GET requests for SSE streams +app.get("/mcp", async (req, res) => { + console.log("Received MCP GET request"); + const sessionId = req.headers["mcp-session-id"]; + if (!sessionId || !transports.has(sessionId)) { + res.status(400).json({ + jsonrpc: "2.0", + error: { + code: -32000, + message: "Bad Request: No valid session ID provided", + }, + id: req?.body?.id, + }); + return; + } + // Check for Last-Event-ID header for resumability + const lastEventId = req.headers["last-event-id"]; + if (lastEventId) { + console.log(`Client reconnecting with Last-Event-ID: ${lastEventId}`); + } + else { + console.log(`Establishing new SSE stream for session ${sessionId}`); + } + const transport = transports.get(sessionId); + await transport.handleRequest(req, res); +}); +// Handle DELETE requests for session termination +app.delete("/mcp", async (req, res) => { + const sessionId = req.headers["mcp-session-id"]; + if (!sessionId || !transports.has(sessionId)) { + res.status(400).json({ + jsonrpc: "2.0", + error: { + code: -32000, + message: "Bad Request: No valid session ID provided", + }, + id: req?.body?.id, + }); + return; + } + console.log(`Received session termination request for session ${sessionId}`); + try { + const transport = transports.get(sessionId); + await transport.handleRequest(req, res); + } + catch (error) { + console.error("Error handling session termination:", error); + if (!res.headersSent) { + res.status(500).json({ + jsonrpc: "2.0", + error: { + code: -32603, + message: "Error handling session termination", + }, + id: req?.body?.id, + }); + return; + } + } +}); +// Start the server +const PORT = process.env.PORT || 3001; +const server = app.listen(PORT, () => { + console.error(`MCP Streamable HTTP Server listening on port ${PORT}`); +}); +// Handle server errors +server.on("error", (err) => { + const code = typeof err === "object" && err !== null && "code" in err + ? err.code + : undefined; + if (code === "EADDRINUSE") { + console.error(`Failed to start: Port ${PORT} is already in use. 
Set PORT to a free port or stop the conflicting process.`); + } + else { + console.error("HTTP server encountered an error while starting:", err); + } + // Ensure a non-zero exit so npm reports the failure instead of silently exiting + process.exit(1); +}); +// Handle server shutdown +process.on("SIGINT", async () => { + console.log("Shutting down server..."); + // Close all active transports to properly clean up resources + // (transports is a Map, so iterate its keys; for...in over a Map visits nothing) + for (const sessionId of Array.from(transports.keys())) { + try { + console.log(`Closing transport for session ${sessionId}`); + await transports.get(sessionId).close(); + transports.delete(sessionId); + } + catch (error) { + console.error(`Error closing transport for session ${sessionId}:`, error); + } + } + console.log("Server shutdown complete"); + process.exit(0); +}); diff --git a/src/filesystem/__tests__/directory-tree.test.js b/src/filesystem/__tests__/directory-tree.test.js new file mode 100644 index 0000000000..cd94bf376c --- /dev/null +++ b/src/filesystem/__tests__/directory-tree.test.js @@ -0,0 +1,114 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as fs from 'fs/promises'; +import * as path from 'path'; +import * as os from 'os'; +// We need to test the buildTree function, but it's defined inside the request handler +// So we'll extract the core logic into a testable function +import { minimatch } from 'minimatch'; +async function buildTreeForTesting(currentPath, rootPath, excludePatterns = []) { + const entries = await fs.readdir(currentPath, { withFileTypes: true }); + const result = []; + for (const entry of entries) { + const relativePath = path.relative(rootPath, path.join(currentPath, entry.name)); + const shouldExclude = excludePatterns.some(pattern => { + if (pattern.includes('*')) { + return minimatch(relativePath, pattern, { dot: true }); + } + // For files: match exact name or as part of path + // For directories: match as directory path + return minimatch(relativePath, pattern, { dot: true }) || + minimatch(relativePath, `**/${pattern}`, { dot: true }) || + minimatch(relativePath, `**/${pattern}/**`, { dot: true }); + }); + if (shouldExclude) + continue; + const entryData = { + name: entry.name, + type: entry.isDirectory() ? 
'directory' : 'file' + }; + if (entry.isDirectory()) { + const subPath = path.join(currentPath, entry.name); + entryData.children = await buildTreeForTesting(subPath, rootPath, excludePatterns); + } + result.push(entryData); + } + return result; +} +describe('buildTree exclude patterns', () => { + let testDir; + beforeEach(async () => { + testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'filesystem-test-')); + // Create test directory structure + await fs.mkdir(path.join(testDir, 'src')); + await fs.mkdir(path.join(testDir, 'node_modules')); + await fs.mkdir(path.join(testDir, '.git')); + await fs.mkdir(path.join(testDir, 'nested', 'node_modules'), { recursive: true }); + // Create test files + await fs.writeFile(path.join(testDir, '.env'), 'SECRET=value'); + await fs.writeFile(path.join(testDir, '.env.local'), 'LOCAL_SECRET=value'); + await fs.writeFile(path.join(testDir, 'src', 'index.js'), 'console.log("hello");'); + await fs.writeFile(path.join(testDir, 'package.json'), '{}'); + await fs.writeFile(path.join(testDir, 'node_modules', 'module.js'), 'module.exports = {};'); + await fs.writeFile(path.join(testDir, 'nested', 'node_modules', 'deep.js'), 'module.exports = {};'); + }); + afterEach(async () => { + await fs.rm(testDir, { recursive: true, force: true }); + }); + it('should exclude files matching simple patterns', async () => { + // Test the current implementation - this will fail until the bug is fixed + const tree = await buildTreeForTesting(testDir, testDir, ['.env']); + const fileNames = tree.map(entry => entry.name); + expect(fileNames).not.toContain('.env'); + expect(fileNames).toContain('.env.local'); // Should not exclude this + expect(fileNames).toContain('src'); + expect(fileNames).toContain('package.json'); + }); + it('should exclude directories matching simple patterns', async () => { + const tree = await buildTreeForTesting(testDir, testDir, ['node_modules']); + const dirNames = tree.map(entry => entry.name); + expect(dirNames).not.toContain('node_modules'); + expect(dirNames).toContain('src'); + expect(dirNames).toContain('.git'); + }); + it('should exclude nested directories with same pattern', async () => { + const tree = await buildTreeForTesting(testDir, testDir, ['node_modules']); + // Find the nested directory + const nestedDir = tree.find(entry => entry.name === 'nested'); + expect(nestedDir).toBeDefined(); + expect(nestedDir.children).toBeDefined(); + // The nested/node_modules should also be excluded + const nestedChildren = nestedDir.children.map(child => child.name); + expect(nestedChildren).not.toContain('node_modules'); + }); + it('should handle glob patterns correctly', async () => { + const tree = await buildTreeForTesting(testDir, testDir, ['*.env']); + const fileNames = tree.map(entry => entry.name); + expect(fileNames).not.toContain('.env'); + expect(fileNames).toContain('.env.local'); // *.env should not match .env.local + expect(fileNames).toContain('src'); + }); + it('should handle dot files correctly', async () => { + const tree = await buildTreeForTesting(testDir, testDir, ['.git']); + const dirNames = tree.map(entry => entry.name); + expect(dirNames).not.toContain('.git'); + expect(dirNames).toContain('.env'); // Should not exclude this + }); + it('should work with multiple exclude patterns', async () => { + const tree = await buildTreeForTesting(testDir, testDir, ['node_modules', '.env', '.git']); + const entryNames = tree.map(entry => entry.name); + expect(entryNames).not.toContain('node_modules'); + expect(entryNames).not.toContain('.env'); 
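+ // Illustrative extra assertion: '.env.local' matches none of these exact-name patterns, so it should survive (mirrors the single-pattern case above). + expect(entryNames).toContain('.env.local');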
+ expect(entryNames).not.toContain('.git'); + expect(entryNames).toContain('src'); + expect(entryNames).toContain('package.json'); + }); + it('should handle empty exclude patterns', async () => { + const tree = await buildTreeForTesting(testDir, testDir, []); + const entryNames = tree.map(entry => entry.name); + // All entries should be included + expect(entryNames).toContain('node_modules'); + expect(entryNames).toContain('.env'); + expect(entryNames).toContain('.git'); + expect(entryNames).toContain('src'); + }); +}); diff --git a/src/filesystem/__tests__/lib.test.js b/src/filesystem/__tests__/lib.test.js new file mode 100644 index 0000000000..7d4c76464a --- /dev/null +++ b/src/filesystem/__tests__/lib.test.js @@ -0,0 +1,489 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; +import fs from 'fs/promises'; +import path from 'path'; +import { +// Pure utility functions +formatSize, normalizeLineEndings, createUnifiedDiff, +// Security & validation functions +validatePath, setAllowedDirectories, +// File operations +getFileStats, readFileContent, writeFileContent, +// Search & filtering functions +searchFilesWithValidation, +// File editing functions +applyFileEdits, tailFile, headFile } from '../lib.js'; +// Mock fs module +vi.mock('fs/promises'); +const mockFs = fs; +describe('Lib Functions', () => { + beforeEach(() => { + vi.clearAllMocks(); + // Set up allowed directories for tests + const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp', 'C:\\allowed'] : ['/home/user', '/tmp', '/allowed']; + setAllowedDirectories(allowedDirs); + }); + afterEach(() => { + vi.restoreAllMocks(); + // Clear allowed directories after tests + setAllowedDirectories([]); + }); + describe('Pure Utility Functions', () => { + describe('formatSize', () => { + it('formats bytes correctly', () => { + expect(formatSize(0)).toBe('0 B'); + expect(formatSize(512)).toBe('512 B'); + expect(formatSize(1024)).toBe('1.00 KB'); + expect(formatSize(1536)).toBe('1.50 KB'); + expect(formatSize(1048576)).toBe('1.00 MB'); + expect(formatSize(1073741824)).toBe('1.00 GB'); + expect(formatSize(1099511627776)).toBe('1.00 TB'); + }); + it('handles edge cases', () => { + expect(formatSize(1023)).toBe('1023 B'); + expect(formatSize(1025)).toBe('1.00 KB'); + expect(formatSize(1048575)).toBe('1024.00 KB'); + }); + it('handles very large numbers beyond TB', () => { + // The function only supports up to TB, so very large numbers will show as TB + expect(formatSize(1024 * 1024 * 1024 * 1024 * 1024)).toBe('1024.00 TB'); + expect(formatSize(Number.MAX_SAFE_INTEGER)).toContain('TB'); + }); + it('handles negative numbers', () => { + // Negative numbers will result in NaN for the log calculation + expect(formatSize(-1024)).toContain('NaN'); + expect(formatSize(-0)).toBe('0 B'); + }); + it('handles decimal numbers', () => { + expect(formatSize(1536.5)).toBe('1.50 KB'); + expect(formatSize(1023.9)).toBe('1023.9 B'); + }); + it('handles very small positive numbers', () => { + expect(formatSize(1)).toBe('1 B'); + expect(formatSize(0.5)).toBe('0.5 B'); + expect(formatSize(0.1)).toBe('0.1 B'); + }); + }); + describe('normalizeLineEndings', () => { + it('converts CRLF to LF', () => { + expect(normalizeLineEndings('line1\r\nline2\r\nline3')).toBe('line1\nline2\nline3'); + }); + it('leaves LF unchanged', () => { + expect(normalizeLineEndings('line1\nline2\nline3')).toBe('line1\nline2\nline3'); + }); + it('handles mixed line endings', () => { + 
expect(normalizeLineEndings('line1\r\nline2\nline3\r\n')).toBe('line1\nline2\nline3\n'); + }); + it('handles empty string', () => { + expect(normalizeLineEndings('')).toBe(''); + }); + }); + describe('createUnifiedDiff', () => { + it('creates diff for simple changes', () => { + const original = 'line1\nline2\nline3'; + const modified = 'line1\nmodified line2\nline3'; + const diff = createUnifiedDiff(original, modified, 'test.txt'); + expect(diff).toContain('--- test.txt'); + expect(diff).toContain('+++ test.txt'); + expect(diff).toContain('-line2'); + expect(diff).toContain('+modified line2'); + }); + it('handles CRLF normalization', () => { + const original = 'line1\r\nline2\r\n'; + const modified = 'line1\nmodified line2\n'; + const diff = createUnifiedDiff(original, modified); + expect(diff).toContain('-line2'); + expect(diff).toContain('+modified line2'); + }); + it('handles identical content', () => { + const content = 'line1\nline2\nline3'; + const diff = createUnifiedDiff(content, content); + // Should not contain any +/- lines for identical content (excluding header lines) + expect(diff.split('\n').filter((line) => line.startsWith('+++') || line.startsWith('---'))).toHaveLength(2); + expect(diff.split('\n').filter((line) => line.startsWith('+') && !line.startsWith('+++'))).toHaveLength(0); + expect(diff.split('\n').filter((line) => line.startsWith('-') && !line.startsWith('---'))).toHaveLength(0); + }); + it('handles empty content', () => { + const diff = createUnifiedDiff('', ''); + expect(diff).toContain('--- file'); + expect(diff).toContain('+++ file'); + }); + it('handles default filename parameter', () => { + const diff = createUnifiedDiff('old', 'new'); + expect(diff).toContain('--- file'); + expect(diff).toContain('+++ file'); + }); + it('handles custom filename', () => { + const diff = createUnifiedDiff('old', 'new', 'custom.txt'); + expect(diff).toContain('--- custom.txt'); + expect(diff).toContain('+++ custom.txt'); + }); + }); + }); + describe('Security & Validation Functions', () => { + describe('validatePath', () => { + // Use Windows-compatible paths for testing + const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp'] : ['/home/user', '/tmp']; + beforeEach(() => { + mockFs.realpath.mockImplementation(async (path) => path.toString()); + }); + it('validates allowed paths', async () => { + const testPath = process.platform === 'win32' ? 'C:\\Users\\test\\file.txt' : '/home/user/file.txt'; + const result = await validatePath(testPath); + expect(result).toBe(testPath); + }); + it('rejects disallowed paths', async () => { + const testPath = process.platform === 'win32' ? 'C:\\Windows\\System32\\file.txt' : '/etc/passwd'; + await expect(validatePath(testPath)) + .rejects.toThrow('Access denied - path outside allowed directories'); + }); + it('handles non-existent files by checking parent directory', async () => { + const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\newfile.txt' : '/home/user/newfile.txt'; + const parentPath = process.platform === 'win32' ? 
'C:\\Users\\test' : '/home/user'; + // Create an error with the ENOENT code that the implementation checks for + const enoentError = new Error('ENOENT'); + enoentError.code = 'ENOENT'; + mockFs.realpath + .mockRejectedValueOnce(enoentError) + .mockResolvedValueOnce(parentPath); + const result = await validatePath(newFilePath); + expect(result).toBe(path.resolve(newFilePath)); + }); + it('rejects when parent directory does not exist', async () => { + const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\nonexistent\\newfile.txt' : '/home/user/nonexistent/newfile.txt'; + // Create errors with the ENOENT code + const enoentError1 = new Error('ENOENT'); + enoentError1.code = 'ENOENT'; + const enoentError2 = new Error('ENOENT'); + enoentError2.code = 'ENOENT'; + mockFs.realpath + .mockRejectedValueOnce(enoentError1) + .mockRejectedValueOnce(enoentError2); + await expect(validatePath(newFilePath)) + .rejects.toThrow('Parent directory does not exist'); + }); + }); + }); + describe('File Operations', () => { + describe('getFileStats', () => { + it('returns file statistics', async () => { + const mockStats = { + size: 1024, + birthtime: new Date('2023-01-01'), + mtime: new Date('2023-01-02'), + atime: new Date('2023-01-03'), + isDirectory: () => false, + isFile: () => true, + mode: 0o644 + }; + mockFs.stat.mockResolvedValueOnce(mockStats); + const result = await getFileStats('/test/file.txt'); + expect(result).toEqual({ + size: 1024, + created: new Date('2023-01-01'), + modified: new Date('2023-01-02'), + accessed: new Date('2023-01-03'), + isDirectory: false, + isFile: true, + permissions: '644' + }); + }); + it('handles directory statistics', async () => { + const mockStats = { + size: 4096, + birthtime: new Date('2023-01-01'), + mtime: new Date('2023-01-02'), + atime: new Date('2023-01-03'), + isDirectory: () => true, + isFile: () => false, + mode: 0o755 + }; + mockFs.stat.mockResolvedValueOnce(mockStats); + const result = await getFileStats('/test/dir'); + expect(result.isDirectory).toBe(true); + expect(result.isFile).toBe(false); + expect(result.permissions).toBe('755'); + }); + }); + describe('readFileContent', () => { + it('reads file with default encoding', async () => { + mockFs.readFile.mockResolvedValueOnce('file content'); + const result = await readFileContent('/test/file.txt'); + expect(result).toBe('file content'); + expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8'); + }); + it('reads file with custom encoding', async () => { + mockFs.readFile.mockResolvedValueOnce('file content'); + const result = await readFileContent('/test/file.txt', 'ascii'); + expect(result).toBe('file content'); + expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'ascii'); + }); + }); + describe('writeFileContent', () => { + it('writes file content', async () => { + mockFs.writeFile.mockResolvedValueOnce(undefined); + await writeFileContent('/test/file.txt', 'new content'); + expect(mockFs.writeFile).toHaveBeenCalledWith('/test/file.txt', 'new content', { encoding: "utf-8", flag: 'wx' }); + }); + }); + }); + describe('Search & Filtering Functions', () => { + describe('searchFilesWithValidation', () => { + beforeEach(() => { + mockFs.realpath.mockImplementation(async (path) => path.toString()); + }); + it('excludes files matching exclude patterns', async () => { + const mockEntries = [ + { name: 'test.txt', isDirectory: () => false }, + { name: 'test.log', isDirectory: () => false }, + { name: 'node_modules', isDirectory: () => true } + ]; + 
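+ // Minimal Dirent-like stubs: the search implementation is only expected to read 'name' and 'isDirectory()' from each entry, so nothing else is mocked here.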
mockFs.readdir.mockResolvedValueOnce(mockEntries); + const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir'; + const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed']; + // Mock realpath to return the same path for validation to pass + mockFs.realpath.mockImplementation(async (inputPath) => { + const pathStr = inputPath.toString(); + // Return the path as-is for validation + return pathStr; + }); + const result = await searchFilesWithValidation(testDir, '*test*', allowedDirs, { excludePatterns: ['*.log', 'node_modules'] }); + const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt'; + expect(result).toEqual([expectedResult]); + }); + it('handles validation errors during search', async () => { + const mockEntries = [ + { name: 'test.txt', isDirectory: () => false }, + { name: 'invalid_file.txt', isDirectory: () => false } + ]; + mockFs.readdir.mockResolvedValueOnce(mockEntries); + // Mock validatePath to throw error for invalid_file.txt + mockFs.realpath.mockImplementation(async (path) => { + if (path.toString().includes('invalid_file.txt')) { + throw new Error('Access denied'); + } + return path.toString(); + }); + const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir'; + const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed']; + const result = await searchFilesWithValidation(testDir, '*test*', allowedDirs, {}); + // Should only return the valid file, skipping the invalid one + const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt'; + expect(result).toEqual([expectedResult]); + }); + it('handles complex exclude patterns with wildcards', async () => { + const mockEntries = [ + { name: 'test.txt', isDirectory: () => false }, + { name: 'test.backup', isDirectory: () => false }, + { name: 'important_test.js', isDirectory: () => false } + ]; + mockFs.readdir.mockResolvedValueOnce(mockEntries); + const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir'; + const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed']; + const result = await searchFilesWithValidation(testDir, '*test*', allowedDirs, { excludePatterns: ['*.backup'] }); + const expectedResults = process.platform === 'win32' ? 
[ + 'C:\\allowed\\dir\\test.txt', + 'C:\\allowed\\dir\\important_test.js' + ] : [ + '/allowed/dir/test.txt', + '/allowed/dir/important_test.js' + ]; + expect(result).toEqual(expectedResults); + }); + }); + }); + describe('File Editing Functions', () => { + describe('applyFileEdits', () => { + beforeEach(() => { + mockFs.readFile.mockResolvedValue('line1\nline2\nline3\n'); + mockFs.writeFile.mockResolvedValue(undefined); + }); + it('applies simple text replacement', async () => { + const edits = [ + { oldText: 'line2', newText: 'modified line2' } + ]; + mockFs.rename.mockResolvedValueOnce(undefined); + const result = await applyFileEdits('/test/file.txt', edits, false); + expect(result).toContain('modified line2'); + // Should write to temporary file then rename + expect(mockFs.writeFile).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), 'line1\nmodified line2\nline3\n', 'utf-8'); + expect(mockFs.rename).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), '/test/file.txt'); + }); + it('handles dry run mode', async () => { + const edits = [ + { oldText: 'line2', newText: 'modified line2' } + ]; + const result = await applyFileEdits('/test/file.txt', edits, true); + expect(result).toContain('modified line2'); + expect(mockFs.writeFile).not.toHaveBeenCalled(); + }); + it('applies multiple edits sequentially', async () => { + const edits = [ + { oldText: 'line1', newText: 'first line' }, + { oldText: 'line3', newText: 'third line' } + ]; + mockFs.rename.mockResolvedValueOnce(undefined); + await applyFileEdits('/test/file.txt', edits, false); + expect(mockFs.writeFile).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), 'first line\nline2\nthird line\n', 'utf-8'); + expect(mockFs.rename).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), '/test/file.txt'); + }); + it('handles whitespace-flexible matching', async () => { + mockFs.readFile.mockResolvedValue(' line1\n line2\n line3\n'); + const edits = [ + { oldText: 'line2', newText: 'modified line2' } + ]; + mockFs.rename.mockResolvedValueOnce(undefined); + await applyFileEdits('/test/file.txt', edits, false); + expect(mockFs.writeFile).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), ' line1\n modified line2\n line3\n', 'utf-8'); + expect(mockFs.rename).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), '/test/file.txt'); + }); + it('throws error for non-matching edits', async () => { + const edits = [ + { oldText: 'nonexistent line', newText: 'replacement' } + ]; + await expect(applyFileEdits('/test/file.txt', edits, false)) + .rejects.toThrow('Could not find exact match for edit'); + }); + it('handles complex multi-line edits with indentation', async () => { + mockFs.readFile.mockResolvedValue('function test() {\n console.log("hello");\n return true;\n}'); + const edits = [ + { + oldText: ' console.log("hello");\n return true;', + newText: ' console.log("world");\n console.log("test");\n return false;' + } + ]; + mockFs.rename.mockResolvedValueOnce(undefined); + await applyFileEdits('/test/file.js', edits, false); + expect(mockFs.writeFile).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/), 'function test() {\n console.log("world");\n console.log("test");\n return false;\n}', 'utf-8'); + expect(mockFs.rename).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/), '/test/file.js'); + }); + it('handles 
edits with different indentation patterns', async () => { + mockFs.readFile.mockResolvedValue(' if (condition) {\n doSomething();\n }'); + const edits = [ + { + oldText: 'doSomething();', + newText: 'doSomethingElse();\n doAnotherThing();' + } + ]; + mockFs.rename.mockResolvedValueOnce(undefined); + await applyFileEdits('/test/file.js', edits, false); + expect(mockFs.writeFile).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/), ' if (condition) {\n doSomethingElse();\n doAnotherThing();\n }', 'utf-8'); + expect(mockFs.rename).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/), '/test/file.js'); + }); + it('handles CRLF line endings in file content', async () => { + mockFs.readFile.mockResolvedValue('line1\r\nline2\r\nline3\r\n'); + const edits = [ + { oldText: 'line2', newText: 'modified line2' } + ]; + mockFs.rename.mockResolvedValueOnce(undefined); + await applyFileEdits('/test/file.txt', edits, false); + expect(mockFs.writeFile).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), 'line1\nmodified line2\nline3\n', 'utf-8'); + expect(mockFs.rename).toHaveBeenCalledWith(expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/), '/test/file.txt'); + }); + }); + describe('tailFile', () => { + it('handles empty files', async () => { + mockFs.stat.mockResolvedValue({ size: 0 }); + const result = await tailFile('/test/empty.txt', 5); + expect(result).toBe(''); + expect(mockFs.open).not.toHaveBeenCalled(); + }); + it('calls stat to check file size', async () => { + mockFs.stat.mockResolvedValue({ size: 100 }); + // Mock file handle with proper typing + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + mockFileHandle.read.mockResolvedValue({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + await tailFile('/test/file.txt', 2); + expect(mockFs.stat).toHaveBeenCalledWith('/test/file.txt'); + expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r'); + }); + it('handles files with content and returns last lines', async () => { + mockFs.stat.mockResolvedValue({ size: 50 }); + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + // Simulate reading file content in chunks + mockFileHandle.read + .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line3\nline4\nline5\n') }) + .mockResolvedValueOnce({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + const result = await tailFile('/test/file.txt', 2); + expect(mockFileHandle.close).toHaveBeenCalled(); + }); + it('handles read errors gracefully', async () => { + mockFs.stat.mockResolvedValue({ size: 100 }); + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + mockFileHandle.read.mockResolvedValue({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + await tailFile('/test/file.txt', 5); + expect(mockFileHandle.close).toHaveBeenCalled(); + }); + }); + describe('headFile', () => { + it('opens file for reading', async () => { + // Mock file handle with proper typing + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + mockFileHandle.read.mockResolvedValue({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + await headFile('/test/file.txt', 2); + expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r'); + }); + 
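+ // Rough sketch (assumed, for orientation only) of the chunked head-read loop these mocks simulate; the real implementation lives in ../lib.js: + // const handle = await fs.open(filePath, 'r'); + // try { + // let text = ''; + // const buf = Buffer.alloc(1024); + // while (true) { + // const { bytesRead } = await handle.read(buf, 0, buf.length); + // if (bytesRead === 0) break; // EOF + // text += buf.toString('utf-8', 0, bytesRead); + // const lines = text.split('\n'); + // if (lines.length > numLines) return lines.slice(0, numLines).join('\n'); + // } + // return text; + // } finally { + // await handle.close(); // always closed - the behaviour every test below asserts + // }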
it('handles files with content and returns first lines', async () => { + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + // Simulate reading file content with newlines + mockFileHandle.read + .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line1\nline2\nline3\n') }) + .mockResolvedValueOnce({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + const result = await headFile('/test/file.txt', 2); + expect(mockFileHandle.close).toHaveBeenCalled(); + }); + it('handles files with leftover content', async () => { + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + // Simulate reading file content without final newline + mockFileHandle.read + .mockResolvedValueOnce({ bytesRead: 15, buffer: Buffer.from('line1\nline2\nend') }) + .mockResolvedValueOnce({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + const result = await headFile('/test/file.txt', 5); + expect(mockFileHandle.close).toHaveBeenCalled(); + }); + it('handles reaching requested line count', async () => { + const mockFileHandle = { + read: vi.fn(), + close: vi.fn() + }; + // Simulate reading exactly the requested number of lines + mockFileHandle.read + .mockResolvedValueOnce({ bytesRead: 12, buffer: Buffer.from('line1\nline2\n') }) + .mockResolvedValueOnce({ bytesRead: 0 }); + mockFileHandle.close.mockResolvedValue(undefined); + mockFs.open.mockResolvedValue(mockFileHandle); + const result = await headFile('/test/file.txt', 2); + expect(mockFileHandle.close).toHaveBeenCalled(); + }); + }); + }); +}); diff --git a/src/filesystem/__tests__/path-utils.test.js b/src/filesystem/__tests__/path-utils.test.js new file mode 100644 index 0000000000..0ce0140514 --- /dev/null +++ b/src/filesystem/__tests__/path-utils.test.js @@ -0,0 +1,324 @@ +import { describe, it, expect, afterEach } from 'vitest'; +import { normalizePath, expandHome, convertToWindowsPath } from '../path-utils.js'; +describe('Path Utilities', () => { + describe('convertToWindowsPath', () => { + it('leaves Unix paths unchanged', () => { + expect(convertToWindowsPath('/usr/local/bin')) + .toBe('/usr/local/bin'); + expect(convertToWindowsPath('/home/user/some path')) + .toBe('/home/user/some path'); + }); + it('never converts WSL paths (they work correctly in WSL with Node.js fs)', () => { + // WSL paths should NEVER be converted, regardless of platform + // They are valid Linux paths that work with Node.js fs operations inside WSL + expect(convertToWindowsPath('/mnt/c/NS/MyKindleContent')) + .toBe('/mnt/c/NS/MyKindleContent'); + expect(convertToWindowsPath('/mnt/d/Documents')) + .toBe('/mnt/d/Documents'); + }); + it('converts Unix-style Windows paths only on Windows platform', () => { + // On Windows, /c/ style paths should be converted + if (process.platform === 'win32') { + expect(convertToWindowsPath('/c/NS/MyKindleContent')) + .toBe('C:\\NS\\MyKindleContent'); + } + else { + // On Linux, leave them unchanged + expect(convertToWindowsPath('/c/NS/MyKindleContent')) + .toBe('/c/NS/MyKindleContent'); + } + }); + it('leaves Windows paths unchanged but ensures backslashes', () => { + expect(convertToWindowsPath('C:\\NS\\MyKindleContent')) + .toBe('C:\\NS\\MyKindleContent'); + expect(convertToWindowsPath('C:/NS/MyKindleContent')) + .toBe('C:\\NS\\MyKindleContent'); + }); + it('handles Windows paths with spaces', () => { + expect(convertToWindowsPath('C:\\Program Files\\Some App')) + 
.toBe('C:\\Program Files\\Some App'); + expect(convertToWindowsPath('C:/Program Files/Some App')) + .toBe('C:\\Program Files\\Some App'); + }); + it('handles drive letter paths based on platform', () => { + // WSL paths should never be converted + expect(convertToWindowsPath('/mnt/d/some/path')) + .toBe('/mnt/d/some/path'); + if (process.platform === 'win32') { + // On Windows, Unix-style paths like /d/ should be converted + expect(convertToWindowsPath('/d/some/path')) + .toBe('D:\\some\\path'); + } + else { + // On Linux, /d/ is just a regular Unix path + expect(convertToWindowsPath('/d/some/path')) + .toBe('/d/some/path'); + } + }); + }); + describe('normalizePath', () => { + it('preserves Unix paths', () => { + expect(normalizePath('/usr/local/bin')) + .toBe('/usr/local/bin'); + expect(normalizePath('/home/user/some path')) + .toBe('/home/user/some path'); + expect(normalizePath('"/usr/local/some app/"')) + .toBe('/usr/local/some app'); + expect(normalizePath('/usr/local//bin/app///')) + .toBe('/usr/local/bin/app'); + expect(normalizePath('/')) + .toBe('/'); + expect(normalizePath('///')) + .toBe('/'); + }); + it('removes surrounding quotes', () => { + expect(normalizePath('"C:\\NS\\My Kindle Content"')) + .toBe('C:\\NS\\My Kindle Content'); + }); + it('normalizes backslashes', () => { + expect(normalizePath('C:\\\\NS\\\\MyKindleContent')) + .toBe('C:\\NS\\MyKindleContent'); + }); + it('converts forward slashes to backslashes on Windows', () => { + expect(normalizePath('C:/NS/MyKindleContent')) + .toBe('C:\\NS\\MyKindleContent'); + }); + it('always preserves WSL paths (they work correctly in WSL)', () => { + // WSL paths should ALWAYS be preserved, regardless of platform + // This is the fix for issue #2795 + expect(normalizePath('/mnt/c/NS/MyKindleContent')) + .toBe('/mnt/c/NS/MyKindleContent'); + expect(normalizePath('/mnt/d/Documents')) + .toBe('/mnt/d/Documents'); + }); + it('handles Unix-style Windows paths', () => { + // On Windows, /c/ paths should be converted + if (process.platform === 'win32') { + expect(normalizePath('/c/NS/MyKindleContent')) + .toBe('C:\\NS\\MyKindleContent'); + } + else if (process.platform === 'linux') { + // On Linux, /c/ is just a regular Unix path + expect(normalizePath('/c/NS/MyKindleContent')) + .toBe('/c/NS/MyKindleContent'); + } + }); + it('handles paths with spaces and mixed slashes', () => { + expect(normalizePath('C:/NS/My Kindle Content')) + .toBe('C:\\NS\\My Kindle Content'); + // WSL paths should always be preserved + expect(normalizePath('/mnt/c/NS/My Kindle Content')) + .toBe('/mnt/c/NS/My Kindle Content'); + expect(normalizePath('C:\\Program Files (x86)\\App Name')) + .toBe('C:\\Program Files (x86)\\App Name'); + expect(normalizePath('"C:\\Program Files\\App Name"')) + .toBe('C:\\Program Files\\App Name'); + expect(normalizePath(' C:\\Program Files\\App Name ')) + .toBe('C:\\Program Files\\App Name'); + }); + it('preserves spaces in all path formats', () => { + // WSL paths should always be preserved + expect(normalizePath('/mnt/c/Program Files/App Name')) + .toBe('/mnt/c/Program Files/App Name'); + if (process.platform === 'win32') { + // On Windows, Unix-style paths like /c/ should be converted + expect(normalizePath('/c/Program Files/App Name')) + .toBe('C:\\Program Files\\App Name'); + } + else { + // On Linux, /c/ is just a regular Unix path + expect(normalizePath('/c/Program Files/App Name')) + .toBe('/c/Program Files/App Name'); + } + expect(normalizePath('C:/Program Files/App Name')) + .toBe('C:\\Program Files\\App Name'); + }); + 
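+ it('strips quotes and converts slashes together', () => { + // Illustrative composition of two behaviours asserted individually above: quote stripping and forward-slash conversion. + expect(normalizePath('"C:/NS/My Kindle Content"')) + .toBe('C:\\NS\\My Kindle Content'); + });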
it('handles special characters in paths', () => { + // Test ampersand in path + expect(normalizePath('C:\\NS\\Sub&Folder')) + .toBe('C:\\NS\\Sub&Folder'); + expect(normalizePath('C:/NS/Sub&Folder')) + .toBe('C:\\NS\\Sub&Folder'); + // WSL paths should always be preserved + expect(normalizePath('/mnt/c/NS/Sub&Folder')) + .toBe('/mnt/c/NS/Sub&Folder'); + // Test tilde in path (short names in Windows) + expect(normalizePath('C:\\NS\\MYKIND~1')) + .toBe('C:\\NS\\MYKIND~1'); + expect(normalizePath('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1')) + .toBe('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1'); + // Test other special characters + expect(normalizePath('C:\\Path with #hash')) + .toBe('C:\\Path with #hash'); + expect(normalizePath('C:\\Path with (parentheses)')) + .toBe('C:\\Path with (parentheses)'); + expect(normalizePath('C:\\Path with [brackets]')) + .toBe('C:\\Path with [brackets]'); + expect(normalizePath('C:\\Path with @at+plus$dollar%percent')) + .toBe('C:\\Path with @at+plus$dollar%percent'); + }); + it('capitalizes lowercase drive letters for Windows paths', () => { + expect(normalizePath('c:/windows/system32')) + .toBe('C:\\windows\\system32'); + // WSL paths should always be preserved + expect(normalizePath('/mnt/d/my/folder')) + .toBe('/mnt/d/my/folder'); + if (process.platform === 'win32') { + // On Windows, Unix-style paths should be converted and capitalized + expect(normalizePath('/e/another/folder')) + .toBe('E:\\another\\folder'); + } + else { + // On Linux, /e/ is just a regular Unix path + expect(normalizePath('/e/another/folder')) + .toBe('/e/another/folder'); + } + }); + it('handles UNC paths correctly', () => { + // UNC paths should preserve the leading double backslash + const uncPath = '\\\\SERVER\\share\\folder'; + expect(normalizePath(uncPath)).toBe('\\\\SERVER\\share\\folder'); + // Test UNC path with double backslashes that need normalization + const uncPathWithDoubles = '\\\\\\\\SERVER\\\\share\\\\folder'; + expect(normalizePath(uncPathWithDoubles)).toBe('\\\\SERVER\\share\\folder'); + }); + it('returns normalized non-Windows/WSL/Unix-style Windows paths as is after basic normalization', () => { + // A path that looks somewhat absolute but isn't a drive or recognized Unix root for Windows conversion + // These paths should be preserved as-is (not converted to Windows C:\ format or WSL format) + const otherAbsolutePath = '\\someserver\\share\\file'; + expect(normalizePath(otherAbsolutePath)).toBe(otherAbsolutePath); + }); + }); + describe('expandHome', () => { + it('expands ~ to home directory', () => { + const result = expandHome('~/test'); + expect(result).toContain('test'); + expect(result).not.toContain('~'); + }); + it('expands bare ~ to home directory', () => { + const result = expandHome('~'); + expect(result).not.toContain('~'); + expect(result.length).toBeGreaterThan(0); + }); + it('leaves other paths unchanged', () => { + expect(expandHome('C:/test')).toBe('C:/test'); + }); + }); + describe('WSL path handling (issue #2795 fix)', () => { + // Save original platform + const originalPlatform = process.platform; + afterEach(() => { + // Restore platform after each test + Object.defineProperty(process, 'platform', { + value: originalPlatform, + writable: true, + configurable: true + }); + }); + it('should NEVER convert WSL paths - they work correctly in WSL with Node.js fs', () => { + // The key insight: When running `wsl npx ...`, Node.js runs INSIDE WSL (process.platform === 'linux') + // and /mnt/c/ paths work correctly with Node.js fs 
operations in that environment. + // Converting them to C:\ format breaks fs operations because Windows paths don't work inside WSL. + // Mock Linux platform (inside WSL) + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true, + configurable: true + }); + // WSL paths should NOT be converted, even inside WSL + expect(normalizePath('/mnt/c/Users/username/folder')) + .toBe('/mnt/c/Users/username/folder'); + expect(normalizePath('/mnt/d/Documents/project')) + .toBe('/mnt/d/Documents/project'); + }); + it('should also preserve WSL paths when running on Windows', () => { + // Mock Windows platform + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true, + configurable: true + }); + // WSL paths should still be preserved (though they wouldn't be accessible from Windows Node.js) + expect(normalizePath('/mnt/c/Users/username/folder')) + .toBe('/mnt/c/Users/username/folder'); + expect(normalizePath('/mnt/d/Documents/project')) + .toBe('/mnt/d/Documents/project'); + }); + it('should convert Unix-style Windows paths (/c/) only when running on Windows (win32)', () => { + // Mock process.platform to be 'win32' (Windows) + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true, + configurable: true + }); + // Unix-style Windows paths like /c/ should be converted on Windows + expect(normalizePath('/c/Users/username/folder')) + .toBe('C:\\Users\\username\\folder'); + expect(normalizePath('/d/Documents/project')) + .toBe('D:\\Documents\\project'); + }); + it('should NOT convert Unix-style paths (/c/) when running inside WSL (linux)', () => { + // Mock process.platform to be 'linux' (WSL/Linux) + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true, + configurable: true + }); + // When on Linux, /c/ is just a regular Unix directory, not a drive letter + expect(normalizePath('/c/some/path')) + .toBe('/c/some/path'); + expect(normalizePath('/d/another/path')) + .toBe('/d/another/path'); + }); + it('should preserve regular Unix paths on all platforms', () => { + // Test on Linux + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true, + configurable: true + }); + expect(normalizePath('/home/user/documents')) + .toBe('/home/user/documents'); + expect(normalizePath('/var/log/app')) + .toBe('/var/log/app'); + // Test on Windows (though these paths wouldn't work on Windows) + Object.defineProperty(process, 'platform', { + value: 'win32', + writable: true, + configurable: true + }); + expect(normalizePath('/home/user/documents')) + .toBe('/home/user/documents'); + expect(normalizePath('/var/log/app')) + .toBe('/var/log/app'); + }); + it('reproduces exact scenario from issue #2795', () => { + // Simulate running inside WSL: wsl npx @modelcontextprotocol/server-filesystem /mnt/c/Users/username/folder + Object.defineProperty(process, 'platform', { + value: 'linux', + writable: true, + configurable: true + }); + // This is the exact path from the issue + const inputPath = '/mnt/c/Users/username/folder'; + const result = normalizePath(inputPath); + // Should NOT convert to C:\Users\username\folder + expect(result).toBe('/mnt/c/Users/username/folder'); + expect(result).not.toContain('C:'); + expect(result).not.toContain('\\'); + }); + it('should handle relative path slash conversion based on platform', () => { + // This test verifies platform-specific behavior naturally without mocking + // On Windows: forward slashes converted to backslashes + // On Linux/Unix: forward slashes preserved + 
const relativePath = 'some/relative/path'; + const result = normalizePath(relativePath); + if (originalPlatform === 'win32') { + expect(result).toBe('some\\relative\\path'); + } + else { + expect(result).toBe('some/relative/path'); + } + }); + }); +}); diff --git a/src/filesystem/__tests__/path-validation.test.js b/src/filesystem/__tests__/path-validation.test.js new file mode 100644 index 0000000000..4bc7a5b1e5 --- /dev/null +++ b/src/filesystem/__tests__/path-validation.test.js @@ -0,0 +1,758 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as path from 'path'; +import * as fs from 'fs/promises'; +import * as os from 'os'; +import { isPathWithinAllowedDirectories } from '../path-validation.js'; +/** + * Check if the current environment supports symlink creation + */ +async function checkSymlinkSupport() { + const testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'symlink-test-')); + try { + const targetFile = path.join(testDir, 'target.txt'); + const linkFile = path.join(testDir, 'link.txt'); + await fs.writeFile(targetFile, 'test'); + await fs.symlink(targetFile, linkFile); + // If we get here, symlinks are supported + return true; + } + catch (error) { + // EPERM indicates no symlink permissions + if (error.code === 'EPERM') { + return false; + } + // Other errors might indicate a real problem + throw error; + } + finally { + await fs.rm(testDir, { recursive: true, force: true }); + } +} +// Global variable to store symlink support status +let symlinkSupported = null; +/** + * Get cached symlink support status, checking once per test run + */ +async function getSymlinkSupport() { + if (symlinkSupported === null) { + symlinkSupported = await checkSymlinkSupport(); + if (!symlinkSupported) { + console.log('\nāš ļø Symlink tests will be skipped - symlink creation not supported in this environment'); + console.log(' On Windows, enable Developer Mode or run as Administrator to enable symlink tests'); + } + } + return symlinkSupported; +} +describe('Path Validation', () => { + it('allows exact directory match', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true); + }); + it('allows subdirectories', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/src/index.js', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/deeply/nested/file.txt', allowed)).toBe(true); + }); + it('blocks similar directory names (prefix vulnerability)', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project_backup', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project-old', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/projectile', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project.bak', allowed)).toBe(false); + }); + it('blocks paths outside allowed directories', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/etc/passwd', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/', 
allowed)).toBe(false); + }); + it('handles multiple allowed directories', () => { + const allowed = ['/home/user/project1', '/home/user/project2']; + expect(isPathWithinAllowedDirectories('/home/user/project1/src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project2/src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project3', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project1_backup', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project2-old', allowed)).toBe(false); + }); + it('blocks parent and sibling directories', () => { + const allowed = ['/test/allowed']; + // Parent directory + expect(isPathWithinAllowedDirectories('/test', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/', allowed)).toBe(false); + // Sibling with common prefix + expect(isPathWithinAllowedDirectories('/test/allowed_sibling', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/test/allowed2', allowed)).toBe(false); + }); + it('handles paths with special characters', () => { + const allowed = ['/home/user/my-project (v2)']; + expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)/src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/my-project (v2)_backup', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/my-project', allowed)).toBe(false); + }); + describe('Input validation', () => { + it('rejects empty inputs', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project', [])).toBe(false); + }); + it('handles trailing separators correctly', () => { + const allowed = ['/home/user/project']; + // Path with trailing separator should still match + expect(isPathWithinAllowedDirectories('/home/user/project/', allowed)).toBe(true); + // Allowed directory with trailing separator + const allowedWithSep = ['/home/user/project/']; + expect(isPathWithinAllowedDirectories('/home/user/project', allowedWithSep)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/', allowedWithSep)).toBe(true); + // Should still block similar names with or without trailing separators + expect(isPathWithinAllowedDirectories('/home/user/project2', allowedWithSep)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project2/', allowed)).toBe(false); + }); + it('skips empty directory entries in allowed list', () => { + const allowed = ['', '/home/user/project', '']; + expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true); + // Should still validate properly with empty entries + expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false); + }); + it('handles Windows paths with trailing separators', () => { + if (path.sep === '\\') { + const allowed = ['C:\\Users\\project']; + // Path with trailing separator + expect(isPathWithinAllowedDirectories('C:\\Users\\project\\', allowed)).toBe(true); + // Allowed with trailing separator + const allowedWithSep = ['C:\\Users\\project\\']; + expect(isPathWithinAllowedDirectories('C:\\Users\\project', allowedWithSep)).toBe(true); + 
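+ // Illustrative extra assertion: subpaths also match the trailing-separator form, because allowed directories are normalized before the prefix comparison. + expect(isPathWithinAllowedDirectories('C:\\Users\\project\\src', allowedWithSep)).toBe(true);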
expect(isPathWithinAllowedDirectories('C:\\Users\\project\\', allowedWithSep)).toBe(true); + // Should still block similar names + expect(isPathWithinAllowedDirectories('C:\\Users\\project2\\', allowed)).toBe(false); + } + }); + }); + describe('Error handling', () => { + it('normalizes relative paths to absolute', () => { + const allowed = [process.cwd()]; + // Relative paths get normalized to absolute paths based on cwd + expect(isPathWithinAllowedDirectories('relative/path', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('./file', allowed)).toBe(true); + // Parent directory references that escape allowed directory + const parentAllowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('../parent', parentAllowed)).toBe(false); + }); + it('returns false for relative paths in allowed directories', () => { + const badAllowed = ['relative/path', '/some/other/absolute/path']; + // Relative paths in allowed dirs are normalized to absolute based on cwd + // The normalized 'relative/path' won't match our test path + expect(isPathWithinAllowedDirectories('/some/other/absolute/path/file', badAllowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/absolute/path/file', badAllowed)).toBe(false); + }); + it('handles null and undefined inputs gracefully', () => { + const allowed = ['/home/user/project']; + // Should return false, not crash + expect(isPathWithinAllowedDirectories(null, allowed)).toBe(false); + expect(isPathWithinAllowedDirectories(undefined, allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/path', null)).toBe(false); + expect(isPathWithinAllowedDirectories('/path', undefined)).toBe(false); + }); + }); + describe('Unicode and special characters', () => { + it('handles unicode characters in paths', () => { + const allowed = ['/home/user/cafĆ©']; + expect(isPathWithinAllowedDirectories('/home/user/cafĆ©', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/cafĆ©/file', allowed)).toBe(true); + // Different unicode representation won't match (not normalized) + const decomposed = '/home/user/cafe\u0301'; // e + combining accent + expect(isPathWithinAllowedDirectories(decomposed, allowed)).toBe(false); + }); + it('handles paths with spaces correctly', () => { + const allowed = ['/home/user/my project']; + expect(isPathWithinAllowedDirectories('/home/user/my project', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/my project/file', allowed)).toBe(true); + // Partial matches should fail + expect(isPathWithinAllowedDirectories('/home/user/my', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/my proj', allowed)).toBe(false); + }); + }); + describe('Overlapping allowed directories', () => { + it('handles nested allowed directories correctly', () => { + const allowed = ['/home', '/home/user', '/home/user/project']; + // All paths under /home are allowed + expect(isPathWithinAllowedDirectories('/home/anything', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/anything', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/anything', allowed)).toBe(true); + // First match wins (most permissive) + expect(isPathWithinAllowedDirectories('/home/other/deep/path', allowed)).toBe(true); + }); + it('handles root directory as allowed', () => { + const allowed = ['/']; + // Everything is allowed under root (dangerous configuration) + expect(isPathWithinAllowedDirectories('/', allowed)).toBe(true); + 
expect(isPathWithinAllowedDirectories('/any/path', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/etc/passwd', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/secret', allowed)).toBe(true); + // But only on the same filesystem root + if (path.sep === '\\') { + expect(isPathWithinAllowedDirectories('D:\\other', ['/'])).toBe(false); + } + }); + }); + describe('Cross-platform behavior', () => { + it('handles Windows-style paths on Windows', () => { + if (path.sep === '\\') { + const allowed = ['C:\\Users\\project']; + expect(isPathWithinAllowedDirectories('C:\\Users\\project', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('C:\\Users\\project\\src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('C:\\Users\\project2', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('C:\\Users\\project_backup', allowed)).toBe(false); + } + }); + it('handles Unix-style paths on Unix', () => { + if (path.sep === '/') { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project2', allowed)).toBe(false); + } + }); + }); + describe('Validation Tests - Path Traversal', () => { + it('blocks path traversal attempts', () => { + const allowed = ['/home/user/project']; + // Basic traversal attempts + expect(isPathWithinAllowedDirectories('/home/user/project/../../../etc/passwd', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project/../../other', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project/../project2', allowed)).toBe(false); + // Mixed traversal with valid segments + expect(isPathWithinAllowedDirectories('/home/user/project/src/../../project2', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project/./../../other', allowed)).toBe(false); + // Multiple traversal sequences + expect(isPathWithinAllowedDirectories('/home/user/project/../project/../../../etc', allowed)).toBe(false); + }); + it('blocks traversal in allowed directories', () => { + const allowed = ['/home/user/project/../safe']; + // The allowed directory itself should be normalized and safe + expect(isPathWithinAllowedDirectories('/home/user/safe/file', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(false); + }); + it('handles complex traversal patterns', () => { + const allowed = ['/home/user/project']; + // Double dots in filenames (not traversal) - these normalize to paths within allowed dir + expect(isPathWithinAllowedDirectories('/home/user/project/..test', allowed)).toBe(true); // Not traversal + expect(isPathWithinAllowedDirectories('/home/user/project/test..', allowed)).toBe(true); // Not traversal + expect(isPathWithinAllowedDirectories('/home/user/project/te..st', allowed)).toBe(true); // Not traversal + // Actual traversal + expect(isPathWithinAllowedDirectories('/home/user/project/../test', allowed)).toBe(false); // Is traversal - goes to /home/user/test + // Edge case: /home/user/project/.. 
normalizes to /home/user (parent dir) + expect(isPathWithinAllowedDirectories('/home/user/project/..', allowed)).toBe(false); // Goes to parent + }); + }); + describe('Validation Tests - Null Bytes', () => { + it('rejects paths with null bytes', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories('/home/user/project\x00/etc/passwd', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project/test\x00.txt', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('\x00/home/user/project', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project/\x00', allowed)).toBe(false); + }); + it('rejects allowed directories with null bytes', () => { + const allowed = ['/home/user/project\x00']; + expect(isPathWithinAllowedDirectories('/home/user/project', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(false); + }); + }); + describe('Validation Tests - Special Characters', () => { + it('allows percent signs in filenames', () => { + const allowed = ['/home/user/project']; + // Percent is a valid filename character + expect(isPathWithinAllowedDirectories('/home/user/project/report_50%.pdf', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/Q1_25%_growth', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/%41', allowed)).toBe(true); // File named %41 + // URL encoding is NOT decoded by path.normalize, so these are just odd filenames + expect(isPathWithinAllowedDirectories('/home/user/project/%2e%2e', allowed)).toBe(true); // File named "%2e%2e" + expect(isPathWithinAllowedDirectories('/home/user/project/file%20name', allowed)).toBe(true); // File with %20 in name + }); + it('handles percent signs in allowed directories', () => { + const allowed = ['/home/user/project%20files']; + // This is a directory literally named "project%20files" + expect(isPathWithinAllowedDirectories('/home/user/project%20files/test', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project files/test', allowed)).toBe(false); // Different dir + }); + }); + describe('Path Normalization', () => { + it('normalizes paths before comparison', () => { + const allowed = ['/home/user/project']; + // Trailing slashes + expect(isPathWithinAllowedDirectories('/home/user/project/', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project//', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project///', allowed)).toBe(true); + // Current directory references + expect(isPathWithinAllowedDirectories('/home/user/project/./src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/./project/src', allowed)).toBe(true); + // Multiple slashes + expect(isPathWithinAllowedDirectories('/home/user/project//src//file', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home//user//project//src', allowed)).toBe(true); + // Should still block outside paths + expect(isPathWithinAllowedDirectories('/home/user//project2', allowed)).toBe(false); + }); + it('handles mixed separators correctly', () => { + if (path.sep === '\\') { + const allowed = ['C:\\Users\\project']; + // Mixed separators should be normalized + expect(isPathWithinAllowedDirectories('C:/Users/project', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('C:\\Users/project\\src', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('C:/Users\\project/src', 
allowed)).toBe(true); + } + }); + }); + describe('Edge Cases', () => { + it('rejects non-string inputs safely', () => { + const allowed = ['/home/user/project']; + expect(isPathWithinAllowedDirectories(123, allowed)).toBe(false); + expect(isPathWithinAllowedDirectories({}, allowed)).toBe(false); + expect(isPathWithinAllowedDirectories([], allowed)).toBe(false); + expect(isPathWithinAllowedDirectories(null, allowed)).toBe(false); + expect(isPathWithinAllowedDirectories(undefined, allowed)).toBe(false); + // Non-string in allowed directories + expect(isPathWithinAllowedDirectories('/home/user/project', [123])).toBe(false); + expect(isPathWithinAllowedDirectories('/home/user/project', [{}])).toBe(false); + }); + it('handles very long paths', () => { + const allowed = ['/home/user/project']; + // Create a very long path that's still valid + const longSubPath = 'a/'.repeat(1000) + 'file.txt'; + expect(isPathWithinAllowedDirectories(`/home/user/project/${longSubPath}`, allowed)).toBe(true); + // Very long path that escapes + const escapePath = 'a/'.repeat(1000) + '../'.repeat(1001) + 'etc/passwd'; + expect(isPathWithinAllowedDirectories(`/home/user/project/${escapePath}`, allowed)).toBe(false); + }); + }); + describe('Additional Coverage', () => { + it('handles allowed directories with traversal that normalizes safely', () => { + // These allowed dirs contain traversal but normalize to valid paths + const allowed = ['/home/user/../user/project']; + // Should normalize to /home/user/project and work correctly + expect(isPathWithinAllowedDirectories('/home/user/project/file', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/other', allowed)).toBe(false); + }); + it('handles symbolic dots in filenames', () => { + const allowed = ['/home/user/project']; + // Single and double dots as actual filenames (not traversal) + expect(isPathWithinAllowedDirectories('/home/user/project/.', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('/home/user/project/..', allowed)).toBe(false); // This normalizes to parent + expect(isPathWithinAllowedDirectories('/home/user/project/...', allowed)).toBe(true); // Three dots is a valid filename + expect(isPathWithinAllowedDirectories('/home/user/project/....', allowed)).toBe(true); // Four dots is a valid filename + }); + it('handles UNC paths on Windows', () => { + if (path.sep === '\\') { + const allowed = ['\\\\server\\share\\project']; + expect(isPathWithinAllowedDirectories('\\\\server\\share\\project', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('\\\\server\\share\\project\\file', allowed)).toBe(true); + expect(isPathWithinAllowedDirectories('\\\\server\\share\\other', allowed)).toBe(false); + expect(isPathWithinAllowedDirectories('\\\\other\\share\\project', allowed)).toBe(false); + } + }); + }); + describe('Symlink Tests', () => { + let testDir; + let allowedDir; + let forbiddenDir; + beforeEach(async () => { + testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fs-error-test-')); + allowedDir = path.join(testDir, 'allowed'); + forbiddenDir = path.join(testDir, 'forbidden'); + await fs.mkdir(allowedDir, { recursive: true }); + await fs.mkdir(forbiddenDir, { recursive: true }); + }); + afterEach(async () => { + await fs.rm(testDir, { recursive: true, force: true }); + }); + it('validates symlink handling', async () => { + // Test with symlinks + try { + const linkPath = path.join(allowedDir, 'bad-link'); + const targetPath = path.join(forbiddenDir, 'target.txt'); + await fs.writeFile(targetPath, 'content'); + 
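+                // The steps below stage the attack this suite guards against: a
+                // symlink planted inside the allowed directory whose target lives
+                // in the forbidden directory; fs.realpath must expose the real
+                // location so validation can reject it.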
await fs.symlink(targetPath, linkPath); + // In real implementation, this would throw with the resolved path + const realPath = await fs.realpath(linkPath); + const allowed = [allowedDir]; + // Symlink target should be outside allowed directory + expect(isPathWithinAllowedDirectories(realPath, allowed)).toBe(false); + } + catch (error) { + // Skip if no symlink permissions + } + }); + it('handles non-existent paths correctly', async () => { + const newFilePath = path.join(allowedDir, 'subdir', 'newfile.txt'); + // Parent directory doesn't exist + try { + await fs.access(newFilePath); + } + catch (error) { + expect(error.code).toBe('ENOENT'); + } + // After creating parent, validation should work + await fs.mkdir(path.dirname(newFilePath), { recursive: true }); + const allowed = [allowedDir]; + expect(isPathWithinAllowedDirectories(newFilePath, allowed)).toBe(true); + }); + // Test path resolution consistency for symlinked files + it('validates symlinked files consistently between path and resolved forms', async () => { + try { + // Setup: Create target file in forbidden area + const targetFile = path.join(forbiddenDir, 'target.txt'); + await fs.writeFile(targetFile, 'TARGET_CONTENT'); + // Create symlink inside allowed directory pointing to forbidden file + const symlinkPath = path.join(allowedDir, 'link-to-target.txt'); + await fs.symlink(targetFile, symlinkPath); + // The symlink path itself passes validation (looks like it's in allowed dir) + expect(isPathWithinAllowedDirectories(symlinkPath, [allowedDir])).toBe(true); + // But the resolved path should fail validation + const resolvedPath = await fs.realpath(symlinkPath); + expect(isPathWithinAllowedDirectories(resolvedPath, [allowedDir])).toBe(false); + // Verify the resolved path goes to the forbidden location (normalize both paths for macOS temp dirs) + expect(await fs.realpath(resolvedPath)).toBe(await fs.realpath(targetFile)); + } + catch (error) { + // Skip if no symlink permissions on the system + if (error.code !== 'EPERM') { + throw error; + } + } + }); + // Test allowed directory resolution behavior + it('validates paths correctly when allowed directory is resolved from symlink', async () => { + try { + // Setup: Create the actual target directory with content + const actualTargetDir = path.join(testDir, 'actual-target'); + await fs.mkdir(actualTargetDir, { recursive: true }); + const targetFile = path.join(actualTargetDir, 'file.txt'); + await fs.writeFile(targetFile, 'FILE_CONTENT'); + // Setup: Create symlink directory that points to target + const symlinkDir = path.join(testDir, 'symlink-dir'); + await fs.symlink(actualTargetDir, symlinkDir); + // Simulate resolved allowed directory (what the server startup should do) + const resolvedAllowedDir = await fs.realpath(symlinkDir); + const resolvedTargetDir = await fs.realpath(actualTargetDir); + expect(resolvedAllowedDir).toBe(resolvedTargetDir); + // Test 1: File access through original symlink path should pass validation with resolved allowed dir + const fileViaSymlink = path.join(symlinkDir, 'file.txt'); + const resolvedFile = await fs.realpath(fileViaSymlink); + expect(isPathWithinAllowedDirectories(resolvedFile, [resolvedAllowedDir])).toBe(true); + // Test 2: File access through resolved path should also pass validation + const fileViaResolved = path.join(resolvedTargetDir, 'file.txt'); + expect(isPathWithinAllowedDirectories(fileViaResolved, [resolvedAllowedDir])).toBe(true); + // Test 3: Demonstrate inconsistent behavior with unresolved allowed directories + // If 
allowed dirs were not resolved (storing symlink paths instead):
+                const unresolvedAllowedDirs = [symlinkDir];
+                // This validation would incorrectly fail for the same content:
+                expect(isPathWithinAllowedDirectories(resolvedFile, unresolvedAllowedDirs)).toBe(false);
+            }
+            catch (error) {
+                // Skip if no symlink permissions on the system
+                if (error.code !== 'EPERM') {
+                    throw error;
+                }
+            }
+        });
+        it('resolves nested symlink chains completely', async () => {
+            try {
+                // Setup: Create target file in forbidden area
+                const actualTarget = path.join(forbiddenDir, 'target-file.txt');
+                await fs.writeFile(actualTarget, 'FINAL_CONTENT');
+                // Create chain of symlinks: allowedFile -> link2 -> link1 -> actualTarget
+                const link1 = path.join(testDir, 'intermediate-link1');
+                const link2 = path.join(testDir, 'intermediate-link2');
+                const allowedFile = path.join(allowedDir, 'seemingly-safe-file');
+                await fs.symlink(actualTarget, link1);
+                await fs.symlink(link1, link2);
+                await fs.symlink(link2, allowedFile);
+                // The allowed file path passes basic validation
+                expect(isPathWithinAllowedDirectories(allowedFile, [allowedDir])).toBe(true);
+                // But complete resolution reveals the forbidden target
+                const fullyResolvedPath = await fs.realpath(allowedFile);
+                expect(isPathWithinAllowedDirectories(fullyResolvedPath, [allowedDir])).toBe(false);
+                expect(await fs.realpath(fullyResolvedPath)).toBe(await fs.realpath(actualTarget));
+            }
+            catch (error) {
+                // Skip if no symlink permissions on the system
+                if (error.code !== 'EPERM') {
+                    throw error;
+                }
+            }
+        });
+    });
+    describe('Path Validation Race Condition Tests', () => {
+        let testDir;
+        let allowedDir;
+        let forbiddenDir;
+        let targetFile;
+        let testPath;
+        beforeEach(async () => {
+            testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'race-test-'));
+            allowedDir = path.join(testDir, 'allowed');
+            forbiddenDir = path.join(testDir, 'outside');
+            targetFile = path.join(forbiddenDir, 'target.txt');
+            testPath = path.join(allowedDir, 'test.txt');
+            await fs.mkdir(allowedDir, { recursive: true });
+            await fs.mkdir(forbiddenDir, { recursive: true });
+            await fs.writeFile(targetFile, 'ORIGINAL CONTENT', 'utf-8');
+        });
+        afterEach(async () => {
+            await fs.rm(testDir, { recursive: true, force: true });
+        });
+        it('validates non-existent file paths based on parent directory', async () => {
+            const allowed = [allowedDir];
+            expect(isPathWithinAllowedDirectories(testPath, allowed)).toBe(true);
+            await expect(fs.access(testPath)).rejects.toThrow();
+            const parentDir = path.dirname(testPath);
+            expect(isPathWithinAllowedDirectories(parentDir, allowed)).toBe(true);
+        });
+        it('demonstrates symlink race condition allows writing outside allowed directories', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping symlink race condition test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            await expect(fs.access(testPath)).rejects.toThrow();
+            expect(isPathWithinAllowedDirectories(testPath, allowed)).toBe(true);
+            await fs.symlink(targetFile, testPath);
+            await fs.writeFile(testPath, 'MODIFIED CONTENT', 'utf-8');
+            const targetContent = await fs.readFile(targetFile, 'utf-8');
+            expect(targetContent).toBe('MODIFIED CONTENT');
+            const resolvedPath = await fs.realpath(testPath);
+            expect(isPathWithinAllowedDirectories(resolvedPath, allowed)).toBe(false);
+        });
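+        // The race demonstrated above is why writeFileContent in lib.js (later in
+        // this diff) never writes through a pre-existing path directly. A minimal
+        // sketch of the idea, assuming the same fs/promises API (hypothetical
+        // helper, not part of this change):
+        //
+        //   async function safeWrite(p, data) {
+        //       // 'wx' (O_CREAT | O_EXCL) fails with EEXIST if anything,
+        //       // including a symlink, already occupies the path
+        //       await fs.writeFile(p, data, { encoding: 'utf-8', flag: 'wx' });
+        //       // on EEXIST, fall back to writing a temp file and atomically
+        //       // renaming it over the target (rename replaces a symlink
+        //       // rather than following it)
+        //   }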
+        it('shows timing differences between validation approaches', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping timing validation test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const validation1 = isPathWithinAllowedDirectories(testPath, allowed);
+            expect(validation1).toBe(true);
+            await fs.symlink(targetFile, testPath);
+            const resolvedPath = await fs.realpath(testPath);
+            const validation2 = isPathWithinAllowedDirectories(resolvedPath, allowed);
+            expect(validation2).toBe(false);
+            expect(validation1).not.toBe(validation2);
+        });
+        it('validates directory creation timing', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping directory creation timing test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const testDir = path.join(allowedDir, 'newdir');
+            expect(isPathWithinAllowedDirectories(testDir, allowed)).toBe(true);
+            await fs.symlink(forbiddenDir, testDir);
+            expect(isPathWithinAllowedDirectories(testDir, allowed)).toBe(true);
+            const resolved = await fs.realpath(testDir);
+            expect(isPathWithinAllowedDirectories(resolved, allowed)).toBe(false);
+        });
+        it('demonstrates exclusive file creation behavior', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping exclusive file creation test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            await fs.symlink(targetFile, testPath);
+            await expect(fs.open(testPath, 'wx')).rejects.toThrow(/EEXIST/);
+            await fs.writeFile(testPath, 'NEW CONTENT', 'utf-8');
+            const targetContent = await fs.readFile(targetFile, 'utf-8');
+            expect(targetContent).toBe('NEW CONTENT');
+        });
+        it('should use resolved parent paths for non-existent files', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping resolved parent paths test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const symlinkDir = path.join(allowedDir, 'link');
+            await fs.symlink(forbiddenDir, symlinkDir);
+            const fileThroughSymlink = path.join(symlinkDir, 'newfile.txt');
+            expect(fileThroughSymlink.startsWith(allowedDir)).toBe(true);
+            const parentDir = path.dirname(fileThroughSymlink);
+            const resolvedParent = await fs.realpath(parentDir);
+            expect(isPathWithinAllowedDirectories(resolvedParent, allowed)).toBe(false);
+            const expectedSafePath = path.join(resolvedParent, path.basename(fileThroughSymlink));
+            expect(isPathWithinAllowedDirectories(expectedSafePath, allowed)).toBe(false);
+        });
+        it('demonstrates parent directory symlink traversal', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping parent directory symlink traversal test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const deepPath = path.join(allowedDir, 'sub1', 'sub2', 'file.txt');
+            expect(isPathWithinAllowedDirectories(deepPath, allowed)).toBe(true);
+            const sub1Path = path.join(allowedDir, 'sub1');
+            await fs.symlink(forbiddenDir, sub1Path);
+            await fs.mkdir(path.join(sub1Path, 'sub2'), { recursive: true });
+            await fs.writeFile(deepPath, 'CONTENT', 'utf-8');
+            const realPath = await fs.realpath(deepPath);
+            const realAllowedDir = await fs.realpath(allowedDir);
+            const realForbiddenDir = await fs.realpath(forbiddenDir);
+            expect(realPath.startsWith(realAllowedDir)).toBe(false);
+            expect(realPath.startsWith(realForbiddenDir)).toBe(true);
+        });
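+        // Both symlinked-parent cases above motivate validating the *resolved*
+        // parent directory whenever the requested file does not exist yet;
+        // validatePath in lib.js (later in this diff) does this in its ENOENT
+        // branch, roughly:
+        //
+        //   const realParent = await fs.realpath(path.dirname(absolute));
+        //   if (!isPathWithinAllowedDirectories(normalizePath(realParent), allowedDirectories)) {
+        //       throw new Error('Access denied - parent directory outside allowed directories');
+        //   }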
+        it('should prevent race condition between validatePath and file operation', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping race condition prevention test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const racePath = path.join(allowedDir, 'race-file.txt');
+            const targetFile = path.join(forbiddenDir, 'target.txt');
+            await fs.writeFile(targetFile, 'ORIGINAL CONTENT', 'utf-8');
+            // Path validation would pass (file doesn't exist, parent is in allowed dir)
+            expect(await fs.access(racePath).then(() => false).catch(() => true)).toBe(true);
+            expect(isPathWithinAllowedDirectories(racePath, allowed)).toBe(true);
+            // Race condition: symlink created after validation but before write
+            await fs.symlink(targetFile, racePath);
+            // With exclusive write flag, write should fail on symlink
+            await expect(fs.writeFile(racePath, 'NEW CONTENT', { encoding: 'utf-8', flag: 'wx' })).rejects.toThrow(/EEXIST/);
+            // Verify content unchanged
+            const targetContent = await fs.readFile(targetFile, 'utf-8');
+            expect(targetContent).toBe('ORIGINAL CONTENT');
+            // The symlink exists but write was blocked
+            const actualWritePath = await fs.realpath(racePath);
+            expect(actualWritePath).toBe(await fs.realpath(targetFile));
+            expect(isPathWithinAllowedDirectories(actualWritePath, allowed)).toBe(false);
+        });
+        it('should allow overwrites to legitimate files within allowed directories', async () => {
+            const allowed = [allowedDir];
+            const legitFile = path.join(allowedDir, 'legit-file.txt');
+            // Create a legitimate file
+            await fs.writeFile(legitFile, 'ORIGINAL', 'utf-8');
+            // Opening with w should work for legitimate files
+            const fd = await fs.open(legitFile, 'w');
+            try {
+                await fd.write('UPDATED', 0, 'utf-8');
+            }
+            finally {
+                await fd.close();
+            }
+            const content = await fs.readFile(legitFile, 'utf-8');
+            expect(content).toBe('UPDATED');
+        });
+        it('should handle symlinks that point within allowed directories', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping symlinks within allowed directories test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const targetFile = path.join(allowedDir, 'target.txt');
+            const symlinkPath = path.join(allowedDir, 'symlink.txt');
+            // Create target file within allowed directory
+            await fs.writeFile(targetFile, 'TARGET CONTENT', 'utf-8');
+            // Create symlink pointing to allowed file
+            await fs.symlink(targetFile, symlinkPath);
+            // Opening symlink with w follows it to the target
+            const fd = await fs.open(symlinkPath, 'w');
+            try {
+                await fd.write('UPDATED VIA SYMLINK', 0, 'utf-8');
+            }
+            finally {
+                await fd.close();
+            }
+            // Both symlink and target should show updated content
+            const symlinkContent = await fs.readFile(symlinkPath, 'utf-8');
+            const targetContent = await fs.readFile(targetFile, 'utf-8');
+            expect(symlinkContent).toBe('UPDATED VIA SYMLINK');
+            expect(targetContent).toBe('UPDATED VIA SYMLINK');
+        });
+        it('should prevent overwriting files through symlinks pointing outside allowed directories', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping symlink overwrite prevention test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const legitFile = path.join(allowedDir, 'existing.txt');
+            const targetFile = path.join(forbiddenDir, 'target.txt');
+            // Create a legitimate file first
+            await fs.writeFile(legitFile, 'LEGIT CONTENT', 'utf-8');
+            // Create target file in forbidden directory
+            await fs.writeFile(targetFile, 'FORBIDDEN CONTENT', 'utf-8');
+            // Now replace the legitimate file with a symlink to forbidden location
+            await fs.unlink(legitFile);
+            await fs.symlink(targetFile, legitFile);
+            // Simulate the server's validation logic
+            const stats = await fs.lstat(legitFile);
+            expect(stats.isSymbolicLink()).toBe(true);
+            const realPath = await fs.realpath(legitFile);
+            expect(isPathWithinAllowedDirectories(realPath, allowed)).toBe(false);
+            // With atomic rename, symlinks are replaced not followed
+            // So this test now demonstrates the protection
+            // Verify content remains unchanged
+            const targetContent = await fs.readFile(targetFile, 'utf-8');
+            expect(targetContent).toBe('FORBIDDEN CONTENT');
+        });
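+        // Reads share the same check-then-use window, as the next test shows. A
+        // common hardening (an assumption here, not something this diff adds) is
+        // opening with O_NOFOLLOW so a symlink in the final path component fails
+        // with ELOOP, e.g. fs.open(p, constants.O_RDONLY | constants.O_NOFOLLOW);
+        // note this does not guard against symlinked parent directories.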
+        it('demonstrates race condition in read operations', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping race condition in read operations test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const legitFile = path.join(allowedDir, 'readable.txt');
+            const secretFile = path.join(forbiddenDir, 'secret.txt');
+            // Create legitimate file
+            await fs.writeFile(legitFile, 'PUBLIC CONTENT', 'utf-8');
+            // Create secret file in forbidden directory
+            await fs.writeFile(secretFile, 'SECRET CONTENT', 'utf-8');
+            // Step 1: validatePath would pass for legitimate file
+            expect(isPathWithinAllowedDirectories(legitFile, allowed)).toBe(true);
+            // Step 2: Race condition - replace file with symlink after validation
+            await fs.unlink(legitFile);
+            await fs.symlink(secretFile, legitFile);
+            // Step 3: Read operation follows symlink to forbidden location
+            const content = await fs.readFile(legitFile, 'utf-8');
+            // This shows the vulnerability - we read forbidden content
+            expect(content).toBe('SECRET CONTENT');
+            expect(isPathWithinAllowedDirectories(await fs.realpath(legitFile), allowed)).toBe(false);
+        });
+        it('verifies rename does not follow symlinks', async () => {
+            const symlinkSupported = await getSymlinkSupport();
+            if (!symlinkSupported) {
+                console.log(' ⏭️ Skipping rename symlink test - symlinks not supported');
+                return;
+            }
+            const allowed = [allowedDir];
+            const tempFile = path.join(allowedDir, 'temp.txt');
+            const targetSymlink = path.join(allowedDir, 'target-symlink.txt');
+            const forbiddenTarget = path.join(forbiddenDir, 'forbidden-target.txt');
+            // Create forbidden target
+            await fs.writeFile(forbiddenTarget, 'ORIGINAL CONTENT', 'utf-8');
+            // Create symlink pointing to forbidden location
+            await fs.symlink(forbiddenTarget, targetSymlink);
+            // Write temp file
+            await fs.writeFile(tempFile, 'NEW CONTENT', 'utf-8');
+            // Rename temp file to symlink path
+            await fs.rename(tempFile, targetSymlink);
+            // Check what happened
+            const symlinkExists = await fs.lstat(targetSymlink).then(() => true).catch(() => false);
+            const isSymlink = symlinkExists && (await fs.lstat(targetSymlink)).isSymbolicLink();
+            const targetContent = await fs.readFile(targetSymlink, 'utf-8');
+            const forbiddenContent = await fs.readFile(forbiddenTarget, 'utf-8');
+            // Rename should replace the symlink with a regular file
+            expect(isSymlink).toBe(false);
+            expect(targetContent).toBe('NEW CONTENT');
+            expect(forbiddenContent).toBe('ORIGINAL CONTENT'); // Unchanged
+        });
+    });
+});
diff --git 
a/src/filesystem/__tests__/roots-utils.test.js b/src/filesystem/__tests__/roots-utils.test.js new file mode 100644 index 0000000000..e452e594d1 --- /dev/null +++ b/src/filesystem/__tests__/roots-utils.test.js @@ -0,0 +1,68 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { getValidRootDirectories } from '../roots-utils.js'; +import { mkdtempSync, rmSync, mkdirSync, writeFileSync, realpathSync } from 'fs'; +import { tmpdir } from 'os'; +import { join } from 'path'; +describe('getValidRootDirectories', () => { + let testDir1; + let testDir2; + let testDir3; + let testFile; + beforeEach(() => { + // Create test directories + testDir1 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test1-'))); + testDir2 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test2-'))); + testDir3 = realpathSync(mkdtempSync(join(tmpdir(), 'mcp-roots-test3-'))); + // Create a test file (not a directory) + testFile = join(testDir1, 'test-file.txt'); + writeFileSync(testFile, 'test content'); + }); + afterEach(() => { + // Cleanup + rmSync(testDir1, { recursive: true, force: true }); + rmSync(testDir2, { recursive: true, force: true }); + rmSync(testDir3, { recursive: true, force: true }); + }); + describe('valid directory processing', () => { + it('should process all URI formats and edge cases', async () => { + const roots = [ + { uri: `file://${testDir1}`, name: 'File URI' }, + { uri: testDir2, name: 'Plain path' }, + { uri: testDir3 } // Plain path without name property + ]; + const result = await getValidRootDirectories(roots); + expect(result).toContain(testDir1); + expect(result).toContain(testDir2); + expect(result).toContain(testDir3); + expect(result).toHaveLength(3); + }); + it('should normalize complex paths', async () => { + const subDir = join(testDir1, 'subdir'); + mkdirSync(subDir); + const roots = [ + { uri: `file://${testDir1}/./subdir/../subdir`, name: 'Complex Path' } + ]; + const result = await getValidRootDirectories(roots); + expect(result).toHaveLength(1); + expect(result[0]).toBe(subDir); + }); + }); + describe('error handling', () => { + it('should handle various error types', async () => { + const nonExistentDir = join(tmpdir(), 'non-existent-directory-12345'); + const invalidPath = '\0invalid\0path'; // Null bytes cause different error types + const roots = [ + { uri: `file://${testDir1}`, name: 'Valid Dir' }, + { uri: `file://${nonExistentDir}`, name: 'Non-existent Dir' }, + { uri: `file://${testFile}`, name: 'File Not Dir' }, + { uri: `file://${invalidPath}`, name: 'Invalid Path' } + ]; + const result = await getValidRootDirectories(roots); + expect(result).toContain(testDir1); + expect(result).not.toContain(nonExistentDir); + expect(result).not.toContain(testFile); + expect(result).not.toContain(invalidPath); + expect(result).toHaveLength(1); + }); + }); +}); diff --git a/src/filesystem/__tests__/structured-content.test.js b/src/filesystem/__tests__/structured-content.test.js new file mode 100644 index 0000000000..7e4681170b --- /dev/null +++ b/src/filesystem/__tests__/structured-content.test.js @@ -0,0 +1,128 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as fs from 'fs/promises'; +import * as path from 'path'; +import * as os from 'os'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'; +/** + * Integration tests to verify that tool handlers return structuredContent + * that matches the declared outputSchema. 
+ *
+ * These tests address issues #3110, #3106, #3093 where tools were returning
+ * structuredContent: { content: [contentBlock] } (array) instead of
+ * structuredContent: { content: string } as declared in outputSchema.
+ */
+import { fileURLToPath } from 'url';
+// __dirname does not exist in ES modules; recreate it from import.meta.url so
+// the compiled server can be located relative to this test file
+const __dirname = path.dirname(fileURLToPath(import.meta.url));
+describe('structuredContent schema compliance', () => {
+    let client;
+    let transport;
+    let testDir;
+    beforeEach(async () => {
+        // Create a temp directory for testing
+        testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'mcp-fs-test-'));
+        // Create test files
+        await fs.writeFile(path.join(testDir, 'test.txt'), 'test content');
+        await fs.mkdir(path.join(testDir, 'subdir'));
+        await fs.writeFile(path.join(testDir, 'subdir', 'nested.txt'), 'nested content');
+        // Start the MCP server
+        const serverPath = path.resolve(__dirname, '../dist/index.js');
+        transport = new StdioClientTransport({
+            command: 'node',
+            args: [serverPath, testDir],
+        });
+        client = new Client({
+            name: 'test-client',
+            version: '1.0.0',
+        }, {
+            capabilities: {}
+        });
+        await client.connect(transport);
+    });
+    afterEach(async () => {
+        await client?.close();
+        await fs.rm(testDir, { recursive: true, force: true });
+    });
+    describe('directory_tree', () => {
+        it('should return structuredContent.content as a string, not an array', async () => {
+            const result = await client.callTool({
+                name: 'directory_tree',
+                arguments: { path: testDir }
+            });
+            // The result should have structuredContent
+            expect(result.structuredContent).toBeDefined();
+            // structuredContent.content should be a string (matching outputSchema: { content: z.string() })
+            const structuredContent = result.structuredContent;
+            expect(typeof structuredContent.content).toBe('string');
+            // It should NOT be an array
+            expect(Array.isArray(structuredContent.content)).toBe(false);
+            // The content should be valid JSON representing the tree
+            const treeData = JSON.parse(structuredContent.content);
+            expect(Array.isArray(treeData)).toBe(true);
+        });
+    });
+    describe('list_directory_with_sizes', () => {
+        it('should return structuredContent.content as a string, not an array', async () => {
+            const result = await client.callTool({
+                name: 'list_directory_with_sizes',
+                arguments: { path: testDir }
+            });
+            // The result should have structuredContent
+            expect(result.structuredContent).toBeDefined();
+            // structuredContent.content should be a string (matching outputSchema: { content: z.string() })
+            const structuredContent = result.structuredContent;
+            expect(typeof structuredContent.content).toBe('string');
+            // It should NOT be an array
+            expect(Array.isArray(structuredContent.content)).toBe(false);
+            // The content should contain directory listing info
+            expect(structuredContent.content).toContain('[FILE]');
+        });
+    });
+    describe('move_file', () => {
+        it('should return structuredContent.content as a string, not an array', async () => {
+            const sourcePath = path.join(testDir, 'test.txt');
+            const destPath = path.join(testDir, 'moved.txt');
+            const result = await client.callTool({
+                name: 'move_file',
+                arguments: {
+                    source: sourcePath,
+                    destination: destPath
+                }
+            });
+            // The result should have structuredContent
+            expect(result.structuredContent).toBeDefined();
+            // structuredContent.content should be a string (matching outputSchema: { content: z.string() })
+            const structuredContent = result.structuredContent;
+            expect(typeof structuredContent.content).toBe('string');
+            // It should NOT be an array
+            expect(Array.isArray(structuredContent.content)).toBe(false);
+            // The content should contain success message
+
expect(structuredContent.content).toContain('Successfully moved'); + }); + }); + describe('list_directory (control - already working)', () => { + it('should return structuredContent.content as a string', async () => { + const result = await client.callTool({ + name: 'list_directory', + arguments: { path: testDir } + }); + expect(result.structuredContent).toBeDefined(); + const structuredContent = result.structuredContent; + expect(typeof structuredContent.content).toBe('string'); + expect(Array.isArray(structuredContent.content)).toBe(false); + }); + }); + describe('search_files (control - already working)', () => { + it('should return structuredContent.content as a string', async () => { + const result = await client.callTool({ + name: 'search_files', + arguments: { + path: testDir, + pattern: '*.txt' + } + }); + expect(result.structuredContent).toBeDefined(); + const structuredContent = result.structuredContent; + expect(typeof structuredContent.content).toBe('string'); + expect(Array.isArray(structuredContent.content)).toBe(false); + }); + }); +}); diff --git a/src/filesystem/index.js b/src/filesystem/index.js new file mode 100644 index 0000000000..5c1a9dc651 --- /dev/null +++ b/src/filesystem/index.js @@ -0,0 +1,601 @@ +#!/usr/bin/env node +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import { RootsListChangedNotificationSchema, } from "@modelcontextprotocol/sdk/types.js"; +import fs from "fs/promises"; +import { createReadStream } from "fs"; +import path from "path"; +import { z } from "zod"; +import { minimatch } from "minimatch"; +import { normalizePath, expandHome } from './path-utils.js'; +import { getValidRootDirectories } from './roots-utils.js'; +import { +// Function imports +formatSize, validatePath, getFileStats, readFileContent, writeFileContent, searchFilesWithValidation, applyFileEdits, tailFile, headFile, setAllowedDirectories, } from './lib.js'; +// Command line argument parsing +const args = process.argv.slice(2); +if (args.length === 0) { + console.error("Usage: mcp-server-filesystem [allowed-directory] [additional-directories...]"); + console.error("Note: Allowed directories can be provided via:"); + console.error(" 1. Command-line arguments (shown above)"); + console.error(" 2. 
MCP roots protocol (if client supports it)"); + console.error("At least one directory must be provided by EITHER method for the server to operate."); +} +// Store allowed directories in normalized and resolved form +let allowedDirectories = await Promise.all(args.map(async (dir) => { + const expanded = expandHome(dir); + const absolute = path.resolve(expanded); + try { + // Security: Resolve symlinks in allowed directories during startup + // This ensures we know the real paths and can validate against them later + const resolved = await fs.realpath(absolute); + return normalizePath(resolved); + } + catch (error) { + // If we can't resolve (doesn't exist), use the normalized absolute path + // This allows configuring allowed dirs that will be created later + return normalizePath(absolute); + } +})); +// Validate that all directories exist and are accessible +await Promise.all(allowedDirectories.map(async (dir) => { + try { + const stats = await fs.stat(dir); + if (!stats.isDirectory()) { + console.error(`Error: ${dir} is not a directory`); + process.exit(1); + } + } + catch (error) { + console.error(`Error accessing directory ${dir}:`, error); + process.exit(1); + } +})); +// Initialize the global allowedDirectories in lib.ts +setAllowedDirectories(allowedDirectories); +// Schema definitions +const ReadTextFileArgsSchema = z.object({ + path: z.string(), + tail: z.number().optional().describe('If provided, returns only the last N lines of the file'), + head: z.number().optional().describe('If provided, returns only the first N lines of the file') +}); +const ReadMediaFileArgsSchema = z.object({ + path: z.string() +}); +const ReadMultipleFilesArgsSchema = z.object({ + paths: z + .array(z.string()) + .min(1, "At least one file path must be provided") + .describe("Array of file paths to read. Each path must be a string pointing to a valid file within allowed directories."), +}); +const WriteFileArgsSchema = z.object({ + path: z.string(), + content: z.string(), +}); +const EditOperation = z.object({ + oldText: z.string().describe('Text to search for - must match exactly'), + newText: z.string().describe('Text to replace with') +}); +const EditFileArgsSchema = z.object({ + path: z.string(), + edits: z.array(EditOperation), + dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format') +}); +const CreateDirectoryArgsSchema = z.object({ + path: z.string(), +}); +const ListDirectoryArgsSchema = z.object({ + path: z.string(), +}); +const ListDirectoryWithSizesArgsSchema = z.object({ + path: z.string(), + sortBy: z.enum(['name', 'size']).optional().default('name').describe('Sort entries by name or size'), +}); +const DirectoryTreeArgsSchema = z.object({ + path: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) +}); +const MoveFileArgsSchema = z.object({ + source: z.string(), + destination: z.string(), +}); +const SearchFilesArgsSchema = z.object({ + path: z.string(), + pattern: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) +}); +const GetFileInfoArgsSchema = z.object({ + path: z.string(), +}); +// Server setup +const server = new McpServer({ + name: "secure-filesystem-server", + version: "0.2.0", +}); +// Reads a file as a stream of buffers, concatenates them, and then encodes +// the result to a Base64 string. This is a memory-efficient way to handle +// binary data from a stream before the final encoding. 
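+// (Buffering all chunks before the single toString('base64') call matters:
+// base64 encodes 3-byte groups, so encoding each chunk separately would emit
+// '=' padding at every chunk boundary whose length is not a multiple of 3 and
+// corrupt the result. Concatenating first keeps the encoding correct.)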
+async function readFileAsBase64Stream(filePath) { + return new Promise((resolve, reject) => { + const stream = createReadStream(filePath); + const chunks = []; + stream.on('data', (chunk) => { + chunks.push(chunk); + }); + stream.on('end', () => { + const finalBuffer = Buffer.concat(chunks); + resolve(finalBuffer.toString('base64')); + }); + stream.on('error', (err) => reject(err)); + }); +} +// Tool registrations +// read_file (deprecated) and read_text_file +const readTextFileHandler = async (args) => { + const validPath = await validatePath(args.path); + if (args.head && args.tail) { + throw new Error("Cannot specify both head and tail parameters simultaneously"); + } + let content; + if (args.tail) { + content = await tailFile(validPath, args.tail); + } + else if (args.head) { + content = await headFile(validPath, args.head); + } + else { + content = await readFileContent(validPath); + } + return { + content: [{ type: "text", text: content }], + structuredContent: { content } + }; +}; +server.registerTool("read_file", { + title: "Read File (Deprecated)", + description: "Read the complete contents of a file as text. DEPRECATED: Use read_text_file instead.", + inputSchema: ReadTextFileArgsSchema.shape, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, readTextFileHandler); +server.registerTool("read_text_file", { + title: "Read Text File", + description: "Read the complete contents of a file from the file system as text. " + + "Handles various text encodings and provides detailed error messages " + + "if the file cannot be read. Use this tool when you need to examine " + + "the contents of a single file. Use the 'head' parameter to read only " + + "the first N lines of a file, or the 'tail' parameter to read only " + + "the last N lines of a file. Operates on the file as text regardless of extension. " + + "Only works within allowed directories.", + inputSchema: { + path: z.string(), + tail: z.number().optional().describe("If provided, returns only the last N lines of the file"), + head: z.number().optional().describe("If provided, returns only the first N lines of the file") + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, readTextFileHandler); +server.registerTool("read_media_file", { + title: "Read Media File", + description: "Read an image or audio file. Returns the base64 encoded data and MIME type. " + + "Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { + content: z.array(z.object({ + type: z.enum(["image", "audio", "blob"]), + data: z.string(), + mimeType: z.string() + })) + }, + annotations: { readOnlyHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + const extension = path.extname(validPath).toLowerCase(); + const mimeTypes = { + ".png": "image/png", + ".jpg": "image/jpeg", + ".jpeg": "image/jpeg", + ".gif": "image/gif", + ".webp": "image/webp", + ".bmp": "image/bmp", + ".svg": "image/svg+xml", + ".mp3": "audio/mpeg", + ".wav": "audio/wav", + ".ogg": "audio/ogg", + ".flac": "audio/flac", + }; + const mimeType = mimeTypes[extension] || "application/octet-stream"; + const data = await readFileAsBase64Stream(validPath); + const type = mimeType.startsWith("image/") + ? "image" + : mimeType.startsWith("audio/") + ? 
"audio" + // Fallback for other binary types, not officially supported by the spec but has been used for some time + : "blob"; + const contentItem = { type: type, data, mimeType }; + return { + content: [contentItem], + structuredContent: { content: [contentItem] } + }; +}); +server.registerTool("read_multiple_files", { + title: "Read Multiple Files", + description: "Read the contents of multiple files simultaneously. This is more " + + "efficient than reading files one by one when you need to analyze " + + "or compare multiple files. Each file's content is returned with its " + + "path as a reference. Failed reads for individual files won't stop " + + "the entire operation. Only works within allowed directories.", + inputSchema: { + paths: z.array(z.string()) + .min(1) + .describe("Array of file paths to read. Each path must be a string pointing to a valid file within allowed directories.") + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async (args) => { + const results = await Promise.all(args.paths.map(async (filePath) => { + try { + const validPath = await validatePath(filePath); + const content = await readFileContent(validPath); + return `${filePath}:\n${content}\n`; + } + catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return `${filePath}: Error - ${errorMessage}`; + } + })); + const text = results.join("\n---\n"); + return { + content: [{ type: "text", text }], + structuredContent: { content: text } + }; +}); +server.registerTool("write_file", { + title: "Write File", + description: "Create a new file or completely overwrite an existing file with new content. " + + "Use with caution as it will overwrite existing files without warning. " + + "Handles text content with proper encoding. Only works within allowed directories.", + inputSchema: { + path: z.string(), + content: z.string() + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: false, idempotentHint: true, destructiveHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + await writeFileContent(validPath, args.content); + const text = `Successfully wrote to ${args.path}`; + return { + content: [{ type: "text", text }], + structuredContent: { content: text } + }; +}); +server.registerTool("edit_file", { + title: "Edit File", + description: "Make line-based edits to a text file. Each edit replaces exact line sequences " + + "with new content. Returns a git-style diff showing the changes made. " + + "Only works within allowed directories.", + inputSchema: { + path: z.string(), + edits: z.array(z.object({ + oldText: z.string().describe("Text to search for - must match exactly"), + newText: z.string().describe("Text to replace with") + })), + dryRun: z.boolean().default(false).describe("Preview changes using git-style diff format") + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: false, idempotentHint: false, destructiveHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + const result = await applyFileEdits(validPath, args.edits, args.dryRun); + return { + content: [{ type: "text", text: result }], + structuredContent: { content: result } + }; +}); +server.registerTool("create_directory", { + title: "Create Directory", + description: "Create a new directory or ensure a directory exists. Can create multiple " + + "nested directories in one operation. If the directory already exists, " + + "this operation will succeed silently. 
Perfect for setting up directory " + + "structures for projects or ensuring required paths exist. Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: false, idempotentHint: true, destructiveHint: false } +}, async (args) => { + const validPath = await validatePath(args.path); + await fs.mkdir(validPath, { recursive: true }); + const text = `Successfully created directory ${args.path}`; + return { + content: [{ type: "text", text }], + structuredContent: { content: text } + }; +}); +server.registerTool("list_directory", { + title: "List Directory", + description: "Get a detailed listing of all files and directories in a specified path. " + + "Results clearly distinguish between files and directories with [FILE] and [DIR] " + + "prefixes. This tool is essential for understanding directory structure and " + + "finding specific files within a directory. Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const formatted = entries + .map((entry) => `${entry.isDirectory() ? "[DIR]" : "[FILE]"} ${entry.name}`) + .join("\n"); + return { + content: [{ type: "text", text: formatted }], + structuredContent: { content: formatted } + }; +}); +server.registerTool("list_directory_with_sizes", { + title: "List Directory with Sizes", + description: "Get a detailed listing of all files and directories in a specified path, including sizes. " + + "Results clearly distinguish between files and directories with [FILE] and [DIR] " + + "prefixes. This tool is useful for understanding directory structure and " + + "finding specific files within a directory. Only works within allowed directories.", + inputSchema: { + path: z.string(), + sortBy: z.enum(["name", "size"]).optional().default("name").describe("Sort entries by name or size") + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + // Get detailed information for each entry + const detailedEntries = await Promise.all(entries.map(async (entry) => { + const entryPath = path.join(validPath, entry.name); + try { + const stats = await fs.stat(entryPath); + return { + name: entry.name, + isDirectory: entry.isDirectory(), + size: stats.size, + mtime: stats.mtime + }; + } + catch (error) { + return { + name: entry.name, + isDirectory: entry.isDirectory(), + size: 0, + mtime: new Date(0) + }; + } + })); + // Sort entries based on sortBy parameter + const sortedEntries = [...detailedEntries].sort((a, b) => { + if (args.sortBy === 'size') { + return b.size - a.size; // Descending by size + } + // Default sort by name + return a.name.localeCompare(b.name); + }); + // Format the output + const formattedEntries = sortedEntries.map(entry => `${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${entry.isDirectory ? "" : formatSize(entry.size).padStart(10)}`); + // Add summary + const totalFiles = detailedEntries.filter(e => !e.isDirectory).length; + const totalDirs = detailedEntries.filter(e => e.isDirectory).length; + const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 
0 : entry.size), 0); + const summary = [ + "", + `Total: ${totalFiles} files, ${totalDirs} directories`, + `Combined size: ${formatSize(totalSize)}` + ]; + const text = [...formattedEntries, ...summary].join("\n"); + const contentBlock = { type: "text", text }; + return { + content: [contentBlock], + structuredContent: { content: text } + }; +}); +server.registerTool("directory_tree", { + title: "Directory Tree", + description: "Get a recursive tree view of files and directories as a JSON structure. " + + "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " + + "Files have no children array, while directories always have a children array (which may be empty). " + + "The output is formatted with 2-space indentation for readability. Only works within allowed directories.", + inputSchema: { + path: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async (args) => { + const rootPath = args.path; + async function buildTree(currentPath, excludePatterns = []) { + const validPath = await validatePath(currentPath); + const entries = await fs.readdir(validPath, { withFileTypes: true }); + const result = []; + for (const entry of entries) { + const relativePath = path.relative(rootPath, path.join(currentPath, entry.name)); + const shouldExclude = excludePatterns.some(pattern => { + if (pattern.includes('*')) { + return minimatch(relativePath, pattern, { dot: true }); + } + // For files: match exact name or as part of path + // For directories: match as directory path + return minimatch(relativePath, pattern, { dot: true }) || + minimatch(relativePath, `**/${pattern}`, { dot: true }) || + minimatch(relativePath, `**/${pattern}/**`, { dot: true }); + }); + if (shouldExclude) + continue; + const entryData = { + name: entry.name, + type: entry.isDirectory() ? 'directory' : 'file' + }; + if (entry.isDirectory()) { + const subPath = path.join(currentPath, entry.name); + entryData.children = await buildTree(subPath, excludePatterns); + } + result.push(entryData); + } + return result; + } + const treeData = await buildTree(rootPath, args.excludePatterns); + const text = JSON.stringify(treeData, null, 2); + const contentBlock = { type: "text", text }; + return { + content: [contentBlock], + structuredContent: { content: text } + }; +}); +server.registerTool("move_file", { + title: "Move File", + description: "Move or rename files and directories. Can move files between directories " + + "and rename them in a single operation. If the destination exists, the " + + "operation will fail. Works across different directories and can be used " + + "for simple renaming within the same directory. 
Both source and destination must be within allowed directories.", + inputSchema: { + source: z.string(), + destination: z.string() + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: false, idempotentHint: false, destructiveHint: false } +}, async (args) => { + const validSourcePath = await validatePath(args.source); + const validDestPath = await validatePath(args.destination); + await fs.rename(validSourcePath, validDestPath); + const text = `Successfully moved ${args.source} to ${args.destination}`; + const contentBlock = { type: "text", text }; + return { + content: [contentBlock], + structuredContent: { content: text } + }; +}); +server.registerTool("search_files", { + title: "Search Files", + description: "Recursively search for files and directories matching a pattern. " + + "The patterns should be glob-style patterns that match paths relative to the working directory. " + + "Use pattern like '*.ext' to match files in current directory, and '**/*.ext' to match files in all subdirectories. " + + "Returns full paths to all matching items. Great for finding files when you don't know their exact location. " + + "Only searches within allowed directories.", + inputSchema: { + path: z.string(), + pattern: z.string(), + excludePatterns: z.array(z.string()).optional().default([]) + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + const results = await searchFilesWithValidation(validPath, args.pattern, allowedDirectories, { excludePatterns: args.excludePatterns }); + const text = results.length > 0 ? results.join("\n") : "No matches found"; + return { + content: [{ type: "text", text }], + structuredContent: { content: text } + }; +}); +server.registerTool("get_file_info", { + title: "Get File Info", + description: "Retrieve detailed metadata about a file or directory. Returns comprehensive " + + "information including size, creation time, last modified time, permissions, " + + "and type. This tool is perfect for understanding file characteristics " + + "without reading the actual content. Only works within allowed directories.", + inputSchema: { + path: z.string() + }, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async (args) => { + const validPath = await validatePath(args.path); + const info = await getFileStats(validPath); + const text = Object.entries(info) + .map(([key, value]) => `${key}: ${value}`) + .join("\n"); + return { + content: [{ type: "text", text }], + structuredContent: { content: text } + }; +}); +server.registerTool("list_allowed_directories", { + title: "List Allowed Directories", + description: "Returns the list of directories that this server is allowed to access. " + + "Subdirectories within these allowed directories are also accessible. 
" + + "Use this to understand which directories and their nested paths are available " + + "before trying to access files.", + inputSchema: {}, + outputSchema: { content: z.string() }, + annotations: { readOnlyHint: true } +}, async () => { + const text = `Allowed directories:\n${allowedDirectories.join('\n')}`; + return { + content: [{ type: "text", text }], + structuredContent: { content: text } + }; +}); +// Updates allowed directories based on MCP client roots +async function updateAllowedDirectoriesFromRoots(requestedRoots) { + const validatedRootDirs = await getValidRootDirectories(requestedRoots); + if (validatedRootDirs.length > 0) { + allowedDirectories = [...validatedRootDirs]; + setAllowedDirectories(allowedDirectories); // Update the global state in lib.ts + console.error(`Updated allowed directories from MCP roots: ${validatedRootDirs.length} valid directories`); + } + else { + console.error("No valid root directories provided by client"); + } +} +// Handles dynamic roots updates during runtime, when client sends "roots/list_changed" notification, server fetches the updated roots and replaces all allowed directories with the new roots. +server.server.setNotificationHandler(RootsListChangedNotificationSchema, async () => { + try { + // Request the updated roots list from the client + const response = await server.server.listRoots(); + if (response && 'roots' in response) { + await updateAllowedDirectoriesFromRoots(response.roots); + } + } + catch (error) { + console.error("Failed to request roots from client:", error instanceof Error ? error.message : String(error)); + } +}); +// Handles post-initialization setup, specifically checking for and fetching MCP roots. +server.server.oninitialized = async () => { + const clientCapabilities = server.server.getClientCapabilities(); + if (clientCapabilities?.roots) { + try { + const response = await server.server.listRoots(); + if (response && 'roots' in response) { + await updateAllowedDirectoriesFromRoots(response.roots); + } + else { + console.error("Client returned no roots set, keeping current settings"); + } + } + catch (error) { + console.error("Failed to request initial roots from client:", error instanceof Error ? error.message : String(error)); + } + } + else { + if (allowedDirectories.length > 0) { + console.error("Client does not support MCP Roots, using allowed directories set from server args:", allowedDirectories); + } + else { + throw new Error(`Server cannot operate: No allowed directories available. Server was started without command-line directories and client either does not support MCP roots protocol or provided empty roots. 
Please either: 1) Start server with directory arguments, or 2) Use a client that supports MCP roots protocol and provides valid root directories.`); + } + } +}; +// Start server +async function runServer() { + const transport = new StdioServerTransport(); + await server.connect(transport); + console.error("Secure MCP Filesystem Server running on stdio"); + if (allowedDirectories.length === 0) { + console.error("Started without allowed directories - waiting for client to provide roots via MCP protocol"); + } +} +runServer().catch((error) => { + console.error("Fatal error running server:", error); + process.exit(1); +}); diff --git a/src/filesystem/lib.js b/src/filesystem/lib.js new file mode 100644 index 0000000000..9a5ed717be --- /dev/null +++ b/src/filesystem/lib.js @@ -0,0 +1,310 @@ +import fs from "fs/promises"; +import path from "path"; +import { randomBytes } from 'crypto'; +import { createTwoFilesPatch } from 'diff'; +import { minimatch } from 'minimatch'; +import { normalizePath, expandHome } from './path-utils.js'; +import { isPathWithinAllowedDirectories } from './path-validation.js'; +// Global allowed directories - set by the main module +let allowedDirectories = []; +// Function to set allowed directories from the main module +export function setAllowedDirectories(directories) { + allowedDirectories = [...directories]; +} +// Function to get current allowed directories +export function getAllowedDirectories() { + return [...allowedDirectories]; +} +// Pure Utility Functions +export function formatSize(bytes) { + const units = ['B', 'KB', 'MB', 'GB', 'TB']; + if (bytes === 0) + return '0 B'; + const i = Math.floor(Math.log(bytes) / Math.log(1024)); + if (i < 0 || i === 0) + return `${bytes} ${units[0]}`; + const unitIndex = Math.min(i, units.length - 1); + return `${(bytes / Math.pow(1024, unitIndex)).toFixed(2)} ${units[unitIndex]}`; +} +export function normalizeLineEndings(text) { + return text.replace(/\r\n/g, '\n'); +} +export function createUnifiedDiff(originalContent, newContent, filepath = 'file') { + // Ensure consistent line endings for diff + const normalizedOriginal = normalizeLineEndings(originalContent); + const normalizedNew = normalizeLineEndings(newContent); + return createTwoFilesPatch(filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified'); +} +// Security & Validation Functions +export async function validatePath(requestedPath) { + const expandedPath = expandHome(requestedPath); + const absolute = path.isAbsolute(expandedPath) + ? 
path.resolve(expandedPath) + : path.resolve(process.cwd(), expandedPath); + const normalizedRequested = normalizePath(absolute); + // Security: Check if path is within allowed directories before any file operations + const isAllowed = isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories); + if (!isAllowed) { + throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`); + } + // Security: Handle symlinks by checking their real path to prevent symlink attacks + // This prevents attackers from creating symlinks that point outside allowed directories + try { + const realPath = await fs.realpath(absolute); + const normalizedReal = normalizePath(realPath); + if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) { + throw new Error(`Access denied - symlink target outside allowed directories: ${realPath} not in ${allowedDirectories.join(', ')}`); + } + return realPath; + } + catch (error) { + // Security: For new files that don't exist yet, verify parent directory + // This ensures we can't create files in unauthorized locations + if (error.code === 'ENOENT') { + const parentDir = path.dirname(absolute); + try { + const realParentPath = await fs.realpath(parentDir); + const normalizedParent = normalizePath(realParentPath); + if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) { + throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath} not in ${allowedDirectories.join(', ')}`); + } + return absolute; + } + catch { + throw new Error(`Parent directory does not exist: ${parentDir}`); + } + } + throw error; + } +} +// File Operations +export async function getFileStats(filePath) { + const stats = await fs.stat(filePath); + return { + size: stats.size, + created: stats.birthtime, + modified: stats.mtime, + accessed: stats.atime, + isDirectory: stats.isDirectory(), + isFile: stats.isFile(), + permissions: stats.mode.toString(8).slice(-3), + }; +} +export async function readFileContent(filePath, encoding = 'utf-8') { + return await fs.readFile(filePath, encoding); +} +export async function writeFileContent(filePath, content) { + try { + // Security: 'wx' flag ensures exclusive creation - fails if file/symlink exists, + // preventing writes through pre-existing symlinks + await fs.writeFile(filePath, content, { encoding: "utf-8", flag: 'wx' }); + } + catch (error) { + if (error.code === 'EEXIST') { + // Security: Use atomic rename to prevent race conditions where symlinks + // could be created between validation and write. Rename operations + // replace the target file atomically and don't follow symlinks. 
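+            // rename(2) swaps the directory entry itself, so a symlink sitting at
+            // filePath is replaced rather than followed, and readers never observe
+            // a partially written file; the random 16-byte suffix makes temp-name
+            // collisions between concurrent writers negligible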
+ const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`; + try { + await fs.writeFile(tempPath, content, 'utf-8'); + await fs.rename(tempPath, filePath); + } + catch (renameError) { + try { + await fs.unlink(tempPath); + } + catch { } + throw renameError; + } + } + else { + throw error; + } + } +} +export async function applyFileEdits(filePath, edits, dryRun = false) { + // Read file content and normalize line endings + const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8')); + // Apply edits sequentially + let modifiedContent = content; + for (const edit of edits) { + const normalizedOld = normalizeLineEndings(edit.oldText); + const normalizedNew = normalizeLineEndings(edit.newText); + // If exact match exists, use it + if (modifiedContent.includes(normalizedOld)) { + modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew); + continue; + } + // Otherwise, try line-by-line matching with flexibility for whitespace + const oldLines = normalizedOld.split('\n'); + const contentLines = modifiedContent.split('\n'); + let matchFound = false; + for (let i = 0; i <= contentLines.length - oldLines.length; i++) { + const potentialMatch = contentLines.slice(i, i + oldLines.length); + // Compare lines with normalized whitespace + const isMatch = oldLines.every((oldLine, j) => { + const contentLine = potentialMatch[j]; + return oldLine.trim() === contentLine.trim(); + }); + if (isMatch) { + // Preserve original indentation of first line + const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''; + const newLines = normalizedNew.split('\n').map((line, j) => { + if (j === 0) + return originalIndent + line.trimStart(); + // For subsequent lines, try to preserve relative indentation + const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''; + const newIndent = line.match(/^\s*/)?.[0] || ''; + if (oldIndent && newIndent) { + const relativeIndent = newIndent.length - oldIndent.length; + return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart(); + } + return line; + }); + contentLines.splice(i, oldLines.length, ...newLines); + modifiedContent = contentLines.join('\n'); + matchFound = true; + break; + } + } + if (!matchFound) { + throw new Error(`Could not find exact match for edit:\n${edit.oldText}`); + } + } + // Create unified diff + const diff = createUnifiedDiff(content, modifiedContent, filePath); + // Format diff with appropriate number of backticks + let numBackticks = 3; + while (diff.includes('`'.repeat(numBackticks))) { + numBackticks++; + } + const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`; + if (!dryRun) { + // Security: Use atomic rename to prevent race conditions where symlinks + // could be created between validation and write. Rename operations + // replace the target file atomically and don't follow symlinks. 
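+        // A hypothetical temp name here would be e.g.
+        // "notes.txt.9f86d081884c7d659a2feaa0c55ad015.tmp": 16 random bytes
+        // give a 32-char hex suffix, so concurrent edits get distinct temp
+        // files. On failure the temp file is unlinked and the original is left
+        // untouched; it is only replaced by the atomic rename on success.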
+ const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`; + try { + await fs.writeFile(tempPath, modifiedContent, 'utf-8'); + await fs.rename(tempPath, filePath); + } + catch (error) { + try { + await fs.unlink(tempPath); + } + catch { } + throw error; + } + } + return formattedDiff; +} +// Memory-efficient implementation to get the last N lines of a file +export async function tailFile(filePath, numLines) { + const CHUNK_SIZE = 1024; // Read 1KB at a time + const stats = await fs.stat(filePath); + const fileSize = stats.size; + if (fileSize === 0) + return ''; + // Open file for reading + const fileHandle = await fs.open(filePath, 'r'); + try { + const lines = []; + let position = fileSize; + let chunk = Buffer.alloc(CHUNK_SIZE); + let linesFound = 0; + let remainingText = ''; + // Read chunks from the end of the file until we have enough lines + while (position > 0 && linesFound < numLines) { + const size = Math.min(CHUNK_SIZE, position); + position -= size; + const { bytesRead } = await fileHandle.read(chunk, 0, size, position); + if (!bytesRead) + break; + // Get the chunk as a string and prepend any remaining text from previous iteration + const readData = chunk.slice(0, bytesRead).toString('utf-8'); + const chunkText = readData + remainingText; + // Split by newlines and count + const chunkLines = normalizeLineEndings(chunkText).split('\n'); + // If this isn't the end of the file, the first line is likely incomplete + // Save it to prepend to the next chunk + if (position > 0) { + remainingText = chunkLines[0]; + chunkLines.shift(); // Remove the first (incomplete) line + } + // Add lines to our result (up to the number we need) + for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) { + lines.unshift(chunkLines[i]); + linesFound++; + } + } + return lines.join('\n'); + } + finally { + await fileHandle.close(); + } +} +// New function to get the first N lines of a file +export async function headFile(filePath, numLines) { + const fileHandle = await fs.open(filePath, 'r'); + try { + const lines = []; + let buffer = ''; + let bytesRead = 0; + const chunk = Buffer.alloc(1024); // 1KB buffer + // Read chunks and count lines until we have enough or reach EOF + while (lines.length < numLines) { + const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead); + if (result.bytesRead === 0) + break; // End of file + bytesRead += result.bytesRead; + buffer += chunk.slice(0, result.bytesRead).toString('utf-8'); + const newLineIndex = buffer.lastIndexOf('\n'); + if (newLineIndex !== -1) { + const completeLines = buffer.slice(0, newLineIndex).split('\n'); + buffer = buffer.slice(newLineIndex + 1); + for (const line of completeLines) { + lines.push(line); + if (lines.length >= numLines) + break; + } + } + } + // If there is leftover content and we still need lines, add it + if (buffer.length > 0 && lines.length < numLines) { + lines.push(buffer); + } + return lines.join('\n'); + } + finally { + await fileHandle.close(); + } +} +export async function searchFilesWithValidation(rootPath, pattern, allowedDirectories, options = {}) { + const { excludePatterns = [] } = options; + const results = []; + async function search(currentPath) { + const entries = await fs.readdir(currentPath, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = path.join(currentPath, entry.name); + try { + await validatePath(fullPath); + const relativePath = path.relative(rootPath, fullPath); + const shouldExclude = excludePatterns.some(excludePattern => 
minimatch(relativePath, excludePattern, { dot: true })); + if (shouldExclude) + continue; + // Use glob matching for the search pattern + if (minimatch(relativePath, pattern, { dot: true })) { + results.push(fullPath); + } + if (entry.isDirectory()) { + await search(fullPath); + } + } + catch { + continue; + } + } + } + await search(rootPath); + return results; +} diff --git a/src/filesystem/path-utils.js b/src/filesystem/path-utils.js new file mode 100644 index 0000000000..dd14256487 --- /dev/null +++ b/src/filesystem/path-utils.js @@ -0,0 +1,103 @@ +import path from "path"; +import os from 'os'; +/** + * Converts WSL or Unix-style Windows paths to Windows format + * @param p The path to convert + * @returns Converted Windows path + */ +export function convertToWindowsPath(p) { + // Handle WSL paths (/mnt/c/...) + // NEVER convert WSL paths - they are valid Linux paths that work with Node.js fs operations in WSL + // Converting them to Windows format (C:\...) breaks fs operations inside WSL + if (p.startsWith('/mnt/')) { + return p; // Leave WSL paths unchanged + } + // Handle Unix-style Windows paths (/c/...) + // Only convert when running on Windows + if (p.match(/^\/[a-zA-Z]\//) && process.platform === 'win32') { + const driveLetter = p.charAt(1).toUpperCase(); + const pathPart = p.slice(2).replace(/\//g, '\\'); + return `${driveLetter}:${pathPart}`; + } + // Handle standard Windows paths, ensuring backslashes + if (p.match(/^[a-zA-Z]:/)) { + return p.replace(/\//g, '\\'); + } + // Leave non-Windows paths unchanged + return p; +} +/** + * Normalizes path by standardizing format while preserving OS-specific behavior + * @param p The path to normalize + * @returns Normalized path + */ +export function normalizePath(p) { + // Remove any surrounding quotes and whitespace + p = p.trim().replace(/^["']|["']$/g, ''); + // Check if this is a Unix path that should not be converted + // WSL paths (/mnt/) should ALWAYS be preserved as they work correctly in WSL with Node.js fs + // Regular Unix paths should also be preserved + const isUnixPath = p.startsWith('/') && ( + // Always preserve WSL paths (/mnt/c/, /mnt/d/, etc.) + p.match(/^\/mnt\/[a-z]\//i) || + // On non-Windows platforms, treat all absolute paths as Unix paths + (process.platform !== 'win32') || + // On Windows, preserve Unix paths that aren't Unix-style Windows paths (/c/, /d/, etc.) + (process.platform === 'win32' && !p.match(/^\/[a-zA-Z]\//))); + if (isUnixPath) { + // For Unix paths, just normalize without converting to Windows format + // Replace double slashes with single slashes and remove trailing slashes + return p.replace(/\/+/g, '/').replace(/(? 
{ + if (typeof dir !== 'string' || !dir) { + return false; + } + // Reject null bytes in allowed dirs + if (dir.includes('\x00')) { + return false; + } + // Normalize the allowed directory + let normalizedDir; + try { + normalizedDir = path.resolve(path.normalize(dir)); + } + catch { + return false; + } + // Verify allowed directory is absolute after normalization + if (!path.isAbsolute(normalizedDir)) { + throw new Error('Allowed directories must be absolute paths after normalization'); + } + // Check if normalizedPath is within normalizedDir + // Path is inside if it's the same or a subdirectory + if (normalizedPath === normalizedDir) { + return true; + } + // Special case for root directory to avoid double slash + // On Windows, we need to check if both paths are on the same drive + if (normalizedDir === path.sep) { + return normalizedPath.startsWith(path.sep); + } + // On Windows, also check for drive root (e.g., "C:\") + if (path.sep === '\\' && normalizedDir.match(/^[A-Za-z]:\\?$/)) { + // Ensure both paths are on the same drive + const dirDrive = normalizedDir.charAt(0).toLowerCase(); + const pathDrive = normalizedPath.charAt(0).toLowerCase(); + return pathDrive === dirDrive && normalizedPath.startsWith(normalizedDir.replace(/\\?$/, '\\')); + } + return normalizedPath.startsWith(normalizedDir + path.sep); + }); +} diff --git a/src/filesystem/roots-utils.js b/src/filesystem/roots-utils.js new file mode 100644 index 0000000000..13e4eb52b4 --- /dev/null +++ b/src/filesystem/roots-utils.js @@ -0,0 +1,70 @@ +import { promises as fs } from 'fs'; +import path from 'path'; +import os from 'os'; +import { normalizePath } from './path-utils.js'; +/** + * Converts a root URI to a normalized directory path with basic security validation. + * @param rootUri - File URI (file://...) or plain directory path + * @returns Promise resolving to validated path or null if invalid + */ +async function parseRootUri(rootUri) { + try { + const rawPath = rootUri.startsWith('file://') ? rootUri.slice(7) : rootUri; + const expandedPath = rawPath.startsWith('~/') || rawPath === '~' + ? path.join(os.homedir(), rawPath.slice(1)) + : rawPath; + const absolutePath = path.resolve(expandedPath); + const resolvedPath = await fs.realpath(absolutePath); + return normalizePath(resolvedPath); + } + catch { + return null; // Path doesn't exist or other error + } +} +/** + * Formats error message for directory validation failures. + * @param dir - Directory path that failed validation + * @param error - Error that occurred during validation + * @param reason - Specific reason for failure + * @returns Formatted error message + */ +function formatDirectoryError(dir, error, reason) { + if (reason) { + return `Skipping ${reason}: ${dir}`; + } + const message = error instanceof Error ? error.message : String(error); + return `Skipping invalid directory: ${dir} due to error: ${message}`; +} +/** + * Resolves requested root directories from MCP root specifications. + * + * Converts root URI specifications (file:// URIs or plain paths) into normalized + * directory paths, validating that each path exists and is a directory. + * Includes symlink resolution for security. 
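+ *
+ * Illustrative call (hypothetical paths; results depend on the local filesystem):
+ *   await getValidRootDirectories([
+ *     { uri: 'file:///home/user/projects' }, // kept if it exists and is a directory
+ *     { uri: '/tmp/report.txt' },            // skipped: exists but is not a directory
+ *     { uri: '~/no-such-dir' },              // skipped: cannot be resolved; logged to stderr
+ *   ]); // => ['/home/user/projects']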
+ * + * @param requestedRoots - Array of root specifications with URI and optional name + * @returns Promise resolving to array of validated directory paths + */ +export async function getValidRootDirectories(requestedRoots) { + const validatedDirectories = []; + for (const requestedRoot of requestedRoots) { + const resolvedPath = await parseRootUri(requestedRoot.uri); + if (!resolvedPath) { + console.error(formatDirectoryError(requestedRoot.uri, undefined, 'invalid path or inaccessible')); + continue; + } + try { + const stats = await fs.stat(resolvedPath); + if (stats.isDirectory()) { + validatedDirectories.push(resolvedPath); + } + else { + console.error(formatDirectoryError(resolvedPath, undefined, 'non-directory root')); + } + } + catch (error) { + console.error(formatDirectoryError(resolvedPath, error)); + } + } + return validatedDirectories; +} diff --git a/src/filesystem/vitest.config.js b/src/filesystem/vitest.config.js new file mode 100644 index 0000000000..07c570529e --- /dev/null +++ b/src/filesystem/vitest.config.js @@ -0,0 +1,13 @@ +import { defineConfig } from 'vitest/config'; +export default defineConfig({ + test: { + globals: true, + environment: 'node', + include: ['**/__tests__/**/*.test.ts'], + coverage: { + provider: 'v8', + include: ['**/*.ts'], + exclude: ['**/__tests__/**', '**/dist/**'], + }, + }, +}); diff --git a/src/memory/__tests__/file-path.test.js b/src/memory/__tests__/file-path.test.js new file mode 100644 index 0000000000..fe0b98be60 --- /dev/null +++ b/src/memory/__tests__/file-path.test.js @@ -0,0 +1,119 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; +import { promises as fs } from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { ensureMemoryFilePath, defaultMemoryPath } from '../index.js'; +describe('ensureMemoryFilePath', () => { + const testDir = path.dirname(fileURLToPath(import.meta.url)); + const oldMemoryPath = path.join(testDir, '..', 'memory.json'); + const newMemoryPath = path.join(testDir, '..', 'memory.jsonl'); + let originalEnv; + beforeEach(() => { + // Save original environment variable + originalEnv = process.env.MEMORY_FILE_PATH; + // Delete environment variable + delete process.env.MEMORY_FILE_PATH; + }); + afterEach(async () => { + // Restore original environment variable + if (originalEnv !== undefined) { + process.env.MEMORY_FILE_PATH = originalEnv; + } + else { + delete process.env.MEMORY_FILE_PATH; + } + // Clean up test files + try { + await fs.unlink(oldMemoryPath); + } + catch { + // Ignore if file doesn't exist + } + try { + await fs.unlink(newMemoryPath); + } + catch { + // Ignore if file doesn't exist + } + }); + describe('with MEMORY_FILE_PATH environment variable', () => { + it('should return absolute path when MEMORY_FILE_PATH is absolute', async () => { + const absolutePath = '/tmp/custom-memory.jsonl'; + process.env.MEMORY_FILE_PATH = absolutePath; + const result = await ensureMemoryFilePath(); + expect(result).toBe(absolutePath); + }); + it('should convert relative path to absolute when MEMORY_FILE_PATH is relative', async () => { + const relativePath = 'custom-memory.jsonl'; + process.env.MEMORY_FILE_PATH = relativePath; + const result = await ensureMemoryFilePath(); + expect(path.isAbsolute(result)).toBe(true); + expect(result).toContain('custom-memory.jsonl'); + }); + it('should handle Windows absolute paths', async () => { + const windowsPath = 'C:\\temp\\memory.jsonl'; + process.env.MEMORY_FILE_PATH = windowsPath; + const result = await 
ensureMemoryFilePath(); + // On Windows, should return as-is; on Unix, will be treated as relative + if (process.platform === 'win32') { + expect(result).toBe(windowsPath); + } + else { + expect(path.isAbsolute(result)).toBe(true); + } + }); + }); + describe('without MEMORY_FILE_PATH environment variable', () => { + it('should return default path when no files exist', async () => { + const result = await ensureMemoryFilePath(); + expect(result).toBe(defaultMemoryPath); + }); + it('should migrate from memory.json to memory.jsonl when only old file exists', async () => { + // Create old memory.json file + await fs.writeFile(oldMemoryPath, '{"test":"data"}'); + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => { }); + const result = await ensureMemoryFilePath(); + expect(result).toBe(defaultMemoryPath); + // Verify migration happened + const newFileExists = await fs.access(newMemoryPath).then(() => true).catch(() => false); + const oldFileExists = await fs.access(oldMemoryPath).then(() => true).catch(() => false); + expect(newFileExists).toBe(true); + expect(oldFileExists).toBe(false); + // Verify console messages + expect(consoleErrorSpy).toHaveBeenCalledWith(expect.stringContaining('DETECTED: Found legacy memory.json file')); + expect(consoleErrorSpy).toHaveBeenCalledWith(expect.stringContaining('COMPLETED: Successfully migrated')); + consoleErrorSpy.mockRestore(); + }); + it('should use new file when both old and new files exist', async () => { + // Create both files + await fs.writeFile(oldMemoryPath, '{"old":"data"}'); + await fs.writeFile(newMemoryPath, '{"new":"data"}'); + const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => { }); + const result = await ensureMemoryFilePath(); + expect(result).toBe(defaultMemoryPath); + // Verify no migration happened (both files should still exist) + const newFileExists = await fs.access(newMemoryPath).then(() => true).catch(() => false); + const oldFileExists = await fs.access(oldMemoryPath).then(() => true).catch(() => false); + expect(newFileExists).toBe(true); + expect(oldFileExists).toBe(true); + // Verify no console messages about migration + expect(consoleErrorSpy).not.toHaveBeenCalled(); + consoleErrorSpy.mockRestore(); + }); + it('should preserve file content during migration', async () => { + const testContent = '{"entities": [{"name": "test", "type": "person"}]}'; + await fs.writeFile(oldMemoryPath, testContent); + await ensureMemoryFilePath(); + const migratedContent = await fs.readFile(newMemoryPath, 'utf-8'); + expect(migratedContent).toBe(testContent); + }); + }); + describe('defaultMemoryPath', () => { + it('should end with memory.jsonl', () => { + expect(defaultMemoryPath).toMatch(/memory\.jsonl$/); + }); + it('should be an absolute path', () => { + expect(path.isAbsolute(defaultMemoryPath)).toBe(true); + }); + }); +}); diff --git a/src/memory/__tests__/knowledge-graph.test.js b/src/memory/__tests__/knowledge-graph.test.js new file mode 100644 index 0000000000..d35e8c9f28 --- /dev/null +++ b/src/memory/__tests__/knowledge-graph.test.js @@ -0,0 +1,394 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { promises as fs } from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { KnowledgeGraphManager } from '../index.js'; +describe('KnowledgeGraphManager', () => { + let manager; + let testFilePath; + beforeEach(async () => { + // Create a temporary test file path + testFilePath = 
path.join(path.dirname(fileURLToPath(import.meta.url)), `test-memory-${Date.now()}.jsonl`); + manager = new KnowledgeGraphManager(testFilePath); + }); + afterEach(async () => { + // Clean up test file + try { + await fs.unlink(testFilePath); + } + catch (error) { + // Ignore errors if file doesn't exist + } + }); + describe('createEntities', () => { + it('should create new entities', async () => { + const entities = [ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] }, + { name: 'Bob', entityType: 'person', observations: ['likes programming'] }, + ]; + const newEntities = await manager.createEntities(entities); + expect(newEntities).toHaveLength(2); + expect(newEntities).toEqual(entities); + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(2); + }); + it('should not create duplicate entities', async () => { + const entities = [ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] }, + ]; + await manager.createEntities(entities); + const newEntities = await manager.createEntities(entities); + expect(newEntities).toHaveLength(0); + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(1); + }); + it('should handle empty entity arrays', async () => { + const newEntities = await manager.createEntities([]); + expect(newEntities).toHaveLength(0); + }); + }); + describe('createRelations', () => { + it('should create new relations', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + ]); + const relations = [ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + ]; + const newRelations = await manager.createRelations(relations); + expect(newRelations).toHaveLength(1); + expect(newRelations).toEqual(relations); + const graph = await manager.readGraph(); + expect(graph.relations).toHaveLength(1); + }); + it('should not create duplicate relations', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + ]); + const relations = [ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + ]; + await manager.createRelations(relations); + const newRelations = await manager.createRelations(relations); + expect(newRelations).toHaveLength(0); + const graph = await manager.readGraph(); + expect(graph.relations).toHaveLength(1); + }); + it('should handle empty relation arrays', async () => { + const newRelations = await manager.createRelations([]); + expect(newRelations).toHaveLength(0); + }); + }); + describe('addObservations', () => { + it('should add observations to existing entities', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] }, + ]); + const results = await manager.addObservations([ + { entityName: 'Alice', contents: ['likes coffee', 'has a dog'] }, + ]); + expect(results).toHaveLength(1); + expect(results[0].entityName).toBe('Alice'); + expect(results[0].addedObservations).toHaveLength(2); + const graph = await manager.readGraph(); + const alice = graph.entities.find(e => e.name === 'Alice'); + expect(alice?.observations).toHaveLength(3); + }); + it('should not add duplicate observations', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] }, + ]); + await manager.addObservations([ + { entityName: 'Alice', contents: 
['likes coffee'] }, + ]); + const results = await manager.addObservations([ + { entityName: 'Alice', contents: ['likes coffee', 'has a dog'] }, + ]); + expect(results[0].addedObservations).toHaveLength(1); + expect(results[0].addedObservations).toContain('has a dog'); + const graph = await manager.readGraph(); + const alice = graph.entities.find(e => e.name === 'Alice'); + expect(alice?.observations).toHaveLength(3); + }); + it('should throw error for non-existent entity', async () => { + await expect(manager.addObservations([ + { entityName: 'NonExistent', contents: ['some observation'] }, + ])).rejects.toThrow('Entity with name NonExistent not found'); + }); + }); + describe('deleteEntities', () => { + it('should delete entities', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + ]); + await manager.deleteEntities(['Alice']); + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(1); + expect(graph.entities[0].name).toBe('Bob'); + }); + it('should cascade delete relations when deleting entities', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + { name: 'Charlie', entityType: 'person', observations: [] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + { from: 'Bob', to: 'Charlie', relationType: 'knows' }, + ]); + await manager.deleteEntities(['Bob']); + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(2); + expect(graph.relations).toHaveLength(0); + }); + it('should handle deleting non-existent entities', async () => { + await manager.deleteEntities(['NonExistent']); + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(0); + }); + }); + describe('deleteObservations', () => { + it('should delete observations from entities', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp', 'likes coffee'] }, + ]); + await manager.deleteObservations([ + { entityName: 'Alice', observations: ['likes coffee'] }, + ]); + const graph = await manager.readGraph(); + const alice = graph.entities.find(e => e.name === 'Alice'); + expect(alice?.observations).toHaveLength(1); + expect(alice?.observations).toContain('works at Acme Corp'); + }); + it('should handle deleting from non-existent entities', async () => { + await manager.deleteObservations([ + { entityName: 'NonExistent', observations: ['some observation'] }, + ]); + // Should not throw error + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(0); + }); + }); + describe('deleteRelations', () => { + it('should delete specific relations', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + { from: 'Alice', to: 'Bob', relationType: 'works_with' }, + ]); + await manager.deleteRelations([ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + ]); + const graph = await manager.readGraph(); + expect(graph.relations).toHaveLength(1); + expect(graph.relations[0].relationType).toBe('works_with'); + }); + }); + describe('readGraph', () => { + it('should return empty graph when file does not 
exist', async () => { + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(0); + expect(graph.relations).toHaveLength(0); + }); + it('should return complete graph with entities and relations', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Alice', relationType: 'self' }, + ]); + const graph = await manager.readGraph(); + expect(graph.entities).toHaveLength(1); + expect(graph.relations).toHaveLength(1); + }); + }); + describe('searchNodes', () => { + beforeEach(async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp', 'likes programming'] }, + { name: 'Bob', entityType: 'person', observations: ['works at TechCo'] }, + { name: 'Acme Corp', entityType: 'company', observations: ['tech company'] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Acme Corp', relationType: 'works_at' }, + { from: 'Bob', to: 'Acme Corp', relationType: 'competitor' }, + ]); + }); + it('should search by entity name', async () => { + const result = await manager.searchNodes('Alice'); + expect(result.entities).toHaveLength(1); + expect(result.entities[0].name).toBe('Alice'); + }); + it('should search by entity type', async () => { + const result = await manager.searchNodes('company'); + expect(result.entities).toHaveLength(1); + expect(result.entities[0].name).toBe('Acme Corp'); + }); + it('should search by observation content', async () => { + const result = await manager.searchNodes('programming'); + expect(result.entities).toHaveLength(1); + expect(result.entities[0].name).toBe('Alice'); + }); + it('should be case insensitive', async () => { + const result = await manager.searchNodes('ALICE'); + expect(result.entities).toHaveLength(1); + expect(result.entities[0].name).toBe('Alice'); + }); + it('should include relations between matched entities', async () => { + const result = await manager.searchNodes('Acme'); + expect(result.entities).toHaveLength(2); // Alice and Acme Corp + expect(result.relations).toHaveLength(1); // Only Alice -> Acme Corp relation + }); + it('should return empty graph for no matches', async () => { + const result = await manager.searchNodes('NonExistent'); + expect(result.entities).toHaveLength(0); + expect(result.relations).toHaveLength(0); + }); + }); + describe('openNodes', () => { + beforeEach(async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + { name: 'Charlie', entityType: 'person', observations: [] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + { from: 'Bob', to: 'Charlie', relationType: 'knows' }, + ]); + }); + it('should open specific nodes by name', async () => { + const result = await manager.openNodes(['Alice', 'Bob']); + expect(result.entities).toHaveLength(2); + expect(result.entities.map(e => e.name)).toContain('Alice'); + expect(result.entities.map(e => e.name)).toContain('Bob'); + }); + it('should include relations between opened nodes', async () => { + const result = await manager.openNodes(['Alice', 'Bob']); + expect(result.relations).toHaveLength(1); + expect(result.relations[0].from).toBe('Alice'); + expect(result.relations[0].to).toBe('Bob'); + }); + it('should exclude relations to unopened nodes', async () => { + const result = await 
manager.openNodes(['Bob']); + expect(result.relations).toHaveLength(0); + }); + it('should handle opening non-existent nodes', async () => { + const result = await manager.openNodes(['NonExistent']); + expect(result.entities).toHaveLength(0); + }); + it('should handle empty node list', async () => { + const result = await manager.openNodes([]); + expect(result.entities).toHaveLength(0); + expect(result.relations).toHaveLength(0); + }); + }); + describe('file persistence', () => { + it('should persist data across manager instances', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['persistent data'] }, + ]); + // Create new manager instance with same file path + const manager2 = new KnowledgeGraphManager(testFilePath); + const graph = await manager2.readGraph(); + expect(graph.entities).toHaveLength(1); + expect(graph.entities[0].name).toBe('Alice'); + }); + it('should handle JSONL format correctly', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Alice', relationType: 'self' }, + ]); + // Read file directly + const fileContent = await fs.readFile(testFilePath, 'utf-8'); + const lines = fileContent.split('\n').filter(line => line.trim()); + expect(lines).toHaveLength(2); + expect(JSON.parse(lines[0])).toHaveProperty('type', 'entity'); + expect(JSON.parse(lines[1])).toHaveProperty('type', 'relation'); + }); + it('should strip type field from entities when loading from file', async () => { + // Create entities and relations (these get saved with type field) + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['test observation'] }, + { name: 'Bob', entityType: 'person', observations: [] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + ]); + // Verify file contains type field (order may vary) + const fileContent = await fs.readFile(testFilePath, 'utf-8'); + const fileLines = fileContent.split('\n').filter(line => line.trim()); + const fileItems = fileLines.map(line => JSON.parse(line)); + const fileEntity = fileItems.find(item => item.type === 'entity'); + const fileRelation = fileItems.find(item => item.type === 'relation'); + expect(fileEntity).toBeDefined(); + expect(fileEntity).toHaveProperty('type', 'entity'); + expect(fileRelation).toBeDefined(); + expect(fileRelation).toHaveProperty('type', 'relation'); + // Create new manager instance to force reload from file + const manager2 = new KnowledgeGraphManager(testFilePath); + const graph = await manager2.readGraph(); + // Verify loaded entities don't have type field + expect(graph.entities).toHaveLength(2); + graph.entities.forEach(entity => { + expect(entity).not.toHaveProperty('type'); + expect(entity).toHaveProperty('name'); + expect(entity).toHaveProperty('entityType'); + expect(entity).toHaveProperty('observations'); + }); + // Verify loaded relations don't have type field + expect(graph.relations).toHaveLength(1); + graph.relations.forEach(relation => { + expect(relation).not.toHaveProperty('type'); + expect(relation).toHaveProperty('from'); + expect(relation).toHaveProperty('to'); + expect(relation).toHaveProperty('relationType'); + }); + }); + it('should strip type field from searchNodes results', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: ['works at Acme'] }, + ]); + await manager.createRelations([ + { from: 
'Alice', to: 'Alice', relationType: 'self' }, + ]); + // Create new manager instance to force reload from file + const manager2 = new KnowledgeGraphManager(testFilePath); + const result = await manager2.searchNodes('Alice'); + // Verify search results don't have type field + expect(result.entities).toHaveLength(1); + expect(result.entities[0]).not.toHaveProperty('type'); + expect(result.entities[0].name).toBe('Alice'); + expect(result.relations).toHaveLength(1); + expect(result.relations[0]).not.toHaveProperty('type'); + expect(result.relations[0].from).toBe('Alice'); + }); + it('should strip type field from openNodes results', async () => { + await manager.createEntities([ + { name: 'Alice', entityType: 'person', observations: [] }, + { name: 'Bob', entityType: 'person', observations: [] }, + ]); + await manager.createRelations([ + { from: 'Alice', to: 'Bob', relationType: 'knows' }, + ]); + // Create new manager instance to force reload from file + const manager2 = new KnowledgeGraphManager(testFilePath); + const result = await manager2.openNodes(['Alice', 'Bob']); + // Verify open results don't have type field + expect(result.entities).toHaveLength(2); + result.entities.forEach(entity => { + expect(entity).not.toHaveProperty('type'); + }); + expect(result.relations).toHaveLength(1); + expect(result.relations[0]).not.toHaveProperty('type'); + }); + }); +}); diff --git a/src/memory/index.js b/src/memory/index.js new file mode 100644 index 0000000000..a19efd03c5 --- /dev/null +++ b/src/memory/index.js @@ -0,0 +1,380 @@ +#!/usr/bin/env node +import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; +import { z } from "zod"; +import { promises as fs } from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +// Define memory file path using environment variable with fallback +export const defaultMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.jsonl'); +// Handle backward compatibility: migrate memory.json to memory.jsonl if needed +export async function ensureMemoryFilePath() { + if (process.env.MEMORY_FILE_PATH) { + // Custom path provided, use it as-is (with absolute path resolution) + return path.isAbsolute(process.env.MEMORY_FILE_PATH) + ? 
process.env.MEMORY_FILE_PATH + : path.join(path.dirname(fileURLToPath(import.meta.url)), process.env.MEMORY_FILE_PATH); + } + // No custom path set, check for backward compatibility migration + const oldMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.json'); + const newMemoryPath = defaultMemoryPath; + try { + // Check if old file exists and new file doesn't + await fs.access(oldMemoryPath); + try { + await fs.access(newMemoryPath); + // Both files exist, use new one (no migration needed) + return newMemoryPath; + } + catch { + // Old file exists, new file doesn't - migrate + console.error('DETECTED: Found legacy memory.json file, migrating to memory.jsonl for JSONL format compatibility'); + await fs.rename(oldMemoryPath, newMemoryPath); + console.error('COMPLETED: Successfully migrated memory.json to memory.jsonl'); + return newMemoryPath; + } + } + catch { + // Old file doesn't exist, use new path + return newMemoryPath; + } +} +// Initialize memory file path (will be set during startup) +let MEMORY_FILE_PATH; +// The KnowledgeGraphManager class contains all operations to interact with the knowledge graph +export class KnowledgeGraphManager { + memoryFilePath; + constructor(memoryFilePath) { + this.memoryFilePath = memoryFilePath; + } + async loadGraph() { + try { + const data = await fs.readFile(this.memoryFilePath, "utf-8"); + const lines = data.split("\n").filter(line => line.trim() !== ""); + return lines.reduce((graph, line) => { + const item = JSON.parse(line); + if (item.type === "entity") { + graph.entities.push({ + name: item.name, + entityType: item.entityType, + observations: item.observations + }); + } + if (item.type === "relation") { + graph.relations.push({ + from: item.from, + to: item.to, + relationType: item.relationType + }); + } + return graph; + }, { entities: [], relations: [] }); + } + catch (error) { + if (error instanceof Error && 'code' in error && error.code === "ENOENT") { + return { entities: [], relations: [] }; + } + throw error; + } + } + async saveGraph(graph) { + const lines = [ + ...graph.entities.map(e => JSON.stringify({ + type: "entity", + name: e.name, + entityType: e.entityType, + observations: e.observations + })), + ...graph.relations.map(r => JSON.stringify({ + type: "relation", + from: r.from, + to: r.to, + relationType: r.relationType + })), + ]; + await fs.writeFile(this.memoryFilePath, lines.join("\n")); + } + async createEntities(entities) { + const graph = await this.loadGraph(); + const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name)); + graph.entities.push(...newEntities); + await this.saveGraph(graph); + return newEntities; + } + async createRelations(relations) { + const graph = await this.loadGraph(); + const newRelations = relations.filter(r => !graph.relations.some(existingRelation => existingRelation.from === r.from && + existingRelation.to === r.to && + existingRelation.relationType === r.relationType)); + graph.relations.push(...newRelations); + await this.saveGraph(graph); + return newRelations; + } + async addObservations(observations) { + const graph = await this.loadGraph(); + const results = observations.map(o => { + const entity = graph.entities.find(e => e.name === o.entityName); + if (!entity) { + throw new Error(`Entity with name ${o.entityName} not found`); + } + const newObservations = o.contents.filter(content => !entity.observations.includes(content)); + entity.observations.push(...newObservations); + return { entityName: 
o.entityName, addedObservations: newObservations }; + }); + await this.saveGraph(graph); + return results; + } + async deleteEntities(entityNames) { + const graph = await this.loadGraph(); + graph.entities = graph.entities.filter(e => !entityNames.includes(e.name)); + graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to)); + await this.saveGraph(graph); + } + async deleteObservations(deletions) { + const graph = await this.loadGraph(); + deletions.forEach(d => { + const entity = graph.entities.find(e => e.name === d.entityName); + if (entity) { + entity.observations = entity.observations.filter(o => !d.observations.includes(o)); + } + }); + await this.saveGraph(graph); + } + async deleteRelations(relations) { + const graph = await this.loadGraph(); + graph.relations = graph.relations.filter(r => !relations.some(delRelation => r.from === delRelation.from && + r.to === delRelation.to && + r.relationType === delRelation.relationType)); + await this.saveGraph(graph); + } + async readGraph() { + return this.loadGraph(); + } + // Very basic search function + async searchNodes(query) { + const graph = await this.loadGraph(); + // Filter entities + const filteredEntities = graph.entities.filter(e => e.name.toLowerCase().includes(query.toLowerCase()) || + e.entityType.toLowerCase().includes(query.toLowerCase()) || + e.observations.some(o => o.toLowerCase().includes(query.toLowerCase()))); + // Create a Set of filtered entity names for quick lookup + const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); + // Filter relations to only include those between filtered entities + const filteredRelations = graph.relations.filter(r => filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to)); + const filteredGraph = { + entities: filteredEntities, + relations: filteredRelations, + }; + return filteredGraph; + } + async openNodes(names) { + const graph = await this.loadGraph(); + // Filter entities + const filteredEntities = graph.entities.filter(e => names.includes(e.name)); + // Create a Set of filtered entity names for quick lookup + const filteredEntityNames = new Set(filteredEntities.map(e => e.name)); + // Filter relations to only include those between filtered entities + const filteredRelations = graph.relations.filter(r => filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to)); + const filteredGraph = { + entities: filteredEntities, + relations: filteredRelations, + }; + return filteredGraph; + } +} +let knowledgeGraphManager; +// Zod schemas for entities and relations +const EntitySchema = z.object({ + name: z.string().describe("The name of the entity"), + entityType: z.string().describe("The type of the entity"), + observations: z.array(z.string()).describe("An array of observation contents associated with the entity") +}); +const RelationSchema = z.object({ + from: z.string().describe("The name of the entity where the relation starts"), + to: z.string().describe("The name of the entity where the relation ends"), + relationType: z.string().describe("The type of the relation") +}); +// The server instance and tools exposed to Claude +const server = new McpServer({ + name: "memory-server", + version: "0.6.3", +}); +// Register create_entities tool +server.registerTool("create_entities", { + title: "Create Entities", + description: "Create multiple new entities in the knowledge graph", + inputSchema: { + entities: z.array(EntitySchema) + }, + outputSchema: { + entities: z.array(EntitySchema) + } +}, async ({ 
entities }) => { + const result = await knowledgeGraphManager.createEntities(entities); + return { + content: [{ type: "text", text: JSON.stringify(result, null, 2) }], + structuredContent: { entities: result } + }; +}); +// Register create_relations tool +server.registerTool("create_relations", { + title: "Create Relations", + description: "Create multiple new relations between entities in the knowledge graph. Relations should be in active voice", + inputSchema: { + relations: z.array(RelationSchema) + }, + outputSchema: { + relations: z.array(RelationSchema) + } +}, async ({ relations }) => { + const result = await knowledgeGraphManager.createRelations(relations); + return { + content: [{ type: "text", text: JSON.stringify(result, null, 2) }], + structuredContent: { relations: result } + }; +}); +// Register add_observations tool +server.registerTool("add_observations", { + title: "Add Observations", + description: "Add new observations to existing entities in the knowledge graph", + inputSchema: { + observations: z.array(z.object({ + entityName: z.string().describe("The name of the entity to add the observations to"), + contents: z.array(z.string()).describe("An array of observation contents to add") + })) + }, + outputSchema: { + results: z.array(z.object({ + entityName: z.string(), + addedObservations: z.array(z.string()) + })) + } +}, async ({ observations }) => { + const result = await knowledgeGraphManager.addObservations(observations); + return { + content: [{ type: "text", text: JSON.stringify(result, null, 2) }], + structuredContent: { results: result } + }; +}); +// Register delete_entities tool +server.registerTool("delete_entities", { + title: "Delete Entities", + description: "Delete multiple entities and their associated relations from the knowledge graph", + inputSchema: { + entityNames: z.array(z.string()).describe("An array of entity names to delete") + }, + outputSchema: { + success: z.boolean(), + message: z.string() + } +}, async ({ entityNames }) => { + await knowledgeGraphManager.deleteEntities(entityNames); + return { + content: [{ type: "text", text: "Entities deleted successfully" }], + structuredContent: { success: true, message: "Entities deleted successfully" } + }; +}); +// Register delete_observations tool +server.registerTool("delete_observations", { + title: "Delete Observations", + description: "Delete specific observations from entities in the knowledge graph", + inputSchema: { + deletions: z.array(z.object({ + entityName: z.string().describe("The name of the entity containing the observations"), + observations: z.array(z.string()).describe("An array of observations to delete") + })) + }, + outputSchema: { + success: z.boolean(), + message: z.string() + } +}, async ({ deletions }) => { + await knowledgeGraphManager.deleteObservations(deletions); + return { + content: [{ type: "text", text: "Observations deleted successfully" }], + structuredContent: { success: true, message: "Observations deleted successfully" } + }; +}); +// Register delete_relations tool +server.registerTool("delete_relations", { + title: "Delete Relations", + description: "Delete multiple relations from the knowledge graph", + inputSchema: { + relations: z.array(RelationSchema).describe("An array of relations to delete") + }, + outputSchema: { + success: z.boolean(), + message: z.string() + } +}, async ({ relations }) => { + await knowledgeGraphManager.deleteRelations(relations); + return { + content: [{ type: "text", text: "Relations deleted successfully" }], + structuredContent: { 
success: true, message: "Relations deleted successfully" } + }; +}); +// Register read_graph tool +server.registerTool("read_graph", { + title: "Read Graph", + description: "Read the entire knowledge graph", + inputSchema: {}, + outputSchema: { + entities: z.array(EntitySchema), + relations: z.array(RelationSchema) + } +}, async () => { + const graph = await knowledgeGraphManager.readGraph(); + return { + content: [{ type: "text", text: JSON.stringify(graph, null, 2) }], + structuredContent: { ...graph } + }; +}); +// Register search_nodes tool +server.registerTool("search_nodes", { + title: "Search Nodes", + description: "Search for nodes in the knowledge graph based on a query", + inputSchema: { + query: z.string().describe("The search query to match against entity names, types, and observation content") + }, + outputSchema: { + entities: z.array(EntitySchema), + relations: z.array(RelationSchema) + } +}, async ({ query }) => { + const graph = await knowledgeGraphManager.searchNodes(query); + return { + content: [{ type: "text", text: JSON.stringify(graph, null, 2) }], + structuredContent: { ...graph } + }; +}); +// Register open_nodes tool +server.registerTool("open_nodes", { + title: "Open Nodes", + description: "Open specific nodes in the knowledge graph by their names", + inputSchema: { + names: z.array(z.string()).describe("An array of entity names to retrieve") + }, + outputSchema: { + entities: z.array(EntitySchema), + relations: z.array(RelationSchema) + } +}, async ({ names }) => { + const graph = await knowledgeGraphManager.openNodes(names); + return { + content: [{ type: "text", text: JSON.stringify(graph, null, 2) }], + structuredContent: { ...graph } + }; +}); +async function main() { + // Initialize memory file path with backward compatibility + MEMORY_FILE_PATH = await ensureMemoryFilePath(); + // Initialize knowledge graph manager with the memory file path + knowledgeGraphManager = new KnowledgeGraphManager(MEMORY_FILE_PATH); + const transport = new StdioServerTransport(); + await server.connect(transport); + console.error("Knowledge Graph MCP Server running on stdio"); +} +main().catch((error) => { + console.error("Fatal error in main():", error); + process.exit(1); +}); diff --git a/src/memory/vitest.config.js b/src/memory/vitest.config.js new file mode 100644 index 0000000000..07c570529e --- /dev/null +++ b/src/memory/vitest.config.js @@ -0,0 +1,13 @@ +import { defineConfig } from 'vitest/config'; +export default defineConfig({ + test: { + globals: true, + environment: 'node', + include: ['**/__tests__/**/*.test.ts'], + coverage: { + provider: 'v8', + include: ['**/*.ts'], + exclude: ['**/__tests__/**', '**/dist/**'], + }, + }, +}); diff --git a/src/sequentialthinking/__tests__/lib.test.js b/src/sequentialthinking/__tests__/lib.test.js new file mode 100644 index 0000000000..907f420944 --- /dev/null +++ b/src/sequentialthinking/__tests__/lib.test.js @@ -0,0 +1,258 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; +import { SequentialThinkingServer } from '../lib.js'; +// Mock chalk to avoid ESM issues +vi.mock('chalk', () => { + const chalkMock = { + yellow: (str) => str, + green: (str) => str, + blue: (str) => str, + }; + return { + default: chalkMock, + }; +}); +describe('SequentialThinkingServer', () => { + let server; + beforeEach(() => { + // Disable thought logging for tests + process.env.DISABLE_THOUGHT_LOGGING = 'true'; + server = new SequentialThinkingServer(); + }); + // Note: Input validation tests removed - validation now happens 
at the tool + // registration layer via Zod schemas before processThought is called + describe('processThought - valid inputs', () => { + it('should accept valid basic thought', () => { + const input = { + thought: 'This is my first thought', + thoughtNumber: 1, + totalThoughts: 3, + nextThoughtNeeded: true + }; + const result = server.processThought(input); + expect(result.isError).toBeUndefined(); + const data = JSON.parse(result.content[0].text); + expect(data.thoughtNumber).toBe(1); + expect(data.totalThoughts).toBe(3); + expect(data.nextThoughtNeeded).toBe(true); + expect(data.thoughtHistoryLength).toBe(1); + }); + it('should accept thought with optional fields', () => { + const input = { + thought: 'Revising my earlier idea', + thoughtNumber: 2, + totalThoughts: 3, + nextThoughtNeeded: true, + isRevision: true, + revisesThought: 1, + needsMoreThoughts: false + }; + const result = server.processThought(input); + expect(result.isError).toBeUndefined(); + const data = JSON.parse(result.content[0].text); + expect(data.thoughtNumber).toBe(2); + expect(data.thoughtHistoryLength).toBe(1); + }); + it('should track multiple thoughts in history', () => { + const input1 = { + thought: 'First thought', + thoughtNumber: 1, + totalThoughts: 3, + nextThoughtNeeded: true + }; + const input2 = { + thought: 'Second thought', + thoughtNumber: 2, + totalThoughts: 3, + nextThoughtNeeded: true + }; + const input3 = { + thought: 'Final thought', + thoughtNumber: 3, + totalThoughts: 3, + nextThoughtNeeded: false + }; + server.processThought(input1); + server.processThought(input2); + const result = server.processThought(input3); + const data = JSON.parse(result.content[0].text); + expect(data.thoughtHistoryLength).toBe(3); + expect(data.nextThoughtNeeded).toBe(false); + }); + it('should auto-adjust totalThoughts if thoughtNumber exceeds it', () => { + const input = { + thought: 'Thought 5', + thoughtNumber: 5, + totalThoughts: 3, + nextThoughtNeeded: true + }; + const result = server.processThought(input); + const data = JSON.parse(result.content[0].text); + expect(data.totalThoughts).toBe(5); + }); + }); + describe('processThought - branching', () => { + it('should track branches correctly', () => { + const input1 = { + thought: 'Main thought', + thoughtNumber: 1, + totalThoughts: 3, + nextThoughtNeeded: true + }; + const input2 = { + thought: 'Branch A thought', + thoughtNumber: 2, + totalThoughts: 3, + nextThoughtNeeded: true, + branchFromThought: 1, + branchId: 'branch-a' + }; + const input3 = { + thought: 'Branch B thought', + thoughtNumber: 2, + totalThoughts: 3, + nextThoughtNeeded: false, + branchFromThought: 1, + branchId: 'branch-b' + }; + server.processThought(input1); + server.processThought(input2); + const result = server.processThought(input3); + const data = JSON.parse(result.content[0].text); + expect(data.branches).toContain('branch-a'); + expect(data.branches).toContain('branch-b'); + expect(data.branches.length).toBe(2); + expect(data.thoughtHistoryLength).toBe(3); + }); + it('should allow multiple thoughts in same branch', () => { + const input1 = { + thought: 'Branch thought 1', + thoughtNumber: 1, + totalThoughts: 2, + nextThoughtNeeded: true, + branchFromThought: 1, + branchId: 'branch-a' + }; + const input2 = { + thought: 'Branch thought 2', + thoughtNumber: 2, + totalThoughts: 2, + nextThoughtNeeded: false, + branchFromThought: 1, + branchId: 'branch-a' + }; + server.processThought(input1); + const result = server.processThought(input2); + const data = 
JSON.parse(result.content[0].text); + expect(data.branches).toContain('branch-a'); + expect(data.branches.length).toBe(1); + }); + }); + describe('processThought - edge cases', () => { + it('should handle very long thought strings', () => { + const input = { + thought: 'a'.repeat(10000), + thoughtNumber: 1, + totalThoughts: 1, + nextThoughtNeeded: false + }; + const result = server.processThought(input); + expect(result.isError).toBeUndefined(); + }); + it('should handle thoughtNumber = 1, totalThoughts = 1', () => { + const input = { + thought: 'Only thought', + thoughtNumber: 1, + totalThoughts: 1, + nextThoughtNeeded: false + }; + const result = server.processThought(input); + expect(result.isError).toBeUndefined(); + const data = JSON.parse(result.content[0].text); + expect(data.thoughtNumber).toBe(1); + expect(data.totalThoughts).toBe(1); + }); + it('should handle nextThoughtNeeded = false', () => { + const input = { + thought: 'Final thought', + thoughtNumber: 3, + totalThoughts: 3, + nextThoughtNeeded: false + }; + const result = server.processThought(input); + const data = JSON.parse(result.content[0].text); + expect(data.nextThoughtNeeded).toBe(false); + }); + }); + describe('processThought - response format', () => { + it('should return correct response structure on success', () => { + const input = { + thought: 'Test thought', + thoughtNumber: 1, + totalThoughts: 1, + nextThoughtNeeded: false + }; + const result = server.processThought(input); + expect(result).toHaveProperty('content'); + expect(Array.isArray(result.content)).toBe(true); + expect(result.content.length).toBe(1); + expect(result.content[0]).toHaveProperty('type', 'text'); + expect(result.content[0]).toHaveProperty('text'); + }); + it('should return valid JSON in response', () => { + const input = { + thought: 'Test thought', + thoughtNumber: 1, + totalThoughts: 1, + nextThoughtNeeded: false + }; + const result = server.processThought(input); + expect(() => JSON.parse(result.content[0].text)).not.toThrow(); + }); + }); + describe('processThought - with logging enabled', () => { + let serverWithLogging; + beforeEach(() => { + // Enable thought logging for these tests + delete process.env.DISABLE_THOUGHT_LOGGING; + serverWithLogging = new SequentialThinkingServer(); + }); + afterEach(() => { + // Reset to disabled for other tests + process.env.DISABLE_THOUGHT_LOGGING = 'true'; + }); + it('should format and log regular thoughts', () => { + const input = { + thought: 'Test thought with logging', + thoughtNumber: 1, + totalThoughts: 3, + nextThoughtNeeded: true + }; + const result = serverWithLogging.processThought(input); + expect(result.isError).toBeUndefined(); + }); + it('should format and log revision thoughts', () => { + const input = { + thought: 'Revised thought', + thoughtNumber: 2, + totalThoughts: 3, + nextThoughtNeeded: true, + isRevision: true, + revisesThought: 1 + }; + const result = serverWithLogging.processThought(input); + expect(result.isError).toBeUndefined(); + }); + it('should format and log branch thoughts', () => { + const input = { + thought: 'Branch thought', + thoughtNumber: 2, + totalThoughts: 3, + nextThoughtNeeded: false, + branchFromThought: 1, + branchId: 'branch-a' + }; + const result = serverWithLogging.processThought(input); + expect(result.isError).toBeUndefined(); + }); + }); +}); diff --git a/src/sequentialthinking/index.js b/src/sequentialthinking/index.js new file mode 100644 index 0000000000..90bb1cdc25 --- /dev/null +++ b/src/sequentialthinking/index.js @@ -0,0 +1,105 @@ 
+#!/usr/bin/env node
+import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { z } from "zod";
+import { SequentialThinkingServer } from './lib.js';
+const server = new McpServer({
+    name: "sequential-thinking-server",
+    version: "0.2.0",
+});
+const thinkingServer = new SequentialThinkingServer();
+server.registerTool("sequentialthinking", {
+    title: "Sequential Thinking",
+    description: `A detailed tool for dynamic and reflective problem-solving through thoughts.
+This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
+Each thought can build on, question, or revise previous insights as understanding deepens.
+
+When to use this tool:
+- Breaking down complex problems into steps
+- Planning and design with room for revision
+- Analysis that might need course correction
+- Problems where the full scope might not be clear initially
+- Problems that require a multi-step solution
+- Tasks that need to maintain context over multiple steps
+- Situations where irrelevant information needs to be filtered out
+
+Key features:
+- You can adjust totalThoughts up or down as you progress
+- You can question or revise previous thoughts
+- You can add more thoughts even after reaching what seemed like the end
+- You can express uncertainty and explore alternative approaches
+- Not every thought needs to build linearly - you can branch or backtrack
+- Generates a solution hypothesis
+- Verifies the hypothesis based on the Chain of Thought steps
+- Repeats the process until satisfied
+- Provides a correct answer
+
+Parameters explained:
+- thought: Your current thinking step, which can include:
+  * Regular analytical steps
+  * Revisions of previous thoughts
+  * Questions about previous decisions
+  * Realizations about needing more analysis
+  * Changes in approach
+  * Hypothesis generation
+  * Hypothesis verification
+- nextThoughtNeeded: True if you need more thinking, even if at what seemed like the end
+- thoughtNumber: Current number in sequence (can go beyond initial total if needed)
+- totalThoughts: Current estimate of thoughts needed (can be adjusted up/down)
+- isRevision: A boolean indicating if this thought revises previous thinking
+- revisesThought: If isRevision is true, which thought number is being reconsidered
+- branchFromThought: If branching, which thought number is the branching point
+- branchId: Identifier for the current branch (if any)
+- needsMoreThoughts: If reaching end but realizing more thoughts needed
+
+You should:
+1. Start with an initial estimate of needed thoughts, but be ready to adjust
+2. Feel free to question or revise previous thoughts
+3. Don't hesitate to add more thoughts if needed, even at the "end"
+4. Express uncertainty when present
+5. Mark thoughts that revise previous thinking or branch into new paths
+6. Ignore information that is irrelevant to the current step
+7. Generate a solution hypothesis when appropriate
+8. Verify the hypothesis based on the Chain of Thought steps
+9. Repeat the process until satisfied with the solution
+10. Provide a single, ideally correct answer as the final output
+11. Only set nextThoughtNeeded to false when truly done and a satisfactory answer is reached`,
+    inputSchema: {
+        thought: z.string().describe("Your current thinking step"),
+        nextThoughtNeeded: z.boolean().describe("Whether another thought step is needed"),
+        thoughtNumber: z.number().int().min(1).describe("Current thought number (numeric value, e.g., 1, 2, 3)"),
+        totalThoughts: z.number().int().min(1).describe("Estimated total thoughts needed (numeric value, e.g., 5, 10)"),
+        isRevision: z.boolean().optional().describe("Whether this revises previous thinking"),
+        revisesThought: z.number().int().min(1).optional().describe("Which thought is being reconsidered"),
+        branchFromThought: z.number().int().min(1).optional().describe("Branching point thought number"),
+        branchId: z.string().optional().describe("Branch identifier"),
+        needsMoreThoughts: z.boolean().optional().describe("If more thoughts are needed")
+    },
+    outputSchema: {
+        thoughtNumber: z.number(),
+        totalThoughts: z.number(),
+        nextThoughtNeeded: z.boolean(),
+        branches: z.array(z.string()),
+        thoughtHistoryLength: z.number()
+    },
+}, async (args) => {
+    const result = thinkingServer.processThought(args);
+    if (result.isError) {
+        return result;
+    }
+    // Parse the JSON response to get structured content
+    const parsedContent = JSON.parse(result.content[0].text);
+    return {
+        content: result.content,
+        structuredContent: parsedContent
+    };
+});
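+// Illustrative sketch only, not part of the server logic: the shape of a
+// first call a client might make to this tool, assuming a three-step plan.
+// Field names mirror the inputSchema above; the thought text is invented.
+//
+//   {
+//     "thought": "Break the task into parsing, validation, and output",
+//     "thoughtNumber": 1,
+//     "totalThoughts": 3,
+//     "nextThoughtNeeded": true
+//   }
+//
+// A revision of that step would add "isRevision": true and
+// "revisesThought": 1; a fork would add "branchFromThought" and a
+// "branchId" string, as exercised by the branching tests above.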
+async function runServer() {
+    const transport = new StdioServerTransport();
+    await server.connect(transport);
+    console.error("Sequential Thinking MCP Server running on stdio");
+}
+runServer().catch((error) => {
+    console.error("Fatal error running server:", error);
+    process.exit(1);
+});
diff --git a/src/sequentialthinking/lib.js b/src/sequentialthinking/lib.js
new file mode 100644
index 0000000000..54f3da3659
--- /dev/null
+++ b/src/sequentialthinking/lib.js
@@ -0,0 +1,78 @@
+import chalk from 'chalk';
+export class SequentialThinkingServer {
+    thoughtHistory = [];
+    branches = {};
+    disableThoughtLogging;
+    constructor() {
+        this.disableThoughtLogging = (process.env.DISABLE_THOUGHT_LOGGING || "").toLowerCase() === "true";
+    }
+    formatThought(thoughtData) {
+        const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData;
+        let prefix = '';
+        let context = '';
+        if (isRevision) {
+            prefix = chalk.yellow('šŸ”„ Revision');
+            context = ` (revising thought ${revisesThought})`;
+        }
+        else if (branchFromThought) {
+            prefix = chalk.green('🌿 Branch');
+            context = ` (from thought ${branchFromThought}, ID: ${branchId})`;
+        }
+        else {
+            prefix = chalk.blue('šŸ’­ Thought');
+            context = '';
+        }
+        const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`;
+        // chalk wraps the prefix in ANSI escape codes that count toward
+        // header.length but are invisible when rendered; strip them before
+        // measuring, and pad the header line to the visible width, so the
+        // box border matches what actually appears on screen.
+        const visibleHeader = header.replace(/\x1B\[[0-9;]*m/g, '');
+        const border = '─'.repeat(Math.max(visibleHeader.length, thought.length) + 4);
+        return `
+ā”Œ${border}┐
+│ ${header}${' '.repeat(border.length - visibleHeader.length - 2)} │
+ā”œ${border}┤
+│ ${thought.padEnd(border.length - 2)} │
+ā””${border}ā”˜`;
+    }
+    processThought(input) {
+        try {
+            // Validation happens at the tool registration layer via Zod
+            // Adjust totalThoughts if thoughtNumber exceeds it
+            if (input.thoughtNumber > input.totalThoughts) {
+                input.totalThoughts = input.thoughtNumber;
+            }
+            this.thoughtHistory.push(input);
+            if (input.branchFromThought && input.branchId) {
+                if (!this.branches[input.branchId]) {
+                    this.branches[input.branchId] = [];
+                }
+                this.branches[input.branchId].push(input);
+            }
+            if (!this.disableThoughtLogging) {
+                const formattedThought = this.formatThought(input);
+                console.error(formattedThought);
+            }
+            return {
+                content: [{
+                        type: "text",
+                        text: JSON.stringify({
+                            thoughtNumber: input.thoughtNumber,
+                            totalThoughts: input.totalThoughts,
+                            nextThoughtNeeded: input.nextThoughtNeeded,
+                            branches: Object.keys(this.branches),
+                            thoughtHistoryLength: this.thoughtHistory.length
+                        }, null, 2)
+                    }]
+            };
+        }
+        catch (error) {
+            return {
+                content: [{
+                        type: "text",
+                        text: JSON.stringify({
+                            error: error instanceof Error ? error.message : String(error),
+                            status: 'failed'
+                        }, null, 2)
+                    }],
+                isError: true
+            };
+        }
+    }
+}
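+// Illustrative sketch only: driving the class directly, e.g. from a REPL,
+// with invented thought text. processThought returns the MCP-style content
+// array built above, so the JSON payload can be inspected straight away.
+//
+//   const s = new SequentialThinkingServer();
+//   const res = s.processThought({
+//       thought: 'Outline the approach',
+//       thoughtNumber: 1,
+//       totalThoughts: 2,
+//       nextThoughtNeeded: true
+//   });
+//   JSON.parse(res.content[0].text).thoughtHistoryLength; // 1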
diff --git a/src/sequentialthinking/vitest.config.js b/src/sequentialthinking/vitest.config.js
new file mode 100644
index 0000000000..07c570529e
--- /dev/null
+++ b/src/sequentialthinking/vitest.config.js
@@ -0,0 +1,13 @@
+import { defineConfig } from 'vitest/config';
+export default defineConfig({
+    test: {
+        globals: true,
+        environment: 'node',
+        include: ['**/__tests__/**/*.test.ts'],
+        coverage: {
+            provider: 'v8',
+            include: ['**/*.ts'],
+            exclude: ['**/__tests__/**', '**/dist/**'],
+        },
+    },
+});
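+// Note: the include pattern picks up only TypeScript test sources under
+// __tests__/, so a run such as `npx vitest run` from this package directory
+// should execute those tests while coverage skips compiled output in dist/.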