diff --git a/.changeset/patch-defer-cache-memory-saves.md b/.changeset/patch-defer-cache-memory-saves.md new file mode 100644 index 0000000000..56db42d50d --- /dev/null +++ b/.changeset/patch-defer-cache-memory-saves.md @@ -0,0 +1,14 @@ +--- +"gh-aw": patch +--- + +Defer cache-memory saves until after threat detection validates agent output. + +The agent job now uploads cache-memory artifacts and the new `update_cache_memory` +job saves those artifacts to the Actions cache only after threat detection passes. + +This fixes a race where cache memories could be saved before detection validated +the agent's output. + +Fixes githubnext/gh-aw#5763 + diff --git a/.github/aw/actions-lock.json b/.github/aw/actions-lock.json index 13156845b2..ad52ade1a1 100644 --- a/.github/aw/actions-lock.json +++ b/.github/aw/actions-lock.json @@ -10,6 +10,11 @@ "version": "v4", "sha": "0057852bfaa89a56745cba8c7296529d2fc39830" }, + "actions/cache/save@v4": { + "repo": "actions/cache/save", + "version": "v4", + "sha": "0057852bfaa89a56745cba8c7296529d2fc39830" + }, "actions/cache@v4": { "repo": "actions/cache", "version": "v4", diff --git a/.github/workflows/audit-workflows.lock.yml b/.github/workflows/audit-workflows.lock.yml index 8bff6dc864..fbb2c022cb 100644 --- a/.github/workflows/audit-workflows.lock.yml +++ b/.github/workflows/audit-workflows.lock.yml @@ -71,17 +71,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -828,7 +832,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1037,8 +1043,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: trending-data-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5862,6 +5868,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -7283,6 +7290,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest 
+ permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: trending-data-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/changeset.lock.yml b/.github/workflows/changeset.lock.yml index 43b6388cd2..9f90953370 100644 --- a/.github/workflows/changeset.lock.yml +++ b/.github/workflows/changeset.lock.yml @@ -6926,8 +6926,432 @@ jobs: with: github-token: ${{ steps.app-token.outputs.token }} script: | - const { runUpdateWorkflow, createRenderStagedItem, createGetSummaryLine } = require("./update_runner.cjs"); - const { updatePRBody } = require("./update_pr_description_helpers.cjs"); + const fs = require("fs"); + const MAX_LOG_CONTENT_LENGTH = 10000; + function truncateForLogging(content) { + if (content.length <= MAX_LOG_CONTENT_LENGTH) { + return content; + } + return content.substring(0, MAX_LOG_CONTENT_LENGTH) + `\n... (truncated, total length: ${content.length})`; + } + function loadAgentOutput() { + const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT; + if (!agentOutputFile) { + core.info("No GH_AW_AGENT_OUTPUT environment variable found"); + return { success: false }; + } + let outputContent; + try { + outputContent = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + const errorMessage = `Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`; + core.error(errorMessage); + return { success: false, error: errorMessage }; + } + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return { success: false }; + } + core.info(`Agent output content length: ${outputContent.length}`); + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + const errorMessage = `Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`; + core.error(errorMessage); + core.info(`Failed to parse content:\n${truncateForLogging(outputContent)}`); + return { success: false, error: errorMessage }; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + core.info(`Parsed content: ${truncateForLogging(JSON.stringify(validatedOutput))}`); + return { success: false }; + } + return { success: true, items: validatedOutput.items }; + } + async function generateStagedPreview(options) { + const { title, description, items, renderItem } = options; + let summaryContent = `## 🎭 Staged Mode: ${title} Preview\n\n`; + summaryContent += `${description}\n\n`; + for (let i = 0; i < items.length; i++) { + const item = items[i]; + summaryContent += renderItem(item, i); + summaryContent += "---\n\n"; + } + try { + await core.summary.addRaw(summaryContent).write(); + core.info(summaryContent); + core.info(`📝 ${title} preview written to step summary`); + } catch (error) { + core.setFailed(error instanceof Error ?
error : String(error)); + } + } + function resolveTargetNumber(params) { + const { updateTarget, item, numberField, isValidContext, contextNumber, displayName } = params; + if (updateTarget === "*") { + const explicitNumber = item[numberField]; + if (explicitNumber) { + const parsed = parseInt(explicitNumber, 10); + if (isNaN(parsed) || parsed <= 0) { + return { success: false, error: `Invalid ${numberField} specified: ${explicitNumber}` }; + } + return { success: true, number: parsed }; + } else { + return { success: false, error: `Target is "*" but no ${numberField} specified in update item` }; + } + } else if (updateTarget && updateTarget !== "triggering") { + const parsed = parseInt(updateTarget, 10); + if (isNaN(parsed) || parsed <= 0) { + return { success: false, error: `Invalid ${displayName} number in target configuration: ${updateTarget}` }; + } + return { success: true, number: parsed }; + } else { + if (isValidContext && contextNumber) { + return { success: true, number: contextNumber }; + } + return { success: false, error: `Could not determine ${displayName} number` }; + } + } + function buildUpdateData(params) { + const { item, canUpdateStatus, canUpdateTitle, canUpdateBody, supportsStatus } = params; + const updateData = {}; + let hasUpdates = false; + const logMessages = []; + if (supportsStatus && canUpdateStatus && item.status !== undefined) { + if (item.status === "open" || item.status === "closed") { + updateData.state = item.status; + hasUpdates = true; + logMessages.push(`Will update status to: ${item.status}`); + } else { + logMessages.push(`Invalid status value: ${item.status}. Must be 'open' or 'closed'`); + } + } + if (canUpdateTitle && item.title !== undefined) { + const trimmedTitle = typeof item.title === "string" ? item.title.trim() : ""; + if (trimmedTitle.length > 0) { + updateData.title = trimmedTitle; + hasUpdates = true; + logMessages.push(`Will update title to: ${trimmedTitle}`); + } else { + logMessages.push("Invalid title value: must be a non-empty string"); + } + } + if (canUpdateBody && item.body !== undefined) { + if (typeof item.body === "string") { + updateData.body = item.body; + hasUpdates = true; + logMessages.push(`Will update body (length: ${item.body.length})`); + } else { + logMessages.push("Invalid body value: must be a string"); + } + } + return { hasUpdates, updateData, logMessages }; + } + async function runUpdateWorkflow(config) { + const { + itemType, + displayName, + displayNamePlural, + numberField, + outputNumberKey, + outputUrlKey, + isValidContext, + getContextNumber, + supportsStatus, + supportsOperation, + renderStagedItem, + executeUpdate, + getSummaryLine, + } = config; + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + const result = loadAgentOutput(); + if (!result.success) { + return; + } + const updateItems = result.items.filter( item => item.type === itemType); + if (updateItems.length === 0) { + core.info(`No ${itemType} items found in agent output`); + return; + } + core.info(`Found ${updateItems.length} ${itemType} item(s)`); + if (isStaged) { + await generateStagedPreview({ + title: `Update ${displayNamePlural.charAt(0).toUpperCase() + displayNamePlural.slice(1)}`, + description: `The following ${displayName} updates would be applied if staged mode was disabled:`, + items: updateItems, + renderItem: renderStagedItem, + }); + return; + } + const updateTarget = process.env.GH_AW_UPDATE_TARGET || "triggering"; + const canUpdateStatus = process.env.GH_AW_UPDATE_STATUS === "true"; + const 
canUpdateTitle = process.env.GH_AW_UPDATE_TITLE === "true"; + const canUpdateBody = process.env.GH_AW_UPDATE_BODY === "true"; + core.info(`Update target configuration: ${updateTarget}`); + if (supportsStatus) { + core.info(`Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}`); + } else { + core.info(`Can update title: ${canUpdateTitle}, body: ${canUpdateBody}`); + } + const contextIsValid = isValidContext(context.eventName, context.payload); + const contextNumber = getContextNumber(context.payload); + if (updateTarget === "triggering" && !contextIsValid) { + core.info(`Target is "triggering" but not running in ${displayName} context, skipping ${displayName} update`); + return; + } + const updatedItems = []; + for (let i = 0; i < updateItems.length; i++) { + const updateItem = updateItems[i]; + core.info(`Processing ${itemType} item ${i + 1}/${updateItems.length}`); + const targetResult = resolveTargetNumber({ + updateTarget, + item: updateItem, + numberField, + isValidContext: contextIsValid, + contextNumber, + displayName, + }); + if (!targetResult.success) { + core.info(targetResult.error); + continue; + } + const targetNumber = targetResult.number; + core.info(`Updating ${displayName} #${targetNumber}`); + const { hasUpdates, updateData, logMessages } = buildUpdateData({ + item: updateItem, + canUpdateStatus, + canUpdateTitle, + canUpdateBody, + supportsStatus, + }); + for (const msg of logMessages) { + core.info(msg); + } + if (supportsOperation && canUpdateBody && updateItem.body !== undefined && typeof updateItem.body === "string") { + updateData._operation = updateItem.operation || "append"; + updateData._rawBody = updateItem.body; + } + if (!hasUpdates) { + core.info("No valid updates to apply for this item"); + continue; + } + try { + const updatedItem = await executeUpdate(github, context, targetNumber, updateData); + core.info(`Updated ${displayName} #${updatedItem.number}: ${updatedItem.html_url}`); + updatedItems.push(updatedItem); + if (i === updateItems.length - 1) { + core.setOutput(outputNumberKey, updatedItem.number); + core.setOutput(outputUrlKey, updatedItem.html_url); + } + } catch (error) { + core.error(`✗ Failed to update ${displayName} #${targetNumber}: ${error instanceof Error ?
error.message : String(error)}`); + throw error; + } + } + if (updatedItems.length > 0) { + let summaryContent = `\n\n## Updated ${displayNamePlural.charAt(0).toUpperCase() + displayNamePlural.slice(1)}\n`; + for (const item of updatedItems) { + summaryContent += getSummaryLine(item); + } + await core.summary.addRaw(summaryContent).write(); + } + core.info(`Successfully updated ${updatedItems.length} ${displayName}(s)`); + return updatedItems; + } + function createRenderStagedItem(config) { + const { entityName, numberField, targetLabel, currentTargetText, includeOperation = false } = config; + return function renderStagedItem(item, index) { + let content = `### ${entityName} Update ${index + 1}\n`; + if (item[numberField]) { + content += `**${targetLabel}** #${item[numberField]}\n\n`; + } else { + content += `**Target:** ${currentTargetText}\n\n`; + } + if (item.title !== undefined) { + content += `**New Title:** ${item.title}\n\n`; + } + if (item.body !== undefined) { + if (includeOperation) { + const operation = item.operation || "append"; + content += `**Operation:** ${operation}\n`; + content += `**Body Content:**\n${item.body}\n\n`; + } else { + content += `**New Body:**\n${item.body}\n\n`; + } + } + if (item.status !== undefined) { + content += `**New Status:** ${item.status}\n\n`; + } + return content; + }; + } + function createGetSummaryLine(config) { + const { entityPrefix } = config; + return function getSummaryLine(item) { + return `- ${entityPrefix} #${item.number}: [${item.title}](${item.html_url})\n`; + }; + } + function getMessages() { + const messagesEnv = process.env.GH_AW_SAFE_OUTPUT_MESSAGES; + if (!messagesEnv) { + return null; + } + try { + return JSON.parse(messagesEnv); + } catch (error) { + core.warning(`Failed to parse GH_AW_SAFE_OUTPUT_MESSAGES: ${error instanceof Error ? error.message : String(error)}`); + return null; + } + } + function renderTemplate(template, context) { + return template.replace(/\{(\w+)\}/g, (match, key) => { + const value = context[key]; + return value !== undefined && value !== null ? String(value) : match; + }); + } + function toSnakeCase(obj) { + const result = {}; + for (const [key, value] of Object.entries(obj)) { + const snakeKey = key.replace(/([A-Z])/g, "_$1").toLowerCase(); + result[snakeKey] = value; + result[key] = value; + } + return result; + } + function getFooterMessage(ctx) { + const messages = getMessages(); + const templateContext = toSnakeCase(ctx); + const defaultFooter = "> 🏴‍☠️ Ahoy! This treasure was crafted by [{workflow_name}]({run_url})"; + let footer = messages?.footer ? renderTemplate(messages.footer, templateContext) : renderTemplate(defaultFooter, templateContext); + if (ctx.triggeringNumber) { + footer += ` fer issue #{triggering_number} 🗺️`.replace("{triggering_number}", String(ctx.triggeringNumber)); + } + return footer; + } + function getFooterInstallMessage(ctx) { + if (!ctx.workflowSource || !ctx.workflowSourceUrl) { + return ""; + } + const messages = getMessages(); + const templateContext = toSnakeCase(ctx); + const defaultInstall = + "> 🦜 Arr! To plunder this workflow fer yer own ship, run `gh aw add {workflow_source}`. Chart yer course at [{workflow_source_url}]({workflow_source_url})!"; + return messages?.footerInstall + ?
renderTemplate(messages.footerInstall, templateContext) + : renderTemplate(defaultInstall, templateContext); + } + function generateXMLMarker(workflowName, runUrl) { + const engineId = process.env.GH_AW_ENGINE_ID || ""; + const engineVersion = process.env.GH_AW_ENGINE_VERSION || ""; + const engineModel = process.env.GH_AW_ENGINE_MODEL || ""; + const trackerId = process.env.GH_AW_TRACKER_ID || ""; + const parts = []; + parts.push(`agentic-workflow: ${workflowName}`); + if (trackerId) { + parts.push(`tracker-id: ${trackerId}`); + } + if (engineId) { + parts.push(`engine: ${engineId}`); + } + if (engineVersion) { + parts.push(`version: ${engineVersion}`); + } + if (engineModel) { + parts.push(`model: ${engineModel}`); + } + parts.push(`run: ${runUrl}`); + return `<!-- ${parts.join(", ")} -->`; + } + function generateFooterWithMessages( + workflowName, + runUrl, + workflowSource, + workflowSourceURL, + triggeringIssueNumber, + triggeringPRNumber, + triggeringDiscussionNumber + ) { + let triggeringNumber; + if (triggeringIssueNumber) { + triggeringNumber = triggeringIssueNumber; + } else if (triggeringPRNumber) { + triggeringNumber = triggeringPRNumber; + } else if (triggeringDiscussionNumber) { + triggeringNumber = `discussion #${triggeringDiscussionNumber}`; + } + const ctx = { + workflowName, + runUrl, + workflowSource, + workflowSourceUrl: workflowSourceURL, + triggeringNumber, + }; + let footer = "\n\n" + getFooterMessage(ctx); + const installMessage = getFooterInstallMessage(ctx); + if (installMessage) { + footer += "\n>\n" + installMessage; + } + footer += "\n\n" + generateXMLMarker(workflowName, runUrl); + footer += "\n"; + return footer; + } + function buildAIFooter(workflowName, runUrl) { + return "\n\n" + getFooterMessage({ workflowName, runUrl }); + } + function buildIslandStartMarker(runId) { + return `<!-- gh-aw-island-start: ${runId} -->`; + } + function buildIslandEndMarker(runId) { + return `<!-- gh-aw-island-end: ${runId} -->`; + } + function findIsland(body, runId) { + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const startIndex = body.indexOf(startMarker); + if (startIndex === -1) { + return { found: false, startIndex: -1, endIndex: -1 }; + } + const endIndex = body.indexOf(endMarker, startIndex); + if (endIndex === -1) { + return { found: false, startIndex: -1, endIndex: -1 }; + } + return { found: true, startIndex, endIndex: endIndex + endMarker.length }; + } + function updatePRBody(params) { + const { currentBody, newContent, operation, workflowName, runUrl, runId } = params; + const aiFooter = buildAIFooter(workflowName, runUrl); + if (operation === "replace") { + core.info("Operation: replace (full body replacement)"); + return newContent; + } + if (operation === "replace-island") { + const island = findIsland(currentBody, runId); + if (island.found) { + core.info(`Operation: replace-island (updating existing island for run ${runId})`); + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const islandContent = `${startMarker}\n${newContent}${aiFooter}\n${endMarker}`; + const before = currentBody.substring(0, island.startIndex); + const after = currentBody.substring(island.endIndex); + return before + islandContent + after; + } else { + core.info(`Operation: replace-island (island not found for run ${runId}, falling back to append)`); + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const islandContent = `${startMarker}\n${newContent}${aiFooter}\n${endMarker}`; + const appendSection =
`\n\n---\n\n${islandContent}`; + return currentBody + appendSection; + } + } + if (operation === "prepend") { + core.info("Operation: prepend (add to start with separator)"); + const prependSection = `${newContent}${aiFooter}\n\n---\n\n`; + return prependSection + currentBody; + } + core.info("Operation: append (add to end with separator)"); + const appendSection = `\n\n---\n\n${newContent}${aiFooter}`; + return currentBody + appendSection; + } function isPRContext(eventName, payload) { const isPR = eventName === "pull_request" || diff --git a/.github/workflows/ci-doctor.lock.yml b/.github/workflows/ci-doctor.lock.yml index 6afc546b79..d06c5c7480 100644 --- a/.github/workflows/ci-doctor.lock.yml +++ b/.github/workflows/ci-doctor.lock.yml @@ -79,6 +79,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -86,12 +87,15 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> add_comment # create_issue --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -252,7 +256,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1024,8 +1030,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5856,6 +5862,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -7309,3 +7316,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/cli-version-checker.lock.yml b/.github/workflows/cli-version-checker.lock.yml index 5116b36494..f32cbfaba9 100644 --- 
a/.github/workflows/cli-version-checker.lock.yml +++ b/.github/workflows/cli-version-checker.lock.yml @@ -62,14 +62,18 @@ # conclusion["conclusion"] # create_issue["create_issue"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> conclusion # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -384,7 +388,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -547,8 +553,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -4929,6 +4935,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6253,3 +6260,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/cloclo.lock.yml b/.github/workflows/cloclo.lock.yml index 80985f7e93..e1e4c3ef59 100644 --- a/.github/workflows/cloclo.lock.yml +++ b/.github/workflows/cloclo.lock.yml @@ -76,6 +76,7 @@ # create_pull_request["create_pull_request"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request @@ -84,12 +85,15 @@ # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> add_comment # create_pull_request --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -334,7 +338,9 @@ # ``` # # Pinned GitHub 
Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1920,8 +1926,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: cloclo-memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6388,6 +6394,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -7911,3 +7918,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: cloclo-memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/close-old-discussions.lock.yml b/.github/workflows/close-old-discussions.lock.yml index b5461a7b8c..d03a20ad1a 100644 --- a/.github/workflows/close-old-discussions.lock.yml +++ b/.github/workflows/close-old-discussions.lock.yml @@ -176,14 +176,18 @@ # close_discussion["close_discussion"] # conclusion["conclusion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> close_discussion # agent --> conclusion # agent --> detection +# agent --> update_cache_memory # close_discussion --> conclusion # detection --> close_discussion # detection --> conclusion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -387,7 +391,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -566,8 +572,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely 
read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: discussions-data-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5209,6 +5215,7 @@ jobs: - agent - close_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -5921,3 +5928,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: discussions-data-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/copilot-agent-analysis.lock.yml b/.github/workflows/copilot-agent-analysis.lock.yml index 7db362fd7d..777d1891e1 100644 --- a/.github/workflows/copilot-agent-analysis.lock.yml +++ b/.github/workflows/copilot-agent-analysis.lock.yml @@ -89,14 +89,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -705,7 +709,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -875,8 +881,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: copilot-pr-data-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5542,6 +5548,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6959,3 +6966,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: 
ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: copilot-pr-data-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/copilot-pr-nlp-analysis.lock.yml b/.github/workflows/copilot-pr-nlp-analysis.lock.yml index 898b3bff8b..85e28a1dd0 100644 --- a/.github/workflows/copilot-pr-nlp-analysis.lock.yml +++ b/.github/workflows/copilot-pr-nlp-analysis.lock.yml @@ -122,17 +122,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -963,7 +967,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1165,8 +1171,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: copilot-pr-data-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6647,6 +6653,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8057,6 +8064,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: copilot-pr-data-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/copilot-pr-prompt-analysis.lock.yml b/.github/workflows/copilot-pr-prompt-analysis.lock.yml index 0b81a39f17..8aa1a4fdb9 100644 --- 
a/.github/workflows/copilot-pr-prompt-analysis.lock.yml +++ b/.github/workflows/copilot-pr-prompt-analysis.lock.yml @@ -81,14 +81,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -521,7 +525,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -691,8 +697,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: copilot-pr-data-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5729,6 +5735,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7138,3 +7145,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: copilot-pr-data-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/copilot-session-insights.lock.yml b/.github/workflows/copilot-session-insights.lock.yml index effe9270bc..36f06acfd8 100644 --- a/.github/workflows/copilot-session-insights.lock.yml +++ b/.github/workflows/copilot-session-insights.lock.yml @@ -91,17 +91,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -1385,7 +1389,9 @@ # ``` 
# # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1580,8 +1586,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6951,6 +6957,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8369,6 +8376,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/daily-code-metrics.lock.yml b/.github/workflows/daily-code-metrics.lock.yml index 7d910dea0c..517d3171b5 100644 --- a/.github/workflows/daily-code-metrics.lock.yml +++ b/.github/workflows/daily-code-metrics.lock.yml @@ -63,14 +63,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -923,7 +927,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1106,8 +1112,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" 
echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: trending-data-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5992,6 +5998,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7412,3 +7419,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: trending-data-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/daily-copilot-token-report.lock.yml b/.github/workflows/daily-copilot-token-report.lock.yml index 7f6dbbabf3..408fd9c943 100644 --- a/.github/workflows/daily-copilot-token-report.lock.yml +++ b/.github/workflows/daily-copilot-token-report.lock.yml @@ -81,17 +81,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -1041,7 +1045,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1233,8 +1239,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6815,6 +6821,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8228,6 +8235,26 @@ 
jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/daily-doc-updater.lock.yml b/.github/workflows/daily-doc-updater.lock.yml index 5f4f5f972c..2fcad2b33b 100644 --- a/.github/workflows/daily-doc-updater.lock.yml +++ b/.github/workflows/daily-doc-updater.lock.yml @@ -74,15 +74,19 @@ # conclusion["conclusion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> conclusion # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -250,7 +254,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -410,8 +416,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -4661,6 +4667,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6072,3 +6079,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: 
/tmp/gh-aw/cache-memory + diff --git a/.github/workflows/daily-firewall-report.lock.yml b/.github/workflows/daily-firewall-report.lock.yml index 37718356fe..c5438c8e37 100644 --- a/.github/workflows/daily-firewall-report.lock.yml +++ b/.github/workflows/daily-firewall-report.lock.yml @@ -77,6 +77,7 @@ # create_discussion["create_discussion"] # detection["detection"] # push_repo_memory["push_repo_memory"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion @@ -84,13 +85,16 @@ # agent --> create_discussion # agent --> detection # agent --> push_repo_memory +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion # detection --> push_repo_memory +# detection --> update_cache_memory # detection --> upload_assets # push_repo_memory --> conclusion +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -701,7 +705,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -904,8 +910,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: trending-data-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6237,6 +6243,7 @@ jobs: - create_discussion - detection - push_repo_memory + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -7843,6 +7850,26 @@ jobs: core.setFailed(`Unexpected error: ${error instanceof Error ? 
error.message : String(error)}`); }); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: trending-data-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/daily-issues-report.lock.yml b/.github/workflows/daily-issues-report.lock.yml index d4289310fb..26f57aa856 100644 --- a/.github/workflows/daily-issues-report.lock.yml +++ b/.github/workflows/daily-issues-report.lock.yml @@ -74,6 +74,7 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion @@ -81,13 +82,16 @@ # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # close_discussion --> conclusion # create_discussion --> conclusion # detection --> close_discussion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -1081,7 +1085,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1278,8 +1284,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6815,6 +6821,7 @@ jobs: - close_discussion - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8217,6 +8224,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: 
actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/daily-news.lock.yml b/.github/workflows/daily-news.lock.yml index 6f14d573a1..814c7c7243 100644 --- a/.github/workflows/daily-news.lock.yml +++ b/.github/workflows/daily-news.lock.yml @@ -262,17 +262,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -1053,7 +1057,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1251,8 +1257,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6704,6 +6710,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8117,6 +8124,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/daily-performance-summary.lock.yml b/.github/workflows/daily-performance-summary.lock.yml index 752974b355..4c3c466be0 100644 --- a/.github/workflows/daily-performance-summary.lock.yml +++ b/.github/workflows/daily-performance-summary.lock.yml @@ -70,6 +70,7 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # 
upload_assets["upload_assets"] # activation --> agent # activation --> conclusion @@ -77,13 +78,16 @@ # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # close_discussion --> conclusion # create_discussion --> conclusion # detection --> close_discussion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -849,7 +853,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1039,8 +1045,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: trending-data-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -8029,6 +8035,7 @@ jobs: - close_discussion - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -9431,6 +9438,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: trending-data-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/daily-repo-chronicle.lock.yml b/.github/workflows/daily-repo-chronicle.lock.yml index 87988e1d4c..16bc1d2b65 100644 --- a/.github/workflows/daily-repo-chronicle.lock.yml +++ b/.github/workflows/daily-repo-chronicle.lock.yml @@ -75,17 +75,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -749,7 +753,9 @@ # ``` # # Pinned GitHub 
Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -937,8 +943,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6248,6 +6254,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -7661,6 +7668,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/deep-report.lock.yml b/.github/workflows/deep-report.lock.yml index 6c9b35c948..0721905131 100644 --- a/.github/workflows/deep-report.lock.yml +++ b/.github/workflows/deep-report.lock.yml @@ -92,6 +92,7 @@ # create_discussion["create_discussion"] # detection["detection"] # push_repo_memory["push_repo_memory"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion @@ -99,13 +100,16 @@ # agent --> create_discussion # agent --> detection # agent --> push_repo_memory +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion # detection --> push_repo_memory +# detection --> update_cache_memory # detection --> upload_assets # push_repo_memory --> conclusion +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -594,7 +598,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -788,8 
+794,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: weekly-issues-data-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5465,6 +5471,7 @@ jobs: - create_discussion - detection - push_repo_memory + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -7060,6 +7067,26 @@ jobs: core.setFailed(`Unexpected error: ${error instanceof Error ? error.message : String(error)}`); }); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: weekly-issues-data-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/developer-docs-consolidator.lock.yml b/.github/workflows/developer-docs-consolidator.lock.yml index a27f66d1df..b4805c4f25 100644 --- a/.github/workflows/developer-docs-consolidator.lock.yml +++ b/.github/workflows/developer-docs-consolidator.lock.yml @@ -88,6 +88,7 @@ # create_discussion["create_discussion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request @@ -95,11 +96,14 @@ # agent --> create_discussion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # create_pull_request --> conclusion # detection --> conclusion # detection --> create_discussion # detection --> create_pull_request +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -753,7 +757,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -933,8 +939,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: developer-docs-cache-${{ github.run_id }} 
path: /tmp/gh-aw/cache-memory @@ -5748,6 +5754,7 @@ jobs: - create_discussion - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7788,3 +7795,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: developer-docs-cache-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/github-mcp-structural-analysis.lock.yml b/.github/workflows/github-mcp-structural-analysis.lock.yml index f602bfa187..b68c6b404f 100644 --- a/.github/workflows/github-mcp-structural-analysis.lock.yml +++ b/.github/workflows/github-mcp-structural-analysis.lock.yml @@ -69,17 +69,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -743,7 +747,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -937,8 +943,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5674,6 +5680,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -7092,6 +7099,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: 
actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/github-mcp-tools-report.lock.yml b/.github/workflows/github-mcp-tools-report.lock.yml index 8c34143f67..ebb86782b4 100644 --- a/.github/workflows/github-mcp-tools-report.lock.yml +++ b/.github/workflows/github-mcp-tools-report.lock.yml @@ -70,6 +70,7 @@ # create_discussion["create_discussion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request @@ -77,11 +78,14 @@ # agent --> create_discussion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # create_pull_request --> conclusion # detection --> conclusion # detection --> create_discussion # detection --> create_pull_request +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -621,7 +625,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -789,8 +795,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5455,6 +5461,7 @@ jobs: - create_discussion - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7546,3 +7553,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/glossary-maintainer.lock.yml b/.github/workflows/glossary-maintainer.lock.yml index 5918ecfe1f..922a7bc46a 100644 --- 
a/.github/workflows/glossary-maintainer.lock.yml +++ b/.github/workflows/glossary-maintainer.lock.yml @@ -83,15 +83,19 @@ # conclusion["conclusion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> conclusion # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -697,7 +701,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -877,8 +883,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6142,6 +6148,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7490,3 +7497,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/go-fan.lock.yml b/.github/workflows/go-fan.lock.yml index 9b42c9c9a5..a3eacc7d00 100644 --- a/.github/workflows/go-fan.lock.yml +++ b/.github/workflows/go-fan.lock.yml @@ -86,14 +86,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -442,7 +446,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - 
actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -622,8 +628,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5049,6 +5055,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6470,3 +6477,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/go-logger.lock.yml b/.github/workflows/go-logger.lock.yml index 7d6efc27d2..8af3248eeb 100644 --- a/.github/workflows/go-logger.lock.yml +++ b/.github/workflows/go-logger.lock.yml @@ -85,15 +85,19 @@ # conclusion["conclusion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> conclusion # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -360,7 +364,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -538,8 +544,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - 
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -4890,6 +4896,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6246,3 +6253,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/grumpy-reviewer.lock.yml b/.github/workflows/grumpy-reviewer.lock.yml index fb19a16c67..0abfa81bbc 100644 --- a/.github/workflows/grumpy-reviewer.lock.yml +++ b/.github/workflows/grumpy-reviewer.lock.yml @@ -59,6 +59,7 @@ # create_pr_review_comment["create_pr_review_comment"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -66,11 +67,14 @@ # agent --> conclusion # agent --> create_pr_review_comment # agent --> detection +# agent --> update_cache_memory # create_pr_review_comment --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_pr_review_comment +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -203,7 +207,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1722,8 +1728,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6527,6 +6533,7 @@ jobs: - agent - create_pr_review_comment - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -7767,3 +7774,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + 
if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/instructions-janitor.lock.yml b/.github/workflows/instructions-janitor.lock.yml index 456ca28c76..747f686536 100644 --- a/.github/workflows/instructions-janitor.lock.yml +++ b/.github/workflows/instructions-janitor.lock.yml @@ -72,15 +72,19 @@ # conclusion["conclusion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> conclusion # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -247,7 +251,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -407,8 +413,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -4655,6 +4661,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6011,3 +6018,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/lockfile-stats.lock.yml b/.github/workflows/lockfile-stats.lock.yml index c927f750c3..7feae228f4 100644 --- 
a/.github/workflows/lockfile-stats.lock.yml +++ b/.github/workflows/lockfile-stats.lock.yml @@ -58,14 +58,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -482,7 +486,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -642,8 +648,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5095,6 +5101,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6511,3 +6518,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/mcp-inspector.lock.yml b/.github/workflows/mcp-inspector.lock.yml index 6d54ffb928..6931e43ef9 100644 --- a/.github/workflows/mcp-inspector.lock.yml +++ b/.github/workflows/mcp-inspector.lock.yml @@ -103,6 +103,7 @@ # detection["detection"] # notion_add_comment["notion_add_comment"] # post_to_slack_channel["post_to_slack_channel"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion @@ -110,13 +111,16 @@ # agent --> detection # agent --> notion_add_comment # agent --> post_to_slack_channel +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion # detection --> notion_add_comment # detection --> post_to_slack_channel +# detection --> update_cache_memory # notion_add_comment --> conclusion # post_to_slack_channel --> conclusion +# 
update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -381,7 +385,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -577,8 +583,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5777,6 +5783,7 @@ jobs: - detection - notion_add_comment - post_to_slack_channel + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7459,3 +7466,23 @@ jobs: } } + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/org-health-report.lock.yml b/.github/workflows/org-health-report.lock.yml index 9761cf4a37..9e33c847e4 100644 --- a/.github/workflows/org-health-report.lock.yml +++ b/.github/workflows/org-health-report.lock.yml @@ -76,17 +76,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -954,7 +958,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1146,8 +1152,8 @@ jobs: echo "Cache memory directory created at 
/tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6643,6 +6649,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8052,6 +8059,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/pdf-summary.lock.yml b/.github/workflows/pdf-summary.lock.yml index 2ae75667b5..a414f15494 100644 --- a/.github/workflows/pdf-summary.lock.yml +++ b/.github/workflows/pdf-summary.lock.yml @@ -81,15 +81,19 @@ # conclusion["conclusion"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion # agent --> add_comment # agent --> conclusion # agent --> detection +# agent --> update_cache_memory # detection --> add_comment # detection --> conclusion +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -239,7 +243,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1782,8 +1788,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6545,6 +6551,7 @@ jobs: - add_comment - agent - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -7421,3 +7428,23 @@ 
jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/poem-bot.lock.yml b/.github/workflows/poem-bot.lock.yml index 1055c5c1fe..d5c8195b76 100644 --- a/.github/workflows/poem-bot.lock.yml +++ b/.github/workflows/poem-bot.lock.yml @@ -180,6 +180,7 @@ # link_sub_issue["link_sub_issue"] # pre_activation["pre_activation"] # push_to_pull_request_branch["push_to_pull_request_branch"] +# update_cache_memory["update_cache_memory"] # update_issue["update_issue"] # upload_assets["upload_assets"] # activation --> agent @@ -200,6 +201,7 @@ # agent --> detection # agent --> link_sub_issue # agent --> push_to_pull_request_branch +# agent --> update_cache_memory # agent --> update_issue # agent --> upload_assets # close_pull_request --> conclusion @@ -224,11 +226,13 @@ # detection --> create_pull_request # detection --> link_sub_issue # detection --> push_to_pull_request_branch +# detection --> update_cache_memory # detection --> update_issue # detection --> upload_assets # link_sub_issue --> conclusion # pre_activation --> activation # push_to_pull_request_branch --> conclusion +# update_cache_memory --> conclusion # update_issue --> conclusion # upload_assets --> conclusion # ``` @@ -280,7 +284,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -2382,8 +2388,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: poem-memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -7708,6 +7714,7 @@ jobs: - detection - link_sub_issue - push_to_pull_request_branch + - update_cache_memory - update_issue - upload_assets if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) @@ -12102,6 +12109,26 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: 
true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: poem-memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + update_issue: needs: - agent diff --git a/.github/workflows/pr-nitpick-reviewer.lock.yml b/.github/workflows/pr-nitpick-reviewer.lock.yml index 0851727477..334b6827f0 100644 --- a/.github/workflows/pr-nitpick-reviewer.lock.yml +++ b/.github/workflows/pr-nitpick-reviewer.lock.yml @@ -69,6 +69,7 @@ # create_pr_review_comment["create_pr_review_comment"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -77,6 +78,7 @@ # agent --> create_discussion # agent --> create_pr_review_comment # agent --> detection +# agent --> update_cache_memory # create_discussion --> add_comment # create_discussion --> conclusion # create_pr_review_comment --> conclusion @@ -84,7 +86,9 @@ # detection --> conclusion # detection --> create_discussion # detection --> create_pr_review_comment +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -522,7 +526,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1706,8 +1712,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6871,6 +6877,7 @@ jobs: - create_discussion - create_pr_review_comment - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -8804,3 +8811,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/prompt-clustering-analysis.lock.yml b/.github/workflows/prompt-clustering-analysis.lock.yml index 7acca2b0c5..45a186c862 100644 --- 
a/.github/workflows/prompt-clustering-analysis.lock.yml +++ b/.github/workflows/prompt-clustering-analysis.lock.yml @@ -147,14 +147,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -1098,6 +1102,10 @@ # Pinned GitHub Actions: # - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd # - actions/download-artifact@v6 (018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) @@ -1325,8 +1333,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: trending-data-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6304,6 +6312,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7721,3 +7730,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: trending-data-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/python-data-charts.lock.yml b/.github/workflows/python-data-charts.lock.yml index 1a3d9bcb5c..98e19d72c4 100644 --- a/.github/workflows/python-data-charts.lock.yml +++ b/.github/workflows/python-data-charts.lock.yml @@ -58,17 +58,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # detection --> 
create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -1046,7 +1050,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1232,8 +1238,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6877,6 +6883,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8285,6 +8292,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/q.lock.yml b/.github/workflows/q.lock.yml index 5519a7ebf5..a8f395a3de 100644 --- a/.github/workflows/q.lock.yml +++ b/.github/workflows/q.lock.yml @@ -80,6 +80,7 @@ # create_pull_request["create_pull_request"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request @@ -88,12 +89,15 @@ # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> add_comment # create_pull_request --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -462,7 +466,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 
(93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -2038,8 +2044,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -7133,6 +7139,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -8693,3 +8700,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/release.lock.yml b/.github/workflows/release.lock.yml index 0ba6e35b97..bfddcfddf9 100644 --- a/.github/workflows/release.lock.yml +++ b/.github/workflows/release.lock.yml @@ -5973,19 +5973,19 @@ jobs: - name: Download Go modules run: go mod download - name: Generate SBOM (SPDX format) - uses: anchore/sbom-action@fbfd9c6c189226748411491745178e0c2017392d # v0.20.10 + uses: anchore/sbom-action@fbfd9c6c189226748411491745178e0c2017392d # v0 with: artifact-name: sbom.spdx.json format: spdx-json output-file: sbom.spdx.json - name: Generate SBOM (CycloneDX format) - uses: anchore/sbom-action@fbfd9c6c189226748411491745178e0c2017392d # v0.20.10 + uses: anchore/sbom-action@fbfd9c6c189226748411491745178e0c2017392d # v0 with: artifact-name: sbom.cdx.json format: cyclonedx-json output-file: sbom.cdx.json - name: Upload SBOM artifacts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: name: sbom-artifacts path: | diff --git a/.github/workflows/repository-quality-improver.lock.yml b/.github/workflows/repository-quality-improver.lock.yml index e46ca11ad7..c0c8d16fbe 100644 --- a/.github/workflows/repository-quality-improver.lock.yml +++ b/.github/workflows/repository-quality-improver.lock.yml @@ -67,14 +67,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -684,7 +688,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - 
actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -861,8 +867,8 @@ jobs: - name: Create cache-memory directory (focus-areas) run: | mkdir -p /tmp/gh-aw/cache-memory-focus-areas - - name: Cache memory file share data (focus-areas) - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data (focus-areas) + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: quality-focus-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory-focus-areas @@ -6088,6 +6094,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7496,3 +7503,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (focus-areas) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory-focus-areas + path: /tmp/gh-aw/cache-memory-focus-areas + - name: Save cache-memory to cache (focus-areas) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: quality-focus-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory-focus-areas + diff --git a/.github/workflows/safe-output-health.lock.yml b/.github/workflows/safe-output-health.lock.yml index 2dd3484b55..302f83a42f 100644 --- a/.github/workflows/safe-output-health.lock.yml +++ b/.github/workflows/safe-output-health.lock.yml @@ -68,14 +68,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -583,7 +587,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -767,8 +773,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: 
actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5327,6 +5333,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6743,3 +6750,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/schema-consistency-checker.lock.yml b/.github/workflows/schema-consistency-checker.lock.yml index 3c0214288e..98c60367ce 100644 --- a/.github/workflows/schema-consistency-checker.lock.yml +++ b/.github/workflows/schema-consistency-checker.lock.yml @@ -64,14 +64,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -492,7 +496,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -654,8 +660,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: schema-consistency-cache-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5105,6 +5111,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6522,3 +6529,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + 
permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: schema-consistency-cache-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/scout.lock.yml b/.github/workflows/scout.lock.yml index 2dfa9ebef0..5f34e4ac2e 100644 --- a/.github/workflows/scout.lock.yml +++ b/.github/workflows/scout.lock.yml @@ -82,15 +82,19 @@ # conclusion["conclusion"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion # agent --> add_comment # agent --> conclusion # agent --> detection +# agent --> update_cache_memory # detection --> add_comment # detection --> conclusion +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -419,7 +423,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1983,8 +1989,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6438,6 +6444,7 @@ jobs: - add_comment - agent - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -7332,3 +7339,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/security-fix-pr.lock.yml b/.github/workflows/security-fix-pr.lock.yml index 10e73bf25d..4276648031 100644 --- a/.github/workflows/security-fix-pr.lock.yml +++ b/.github/workflows/security-fix-pr.lock.yml @@ -62,16 +62,20 @@ # create_pull_request["create_pull_request"] # detection["detection"] # 
pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # create_pull_request --> conclusion # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -216,7 +220,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -384,8 +390,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -4593,6 +4599,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6146,3 +6153,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/smoke-claude.lock.yml b/.github/workflows/smoke-claude.lock.yml index 0bc6ea90a8..5a4aadaf14 100644 --- a/.github/workflows/smoke-claude.lock.yml +++ b/.github/workflows/smoke-claude.lock.yml @@ -42,7 +42,6 @@ # max-turns: 15 # strict: false # imports: -# - shared/gh.md # - shared/mcp-pagination.md # network: # allowed: @@ -50,6 +49,7 @@ # - github # - playwright # tools: +# cache-memory: true # github: # toolsets: [repos, pull_requests] # playwright: @@ -74,7 +74,6 @@ # # Resolved workflow manifest: # Imports: -# - shared/gh.md # - shared/mcp-pagination.md # # Job Dependency Graph: @@ -88,6 +87,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -97,13 +97,16 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> add_comment # create_issue --> conclusion # detection --> 
add_comment # detection --> add_labels # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -229,7 +232,8 @@ # 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-claude-${{ github.run_id }}.txt` with content "Smoke test passed for Claude at $(date)" (create the directory if it doesn't exist) # 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) # 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" -# 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +# 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +# 6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues # # ## Output # @@ -242,6 +246,10 @@ # ``` # # Pinned GitHub Actions: +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd # - actions/download-artifact@v6 (018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) @@ -1974,6 +1982,26 @@ jobs: mkdir -p /tmp/gh-aw/agent mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" + # Cache memory file share configuration from frontmatter processed below + - name: Create cache-memory directory + run: | + mkdir -p /tmp/gh-aw/cache-memory + echo "Cache memory directory created at /tmp/gh-aw/cache-memory" + echo "This folder provides persistent file storage across workflow runs" + echo "LLMs and agentic tools can freely read and write files in this directory" + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + restore-keys: | + memory-${{ github.workflow }}- + memory- + - name: Upload cache-memory data as artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory - name: Configure Git credentials env: REPO_NAME: ${{ github.repository }} @@ -3357,1506 +3385,142 @@ jobs: lines: patchResult.patchLines, }, }), - }, - ], - }; - }; - return { - defaultHandler, - uploadAssetHandler, - createPullRequestHandler, - pushToPullRequestBranchHandler, - }; - } - function loadTools(server) { - const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; - let ALL_TOOLS = []; - server.debug(`Reading tools from file: ${toolsPath}`); - try { - if (fs.existsSync(toolsPath)) { - server.debug(`Tools file exists at: ${toolsPath}`); - const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); - server.debug(`Tools file content length: 
${toolsFileContent.length} characters`); - server.debug(`Tools file read successfully, attempting to parse JSON`); - ALL_TOOLS = JSON.parse(toolsFileContent); - server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); - } else { - server.debug(`Tools file does not exist at: ${toolsPath}`); - server.debug(`Using empty tools array`); - ALL_TOOLS = []; - } - } catch (error) { - server.debug(`Error reading tools file: ${error instanceof Error ? error.message : String(error)}`); - server.debug(`Falling back to empty tools array`); - ALL_TOOLS = []; - } - return ALL_TOOLS; - } - function attachHandlers(tools, handlers) { - tools.forEach(tool => { - if (tool.name === "create_pull_request") { - tool.handler = handlers.createPullRequestHandler; - } else if (tool.name === "push_to_pull_request_branch") { - tool.handler = handlers.pushToPullRequestBranchHandler; - } else if (tool.name === "upload_asset") { - tool.handler = handlers.uploadAssetHandler; - } - }); - return tools; - } - function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { - tools.forEach(tool => { - if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { - registerTool(server, tool); - } - }); - } - function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { - Object.keys(config).forEach(configKey => { - const normalizedKey = normalizeTool(configKey); - if (server.tools[normalizedKey]) { - return; - } - if (!tools.find(t => t.name === normalizedKey)) { - const jobConfig = config[configKey]; - const dynamicTool = { - name: normalizedKey, - description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, - inputSchema: { - type: "object", - properties: {}, - additionalProperties: true, - }, - handler: args => { - const entry = { - type: normalizedKey, - ...args, - }; - const entryJSON = JSON.stringify(entry); - fs.appendFileSync(outputFile, entryJSON + "\n"); - const outputText = - jobConfig && jobConfig.output - ? 
jobConfig.output - : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; - return { - content: [ - { - type: "text", - text: JSON.stringify({ result: outputText }), - }, - ], - }; - }, - }; - if (jobConfig && jobConfig.inputs) { - dynamicTool.inputSchema.properties = {}; - dynamicTool.inputSchema.required = []; - Object.keys(jobConfig.inputs).forEach(inputName => { - const inputDef = jobConfig.inputs[inputName]; - const propSchema = { - type: inputDef.type || "string", - description: inputDef.description || `Input parameter: ${inputName}`, - }; - if (inputDef.options && Array.isArray(inputDef.options)) { - propSchema.enum = inputDef.options; - } - dynamicTool.inputSchema.properties[inputName] = propSchema; - if (inputDef.required) { - dynamicTool.inputSchema.required.push(inputName); - } - }); - } - registerTool(server, dynamicTool); - } - }); - } - const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; - const MCP_LOG_DIR = process.env.GH_AW_MCP_LOG_DIR; - const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); - const { config: safeOutputsConfig, outputFile } = loadConfig(server); - const appendSafeOutput = createAppendFunction(outputFile); - const handlers = createHandlers(server, appendSafeOutput); - const { defaultHandler } = handlers; - let ALL_TOOLS = loadTools(server); - ALL_TOOLS = attachHandlers(ALL_TOOLS, handlers); - server.debug(` output file: ${outputFile}`); - server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); - registerPredefinedTools(server, ALL_TOOLS, safeOutputsConfig, registerTool, normalizeTool); - registerDynamicTools(server, ALL_TOOLS, safeOutputsConfig, outputFile, registerTool, normalizeTool); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); - start(server, { defaultHandler }); - EOF - chmod +x /tmp/gh-aw/safeoutputs/mcp-server.cjs - - - name: Setup Safe Inputs JavaScript and Config - run: | - mkdir -p /tmp/gh-aw/safe-inputs/logs - cat > /tmp/gh-aw/safe-inputs/read_buffer.cjs << 'EOF_READ_BUFFER' - class ReadBuffer { - constructor() { - this._buffer = null; - } - append(chunk) { - this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; - } - readMessage() { - if (!this._buffer) { - return null; - } - const index = this._buffer.indexOf("\n"); - if (index === -1) { - return null; - } - const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); - this._buffer = this._buffer.subarray(index + 1); - if (line.trim() === "") { - return this.readMessage(); - } - try { - return JSON.parse(line); - } catch (error) { - throw new Error(`Parse error: ${error instanceof Error ? 
error.message : String(error)}`); - } - } - } - module.exports = { - ReadBuffer, - }; - EOF_READ_BUFFER - cat > /tmp/gh-aw/safe-inputs/mcp_server_core.cjs << 'EOF_MCP_CORE' - const fs = require("fs"); - const path = require("path"); - const { ReadBuffer } = require("./read_buffer.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const encoder = new TextEncoder(); - function initLogFile(server) { - if (server.logFileInitialized || !server.logDir || !server.logFilePath) return; - try { - if (!fs.existsSync(server.logDir)) { - fs.mkdirSync(server.logDir, { recursive: true }); - } - const timestamp = new Date().toISOString(); - fs.writeFileSync( - server.logFilePath, - `# ${server.serverInfo.name} MCP Server Log\n# Started: ${timestamp}\n# Version: ${server.serverInfo.version}\n\n` - ); - server.logFileInitialized = true; - } catch { - } - } - function createDebugFunction(server) { - return msg => { - const timestamp = new Date().toISOString(); - const formattedMsg = `[${timestamp}] [${server.serverInfo.name}] ${msg}\n`; - process.stderr.write(formattedMsg); - if (server.logDir && server.logFilePath) { - if (!server.logFileInitialized) { - initLogFile(server); - } - if (server.logFileInitialized) { - try { - fs.appendFileSync(server.logFilePath, formattedMsg); - } catch { - } - } - } - }; - } - function createDebugErrorFunction(server) { - return (prefix, error) => { - const errorMessage = error instanceof Error ? error.message : String(error); - server.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - server.debug(`${prefix}Stack trace: ${error.stack}`); - } - }; - } - function createWriteMessageFunction(server) { - return obj => { - const json = JSON.stringify(obj); - server.debug(`send: ${json}`); - const message = json + "\n"; - const bytes = encoder.encode(message); - fs.writeSync(1, bytes); - }; - } - function createReplyResultFunction(server) { - return (id, result) => { - if (id === undefined || id === null) return; - const res = { jsonrpc: "2.0", id, result }; - server.writeMessage(res); - }; - } - function createReplyErrorFunction(server) { - return (id, code, message) => { - if (id === undefined || id === null) { - server.debug(`Error for notification: ${message}`); - return; - } - const error = { code, message }; - const res = { - jsonrpc: "2.0", - id, - error, - }; - server.writeMessage(res); - }; - } - function createServer(serverInfo, options = {}) { - const logDir = options.logDir || undefined; - const logFilePath = logDir ? 
path.join(logDir, "server.log") : undefined; - const server = { - serverInfo, - tools: {}, - debug: () => {}, - debugError: () => {}, - writeMessage: () => {}, - replyResult: () => {}, - replyError: () => {}, - readBuffer: new ReadBuffer(), - logDir, - logFilePath, - logFileInitialized: false, - }; - server.debug = createDebugFunction(server); - server.debugError = createDebugErrorFunction(server); - server.writeMessage = createWriteMessageFunction(server); - server.replyResult = createReplyResultFunction(server); - server.replyError = createReplyErrorFunction(server); - return server; - } - function createWrappedHandler(server, toolName, handlerFn) { - return async args => { - server.debug(` [${toolName}] Invoking handler with args: ${JSON.stringify(args)}`); - try { - const result = await Promise.resolve(handlerFn(args)); - server.debug(` [${toolName}] Handler returned result type: ${typeof result}`); - if (result && typeof result === "object" && Array.isArray(result.content)) { - server.debug(` [${toolName}] Result is already in MCP format`); - return result; - } - let serializedResult; - try { - serializedResult = JSON.stringify(result); - } catch (serializationError) { - server.debugError(` [${toolName}] Serialization error: `, serializationError); - serializedResult = String(result); - } - server.debug(` [${toolName}] Serialized result: ${serializedResult.substring(0, 200)}${serializedResult.length > 200 ? "..." : ""}`); - return { - content: [ - { - type: "text", - text: serializedResult, - }, - ], - }; - } catch (error) { - server.debugError(` [${toolName}] Handler threw error: `, error); - throw error; - } - }; - } - function loadToolHandlers(server, tools, basePath) { - server.debug(`Loading tool handlers...`); - server.debug(` Total tools to process: ${tools.length}`); - server.debug(` Base path: ${basePath || "(not specified)"}`); - let loadedCount = 0; - let skippedCount = 0; - let errorCount = 0; - for (const tool of tools) { - const toolName = tool.name || "(unnamed)"; - if (!tool.handler) { - server.debug(` [${toolName}] No handler path specified, skipping handler load`); - skippedCount++; - continue; - } - const handlerPath = tool.handler; - server.debug(` [${toolName}] Handler path specified: ${handlerPath}`); - let resolvedPath = handlerPath; - if (basePath && !path.isAbsolute(handlerPath)) { - resolvedPath = path.resolve(basePath, handlerPath); - server.debug(` [${toolName}] Resolved relative path to: ${resolvedPath}`); - const normalizedBase = path.resolve(basePath); - const normalizedResolved = path.resolve(resolvedPath); - if (!normalizedResolved.startsWith(normalizedBase + path.sep) && normalizedResolved !== normalizedBase) { - server.debug(` [${toolName}] ERROR: Handler path escapes base directory: ${resolvedPath} is not within ${basePath}`); - errorCount++; - continue; - } - } else if (path.isAbsolute(handlerPath)) { - server.debug(` [${toolName}] Using absolute path (bypasses basePath validation): ${handlerPath}`); - } - tool.handlerPath = handlerPath; - try { - server.debug(` [${toolName}] Loading handler from: ${resolvedPath}`); - if (!fs.existsSync(resolvedPath)) { - server.debug(` [${toolName}] ERROR: Handler file does not exist: ${resolvedPath}`); - errorCount++; - continue; - } - const ext = path.extname(resolvedPath).toLowerCase(); - server.debug(` [${toolName}] Handler file extension: ${ext}`); - if (ext === ".sh") { - server.debug(` [${toolName}] Detected shell script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` 
[${toolName}] Shell script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made shell script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make shell script executable: `, chmodError); - } - } - const { createShellHandler } = require("./mcp_handler_shell.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createShellHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Shell handler created successfully with timeout: ${timeout}s`); - } else if (ext === ".py") { - server.debug(` [${toolName}] Detected Python script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` [${toolName}] Python script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made Python script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make Python script executable: `, chmodError); - } - } - const { createPythonHandler } = require("./mcp_handler_python.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createPythonHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Python handler created successfully with timeout: ${timeout}s`); - } else { - server.debug(` [${toolName}] Loading JavaScript handler module`); - const handlerModule = require(resolvedPath); - server.debug(` [${toolName}] Handler module loaded successfully`); - server.debug(` [${toolName}] Module type: ${typeof handlerModule}`); - let handlerFn = handlerModule; - if (handlerModule && typeof handlerModule === "object" && typeof handlerModule.default === "function") { - handlerFn = handlerModule.default; - server.debug(` [${toolName}] Using module.default export`); - } - if (typeof handlerFn !== "function") { - server.debug(` [${toolName}] ERROR: Handler is not a function, got: ${typeof handlerFn}`); - server.debug(` [${toolName}] Module keys: ${Object.keys(handlerModule || {}).join(", ") || "(none)"}`); - errorCount++; - continue; - } - server.debug(` [${toolName}] Handler function validated successfully`); - server.debug(` [${toolName}] Handler function name: ${handlerFn.name || "(anonymous)"}`); - tool.handler = createWrappedHandler(server, toolName, handlerFn); - loadedCount++; - server.debug(` [${toolName}] JavaScript handler loaded and wrapped successfully`); - } - } catch (error) { - server.debugError(` [${toolName}] ERROR loading handler: `, error); - errorCount++; - } - } - server.debug(`Handler loading complete:`); - server.debug(` Loaded: ${loadedCount}`); - server.debug(` Skipped (no handler path): ${skippedCount}`); - server.debug(` Errors: ${errorCount}`); - return tools; - } - function registerTool(server, tool) { - const normalizedName = normalizeTool(tool.name); - server.tools[normalizedName] = { - ...tool, - name: normalizedName, - }; - server.debug(`Registered tool: ${normalizedName}`); - } - function normalizeTool(name) { - return name.replace(/-/g, "_").toLowerCase(); - } - async function handleMessage(server, req, defaultHandler) { - if (!req || typeof req !== "object") { - server.debug(`Invalid message: not an object`); - return; - } - if (req.jsonrpc !== "2.0") { - server.debug(`Invalid message: missing or invalid jsonrpc field`); - return; - } - const { id, method, params } = req; - if (!method || typeof method !== "string") { - server.replyError(id, -32600, "Invalid Request: method must be 
a string"); - return; - } - try { - if (method === "initialize") { - const clientInfo = params?.clientInfo ?? {}; - server.debug(`client info: ${JSON.stringify(clientInfo)}`); - const protocolVersion = params?.protocolVersion ?? undefined; - const result = { - serverInfo: server.serverInfo, - ...(protocolVersion ? { protocolVersion } : {}), - capabilities: { - tools: {}, - }, - }; - server.replyResult(id, result); - } else if (method === "tools/list") { - const list = []; - Object.values(server.tools).forEach(tool => { - const toolDef = { - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - }; - list.push(toolDef); - }); - server.replyResult(id, { tools: list }); - } else if (method === "tools/call") { - const name = params?.name; - const args = params?.arguments ?? {}; - if (!name || typeof name !== "string") { - server.replyError(id, -32602, "Invalid params: 'name' must be a string"); - return; - } - const tool = server.tools[normalizeTool(name)]; - if (!tool) { - server.replyError(id, -32601, `Tool not found: ${name} (${normalizeTool(name)})`); - return; - } - let handler = tool.handler; - if (!handler && defaultHandler) { - handler = defaultHandler(tool.name); - } - if (!handler) { - server.replyError(id, -32603, `No handler for tool: ${name}`); - return; - } - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - server.replyError(id, -32602, `Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - return; - } - server.debug(`Calling handler for tool: ${name}`); - const result = await Promise.resolve(handler(args)); - server.debug(`Handler returned for tool: ${name}`); - const content = result && result.content ? result.content : []; - server.replyResult(id, { content, isError: false }); - } else if (/^notifications\//.test(method)) { - server.debug(`ignore ${method}`); - } else { - server.replyError(id, -32601, `Method not found: ${method}`); - } - } catch (e) { - server.replyError(id, -32603, e instanceof Error ? e.message : String(e)); - } - } - async function processReadBuffer(server, defaultHandler) { - while (true) { - try { - const message = server.readBuffer.readMessage(); - if (!message) { - break; - } - server.debug(`recv: ${JSON.stringify(message)}`); - await handleMessage(server, message, defaultHandler); - } catch (error) { - server.debug(`Parse error: ${error instanceof Error ? 
error.message : String(error)}`); - } - } - } - function start(server, options = {}) { - const { defaultHandler } = options; - server.debug(`v${server.serverInfo.version} ready on stdio`); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) { - throw new Error("No tools registered"); - } - const onData = async chunk => { - server.readBuffer.append(chunk); - await processReadBuffer(server, defaultHandler); - }; - process.stdin.on("data", onData); - process.stdin.on("error", err => server.debug(`stdin error: ${err}`)); - process.stdin.resume(); - server.debug(`listening...`); - } - module.exports = { - createServer, - registerTool, - normalizeTool, - handleMessage, - processReadBuffer, - start, - loadToolHandlers, - }; - EOF_MCP_CORE - cat > /tmp/gh-aw/safe-inputs/mcp_server.cjs << 'EOF_MCP_SERVER' - class MCPServer { - constructor(serverInfo, options = {}) { - this.serverInfo = serverInfo; - this.capabilities = options.capabilities || { tools: {} }; - this.tools = new Map(); - this.transport = null; - this.initialized = false; - } - tool(name, description, inputSchema, handler) { - this.tools.set(name, { - name, - description, - inputSchema, - handler, - }); - } - async connect(transport) { - this.transport = transport; - transport.setServer(this); - await transport.start(); - } - handleInitialize(params) { - this.initialized = true; - return { - protocolVersion: params.protocolVersion || "2024-11-05", - serverInfo: this.serverInfo, - capabilities: this.capabilities, - }; - } - handleToolsList() { - const tools = Array.from(this.tools.values()).map(tool => ({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - })); - return { tools }; - } - async handleToolsCall(params) { - const tool = this.tools.get(params.name); - if (!tool) { - throw { - code: -32602, - message: `Tool '${params.name}' not found`, - }; - } - try { - const result = await tool.handler(params.arguments || {}); - return result; - } catch (error) { - throw { - code: -32603, - message: error instanceof Error ? 
error.message : String(error), - }; - } - } - handlePing() { - return {}; - } - async handleRequest(request) { - const { id, method, params } = request; - try { - if (!("id" in request)) { - return null; - } - let result; - switch (method) { - case "initialize": - result = this.handleInitialize(params || {}); - break; - case "ping": - result = this.handlePing(); - break; - case "tools/list": - result = this.handleToolsList(); - break; - case "tools/call": - result = await this.handleToolsCall(params || {}); - break; - default: - throw { - code: -32601, - message: `Method '${method}' not found`, - }; - } - return { - jsonrpc: "2.0", - id, - result, - }; - } catch (error) { - return { - jsonrpc: "2.0", - id, - error: { - code: error.code || -32603, - message: error.message || "Internal error", - }, - }; - } - } - } - module.exports = { - MCPServer, - }; - EOF_MCP_SERVER - cat > /tmp/gh-aw/safe-inputs/mcp_http_transport.cjs << 'EOF_MCP_HTTP_TRANSPORT' - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer } = require("./mcp_server.cjs"); - class MCPHTTPTransport { - constructor(options = {}) { - this.sessionIdGenerator = options.sessionIdGenerator; - this.enableJsonResponse = options.enableJsonResponse !== false; - this.enableDnsRebindingProtection = options.enableDnsRebindingProtection || false; - this.server = null; - this.sessionId = null; - this.started = false; - } - setServer(server) { - this.server = server; - } - async start() { - if (this.started) { - throw new Error("Transport already started"); - } - this.started = true; - } - async handleRequest(req, res, parsedBody) { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept, Mcp-Session-Id"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = parsedBody; - if (!body) { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); - } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? 
JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; - } - } - if (!body) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Empty request body", - }, - id: null, - }) - ); - return; - } - if (!body.jsonrpc || body.jsonrpc !== "2.0") { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: jsonrpc must be '2.0'", - }, - id: body.id || null, - }) - ); - return; - } - if (this.sessionIdGenerator) { - if (body.method === "initialize") { - this.sessionId = this.sessionIdGenerator(); - } else { - const requestSessionId = req.headers["mcp-session-id"]; - if (!requestSessionId) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Missing Mcp-Session-Id header", - }, - id: body.id || null, - }) - ); - return; - } - if (requestSessionId !== this.sessionId) { - res.writeHead(404, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32001, - message: "Session not found", - }, - id: body.id || null, - }) - ); - return; - } - } - } - const response = await this.server.handleRequest(body); - if (response === null) { - res.writeHead(204); - res.end(); - return; - } - const headers = { "Content-Type": "application/json" }; - if (this.sessionId) { - headers["mcp-session-id"] = this.sessionId; - } - res.writeHead(200, headers); - res.end(JSON.stringify(response)); - } catch (error) { - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? error.message : String(error), - }, - id: null, - }) - ); - } - } - } - } - module.exports = { - MCPServer, - MCPHTTPTransport, - }; - EOF_MCP_HTTP_TRANSPORT - cat > /tmp/gh-aw/safe-inputs/mcp_logger.cjs << 'EOF_MCP_LOGGER' - function createLogger(serverName) { - const logger = { - debug: msg => { - const timestamp = new Date().toISOString(); - process.stderr.write(`[${timestamp}] [${serverName}] ${msg}\n`); - }, - debugError: (prefix, error) => { - const errorMessage = error instanceof Error ? 
error.message : String(error); - logger.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - logger.debug(`${prefix}Stack trace: ${error.stack}`); - } - }, - }; - return logger; - } - module.exports = { - createLogger, - }; - EOF_MCP_LOGGER - cat > /tmp/gh-aw/safe-inputs/mcp_handler_shell.cjs << 'EOF_HANDLER_SHELL' - const fs = require("fs"); - const path = require("path"); - const { execFile } = require("child_process"); - const os = require("os"); - function createShellHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking shell handler: ${scriptPath}`); - server.debug(` [${toolName}] Shell handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const env = { ...process.env }; - for (const [key, value] of Object.entries(args || {})) { - const envKey = `INPUT_${key.toUpperCase().replace(/-/g, "_")}`; - env[envKey] = String(value); - server.debug(` [${toolName}] Set env: ${envKey}=${String(value).substring(0, 100)}${String(value).length > 100 ? "..." : ""}`); - } - const outputFile = path.join(os.tmpdir(), `mcp-shell-output-${Date.now()}-${Math.random().toString(36).substring(2)}.txt`); - env.GITHUB_OUTPUT = outputFile; - server.debug(` [${toolName}] Output file: ${outputFile}`); - fs.writeFileSync(outputFile, ""); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing shell script...`); - execFile( - scriptPath, - [], - { - env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Shell script error: `, error); - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - reject(error); - return; - } - const outputs = {}; - try { - if (fs.existsSync(outputFile)) { - const outputContent = fs.readFileSync(outputFile, "utf-8"); - server.debug( - ` [${toolName}] Output file content: ${outputContent.substring(0, 500)}${outputContent.length > 500 ? "..." : ""}` - ); - const lines = outputContent.split("\n"); - for (const line of lines) { - const trimmed = line.trim(); - if (trimmed && trimmed.includes("=")) { - const eqIndex = trimmed.indexOf("="); - const key = trimmed.substring(0, eqIndex); - const value = trimmed.substring(eqIndex + 1); - outputs[key] = value; - server.debug(` [${toolName}] Parsed output: ${key}=${value.substring(0, 100)}${value.length > 100 ? "..." 
: ""}`); - } - } - } - } catch (readError) { - server.debugError(` [${toolName}] Error reading output file: `, readError); - } - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - const result = { - stdout: stdout || "", - stderr: stderr || "", - outputs, - }; - server.debug(` [${toolName}] Shell handler completed, outputs: ${Object.keys(outputs).join(", ") || "(none)"}`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - }); - }; - } - module.exports = { - createShellHandler, - }; - EOF_HANDLER_SHELL - cat > /tmp/gh-aw/safe-inputs/mcp_handler_python.cjs << 'EOF_HANDLER_PYTHON' - const { execFile } = require("child_process"); - function createPythonHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking Python handler: ${scriptPath}`); - server.debug(` [${toolName}] Python handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const inputJson = JSON.stringify(args || {}); - server.debug( - ` [${toolName}] Input JSON (${inputJson.length} bytes): ${inputJson.substring(0, 200)}${inputJson.length > 200 ? "..." : ""}` - ); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing Python script...`); - const child = execFile( - "python3", - [scriptPath], - { - env: process.env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Python script error: `, error); - reject(error); - return; - } - let result; - try { - if (stdout && stdout.trim()) { - result = JSON.parse(stdout.trim()); - } else { - result = { stdout: stdout || "", stderr: stderr || "" }; - } - } catch (parseError) { - server.debug(` [${toolName}] Output is not JSON, returning as text`); - result = { stdout: stdout || "", stderr: stderr || "" }; - } - server.debug(` [${toolName}] Python handler completed successfully`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - if (child.stdin) { - child.stdin.write(inputJson); - child.stdin.end(); - } - }); + }, + ], + }; }; - } - module.exports = { - createPythonHandler, - }; - EOF_HANDLER_PYTHON - cat > /tmp/gh-aw/safe-inputs/safe_inputs_config_loader.cjs << 'EOF_CONFIG_LOADER' - const fs = require("fs"); - function loadConfig(configPath) { - if (!fs.existsSync(configPath)) { - throw new Error(`Configuration file not found: ${configPath}`); - } - const configContent = fs.readFileSync(configPath, "utf-8"); - const config = JSON.parse(configContent); - if (!config.tools || !Array.isArray(config.tools)) { - throw new Error("Configuration must contain a 'tools' array"); - } - return config; - } - module.exports = { - loadConfig, - }; - EOF_CONFIG_LOADER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_tool_factory.cjs << 'EOF_TOOL_FACTORY' - function createToolConfig(name, description, inputSchema, handlerPath) { return { - name, - description, - inputSchema, - handler: handlerPath, + defaultHandler, + uploadAssetHandler, + createPullRequestHandler, + pushToPullRequestBranchHandler, }; } - module.exports = { - createToolConfig, - }; - EOF_TOOL_FACTORY - cat > 
/tmp/gh-aw/safe-inputs/safe_inputs_validation.cjs << 'EOF_VALIDATION' - function validateRequiredFields(args, inputSchema) { - const requiredFields = inputSchema && Array.isArray(inputSchema.required) ? inputSchema.required : []; - if (!requiredFields.length) { - return []; - } - const missing = requiredFields.filter(f => { - const value = args[f]; - return value === undefined || value === null || (typeof value === "string" && value.trim() === ""); - }); - return missing; - } - module.exports = { - validateRequiredFields, - }; - EOF_VALIDATION - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server.cjs << 'EOF_SAFE_INPUTS_SERVER' - const path = require("path"); - const { createServer, registerTool, loadToolHandlers, start } = require("./mcp_server_core.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { createToolConfig } = require("./safe_inputs_tool_factory.cjs"); - function startSafeInputsServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const logDir = options.logDir || config.logDir || undefined; - const server = createServer({ name: serverName, version }, { logDir }); - server.debug(`Loading safe-inputs configuration from: ${configPath}`); - server.debug(`Base path for handlers: ${basePath}`); - server.debug(`Tools to load: ${config.tools.length}`); - const tools = loadToolHandlers(server, config.tools, basePath); - for (const tool of tools) { - registerTool(server, tool); - } + function loadTools(server) { + const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; + let ALL_TOOLS = []; + server.debug(`Reading tools from file: ${toolsPath}`); try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - server.debug(`Deleted configuration file: ${configPath}`); + if (fs.existsSync(toolsPath)) { + server.debug(`Tools file exists at: ${toolsPath}`); + const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); + server.debug(`Tools file content length: ${toolsFileContent.length} characters`); + server.debug(`Tools file read successfully, attempting to parse JSON`); + ALL_TOOLS = JSON.parse(toolsFileContent); + server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); + } else { + server.debug(`Tools file does not exist at: ${toolsPath}`); + server.debug(`Using empty tools array`); + ALL_TOOLS = []; } } catch (error) { - server.debugError(`Warning: Could not delete configuration file: `, error); + server.debug(`Error reading tools file: ${error instanceof Error ? 
error.message : String(error)}`); + server.debug(`Falling back to empty tools array`); + ALL_TOOLS = []; } - start(server); + return ALL_TOOLS; } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server.cjs <config-path> [--log-dir <dir>]"); - process.exit(1); - } - const configPath = args[0]; - const options = {}; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; + function attachHandlers(tools, handlers) { + tools.forEach(tool => { + if (tool.name === "create_pull_request") { + tool.handler = handlers.createPullRequestHandler; + } else if (tool.name === "push_to_pull_request_branch") { + tool.handler = handlers.pushToPullRequestBranchHandler; + } else if (tool.name === "upload_asset") { + tool.handler = handlers.uploadAssetHandler; } - } - try { - startSafeInputsServer(configPath, options); - } catch (error) { - console.error(`Error starting safe-inputs server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); - } + }); + return tools; } - module.exports = { - startSafeInputsServer, - loadConfig, - createToolConfig, - }; - EOF_SAFE_INPUTS_SERVER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server_http.cjs << 'EOF_SAFE_INPUTS_SERVER_HTTP' - const path = require("path"); - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer, MCPHTTPTransport } = require("./mcp_http_transport.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { loadToolHandlers } = require("./mcp_server_core.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const { createLogger } = require("./mcp_logger.cjs"); - function createMCPServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const server = new MCPServer( - { - name: serverName, - version: version, - }, - { - capabilities: { - tools: {}, - }, - } - ); - const logger = createLogger(serverName); - logger.debug(`=== Creating MCP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Loading safe-inputs configuration from: ${configPath}`); - logger.debug(`Base path for handlers: ${basePath}`); - logger.debug(`Server name: ${serverName}`); - logger.debug(`Server version: ${version}`); - logger.debug(`Tools to load: ${config.tools.length}`); - const tempServer = { debug: logger.debug, debugError: logger.debugError }; - const tools = loadToolHandlers(tempServer, config.tools, basePath); - logger.debug(`Registering tools with MCP server...`); - let registeredCount = 0; - let skippedCount = 0; - for (const tool of tools) { - if (!tool.handler) { - logger.debug(`Skipping tool ${tool.name} - no handler loaded`); - skippedCount++; - continue; + function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { + tools.forEach(tool => { + if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { + registerTool(server, tool); } - logger.debug(`Registering tool: ${tool.name}`); - server.tool(tool.name, tool.description || "", tool.inputSchema || { type: "object", properties: {} }, async args => { - logger.debug(`Calling handler for tool: ${tool.name}`); - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - throw 
new Error(`Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - } - const result = await Promise.resolve(tool.handler(args)); - logger.debug(`Handler returned for tool: ${tool.name}`); - const content = result && result.content ? result.content : []; - return { content, isError: false }; - }); - registeredCount++; - } - logger.debug(`Tool registration complete: ${registeredCount} registered, ${skippedCount} skipped`); - logger.debug(`=== MCP Server Creation Complete ===`); - try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - logger.debug(`Deleted configuration file: ${configPath}`); + }); + } + function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { + Object.keys(config).forEach(configKey => { + const normalizedKey = normalizeTool(configKey); + if (server.tools[normalizedKey]) { + return; } - } catch (error) { - logger.debugError(`Warning: Could not delete configuration file: `, error); - } - return { server, config, logger }; - } - async function startHttpServer(configPath, options = {}) { - const port = options.port || 3000; - const stateless = options.stateless || false; - const logger = createLogger("safe-inputs-startup"); - logger.debug(`=== Starting Safe Inputs MCP HTTP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Port: ${port}`); - logger.debug(`Mode: ${stateless ? "stateless" : "stateful"}`); - logger.debug(`Environment: NODE_VERSION=${process.version}, PLATFORM=${process.platform}`); - try { - const { server, config, logger: mcpLogger } = createMCPServer(configPath, { logDir: options.logDir }); - Object.assign(logger, mcpLogger); - logger.debug(`MCP server created successfully`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools configured: ${config.tools.length}`); - logger.debug(`Creating HTTP transport...`); - const transport = new MCPHTTPTransport({ - sessionIdGenerator: stateless ? 
undefined : () => randomUUID(), - enableJsonResponse: true, - enableDnsRebindingProtection: false, - }); - logger.debug(`HTTP transport created`); - logger.debug(`Connecting server to transport...`); - await server.connect(transport); - logger.debug(`Server connected to transport successfully`); - logger.debug(`Creating HTTP server...`); - const httpServer = http.createServer(async (req, res) => { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method === "GET" && req.url === "/health") { - res.writeHead(200, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - status: "ok", - server: config.serverName || "safeinputs", - version: config.version || "1.0.0", - tools: config.tools.length, - }) - ); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = null; - if (req.method === "POST") { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); + if (!tools.find(t => t.name === normalizedKey)) { + const jobConfig = config[configKey]; + const dynamicTool = { + name: normalizedKey, + description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, + inputSchema: { + type: "object", + properties: {}, + additionalProperties: true, + }, + handler: args => { + const entry = { + type: normalizedKey, + ...args, + }; + const entryJSON = JSON.stringify(entry); + fs.appendFileSync(outputFile, entryJSON + "\n"); + const outputText = + jobConfig && jobConfig.output + ? jobConfig.output + : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; + return { + content: [ + { + type: "text", + text: JSON.stringify({ result: outputText }), + }, + ], + }; + }, + }; + if (jobConfig && jobConfig.inputs) { + dynamicTool.inputSchema.properties = {}; + dynamicTool.inputSchema.required = []; + Object.keys(jobConfig.inputs).forEach(inputName => { + const inputDef = jobConfig.inputs[inputName]; + const propSchema = { + type: inputDef.type || "string", + description: inputDef.description || `Input parameter: ${inputName}`, + }; + if (inputDef.options && Array.isArray(inputDef.options)) { + propSchema.enum = inputDef.options; } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; + dynamicTool.inputSchema.properties[inputName] = propSchema; + if (inputDef.required) { + dynamicTool.inputSchema.required.push(inputName); } - } - await transport.handleRequest(req, res, body); - } catch (error) { - logger.debugError("Error handling request: ", error); - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? 
error.message : String(error), - }, - id: null, - }) - ); - } - } - }); - logger.debug(`Attempting to bind to port ${port}...`); - httpServer.listen(port, () => { - logger.debug(`=== Safe Inputs MCP HTTP Server Started Successfully ===`); - logger.debug(`HTTP server listening on http://localhost:${port}`); - logger.debug(`MCP endpoint: POST http://localhost:${port}/`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools available: ${config.tools.length}`); - logger.debug(`Server is ready to accept requests`); - }); - httpServer.on("error", error => { - if (error.code === "EADDRINUSE") { - logger.debugError(`ERROR: Port ${port} is already in use. `, error); - } else if (error.code === "EACCES") { - logger.debugError(`ERROR: Permission denied to bind to port ${port}. `, error); - } else { - logger.debugError(`ERROR: Failed to start HTTP server: `, error); + }); } - process.exit(1); - }); - process.on("SIGINT", () => { - logger.debug("Received SIGINT, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - process.on("SIGTERM", () => { - logger.debug("Received SIGTERM, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - return httpServer; - } catch (error) { - const errorLogger = createLogger("safe-inputs-startup-error"); - errorLogger.debug(`=== FATAL ERROR: Failed to start Safe Inputs MCP HTTP Server ===`); - errorLogger.debug(`Error type: ${error.constructor.name}`); - errorLogger.debug(`Error message: ${error.message}`); - if (error.stack) { - errorLogger.debug(`Stack trace:\n${error.stack}`); - } - if (error.code) { - errorLogger.debug(`Error code: ${error.code}`); - } - errorLogger.debug(`Configuration file: ${configPath}`); - errorLogger.debug(`Port: ${port}`); - throw error; - } - } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server_http.cjs [--port ] [--stateless] [--log-dir ]"); - process.exit(1); - } - const configPath = args[0]; - const options = { - port: 3000, - stateless: false, - logDir: undefined, - }; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--port" && args[i + 1]) { - options.port = parseInt(args[i + 1], 10); - i++; - } else if (args[i] === "--stateless") { - options.stateless = true; - } else if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; - } - } - startHttpServer(configPath, options).catch(error => { - console.error(`Error starting HTTP server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); + registerTool(server, dynamicTool); + } }); } - module.exports = { - startHttpServer, - createMCPServer, - }; - EOF_SAFE_INPUTS_SERVER_HTTP - cat > /tmp/gh-aw/safe-inputs/tools.json << 'EOF_TOOLS_JSON' - { - "serverName": "safeinputs", - "version": "1.0.0", - "logDir": "/tmp/gh-aw/safe-inputs/logs", - "tools": [ - { - "name": "gh", - "description": "Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", - "inputSchema": { - "properties": { - "args": { - "description": "Arguments to pass to gh CLI (without the 'gh' prefix). 
Examples: 'pr list --limit 5', 'issue view 123', 'api repos/{owner}/{repo}'", - "type": "string" - } - }, - "required": [ - "args" - ], - "type": "object" - }, - "handler": "gh.sh", - "env": { - "GH_AW_GH_TOKEN": "GH_AW_GH_TOKEN" - }, - "timeout": 60 - } - ] - } - EOF_TOOLS_JSON - cat > /tmp/gh-aw/safe-inputs/mcp-server.cjs << 'EOFSI' - const path = require("path"); - const { startHttpServer } = require("./safe_inputs_mcp_server_http.cjs"); - const configPath = path.join(__dirname, "tools.json"); - const port = parseInt(process.env.GH_AW_SAFE_INPUTS_PORT || "3000", 10); - const apiKey = process.env.GH_AW_SAFE_INPUTS_API_KEY || ""; - startHttpServer(configPath, { - port: port, - stateless: false, - logDir: "/tmp/gh-aw/safe-inputs/logs" - }).catch(error => { - console.error("Failed to start safe-inputs HTTP server:", error); - process.exit(1); - }); - EOFSI - chmod +x /tmp/gh-aw/safe-inputs/mcp-server.cjs - - - name: Setup Safe Inputs Tool Files - run: | - cat > /tmp/gh-aw/safe-inputs/gh.sh << 'EOFSH_gh' - #!/bin/bash - # Auto-generated safe-input tool: gh - # Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh . Use single quotes ' for complex args to avoid shell interpretation issues. - - set -euo pipefail - - GH_TOKEN=$GH_AW_GH_TOKEN gh $INPUT_ARGS - - EOFSH_gh - chmod +x /tmp/gh-aw/safe-inputs/gh.sh - - - name: Generate Safe Inputs MCP Server Config - id: safe-inputs-config - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 - with: - script: | - function generateSafeInputsConfig({ core, crypto }) { - const apiKeyBuffer = crypto.randomBytes(32); - const apiKey = apiKeyBuffer.toString("base64").replace(/[/+=]/g, ""); - const port = 3000; - core.setOutput("safe_inputs_api_key", apiKey); - core.setOutput("safe_inputs_port", port.toString()); - core.info(`Safe Inputs MCP server will run on port ${port}`); - return { apiKey, port }; - } - - // Execute the function - const crypto = require('crypto'); - generateSafeInputsConfig({ core, crypto }); - - - name: Start Safe Inputs MCP HTTP Server - id: safe-inputs-start - run: | - # Set environment variables for the server - export GH_AW_SAFE_INPUTS_PORT=${{ steps.safe-inputs-config.outputs.safe_inputs_port }} - export GH_AW_SAFE_INPUTS_API_KEY=${{ steps.safe-inputs-config.outputs.safe_inputs_api_key }} - - export GH_AW_GH_TOKEN="${GH_AW_GH_TOKEN}" - - cd /tmp/gh-aw/safe-inputs - # Verify required files exist - echo "Verifying safe-inputs setup..." - if [ ! -f mcp-server.cjs ]; then - echo "ERROR: mcp-server.cjs not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - if [ ! -f tools.json ]; then - echo "ERROR: tools.json not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - echo "Configuration files verified" - # Log environment configuration - echo "Server configuration:" - echo " Port: $GH_AW_SAFE_INPUTS_PORT" - echo " API Key: ${GH_AW_SAFE_INPUTS_API_KEY:0:8}..." 
- echo " Working directory: $(pwd)" - # Ensure logs directory exists - mkdir -p /tmp/gh-aw/safe-inputs/logs - # Create initial server.log file for artifact upload - echo "Safe Inputs MCP Server Log" > /tmp/gh-aw/safe-inputs/logs/server.log - echo "Start time: $(date)" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "===========================================" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "" >> /tmp/gh-aw/safe-inputs/logs/server.log - # Start the HTTP server in the background - echo "Starting safe-inputs MCP HTTP server..." - node mcp-server.cjs >> /tmp/gh-aw/safe-inputs/logs/server.log 2>&1 & - SERVER_PID=$! - echo "Started safe-inputs MCP server with PID $SERVER_PID" - # Wait for server to be ready (max 10 seconds) - echo "Waiting for server to become ready..." - for i in {1..10}; do - # Check if process is still running - if ! kill -0 $SERVER_PID 2>/dev/null; then - echo "ERROR: Server process $SERVER_PID has died" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - exit 1 - fi - # Check if server is responding - if curl -s -f http://localhost:$GH_AW_SAFE_INPUTS_PORT/health > /dev/null 2>&1; then - echo "Safe Inputs MCP server is ready (attempt $i/10)" - break - fi - if [ $i -eq 10 ]; then - echo "ERROR: Safe Inputs MCP server failed to start after 10 seconds" - echo "Process status: $(ps aux | grep '[m]cp-server.cjs' || echo 'not running')" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - echo "Checking port availability:" - netstat -tuln | grep $GH_AW_SAFE_INPUTS_PORT || echo "Port $GH_AW_SAFE_INPUTS_PORT not listening" - exit 1 - fi - echo "Waiting for server... (attempt $i/10)" - sleep 1 - done - # Output the configuration for the MCP client - echo "port=$GH_AW_SAFE_INPUTS_PORT" >> $GITHUB_OUTPUT - echo "api_key=$GH_AW_SAFE_INPUTS_API_KEY" >> $GITHUB_OUTPUT + const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; + const MCP_LOG_DIR = process.env.GH_AW_MCP_LOG_DIR; + const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); + const { config: safeOutputsConfig, outputFile } = loadConfig(server); + const appendSafeOutput = createAppendFunction(outputFile); + const handlers = createHandlers(server, appendSafeOutput); + const { defaultHandler } = handlers; + let ALL_TOOLS = loadTools(server); + ALL_TOOLS = attachHandlers(ALL_TOOLS, handlers); + server.debug(` output file: ${outputFile}`); + server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); + registerPredefinedTools(server, ALL_TOOLS, safeOutputsConfig, registerTool, normalizeTool); + registerDynamicTools(server, ALL_TOOLS, safeOutputsConfig, outputFile, registerTool, normalizeTool); + server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); + if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); + start(server, { defaultHandler }); + EOF + chmod +x /tmp/gh-aw/safeoutputs/mcp-server.cjs - name: Setup MCPs env: GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_SAFE_INPUTS_PORT: ${{ steps.safe-inputs-start.outputs.port }} - GH_AW_SAFE_INPUTS_API_KEY: ${{ steps.safe-inputs-start.outputs.api_key }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | mkdir -p /tmp/gh-aw/mcp-config cat > /tmp/gh-aw/mcp-config/mcp-servers.json << EOF @@ -4894,18 +3558,6 @@ jobs: "localhost;localhost:*;127.0.0.1;127.0.0.1:*;github.com" ] }, - "safeinputs": { - "type": "http", - 
"url": "http://localhost:$GH_AW_SAFE_INPUTS_PORT", - "headers": { - "Authorization": "Bearer $GH_AW_SAFE_INPUTS_API_KEY" - }, - "env": { - "GH_AW_SAFE_INPUTS_PORT": "$GH_AW_SAFE_INPUTS_PORT", - "GH_AW_SAFE_INPUTS_API_KEY": "$GH_AW_SAFE_INPUTS_API_KEY", - "GH_AW_GH_TOKEN": "$GH_AW_GH_TOKEN" - } - }, "safeoutputs": { "command": "node", "args": ["/tmp/gh-aw/safeoutputs/mcp-server.cjs"], @@ -5030,8 +3682,6 @@ jobs: PROMPT_DIR="$(dirname "$GH_AW_PROMPT")" mkdir -p "$PROMPT_DIR" cat << 'PROMPT_EOF' | envsubst > "$GH_AW_PROMPT" - - ## MCP Response Size Limits MCP tool responses have a **25,000 token limit**. When GitHub API responses exceed this limit, workflows must retry with pagination parameters, wasting turns and tokens. @@ -5153,7 +3803,8 @@ jobs: 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-claude-${GH_AW_GITHUB_RUN_ID}.txt` with content "Smoke test passed for Claude at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" - 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues + 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${GH_AW_GITHUB_RUN_ID}.txt` with content "Cache memory test for run ${GH_AW_GITHUB_RUN_ID}" and verify it was created successfully + 6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -5223,6 +3874,31 @@ jobs: Do NOT attempt to edit files outside these directories as you do not have the necessary permissions. + PROMPT_EOF + - name: Append cache memory instructions to prompt + env: + GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt + run: | + cat << 'PROMPT_EOF' | envsubst >> "$GH_AW_PROMPT" + + --- + + ## Cache Folder Available + + You have access to a persistent cache folder at `/tmp/gh-aw/cache-memory/` where you can read and write files to create memories and store information. + + - **Read/Write Access**: You can freely read from and write to any files in this folder + - **Persistence**: Files in this folder persist across workflow runs via GitHub Actions cache + - **Last Write Wins**: If multiple processes write to the same file, the last write will be preserved + - **File Share**: Use this as a simple file share - organize files as you see fit + + Examples of what you can store: + - `/tmp/gh-aw/cache-memory/notes.txt` - general notes and observations + - `/tmp/gh-aw/cache-memory/preferences.json` - user preferences and settings + - `/tmp/gh-aw/cache-memory/history.log` - activity history and logs + - `/tmp/gh-aw/cache-memory/state/` - organized state files in subdirectories + + Feel free to create, read, update, and organize files in this folder as needed for your tasks. 
PROMPT_EOF - name: Append safe outputs instructions to prompt env: @@ -5392,18 +4068,22 @@ jobs: # - Bash # - BashOutput # - Edit + # - Edit(/tmp/gh-aw/cache-memory/*) # - ExitPlanMode # - Glob # - Grep # - KillBash # - LS # - MultiEdit + # - MultiEdit(/tmp/gh-aw/cache-memory/*) # - NotebookEdit # - NotebookRead # - Read + # - Read(/tmp/gh-aw/cache-memory/*) # - Task # - TodoWrite # - Write + # - Write(/tmp/gh-aw/cache-memory/*) # - mcp__github__download_workflow_run_artifact # - mcp__github__get_code_scanning_alert # - mcp__github__get_commit @@ -5481,7 +4161,7 @@ jobs: run: | set -o pipefail # Execute Claude Code CLI with prompt from file - claude --print --disable-slash-commands --max-turns 15 --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools Bash,BashOutput,Edit,ExitPlanMode,Glob,Grep,KillBash,LS,MultiEdit,NotebookEdit,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__issue_read,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users,mcp__playwright__browser_click,mcp__playwright__browser_close,mcp__playwright__browser_console_messages,mcp__playwright__browser_drag,mcp__playwright__browser_evaluate,mcp__playwright__browser_file_upload,mcp__playwright__browser_fill_form,mcp__playwright__browser_handle_dialog,mcp__playwright__browser_hover,mcp__playwright__browser_install,mcp__playwright__browser_navigate,mcp__playwright__browser_navigate_back,mcp__playwright__browser_network_requests,mcp__playwright__browser_press_key,mcp__playwright__browser_resize,mcp__playwright__browser_select_option,mcp__playwright__browser_snapshot,mcp__playwright__browser_tabs,mcp__playwright__browser_take_screenshot,mcp__playwright__browser_type,mcp__playwright__browser_wait_for --debug --verbose --permission-mode bypassPermissions --output-format stream-json --settings /tmp/gh-aw/.claude/settings.json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_CLAUDE:+ --model "$GH_AW_MODEL_AGENT_CLAUDE"} 2>&1 | tee /tmp/gh-aw/agent-stdio.log + 
claude --print --disable-slash-commands --max-turns 15 --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools 'Bash,BashOutput,Edit,Edit(/tmp/gh-aw/cache-memory/*),ExitPlanMode,Glob,Grep,KillBash,LS,MultiEdit,MultiEdit(/tmp/gh-aw/cache-memory/*),NotebookEdit,NotebookRead,Read,Read(/tmp/gh-aw/cache-memory/*),Task,TodoWrite,Write,Write(/tmp/gh-aw/cache-memory/*),mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__issue_read,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users,mcp__playwright__browser_click,mcp__playwright__browser_close,mcp__playwright__browser_console_messages,mcp__playwright__browser_drag,mcp__playwright__browser_evaluate,mcp__playwright__browser_file_upload,mcp__playwright__browser_fill_form,mcp__playwright__browser_handle_dialog,mcp__playwright__browser_hover,mcp__playwright__browser_install,mcp__playwright__browser_navigate,mcp__playwright__browser_navigate_back,mcp__playwright__browser_network_requests,mcp__playwright__browser_press_key,mcp__playwright__browser_resize,mcp__playwright__browser_select_option,mcp__playwright__browser_snapshot,mcp__playwright__browser_tabs,mcp__playwright__browser_take_screenshot,mcp__playwright__browser_type,mcp__playwright__browser_wait_for' --debug --verbose --permission-mode bypassPermissions --output-format stream-json --settings /tmp/gh-aw/.claude/settings.json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_CLAUDE:+ --model "$GH_AW_MODEL_AGENT_CLAUDE"} 2>&1 | tee /tmp/gh-aw/agent-stdio.log env: ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} @@ -5497,7 +4177,6 @@ jobs: GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} GH_AW_MAX_TURNS: 15 GH_AW_MODEL_AGENT_CLAUDE: ${{ vars.GH_AW_MODEL_AGENT_CLAUDE || '' }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Clean up network proxy hook files if: always() run: | @@ -6555,13 +5234,6 @@ jobs: name: mcp-logs path: /tmp/gh-aw/mcp-logs/ if-no-files-found: ignore - - name: Upload 
SafeInputs logs - if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 - with: - name: safeinputs - path: /tmp/gh-aw/safe-inputs/logs/ - if-no-files-found: ignore - name: Parse agent logs for step summary if: always() uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 @@ -7661,6 +6333,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -9080,3 +7753,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/smoke-claude.md b/.github/workflows/smoke-claude.md index 887f594c2e..e8aabb97de 100644 --- a/.github/workflows/smoke-claude.md +++ b/.github/workflows/smoke-claude.md @@ -19,7 +19,6 @@ engine: max-turns: 15 strict: false imports: - - shared/gh.md - shared/mcp-pagination.md network: allowed: @@ -27,6 +26,7 @@ network: - github - playwright tools: + cache-memory: true github: toolsets: [repos, pull_requests] playwright: @@ -59,7 +59,8 @@ timeout-minutes: 10 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-claude-${{ github.run_id }}.txt` with content "Smoke test passed for Claude at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" -5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -68,4 +69,4 @@ Add a **very brief** comment (max 5-10 lines) to the current pull request with: - βœ… or ❌ for each test result - Overall status: PASS or FAIL -If all tests pass, add the label `smoke-claude` to the pull request. \ No newline at end of file +If all tests pass, add the label `smoke-claude` to the pull request. 
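Note on the safe-outputs plumbing that both lock files in this patch inline: every tool handler appends one JSON object per line (`{ "type": <tool>, ...args }`) to a shared output file, exactly as the `registerDynamicTools` handler above does with `fs.appendFileSync(outputFile, entryJSON + "\n")`. The consuming side is not shown in this diff; the following is a minimal sketch of what reading that stream back could look like. The default path and the `readSafeOutputs`/`groupByType` helper names here are illustrative assumptions, not part of the generated workflow; only the `GH_AW_SAFE_OUTPUTS` variable and the one-entry-per-line format come from the diff itself.

```js
// Minimal sketch (assumptions noted above): read back the NDJSON stream
// written by the safe-outputs MCP server, one {"type": ..., ...args}
// entry per line, as appended via fs.appendFileSync(outputFile, entryJSON + "\n").
const fs = require("fs");

// Illustrative default path; the generated workflow passes the real
// location through the GH_AW_SAFE_OUTPUTS environment variable.
const outputFile = process.env.GH_AW_SAFE_OUTPUTS || "/tmp/gh-aw/safeoutputs/outputs.jsonl";

function readSafeOutputs(file) {
  if (!fs.existsSync(file)) return [];
  const entries = [];
  for (const line of fs.readFileSync(file, "utf8").split("\n")) {
    if (!line.trim()) continue; // skip blank lines between appends
    try {
      entries.push(JSON.parse(line)); // each line is a standalone JSON object
    } catch {
      // tolerate a torn final line if the writer was interrupted mid-append
    }
  }
  return entries;
}

// Group by the "type" field so a consumer can pick out the entries it
// handles (e.g. "create_issue" entries vs. custom safe-job entries).
function groupByType(entries) {
  const grouped = {};
  for (const entry of entries) {
    (grouped[entry.type || "unknown"] ??= []).push(entry);
  }
  return grouped;
}

console.log(groupByType(readSafeOutputs(outputFile)));
```

In the generated workflows, the detection job validates this same agent output before downstream jobs act on it; the new `update_cache_memory` job applies the matching discipline to cache saves by gating on `needs.detection.outputs.success == 'true'`, as the job definitions above show.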
diff --git a/.github/workflows/smoke-codex.lock.yml b/.github/workflows/smoke-codex.lock.yml index 7261e84ae4..11d06e84e4 100644 --- a/.github/workflows/smoke-codex.lock.yml +++ b/.github/workflows/smoke-codex.lock.yml @@ -38,14 +38,13 @@ # name: Smoke Codex # engine: codex # strict: false -# imports: -# - shared/gh.md # network: # allowed: # - defaults # - github # - playwright # tools: +# cache-memory: true # github: # playwright: # allowed_domains: @@ -67,10 +66,6 @@ # timeout-minutes: 10 # ``` # -# Resolved workflow manifest: -# Imports: -# - shared/gh.md -# # Job Dependency Graph: # ```mermaid # graph LR @@ -82,6 +77,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -91,13 +87,16 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> add_comment # create_issue --> conclusion # detection --> add_comment # detection --> add_labels # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -112,7 +111,8 @@ # 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-codex-${{ github.run_id }}.txt` with content "Smoke test passed for Codex at $(date)" (create the directory if it doesn't exist) # 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) # 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" -# 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +# 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +# 6. 
**Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues # # ## Output # @@ -125,6 +125,10 @@ # ``` # # Pinned GitHub Actions: +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd # - actions/download-artifact@v6 (018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) @@ -1857,6 +1861,26 @@ jobs: mkdir -p /tmp/gh-aw/agent mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" + # Cache memory file share configuration from frontmatter processed below + - name: Create cache-memory directory + run: | + mkdir -p /tmp/gh-aw/cache-memory + echo "Cache memory directory created at /tmp/gh-aw/cache-memory" + echo "This folder provides persistent file storage across workflow runs" + echo "LLMs and agentic tools can freely read and write files in this directory" + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + restore-keys: | + memory-${{ github.workflow }}- + memory- + - name: Upload cache-memory data as artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory - name: Configure Git credentials env: REPO_NAME: ${{ github.repository }} @@ -3130,1507 +3154,143 @@ jobs: size: patchResult.patchSize, lines: patchResult.patchLines, }, - }), - }, - ], - }; - }; - return { - defaultHandler, - uploadAssetHandler, - createPullRequestHandler, - pushToPullRequestBranchHandler, - }; - } - function loadTools(server) { - const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; - let ALL_TOOLS = []; - server.debug(`Reading tools from file: ${toolsPath}`); - try { - if (fs.existsSync(toolsPath)) { - server.debug(`Tools file exists at: ${toolsPath}`); - const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); - server.debug(`Tools file content length: ${toolsFileContent.length} characters`); - server.debug(`Tools file read successfully, attempting to parse JSON`); - ALL_TOOLS = JSON.parse(toolsFileContent); - server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); - } else { - server.debug(`Tools file does not exist at: ${toolsPath}`); - server.debug(`Using empty tools array`); - ALL_TOOLS = []; - } - } catch (error) { - server.debug(`Error reading tools file: ${error instanceof Error ? 
error.message : String(error)}`); - server.debug(`Falling back to empty tools array`); - ALL_TOOLS = []; - } - return ALL_TOOLS; - } - function attachHandlers(tools, handlers) { - tools.forEach(tool => { - if (tool.name === "create_pull_request") { - tool.handler = handlers.createPullRequestHandler; - } else if (tool.name === "push_to_pull_request_branch") { - tool.handler = handlers.pushToPullRequestBranchHandler; - } else if (tool.name === "upload_asset") { - tool.handler = handlers.uploadAssetHandler; - } - }); - return tools; - } - function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { - tools.forEach(tool => { - if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { - registerTool(server, tool); - } - }); - } - function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { - Object.keys(config).forEach(configKey => { - const normalizedKey = normalizeTool(configKey); - if (server.tools[normalizedKey]) { - return; - } - if (!tools.find(t => t.name === normalizedKey)) { - const jobConfig = config[configKey]; - const dynamicTool = { - name: normalizedKey, - description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, - inputSchema: { - type: "object", - properties: {}, - additionalProperties: true, - }, - handler: args => { - const entry = { - type: normalizedKey, - ...args, - }; - const entryJSON = JSON.stringify(entry); - fs.appendFileSync(outputFile, entryJSON + "\n"); - const outputText = - jobConfig && jobConfig.output - ? jobConfig.output - : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; - return { - content: [ - { - type: "text", - text: JSON.stringify({ result: outputText }), - }, - ], - }; - }, - }; - if (jobConfig && jobConfig.inputs) { - dynamicTool.inputSchema.properties = {}; - dynamicTool.inputSchema.required = []; - Object.keys(jobConfig.inputs).forEach(inputName => { - const inputDef = jobConfig.inputs[inputName]; - const propSchema = { - type: inputDef.type || "string", - description: inputDef.description || `Input parameter: ${inputName}`, - }; - if (inputDef.options && Array.isArray(inputDef.options)) { - propSchema.enum = inputDef.options; - } - dynamicTool.inputSchema.properties[inputName] = propSchema; - if (inputDef.required) { - dynamicTool.inputSchema.required.push(inputName); - } - }); - } - registerTool(server, dynamicTool); - } - }); - } - const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; - const MCP_LOG_DIR = process.env.GH_AW_MCP_LOG_DIR; - const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); - const { config: safeOutputsConfig, outputFile } = loadConfig(server); - const appendSafeOutput = createAppendFunction(outputFile); - const handlers = createHandlers(server, appendSafeOutput); - const { defaultHandler } = handlers; - let ALL_TOOLS = loadTools(server); - ALL_TOOLS = attachHandlers(ALL_TOOLS, handlers); - server.debug(` output file: ${outputFile}`); - server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); - registerPredefinedTools(server, ALL_TOOLS, safeOutputsConfig, registerTool, normalizeTool); - registerDynamicTools(server, ALL_TOOLS, safeOutputsConfig, outputFile, registerTool, normalizeTool); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); - start(server, { defaultHandler }); - EOF - chmod +x 
/tmp/gh-aw/safeoutputs/mcp-server.cjs - - - name: Setup Safe Inputs JavaScript and Config - run: | - mkdir -p /tmp/gh-aw/safe-inputs/logs - cat > /tmp/gh-aw/safe-inputs/read_buffer.cjs << 'EOF_READ_BUFFER' - class ReadBuffer { - constructor() { - this._buffer = null; - } - append(chunk) { - this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; - } - readMessage() { - if (!this._buffer) { - return null; - } - const index = this._buffer.indexOf("\n"); - if (index === -1) { - return null; - } - const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); - this._buffer = this._buffer.subarray(index + 1); - if (line.trim() === "") { - return this.readMessage(); - } - try { - return JSON.parse(line); - } catch (error) { - throw new Error(`Parse error: ${error instanceof Error ? error.message : String(error)}`); - } - } - } - module.exports = { - ReadBuffer, - }; - EOF_READ_BUFFER - cat > /tmp/gh-aw/safe-inputs/mcp_server_core.cjs << 'EOF_MCP_CORE' - const fs = require("fs"); - const path = require("path"); - const { ReadBuffer } = require("./read_buffer.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const encoder = new TextEncoder(); - function initLogFile(server) { - if (server.logFileInitialized || !server.logDir || !server.logFilePath) return; - try { - if (!fs.existsSync(server.logDir)) { - fs.mkdirSync(server.logDir, { recursive: true }); - } - const timestamp = new Date().toISOString(); - fs.writeFileSync( - server.logFilePath, - `# ${server.serverInfo.name} MCP Server Log\n# Started: ${timestamp}\n# Version: ${server.serverInfo.version}\n\n` - ); - server.logFileInitialized = true; - } catch { - } - } - function createDebugFunction(server) { - return msg => { - const timestamp = new Date().toISOString(); - const formattedMsg = `[${timestamp}] [${server.serverInfo.name}] ${msg}\n`; - process.stderr.write(formattedMsg); - if (server.logDir && server.logFilePath) { - if (!server.logFileInitialized) { - initLogFile(server); - } - if (server.logFileInitialized) { - try { - fs.appendFileSync(server.logFilePath, formattedMsg); - } catch { - } - } - } - }; - } - function createDebugErrorFunction(server) { - return (prefix, error) => { - const errorMessage = error instanceof Error ? error.message : String(error); - server.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - server.debug(`${prefix}Stack trace: ${error.stack}`); - } - }; - } - function createWriteMessageFunction(server) { - return obj => { - const json = JSON.stringify(obj); - server.debug(`send: ${json}`); - const message = json + "\n"; - const bytes = encoder.encode(message); - fs.writeSync(1, bytes); - }; - } - function createReplyResultFunction(server) { - return (id, result) => { - if (id === undefined || id === null) return; - const res = { jsonrpc: "2.0", id, result }; - server.writeMessage(res); - }; - } - function createReplyErrorFunction(server) { - return (id, code, message) => { - if (id === undefined || id === null) { - server.debug(`Error for notification: ${message}`); - return; - } - const error = { code, message }; - const res = { - jsonrpc: "2.0", - id, - error, - }; - server.writeMessage(res); - }; - } - function createServer(serverInfo, options = {}) { - const logDir = options.logDir || undefined; - const logFilePath = logDir ? 
path.join(logDir, "server.log") : undefined; - const server = { - serverInfo, - tools: {}, - debug: () => {}, - debugError: () => {}, - writeMessage: () => {}, - replyResult: () => {}, - replyError: () => {}, - readBuffer: new ReadBuffer(), - logDir, - logFilePath, - logFileInitialized: false, - }; - server.debug = createDebugFunction(server); - server.debugError = createDebugErrorFunction(server); - server.writeMessage = createWriteMessageFunction(server); - server.replyResult = createReplyResultFunction(server); - server.replyError = createReplyErrorFunction(server); - return server; - } - function createWrappedHandler(server, toolName, handlerFn) { - return async args => { - server.debug(` [${toolName}] Invoking handler with args: ${JSON.stringify(args)}`); - try { - const result = await Promise.resolve(handlerFn(args)); - server.debug(` [${toolName}] Handler returned result type: ${typeof result}`); - if (result && typeof result === "object" && Array.isArray(result.content)) { - server.debug(` [${toolName}] Result is already in MCP format`); - return result; - } - let serializedResult; - try { - serializedResult = JSON.stringify(result); - } catch (serializationError) { - server.debugError(` [${toolName}] Serialization error: `, serializationError); - serializedResult = String(result); - } - server.debug(` [${toolName}] Serialized result: ${serializedResult.substring(0, 200)}${serializedResult.length > 200 ? "..." : ""}`); - return { - content: [ - { - type: "text", - text: serializedResult, - }, - ], - }; - } catch (error) { - server.debugError(` [${toolName}] Handler threw error: `, error); - throw error; - } - }; - } - function loadToolHandlers(server, tools, basePath) { - server.debug(`Loading tool handlers...`); - server.debug(` Total tools to process: ${tools.length}`); - server.debug(` Base path: ${basePath || "(not specified)"}`); - let loadedCount = 0; - let skippedCount = 0; - let errorCount = 0; - for (const tool of tools) { - const toolName = tool.name || "(unnamed)"; - if (!tool.handler) { - server.debug(` [${toolName}] No handler path specified, skipping handler load`); - skippedCount++; - continue; - } - const handlerPath = tool.handler; - server.debug(` [${toolName}] Handler path specified: ${handlerPath}`); - let resolvedPath = handlerPath; - if (basePath && !path.isAbsolute(handlerPath)) { - resolvedPath = path.resolve(basePath, handlerPath); - server.debug(` [${toolName}] Resolved relative path to: ${resolvedPath}`); - const normalizedBase = path.resolve(basePath); - const normalizedResolved = path.resolve(resolvedPath); - if (!normalizedResolved.startsWith(normalizedBase + path.sep) && normalizedResolved !== normalizedBase) { - server.debug(` [${toolName}] ERROR: Handler path escapes base directory: ${resolvedPath} is not within ${basePath}`); - errorCount++; - continue; - } - } else if (path.isAbsolute(handlerPath)) { - server.debug(` [${toolName}] Using absolute path (bypasses basePath validation): ${handlerPath}`); - } - tool.handlerPath = handlerPath; - try { - server.debug(` [${toolName}] Loading handler from: ${resolvedPath}`); - if (!fs.existsSync(resolvedPath)) { - server.debug(` [${toolName}] ERROR: Handler file does not exist: ${resolvedPath}`); - errorCount++; - continue; - } - const ext = path.extname(resolvedPath).toLowerCase(); - server.debug(` [${toolName}] Handler file extension: ${ext}`); - if (ext === ".sh") { - server.debug(` [${toolName}] Detected shell script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` 
[${toolName}] Shell script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made shell script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make shell script executable: `, chmodError); - } - } - const { createShellHandler } = require("./mcp_handler_shell.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createShellHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Shell handler created successfully with timeout: ${timeout}s`); - } else if (ext === ".py") { - server.debug(` [${toolName}] Detected Python script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` [${toolName}] Python script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made Python script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make Python script executable: `, chmodError); - } - } - const { createPythonHandler } = require("./mcp_handler_python.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createPythonHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Python handler created successfully with timeout: ${timeout}s`); - } else { - server.debug(` [${toolName}] Loading JavaScript handler module`); - const handlerModule = require(resolvedPath); - server.debug(` [${toolName}] Handler module loaded successfully`); - server.debug(` [${toolName}] Module type: ${typeof handlerModule}`); - let handlerFn = handlerModule; - if (handlerModule && typeof handlerModule === "object" && typeof handlerModule.default === "function") { - handlerFn = handlerModule.default; - server.debug(` [${toolName}] Using module.default export`); - } - if (typeof handlerFn !== "function") { - server.debug(` [${toolName}] ERROR: Handler is not a function, got: ${typeof handlerFn}`); - server.debug(` [${toolName}] Module keys: ${Object.keys(handlerModule || {}).join(", ") || "(none)"}`); - errorCount++; - continue; - } - server.debug(` [${toolName}] Handler function validated successfully`); - server.debug(` [${toolName}] Handler function name: ${handlerFn.name || "(anonymous)"}`); - tool.handler = createWrappedHandler(server, toolName, handlerFn); - loadedCount++; - server.debug(` [${toolName}] JavaScript handler loaded and wrapped successfully`); - } - } catch (error) { - server.debugError(` [${toolName}] ERROR loading handler: `, error); - errorCount++; - } - } - server.debug(`Handler loading complete:`); - server.debug(` Loaded: ${loadedCount}`); - server.debug(` Skipped (no handler path): ${skippedCount}`); - server.debug(` Errors: ${errorCount}`); - return tools; - } - function registerTool(server, tool) { - const normalizedName = normalizeTool(tool.name); - server.tools[normalizedName] = { - ...tool, - name: normalizedName, - }; - server.debug(`Registered tool: ${normalizedName}`); - } - function normalizeTool(name) { - return name.replace(/-/g, "_").toLowerCase(); - } - async function handleMessage(server, req, defaultHandler) { - if (!req || typeof req !== "object") { - server.debug(`Invalid message: not an object`); - return; - } - if (req.jsonrpc !== "2.0") { - server.debug(`Invalid message: missing or invalid jsonrpc field`); - return; - } - const { id, method, params } = req; - if (!method || typeof method !== "string") { - server.replyError(id, -32600, "Invalid Request: method must be 
a string"); - return; - } - try { - if (method === "initialize") { - const clientInfo = params?.clientInfo ?? {}; - server.debug(`client info: ${JSON.stringify(clientInfo)}`); - const protocolVersion = params?.protocolVersion ?? undefined; - const result = { - serverInfo: server.serverInfo, - ...(protocolVersion ? { protocolVersion } : {}), - capabilities: { - tools: {}, - }, - }; - server.replyResult(id, result); - } else if (method === "tools/list") { - const list = []; - Object.values(server.tools).forEach(tool => { - const toolDef = { - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - }; - list.push(toolDef); - }); - server.replyResult(id, { tools: list }); - } else if (method === "tools/call") { - const name = params?.name; - const args = params?.arguments ?? {}; - if (!name || typeof name !== "string") { - server.replyError(id, -32602, "Invalid params: 'name' must be a string"); - return; - } - const tool = server.tools[normalizeTool(name)]; - if (!tool) { - server.replyError(id, -32601, `Tool not found: ${name} (${normalizeTool(name)})`); - return; - } - let handler = tool.handler; - if (!handler && defaultHandler) { - handler = defaultHandler(tool.name); - } - if (!handler) { - server.replyError(id, -32603, `No handler for tool: ${name}`); - return; - } - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - server.replyError(id, -32602, `Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - return; - } - server.debug(`Calling handler for tool: ${name}`); - const result = await Promise.resolve(handler(args)); - server.debug(`Handler returned for tool: ${name}`); - const content = result && result.content ? result.content : []; - server.replyResult(id, { content, isError: false }); - } else if (/^notifications\//.test(method)) { - server.debug(`ignore ${method}`); - } else { - server.replyError(id, -32601, `Method not found: ${method}`); - } - } catch (e) { - server.replyError(id, -32603, e instanceof Error ? e.message : String(e)); - } - } - async function processReadBuffer(server, defaultHandler) { - while (true) { - try { - const message = server.readBuffer.readMessage(); - if (!message) { - break; - } - server.debug(`recv: ${JSON.stringify(message)}`); - await handleMessage(server, message, defaultHandler); - } catch (error) { - server.debug(`Parse error: ${error instanceof Error ? 
error.message : String(error)}`); - } - } - } - function start(server, options = {}) { - const { defaultHandler } = options; - server.debug(`v${server.serverInfo.version} ready on stdio`); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) { - throw new Error("No tools registered"); - } - const onData = async chunk => { - server.readBuffer.append(chunk); - await processReadBuffer(server, defaultHandler); - }; - process.stdin.on("data", onData); - process.stdin.on("error", err => server.debug(`stdin error: ${err}`)); - process.stdin.resume(); - server.debug(`listening...`); - } - module.exports = { - createServer, - registerTool, - normalizeTool, - handleMessage, - processReadBuffer, - start, - loadToolHandlers, - }; - EOF_MCP_CORE - cat > /tmp/gh-aw/safe-inputs/mcp_server.cjs << 'EOF_MCP_SERVER' - class MCPServer { - constructor(serverInfo, options = {}) { - this.serverInfo = serverInfo; - this.capabilities = options.capabilities || { tools: {} }; - this.tools = new Map(); - this.transport = null; - this.initialized = false; - } - tool(name, description, inputSchema, handler) { - this.tools.set(name, { - name, - description, - inputSchema, - handler, - }); - } - async connect(transport) { - this.transport = transport; - transport.setServer(this); - await transport.start(); - } - handleInitialize(params) { - this.initialized = true; - return { - protocolVersion: params.protocolVersion || "2024-11-05", - serverInfo: this.serverInfo, - capabilities: this.capabilities, - }; - } - handleToolsList() { - const tools = Array.from(this.tools.values()).map(tool => ({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - })); - return { tools }; - } - async handleToolsCall(params) { - const tool = this.tools.get(params.name); - if (!tool) { - throw { - code: -32602, - message: `Tool '${params.name}' not found`, - }; - } - try { - const result = await tool.handler(params.arguments || {}); - return result; - } catch (error) { - throw { - code: -32603, - message: error instanceof Error ? 
error.message : String(error), - }; - } - } - handlePing() { - return {}; - } - async handleRequest(request) { - const { id, method, params } = request; - try { - if (!("id" in request)) { - return null; - } - let result; - switch (method) { - case "initialize": - result = this.handleInitialize(params || {}); - break; - case "ping": - result = this.handlePing(); - break; - case "tools/list": - result = this.handleToolsList(); - break; - case "tools/call": - result = await this.handleToolsCall(params || {}); - break; - default: - throw { - code: -32601, - message: `Method '${method}' not found`, - }; - } - return { - jsonrpc: "2.0", - id, - result, - }; - } catch (error) { - return { - jsonrpc: "2.0", - id, - error: { - code: error.code || -32603, - message: error.message || "Internal error", - }, - }; - } - } - } - module.exports = { - MCPServer, - }; - EOF_MCP_SERVER - cat > /tmp/gh-aw/safe-inputs/mcp_http_transport.cjs << 'EOF_MCP_HTTP_TRANSPORT' - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer } = require("./mcp_server.cjs"); - class MCPHTTPTransport { - constructor(options = {}) { - this.sessionIdGenerator = options.sessionIdGenerator; - this.enableJsonResponse = options.enableJsonResponse !== false; - this.enableDnsRebindingProtection = options.enableDnsRebindingProtection || false; - this.server = null; - this.sessionId = null; - this.started = false; - } - setServer(server) { - this.server = server; - } - async start() { - if (this.started) { - throw new Error("Transport already started"); - } - this.started = true; - } - async handleRequest(req, res, parsedBody) { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept, Mcp-Session-Id"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = parsedBody; - if (!body) { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); - } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? 
JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; - } - } - if (!body) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Empty request body", - }, - id: null, - }) - ); - return; - } - if (!body.jsonrpc || body.jsonrpc !== "2.0") { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: jsonrpc must be '2.0'", - }, - id: body.id || null, - }) - ); - return; - } - if (this.sessionIdGenerator) { - if (body.method === "initialize") { - this.sessionId = this.sessionIdGenerator(); - } else { - const requestSessionId = req.headers["mcp-session-id"]; - if (!requestSessionId) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Missing Mcp-Session-Id header", - }, - id: body.id || null, - }) - ); - return; - } - if (requestSessionId !== this.sessionId) { - res.writeHead(404, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32001, - message: "Session not found", - }, - id: body.id || null, - }) - ); - return; - } - } - } - const response = await this.server.handleRequest(body); - if (response === null) { - res.writeHead(204); - res.end(); - return; - } - const headers = { "Content-Type": "application/json" }; - if (this.sessionId) { - headers["mcp-session-id"] = this.sessionId; - } - res.writeHead(200, headers); - res.end(JSON.stringify(response)); - } catch (error) { - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? error.message : String(error), - }, - id: null, - }) - ); - } - } - } - } - module.exports = { - MCPServer, - MCPHTTPTransport, - }; - EOF_MCP_HTTP_TRANSPORT - cat > /tmp/gh-aw/safe-inputs/mcp_logger.cjs << 'EOF_MCP_LOGGER' - function createLogger(serverName) { - const logger = { - debug: msg => { - const timestamp = new Date().toISOString(); - process.stderr.write(`[${timestamp}] [${serverName}] ${msg}\n`); - }, - debugError: (prefix, error) => { - const errorMessage = error instanceof Error ? 
error.message : String(error); - logger.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - logger.debug(`${prefix}Stack trace: ${error.stack}`); - } - }, - }; - return logger; - } - module.exports = { - createLogger, - }; - EOF_MCP_LOGGER - cat > /tmp/gh-aw/safe-inputs/mcp_handler_shell.cjs << 'EOF_HANDLER_SHELL' - const fs = require("fs"); - const path = require("path"); - const { execFile } = require("child_process"); - const os = require("os"); - function createShellHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking shell handler: ${scriptPath}`); - server.debug(` [${toolName}] Shell handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const env = { ...process.env }; - for (const [key, value] of Object.entries(args || {})) { - const envKey = `INPUT_${key.toUpperCase().replace(/-/g, "_")}`; - env[envKey] = String(value); - server.debug(` [${toolName}] Set env: ${envKey}=${String(value).substring(0, 100)}${String(value).length > 100 ? "..." : ""}`); - } - const outputFile = path.join(os.tmpdir(), `mcp-shell-output-${Date.now()}-${Math.random().toString(36).substring(2)}.txt`); - env.GITHUB_OUTPUT = outputFile; - server.debug(` [${toolName}] Output file: ${outputFile}`); - fs.writeFileSync(outputFile, ""); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing shell script...`); - execFile( - scriptPath, - [], - { - env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Shell script error: `, error); - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - reject(error); - return; - } - const outputs = {}; - try { - if (fs.existsSync(outputFile)) { - const outputContent = fs.readFileSync(outputFile, "utf-8"); - server.debug( - ` [${toolName}] Output file content: ${outputContent.substring(0, 500)}${outputContent.length > 500 ? "..." : ""}` - ); - const lines = outputContent.split("\n"); - for (const line of lines) { - const trimmed = line.trim(); - if (trimmed && trimmed.includes("=")) { - const eqIndex = trimmed.indexOf("="); - const key = trimmed.substring(0, eqIndex); - const value = trimmed.substring(eqIndex + 1); - outputs[key] = value; - server.debug(` [${toolName}] Parsed output: ${key}=${value.substring(0, 100)}${value.length > 100 ? "..." 
: ""}`); - } - } - } - } catch (readError) { - server.debugError(` [${toolName}] Error reading output file: `, readError); - } - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - const result = { - stdout: stdout || "", - stderr: stderr || "", - outputs, - }; - server.debug(` [${toolName}] Shell handler completed, outputs: ${Object.keys(outputs).join(", ") || "(none)"}`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - }); - }; - } - module.exports = { - createShellHandler, - }; - EOF_HANDLER_SHELL - cat > /tmp/gh-aw/safe-inputs/mcp_handler_python.cjs << 'EOF_HANDLER_PYTHON' - const { execFile } = require("child_process"); - function createPythonHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking Python handler: ${scriptPath}`); - server.debug(` [${toolName}] Python handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const inputJson = JSON.stringify(args || {}); - server.debug( - ` [${toolName}] Input JSON (${inputJson.length} bytes): ${inputJson.substring(0, 200)}${inputJson.length > 200 ? "..." : ""}` - ); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing Python script...`); - const child = execFile( - "python3", - [scriptPath], - { - env: process.env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Python script error: `, error); - reject(error); - return; - } - let result; - try { - if (stdout && stdout.trim()) { - result = JSON.parse(stdout.trim()); - } else { - result = { stdout: stdout || "", stderr: stderr || "" }; - } - } catch (parseError) { - server.debug(` [${toolName}] Output is not JSON, returning as text`); - result = { stdout: stdout || "", stderr: stderr || "" }; - } - server.debug(` [${toolName}] Python handler completed successfully`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - if (child.stdin) { - child.stdin.write(inputJson); - child.stdin.end(); - } - }); + }), + }, + ], + }; }; - } - module.exports = { - createPythonHandler, - }; - EOF_HANDLER_PYTHON - cat > /tmp/gh-aw/safe-inputs/safe_inputs_config_loader.cjs << 'EOF_CONFIG_LOADER' - const fs = require("fs"); - function loadConfig(configPath) { - if (!fs.existsSync(configPath)) { - throw new Error(`Configuration file not found: ${configPath}`); - } - const configContent = fs.readFileSync(configPath, "utf-8"); - const config = JSON.parse(configContent); - if (!config.tools || !Array.isArray(config.tools)) { - throw new Error("Configuration must contain a 'tools' array"); - } - return config; - } - module.exports = { - loadConfig, - }; - EOF_CONFIG_LOADER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_tool_factory.cjs << 'EOF_TOOL_FACTORY' - function createToolConfig(name, description, inputSchema, handlerPath) { return { - name, - description, - inputSchema, - handler: handlerPath, + defaultHandler, + uploadAssetHandler, + createPullRequestHandler, + pushToPullRequestBranchHandler, }; } - module.exports = { - createToolConfig, - }; - EOF_TOOL_FACTORY - cat > 
/tmp/gh-aw/safe-inputs/safe_inputs_validation.cjs << 'EOF_VALIDATION' - function validateRequiredFields(args, inputSchema) { - const requiredFields = inputSchema && Array.isArray(inputSchema.required) ? inputSchema.required : []; - if (!requiredFields.length) { - return []; - } - const missing = requiredFields.filter(f => { - const value = args[f]; - return value === undefined || value === null || (typeof value === "string" && value.trim() === ""); - }); - return missing; - } - module.exports = { - validateRequiredFields, - }; - EOF_VALIDATION - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server.cjs << 'EOF_SAFE_INPUTS_SERVER' - const path = require("path"); - const { createServer, registerTool, loadToolHandlers, start } = require("./mcp_server_core.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { createToolConfig } = require("./safe_inputs_tool_factory.cjs"); - function startSafeInputsServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const logDir = options.logDir || config.logDir || undefined; - const server = createServer({ name: serverName, version }, { logDir }); - server.debug(`Loading safe-inputs configuration from: ${configPath}`); - server.debug(`Base path for handlers: ${basePath}`); - server.debug(`Tools to load: ${config.tools.length}`); - const tools = loadToolHandlers(server, config.tools, basePath); - for (const tool of tools) { - registerTool(server, tool); - } + function loadTools(server) { + const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; + let ALL_TOOLS = []; + server.debug(`Reading tools from file: ${toolsPath}`); try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - server.debug(`Deleted configuration file: ${configPath}`); + if (fs.existsSync(toolsPath)) { + server.debug(`Tools file exists at: ${toolsPath}`); + const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); + server.debug(`Tools file content length: ${toolsFileContent.length} characters`); + server.debug(`Tools file read successfully, attempting to parse JSON`); + ALL_TOOLS = JSON.parse(toolsFileContent); + server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); + } else { + server.debug(`Tools file does not exist at: ${toolsPath}`); + server.debug(`Using empty tools array`); + ALL_TOOLS = []; } } catch (error) { - server.debugError(`Warning: Could not delete configuration file: `, error); + server.debug(`Error reading tools file: ${error instanceof Error ? 
error.message : String(error)}`); + server.debug(`Falling back to empty tools array`); + ALL_TOOLS = []; } - start(server); + return ALL_TOOLS; } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server.cjs [--log-dir ]"); - process.exit(1); - } - const configPath = args[0]; - const options = {}; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; + function attachHandlers(tools, handlers) { + tools.forEach(tool => { + if (tool.name === "create_pull_request") { + tool.handler = handlers.createPullRequestHandler; + } else if (tool.name === "push_to_pull_request_branch") { + tool.handler = handlers.pushToPullRequestBranchHandler; + } else if (tool.name === "upload_asset") { + tool.handler = handlers.uploadAssetHandler; } - } - try { - startSafeInputsServer(configPath, options); - } catch (error) { - console.error(`Error starting safe-inputs server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); - } + }); + return tools; } - module.exports = { - startSafeInputsServer, - loadConfig, - createToolConfig, - }; - EOF_SAFE_INPUTS_SERVER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server_http.cjs << 'EOF_SAFE_INPUTS_SERVER_HTTP' - const path = require("path"); - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer, MCPHTTPTransport } = require("./mcp_http_transport.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { loadToolHandlers } = require("./mcp_server_core.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const { createLogger } = require("./mcp_logger.cjs"); - function createMCPServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const server = new MCPServer( - { - name: serverName, - version: version, - }, - { - capabilities: { - tools: {}, - }, - } - ); - const logger = createLogger(serverName); - logger.debug(`=== Creating MCP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Loading safe-inputs configuration from: ${configPath}`); - logger.debug(`Base path for handlers: ${basePath}`); - logger.debug(`Server name: ${serverName}`); - logger.debug(`Server version: ${version}`); - logger.debug(`Tools to load: ${config.tools.length}`); - const tempServer = { debug: logger.debug, debugError: logger.debugError }; - const tools = loadToolHandlers(tempServer, config.tools, basePath); - logger.debug(`Registering tools with MCP server...`); - let registeredCount = 0; - let skippedCount = 0; - for (const tool of tools) { - if (!tool.handler) { - logger.debug(`Skipping tool ${tool.name} - no handler loaded`); - skippedCount++; - continue; + function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { + tools.forEach(tool => { + if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { + registerTool(server, tool); } - logger.debug(`Registering tool: ${tool.name}`); - server.tool(tool.name, tool.description || "", tool.inputSchema || { type: "object", properties: {} }, async args => { - logger.debug(`Calling handler for tool: ${tool.name}`); - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - throw 
new Error(`Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - } - const result = await Promise.resolve(tool.handler(args)); - logger.debug(`Handler returned for tool: ${tool.name}`); - const content = result && result.content ? result.content : []; - return { content, isError: false }; - }); - registeredCount++; - } - logger.debug(`Tool registration complete: ${registeredCount} registered, ${skippedCount} skipped`); - logger.debug(`=== MCP Server Creation Complete ===`); - try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - logger.debug(`Deleted configuration file: ${configPath}`); + }); + } + function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { + Object.keys(config).forEach(configKey => { + const normalizedKey = normalizeTool(configKey); + if (server.tools[normalizedKey]) { + return; } - } catch (error) { - logger.debugError(`Warning: Could not delete configuration file: `, error); - } - return { server, config, logger }; - } - async function startHttpServer(configPath, options = {}) { - const port = options.port || 3000; - const stateless = options.stateless || false; - const logger = createLogger("safe-inputs-startup"); - logger.debug(`=== Starting Safe Inputs MCP HTTP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Port: ${port}`); - logger.debug(`Mode: ${stateless ? "stateless" : "stateful"}`); - logger.debug(`Environment: NODE_VERSION=${process.version}, PLATFORM=${process.platform}`); - try { - const { server, config, logger: mcpLogger } = createMCPServer(configPath, { logDir: options.logDir }); - Object.assign(logger, mcpLogger); - logger.debug(`MCP server created successfully`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools configured: ${config.tools.length}`); - logger.debug(`Creating HTTP transport...`); - const transport = new MCPHTTPTransport({ - sessionIdGenerator: stateless ? 
undefined : () => randomUUID(), - enableJsonResponse: true, - enableDnsRebindingProtection: false, - }); - logger.debug(`HTTP transport created`); - logger.debug(`Connecting server to transport...`); - await server.connect(transport); - logger.debug(`Server connected to transport successfully`); - logger.debug(`Creating HTTP server...`); - const httpServer = http.createServer(async (req, res) => { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method === "GET" && req.url === "/health") { - res.writeHead(200, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - status: "ok", - server: config.serverName || "safeinputs", - version: config.version || "1.0.0", - tools: config.tools.length, - }) - ); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = null; - if (req.method === "POST") { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); + if (!tools.find(t => t.name === normalizedKey)) { + const jobConfig = config[configKey]; + const dynamicTool = { + name: normalizedKey, + description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, + inputSchema: { + type: "object", + properties: {}, + additionalProperties: true, + }, + handler: args => { + const entry = { + type: normalizedKey, + ...args, + }; + const entryJSON = JSON.stringify(entry); + fs.appendFileSync(outputFile, entryJSON + "\n"); + const outputText = + jobConfig && jobConfig.output + ? jobConfig.output + : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; + return { + content: [ + { + type: "text", + text: JSON.stringify({ result: outputText }), + }, + ], + }; + }, + }; + if (jobConfig && jobConfig.inputs) { + dynamicTool.inputSchema.properties = {}; + dynamicTool.inputSchema.required = []; + Object.keys(jobConfig.inputs).forEach(inputName => { + const inputDef = jobConfig.inputs[inputName]; + const propSchema = { + type: inputDef.type || "string", + description: inputDef.description || `Input parameter: ${inputName}`, + }; + if (inputDef.options && Array.isArray(inputDef.options)) { + propSchema.enum = inputDef.options; } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; + dynamicTool.inputSchema.properties[inputName] = propSchema; + if (inputDef.required) { + dynamicTool.inputSchema.required.push(inputName); } - } - await transport.handleRequest(req, res, body); - } catch (error) { - logger.debugError("Error handling request: ", error); - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? 
error.message : String(error), - }, - id: null, - }) - ); - } - } - }); - logger.debug(`Attempting to bind to port ${port}...`); - httpServer.listen(port, () => { - logger.debug(`=== Safe Inputs MCP HTTP Server Started Successfully ===`); - logger.debug(`HTTP server listening on http://localhost:${port}`); - logger.debug(`MCP endpoint: POST http://localhost:${port}/`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools available: ${config.tools.length}`); - logger.debug(`Server is ready to accept requests`); - }); - httpServer.on("error", error => { - if (error.code === "EADDRINUSE") { - logger.debugError(`ERROR: Port ${port} is already in use. `, error); - } else if (error.code === "EACCES") { - logger.debugError(`ERROR: Permission denied to bind to port ${port}. `, error); - } else { - logger.debugError(`ERROR: Failed to start HTTP server: `, error); + }); } - process.exit(1); - }); - process.on("SIGINT", () => { - logger.debug("Received SIGINT, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - process.on("SIGTERM", () => { - logger.debug("Received SIGTERM, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - return httpServer; - } catch (error) { - const errorLogger = createLogger("safe-inputs-startup-error"); - errorLogger.debug(`=== FATAL ERROR: Failed to start Safe Inputs MCP HTTP Server ===`); - errorLogger.debug(`Error type: ${error.constructor.name}`); - errorLogger.debug(`Error message: ${error.message}`); - if (error.stack) { - errorLogger.debug(`Stack trace:\n${error.stack}`); - } - if (error.code) { - errorLogger.debug(`Error code: ${error.code}`); - } - errorLogger.debug(`Configuration file: ${configPath}`); - errorLogger.debug(`Port: ${port}`); - throw error; - } - } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server_http.cjs [--port ] [--stateless] [--log-dir ]"); - process.exit(1); - } - const configPath = args[0]; - const options = { - port: 3000, - stateless: false, - logDir: undefined, - }; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--port" && args[i + 1]) { - options.port = parseInt(args[i + 1], 10); - i++; - } else if (args[i] === "--stateless") { - options.stateless = true; - } else if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; - } - } - startHttpServer(configPath, options).catch(error => { - console.error(`Error starting HTTP server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); + registerTool(server, dynamicTool); + } }); } - module.exports = { - startHttpServer, - createMCPServer, - }; - EOF_SAFE_INPUTS_SERVER_HTTP - cat > /tmp/gh-aw/safe-inputs/tools.json << 'EOF_TOOLS_JSON' - { - "serverName": "safeinputs", - "version": "1.0.0", - "logDir": "/tmp/gh-aw/safe-inputs/logs", - "tools": [ - { - "name": "gh", - "description": "Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", - "inputSchema": { - "properties": { - "args": { - "description": "Arguments to pass to gh CLI (without the 'gh' prefix). 
Examples: 'pr list --limit 5', 'issue view 123', 'api repos/{owner}/{repo}'", - "type": "string" - } - }, - "required": [ - "args" - ], - "type": "object" - }, - "handler": "gh.sh", - "env": { - "GH_AW_GH_TOKEN": "GH_AW_GH_TOKEN" - }, - "timeout": 60 - } - ] - } - EOF_TOOLS_JSON - cat > /tmp/gh-aw/safe-inputs/mcp-server.cjs << 'EOFSI' - const path = require("path"); - const { startHttpServer } = require("./safe_inputs_mcp_server_http.cjs"); - const configPath = path.join(__dirname, "tools.json"); - const port = parseInt(process.env.GH_AW_SAFE_INPUTS_PORT || "3000", 10); - const apiKey = process.env.GH_AW_SAFE_INPUTS_API_KEY || ""; - startHttpServer(configPath, { - port: port, - stateless: false, - logDir: "/tmp/gh-aw/safe-inputs/logs" - }).catch(error => { - console.error("Failed to start safe-inputs HTTP server:", error); - process.exit(1); - }); - EOFSI - chmod +x /tmp/gh-aw/safe-inputs/mcp-server.cjs - - - name: Setup Safe Inputs Tool Files - run: | - cat > /tmp/gh-aw/safe-inputs/gh.sh << 'EOFSH_gh' - #!/bin/bash - # Auto-generated safe-input tool: gh - # Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh . Use single quotes ' for complex args to avoid shell interpretation issues. - - set -euo pipefail - - GH_TOKEN=$GH_AW_GH_TOKEN gh $INPUT_ARGS - - EOFSH_gh - chmod +x /tmp/gh-aw/safe-inputs/gh.sh - - - name: Generate Safe Inputs MCP Server Config - id: safe-inputs-config - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 - with: - script: | - function generateSafeInputsConfig({ core, crypto }) { - const apiKeyBuffer = crypto.randomBytes(32); - const apiKey = apiKeyBuffer.toString("base64").replace(/[/+=]/g, ""); - const port = 3000; - core.setOutput("safe_inputs_api_key", apiKey); - core.setOutput("safe_inputs_port", port.toString()); - core.info(`Safe Inputs MCP server will run on port ${port}`); - return { apiKey, port }; - } - - // Execute the function - const crypto = require('crypto'); - generateSafeInputsConfig({ core, crypto }); - - - name: Start Safe Inputs MCP HTTP Server - id: safe-inputs-start - run: | - # Set environment variables for the server - export GH_AW_SAFE_INPUTS_PORT=${{ steps.safe-inputs-config.outputs.safe_inputs_port }} - export GH_AW_SAFE_INPUTS_API_KEY=${{ steps.safe-inputs-config.outputs.safe_inputs_api_key }} - - export GH_AW_GH_TOKEN="${GH_AW_GH_TOKEN}" - - cd /tmp/gh-aw/safe-inputs - # Verify required files exist - echo "Verifying safe-inputs setup..." - if [ ! -f mcp-server.cjs ]; then - echo "ERROR: mcp-server.cjs not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - if [ ! -f tools.json ]; then - echo "ERROR: tools.json not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - echo "Configuration files verified" - # Log environment configuration - echo "Server configuration:" - echo " Port: $GH_AW_SAFE_INPUTS_PORT" - echo " API Key: ${GH_AW_SAFE_INPUTS_API_KEY:0:8}..." 
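The `gh.sh` tool above receives its MCP `args` input through the `INPUT_ARGS` environment variable and can report results via the `GITHUB_OUTPUT` file, following the contract implemented by `mcp_handler_shell.cjs` earlier in this diff. A minimal synchronous sketch of that round trip (a simplification of the async handler above; the example invocation is illustrative):

```javascript
// Sketch of the env/output contract used by createShellHandler (illustrative only).
const { execFileSync } = require("child_process");
const fs = require("fs");
const os = require("os");
const path = require("path");

function runShellTool(scriptPath, args) {
  const env = { ...process.env };
  // Each MCP argument becomes INPUT_<NAME>, e.g. { args: "pr list" } -> INPUT_ARGS.
  for (const [key, value] of Object.entries(args)) {
    env[`INPUT_${key.toUpperCase().replace(/-/g, "_")}`] = String(value);
  }
  // The script may append key=value lines to $GITHUB_OUTPUT, mirroring composite actions.
  const outputFile = path.join(os.tmpdir(), `tool-output-${Date.now()}.txt`);
  fs.writeFileSync(outputFile, "");
  env.GITHUB_OUTPUT = outputFile;
  const stdout = execFileSync(scriptPath, [], { env, encoding: "utf8" });
  const outputs = {};
  for (const line of fs.readFileSync(outputFile, "utf8").split("\n")) {
    const eq = line.indexOf("=");
    if (eq > 0) outputs[line.slice(0, eq)] = line.slice(eq + 1);
  }
  fs.unlinkSync(outputFile);
  return { stdout, outputs };
}

// Hypothetical call equivalent to the MCP request
// {"name":"gh","arguments":{"args":"pr list --limit 5"}}:
// runShellTool("/tmp/gh-aw/safe-inputs/gh.sh", { args: "pr list --limit 5" });
```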
- echo " Working directory: $(pwd)" - # Ensure logs directory exists - mkdir -p /tmp/gh-aw/safe-inputs/logs - # Create initial server.log file for artifact upload - echo "Safe Inputs MCP Server Log" > /tmp/gh-aw/safe-inputs/logs/server.log - echo "Start time: $(date)" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "===========================================" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "" >> /tmp/gh-aw/safe-inputs/logs/server.log - # Start the HTTP server in the background - echo "Starting safe-inputs MCP HTTP server..." - node mcp-server.cjs >> /tmp/gh-aw/safe-inputs/logs/server.log 2>&1 & - SERVER_PID=$! - echo "Started safe-inputs MCP server with PID $SERVER_PID" - # Wait for server to be ready (max 10 seconds) - echo "Waiting for server to become ready..." - for i in {1..10}; do - # Check if process is still running - if ! kill -0 $SERVER_PID 2>/dev/null; then - echo "ERROR: Server process $SERVER_PID has died" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - exit 1 - fi - # Check if server is responding - if curl -s -f http://localhost:$GH_AW_SAFE_INPUTS_PORT/health > /dev/null 2>&1; then - echo "Safe Inputs MCP server is ready (attempt $i/10)" - break - fi - if [ $i -eq 10 ]; then - echo "ERROR: Safe Inputs MCP server failed to start after 10 seconds" - echo "Process status: $(ps aux | grep '[m]cp-server.cjs' || echo 'not running')" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - echo "Checking port availability:" - netstat -tuln | grep $GH_AW_SAFE_INPUTS_PORT || echo "Port $GH_AW_SAFE_INPUTS_PORT not listening" - exit 1 - fi - echo "Waiting for server... (attempt $i/10)" - sleep 1 - done - # Output the configuration for the MCP client - echo "port=$GH_AW_SAFE_INPUTS_PORT" >> $GITHUB_OUTPUT - echo "api_key=$GH_AW_SAFE_INPUTS_API_KEY" >> $GITHUB_OUTPUT + const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; + const MCP_LOG_DIR = process.env.GH_AW_MCP_LOG_DIR; + const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); + const { config: safeOutputsConfig, outputFile } = loadConfig(server); + const appendSafeOutput = createAppendFunction(outputFile); + const handlers = createHandlers(server, appendSafeOutput); + const { defaultHandler } = handlers; + let ALL_TOOLS = loadTools(server); + ALL_TOOLS = attachHandlers(ALL_TOOLS, handlers); + server.debug(` output file: ${outputFile}`); + server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); + registerPredefinedTools(server, ALL_TOOLS, safeOutputsConfig, registerTool, normalizeTool); + registerDynamicTools(server, ALL_TOOLS, safeOutputsConfig, outputFile, registerTool, normalizeTool); + server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); + if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); + start(server, { defaultHandler }); + EOF + chmod +x /tmp/gh-aw/safeoutputs/mcp-server.cjs - name: Setup MCPs env: GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_SAFE_INPUTS_PORT: ${{ steps.safe-inputs-start.outputs.port }} - GH_AW_SAFE_INPUTS_API_KEY: ${{ steps.safe-inputs-start.outputs.api_key }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | mkdir -p /tmp/gh-aw/mcp-config cat > /tmp/gh-aw/mcp-config/config.toml << EOF @@ -4674,13 +3334,6 @@ jobs: "localhost;localhost:*;127.0.0.1;127.0.0.1:*;github.com" ] - [mcp_servers.safeinputs] - command = "node" - 
args = [ - "/tmp/gh-aw/safe-inputs/mcp-server.cjs", - ] - env_vars = ["GH_AW_GH_TOKEN"] - [mcp_servers.safeoutputs] command = "node" args = [ @@ -4794,8 +3447,6 @@ jobs: PROMPT_DIR="$(dirname "$GH_AW_PROMPT")" mkdir -p "$PROMPT_DIR" cat << 'PROMPT_EOF' | envsubst > "$GH_AW_PROMPT" - - # Smoke Test: Codex Engine Validation **IMPORTANT: Keep all outputs extremely short and concise. Use single-line responses where possible. No verbose explanations.** @@ -4806,7 +3457,8 @@ jobs: 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-codex-${GH_AW_GITHUB_RUN_ID}.txt` with content "Smoke test passed for Codex at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" - 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues + 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${GH_AW_GITHUB_RUN_ID}.txt` with content "Cache memory test for run ${GH_AW_GITHUB_RUN_ID}" and verify it was created successfully + 6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -4876,6 +3528,31 @@ jobs: Do NOT attempt to edit files outside these directories as you do not have the necessary permissions. + PROMPT_EOF + - name: Append cache memory instructions to prompt + env: + GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt + run: | + cat << 'PROMPT_EOF' | envsubst >> "$GH_AW_PROMPT" + + --- + + ## Cache Folder Available + + You have access to a persistent cache folder at `/tmp/gh-aw/cache-memory/` where you can read and write files to create memories and store information. + + - **Read/Write Access**: You can freely read from and write to any files in this folder + - **Persistence**: Files in this folder persist across workflow runs via GitHub Actions cache + - **Last Write Wins**: If multiple processes write to the same file, the last write will be preserved + - **File Share**: Use this as a simple file share - organize files as you see fit + + Examples of what you can store: + - `/tmp/gh-aw/cache-memory/notes.txt` - general notes and observations + - `/tmp/gh-aw/cache-memory/preferences.json` - user preferences and settings + - `/tmp/gh-aw/cache-memory/history.log` - activity history and logs + - `/tmp/gh-aw/cache-memory/state/` - organized state files in subdirectories + + Feel free to create, read, update, and organize files in this folder as needed for your tasks. 
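For example, a task could keep a small JSON state file across runs (file names and fields here are illustrative, assuming Node.js is available):

```javascript
// Read previous state if an earlier run cached it; otherwise start fresh.
const fs = require("fs");
const stateDir = "/tmp/gh-aw/cache-memory/state";
const stateFile = `${stateDir}/run-state.json`; // hypothetical file
fs.mkdirSync(stateDir, { recursive: true });
const state = fs.existsSync(stateFile)
  ? JSON.parse(fs.readFileSync(stateFile, "utf8"))
  : { runs: 0 };
state.runs += 1;
state.lastRun = new Date().toISOString();
// Last write wins: whatever is on disk when the cache is saved is what persists.
fs.writeFileSync(stateFile, JSON.stringify(state, null, 2));
```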
PROMPT_EOF - name: Append safe outputs instructions to prompt env: @@ -5048,7 +3725,6 @@ jobs: env: CODEX_API_KEY: ${{ secrets.CODEX_API_KEY || secrets.OPENAI_API_KEY }} CODEX_HOME: /tmp/gh-aw/mcp-config - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/config.toml GH_AW_MODEL_AGENT_CODEX: ${{ vars.GH_AW_MODEL_AGENT_CODEX || '' }} @@ -6117,13 +4793,6 @@ jobs: name: mcp-logs path: /tmp/gh-aw/mcp-logs/ if-no-files-found: ignore - - name: Upload SafeInputs logs - if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 - with: - name: safeinputs - path: /tmp/gh-aw/safe-inputs/logs/ - if-no-files-found: ignore - name: Parse agent logs for step summary if: always() uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 @@ -7446,6 +6115,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -8844,3 +7514,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/smoke-codex.md b/.github/workflows/smoke-codex.md index 031b9848b6..064d7bd8da 100644 --- a/.github/workflows/smoke-codex.md +++ b/.github/workflows/smoke-codex.md @@ -15,14 +15,13 @@ permissions: name: Smoke Codex engine: codex strict: false -imports: - - shared/gh.md network: allowed: - defaults - github - playwright tools: + cache-memory: true github: playwright: allowed_domains: @@ -54,7 +53,8 @@ timeout-minutes: 10 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-codex-${{ github.run_id }}.txt` with content "Smoke test passed for Codex at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" -5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -63,4 +63,4 @@ Add a **very brief** comment (max 5-10 lines) to the current pull request with: - βœ… or ❌ for each test result - Overall status: PASS or FAIL -If all tests pass, add the label `smoke-codex` to the pull request. \ No newline at end of file +If all tests pass, add the label `smoke-codex` to the pull request. 
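The `update_cache_memory` job above saves under a key unique to the run (`memory-${{ github.workflow }}-${{ github.run_id }}`), while the restore step in the agent job falls back through `restore-keys` prefixes to pick up the most recent prior entry. Because the save job is gated on `needs.detection.outputs.success == 'true'`, a run whose output fails threat detection never publishes its memory. A small sketch of the key scheme (illustrative helper, not part of the generated workflow):

```javascript
// Sketch of the cache key scheme: each run saves under a unique key, and the
// next run restores the newest matching entry via prefix fallback (restore-keys).
function cacheKeys(workflow, runId) {
  return {
    key: `memory-${workflow}-${runId}`,              // exact key written by update_cache_memory
    restoreKeys: [`memory-${workflow}-`, "memory-"], // prefixes tried on restore, newest first
  };
}

// e.g. cacheKeys("Smoke Codex", "1234567890")
// -> { key: "memory-Smoke Codex-1234567890",
//      restoreKeys: ["memory-Smoke Codex-", "memory-"] }
```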
diff --git a/.github/workflows/smoke-copilot-no-firewall.lock.yml b/.github/workflows/smoke-copilot-no-firewall.lock.yml index d689658e09..52f2777d94 100644 --- a/.github/workflows/smoke-copilot-no-firewall.lock.yml +++ b/.github/workflows/smoke-copilot-no-firewall.lock.yml @@ -37,8 +37,6 @@ # issues: read # name: Smoke Copilot No Firewall # engine: copilot -# imports: -# - shared/gh.md # network: # allowed: # - defaults @@ -47,6 +45,7 @@ # - playwright # firewall: false # tools: +# cache-memory: true # edit: # bash: # - "*" @@ -70,10 +69,6 @@ # strict: false # ``` # -# Resolved workflow manifest: -# Imports: -# - shared/gh.md -# # Job Dependency Graph: # ```mermaid # graph LR @@ -85,6 +80,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # update_pull_request["update_pull_request"] # activation --> agent # activation --> conclusion @@ -95,6 +91,7 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # agent --> update_pull_request # create_issue --> add_comment # create_issue --> conclusion @@ -102,8 +99,10 @@ # detection --> add_labels # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # detection --> update_pull_request # pre_activation --> activation +# update_cache_memory --> conclusion # update_pull_request --> conclusion # ``` # @@ -119,7 +118,8 @@ # 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-copilot-${{ github.run_id }}.txt` with content "Smoke test passed for Copilot at $(date)" (create the directory if it doesn't exist) # 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) # 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" -# 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +# 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +# 6. 
**Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues # # ## Output # @@ -137,6 +137,10 @@ # ``` # # Pinned GitHub Actions: +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd # - actions/download-artifact@v6 (018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) @@ -1869,6 +1873,26 @@ jobs: mkdir -p /tmp/gh-aw/agent mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" + # Cache memory file share configuration from frontmatter processed below + - name: Create cache-memory directory + run: | + mkdir -p /tmp/gh-aw/cache-memory + echo "Cache memory directory created at /tmp/gh-aw/cache-memory" + echo "This folder provides persistent file storage across workflow runs" + echo "LLMs and agentic tools can freely read and write files in this directory" + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + restore-keys: | + memory-${{ github.workflow }}- + memory- + - name: Upload cache-memory data as artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory - name: Configure Git credentials env: REPO_NAME: ${{ github.repository }} @@ -3336,1511 +3360,134 @@ jobs: EOF chmod +x /tmp/gh-aw/safeoutputs/mcp-server.cjs - - name: Setup Safe Inputs JavaScript and Config + - name: Setup MCPs + env: + GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} + GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} run: | - mkdir -p /tmp/gh-aw/safe-inputs/logs - cat > /tmp/gh-aw/safe-inputs/read_buffer.cjs << 'EOF_READ_BUFFER' - class ReadBuffer { - constructor() { - this._buffer = null; - } - append(chunk) { - this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; - } - readMessage() { - if (!this._buffer) { - return null; - } - const index = this._buffer.indexOf("\n"); - if (index === -1) { - return null; - } - const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); - this._buffer = this._buffer.subarray(index + 1); - if (line.trim() === "") { - return this.readMessage(); + mkdir -p /tmp/gh-aw/mcp-config + mkdir -p /home/runner/.copilot + cat > /home/runner/.copilot/mcp-config.json << EOF + { + "mcpServers": { + "github": { + "type": "local", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "-e", + "GITHUB_READ_ONLY=1", + "-e", + "GITHUB_TOOLSETS=context,repos,issues,pull_requests", + "ghcr.io/github/github-mcp-server:v0.24.0" + ], + "tools": ["*"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "\${GITHUB_MCP_SERVER_TOKEN}" } - try { - return JSON.parse(line); - } catch (error) { - throw new Error(`Parse error: ${error instanceof Error ? 
error.message : String(error)}`); + }, + "playwright": { + "type": "local", + "command": "docker", + "args": ["run", "-i", "--rm", "--init", "mcr.microsoft.com/playwright/mcp", "--output-dir", "/tmp/gh-aw/mcp-logs/playwright", "--allowed-hosts", "localhost;localhost:*;127.0.0.1;127.0.0.1:*;github.com"], + "tools": ["*"] + }, + "safeoutputs": { + "type": "local", + "command": "node", + "args": ["/tmp/gh-aw/safeoutputs/mcp-server.cjs"], + "tools": ["*"], + "env": { + "GH_AW_SAFE_OUTPUTS": "\${GH_AW_SAFE_OUTPUTS}", + "GH_AW_ASSETS_BRANCH": "\${GH_AW_ASSETS_BRANCH}", + "GH_AW_ASSETS_MAX_SIZE_KB": "\${GH_AW_ASSETS_MAX_SIZE_KB}", + "GH_AW_ASSETS_ALLOWED_EXTS": "\${GH_AW_ASSETS_ALLOWED_EXTS}", + "GITHUB_REPOSITORY": "\${GITHUB_REPOSITORY}", + "GITHUB_SERVER_URL": "\${GITHUB_SERVER_URL}", + "GITHUB_SHA": "\${GITHUB_SHA}", + "GITHUB_WORKSPACE": "\${GITHUB_WORKSPACE}", + "DEFAULT_BRANCH": "\${DEFAULT_BRANCH}" } + }, + "serena": { + "type": "local", + "command": "uvx", + "args": ["--from", "git+https://github.com/oraios/serena", "serena", "start-mcp-server", "--context", "codex", "--project", "${{ github.workspace }}"], + "tools": ["*"] } } - module.exports = { - ReadBuffer, + } + EOF + echo "-------START MCP CONFIG-----------" + cat /home/runner/.copilot/mcp-config.json + echo "-------END MCP CONFIG-----------" + echo "-------/home/runner/.copilot-----------" + find /home/runner/.copilot + echo "HOME: $HOME" + echo "GITHUB_COPILOT_CLI_MODE: $GITHUB_COPILOT_CLI_MODE" + - name: Generate agentic run info + id: generate_aw_info + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 + with: + script: | + const fs = require('fs'); + + const awInfo = { + engine_id: "copilot", + engine_name: "GitHub Copilot CLI", + model: process.env.GH_AW_MODEL_AGENT_COPILOT || "", + version: "", + agent_version: "0.0.367", + workflow_name: "Smoke Copilot No Firewall", + experimental: false, + supports_tools_allowlist: true, + supports_http_transport: true, + run_id: context.runId, + run_number: context.runNumber, + run_attempt: process.env.GITHUB_RUN_ATTEMPT, + repository: context.repo.owner + '/' + context.repo.repo, + ref: context.ref, + sha: context.sha, + actor: context.actor, + event_name: context.eventName, + staged: false, + network_mode: "defaults", + allowed_domains: ["defaults","node","github","playwright"], + firewall_enabled: false, + firewall_version: "", + steps: { + firewall: "" + }, + created_at: new Date().toISOString() }; - EOF_READ_BUFFER - cat > /tmp/gh-aw/safe-inputs/mcp_server_core.cjs << 'EOF_MCP_CORE' - const fs = require("fs"); - const path = require("path"); - const { ReadBuffer } = require("./read_buffer.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const encoder = new TextEncoder(); - function initLogFile(server) { - if (server.logFileInitialized || !server.logDir || !server.logFilePath) return; - try { - if (!fs.existsSync(server.logDir)) { - fs.mkdirSync(server.logDir, { recursive: true }); - } - const timestamp = new Date().toISOString(); - fs.writeFileSync( - server.logFilePath, - `# ${server.serverInfo.name} MCP Server Log\n# Started: ${timestamp}\n# Version: ${server.serverInfo.version}\n\n` - ); - server.logFileInitialized = true; - } catch { - } - } - function createDebugFunction(server) { - return msg => { - const timestamp = new Date().toISOString(); - const formattedMsg = `[${timestamp}] [${server.serverInfo.name}] ${msg}\n`; - process.stderr.write(formattedMsg); - if (server.logDir && server.logFilePath) { - if 
(!server.logFileInitialized) { - initLogFile(server); - } - if (server.logFileInitialized) { - try { - fs.appendFileSync(server.logFilePath, formattedMsg); - } catch { - } - } - } - }; - } - function createDebugErrorFunction(server) { - return (prefix, error) => { - const errorMessage = error instanceof Error ? error.message : String(error); - server.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - server.debug(`${prefix}Stack trace: ${error.stack}`); - } - }; - } - function createWriteMessageFunction(server) { - return obj => { - const json = JSON.stringify(obj); - server.debug(`send: ${json}`); - const message = json + "\n"; - const bytes = encoder.encode(message); - fs.writeSync(1, bytes); - }; - } - function createReplyResultFunction(server) { - return (id, result) => { - if (id === undefined || id === null) return; - const res = { jsonrpc: "2.0", id, result }; - server.writeMessage(res); - }; - } - function createReplyErrorFunction(server) { - return (id, code, message) => { - if (id === undefined || id === null) { - server.debug(`Error for notification: ${message}`); - return; - } - const error = { code, message }; - const res = { - jsonrpc: "2.0", - id, - error, - }; - server.writeMessage(res); - }; - } - function createServer(serverInfo, options = {}) { - const logDir = options.logDir || undefined; - const logFilePath = logDir ? path.join(logDir, "server.log") : undefined; - const server = { - serverInfo, - tools: {}, - debug: () => {}, - debugError: () => {}, - writeMessage: () => {}, - replyResult: () => {}, - replyError: () => {}, - readBuffer: new ReadBuffer(), - logDir, - logFilePath, - logFileInitialized: false, - }; - server.debug = createDebugFunction(server); - server.debugError = createDebugErrorFunction(server); - server.writeMessage = createWriteMessageFunction(server); - server.replyResult = createReplyResultFunction(server); - server.replyError = createReplyErrorFunction(server); - return server; - } - function createWrappedHandler(server, toolName, handlerFn) { - return async args => { - server.debug(` [${toolName}] Invoking handler with args: ${JSON.stringify(args)}`); - try { - const result = await Promise.resolve(handlerFn(args)); - server.debug(` [${toolName}] Handler returned result type: ${typeof result}`); - if (result && typeof result === "object" && Array.isArray(result.content)) { - server.debug(` [${toolName}] Result is already in MCP format`); - return result; - } - let serializedResult; - try { - serializedResult = JSON.stringify(result); - } catch (serializationError) { - server.debugError(` [${toolName}] Serialization error: `, serializationError); - serializedResult = String(result); - } - server.debug(` [${toolName}] Serialized result: ${serializedResult.substring(0, 200)}${serializedResult.length > 200 ? "..." 
: ""}`); - return { - content: [ - { - type: "text", - text: serializedResult, - }, - ], - }; - } catch (error) { - server.debugError(` [${toolName}] Handler threw error: `, error); - throw error; - } - }; - } - function loadToolHandlers(server, tools, basePath) { - server.debug(`Loading tool handlers...`); - server.debug(` Total tools to process: ${tools.length}`); - server.debug(` Base path: ${basePath || "(not specified)"}`); - let loadedCount = 0; - let skippedCount = 0; - let errorCount = 0; - for (const tool of tools) { - const toolName = tool.name || "(unnamed)"; - if (!tool.handler) { - server.debug(` [${toolName}] No handler path specified, skipping handler load`); - skippedCount++; - continue; - } - const handlerPath = tool.handler; - server.debug(` [${toolName}] Handler path specified: ${handlerPath}`); - let resolvedPath = handlerPath; - if (basePath && !path.isAbsolute(handlerPath)) { - resolvedPath = path.resolve(basePath, handlerPath); - server.debug(` [${toolName}] Resolved relative path to: ${resolvedPath}`); - const normalizedBase = path.resolve(basePath); - const normalizedResolved = path.resolve(resolvedPath); - if (!normalizedResolved.startsWith(normalizedBase + path.sep) && normalizedResolved !== normalizedBase) { - server.debug(` [${toolName}] ERROR: Handler path escapes base directory: ${resolvedPath} is not within ${basePath}`); - errorCount++; - continue; - } - } else if (path.isAbsolute(handlerPath)) { - server.debug(` [${toolName}] Using absolute path (bypasses basePath validation): ${handlerPath}`); - } - tool.handlerPath = handlerPath; - try { - server.debug(` [${toolName}] Loading handler from: ${resolvedPath}`); - if (!fs.existsSync(resolvedPath)) { - server.debug(` [${toolName}] ERROR: Handler file does not exist: ${resolvedPath}`); - errorCount++; - continue; - } - const ext = path.extname(resolvedPath).toLowerCase(); - server.debug(` [${toolName}] Handler file extension: ${ext}`); - if (ext === ".sh") { - server.debug(` [${toolName}] Detected shell script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` [${toolName}] Shell script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made shell script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make shell script executable: `, chmodError); - } - } - const { createShellHandler } = require("./mcp_handler_shell.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createShellHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Shell handler created successfully with timeout: ${timeout}s`); - } else if (ext === ".py") { - server.debug(` [${toolName}] Detected Python script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` [${toolName}] Python script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made Python script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make Python script executable: `, chmodError); - } - } - const { createPythonHandler } = require("./mcp_handler_python.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createPythonHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Python handler created successfully with timeout: ${timeout}s`); - } else { - server.debug(` [${toolName}] Loading JavaScript handler 
module`); - const handlerModule = require(resolvedPath); - server.debug(` [${toolName}] Handler module loaded successfully`); - server.debug(` [${toolName}] Module type: ${typeof handlerModule}`); - let handlerFn = handlerModule; - if (handlerModule && typeof handlerModule === "object" && typeof handlerModule.default === "function") { - handlerFn = handlerModule.default; - server.debug(` [${toolName}] Using module.default export`); - } - if (typeof handlerFn !== "function") { - server.debug(` [${toolName}] ERROR: Handler is not a function, got: ${typeof handlerFn}`); - server.debug(` [${toolName}] Module keys: ${Object.keys(handlerModule || {}).join(", ") || "(none)"}`); - errorCount++; - continue; - } - server.debug(` [${toolName}] Handler function validated successfully`); - server.debug(` [${toolName}] Handler function name: ${handlerFn.name || "(anonymous)"}`); - tool.handler = createWrappedHandler(server, toolName, handlerFn); - loadedCount++; - server.debug(` [${toolName}] JavaScript handler loaded and wrapped successfully`); - } - } catch (error) { - server.debugError(` [${toolName}] ERROR loading handler: `, error); - errorCount++; - } - } - server.debug(`Handler loading complete:`); - server.debug(` Loaded: ${loadedCount}`); - server.debug(` Skipped (no handler path): ${skippedCount}`); - server.debug(` Errors: ${errorCount}`); - return tools; - } - function registerTool(server, tool) { - const normalizedName = normalizeTool(tool.name); - server.tools[normalizedName] = { - ...tool, - name: normalizedName, - }; - server.debug(`Registered tool: ${normalizedName}`); - } - function normalizeTool(name) { - return name.replace(/-/g, "_").toLowerCase(); - } - async function handleMessage(server, req, defaultHandler) { - if (!req || typeof req !== "object") { - server.debug(`Invalid message: not an object`); - return; - } - if (req.jsonrpc !== "2.0") { - server.debug(`Invalid message: missing or invalid jsonrpc field`); - return; - } - const { id, method, params } = req; - if (!method || typeof method !== "string") { - server.replyError(id, -32600, "Invalid Request: method must be a string"); - return; - } - try { - if (method === "initialize") { - const clientInfo = params?.clientInfo ?? {}; - server.debug(`client info: ${JSON.stringify(clientInfo)}`); - const protocolVersion = params?.protocolVersion ?? undefined; - const result = { - serverInfo: server.serverInfo, - ...(protocolVersion ? { protocolVersion } : {}), - capabilities: { - tools: {}, - }, - }; - server.replyResult(id, result); - } else if (method === "tools/list") { - const list = []; - Object.values(server.tools).forEach(tool => { - const toolDef = { - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - }; - list.push(toolDef); - }); - server.replyResult(id, { tools: list }); - } else if (method === "tools/call") { - const name = params?.name; - const args = params?.arguments ?? 
{}; - if (!name || typeof name !== "string") { - server.replyError(id, -32602, "Invalid params: 'name' must be a string"); - return; - } - const tool = server.tools[normalizeTool(name)]; - if (!tool) { - server.replyError(id, -32601, `Tool not found: ${name} (${normalizeTool(name)})`); - return; - } - let handler = tool.handler; - if (!handler && defaultHandler) { - handler = defaultHandler(tool.name); - } - if (!handler) { - server.replyError(id, -32603, `No handler for tool: ${name}`); - return; - } - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - server.replyError(id, -32602, `Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - return; - } - server.debug(`Calling handler for tool: ${name}`); - const result = await Promise.resolve(handler(args)); - server.debug(`Handler returned for tool: ${name}`); - const content = result && result.content ? result.content : []; - server.replyResult(id, { content, isError: false }); - } else if (/^notifications\//.test(method)) { - server.debug(`ignore ${method}`); - } else { - server.replyError(id, -32601, `Method not found: ${method}`); - } - } catch (e) { - server.replyError(id, -32603, e instanceof Error ? e.message : String(e)); - } - } - async function processReadBuffer(server, defaultHandler) { - while (true) { - try { - const message = server.readBuffer.readMessage(); - if (!message) { - break; - } - server.debug(`recv: ${JSON.stringify(message)}`); - await handleMessage(server, message, defaultHandler); - } catch (error) { - server.debug(`Parse error: ${error instanceof Error ? error.message : String(error)}`); - } - } - } - function start(server, options = {}) { - const { defaultHandler } = options; - server.debug(`v${server.serverInfo.version} ready on stdio`); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) { - throw new Error("No tools registered"); - } - const onData = async chunk => { - server.readBuffer.append(chunk); - await processReadBuffer(server, defaultHandler); - }; - process.stdin.on("data", onData); - process.stdin.on("error", err => server.debug(`stdin error: ${err}`)); - process.stdin.resume(); - server.debug(`listening...`); - } - module.exports = { - createServer, - registerTool, - normalizeTool, - handleMessage, - processReadBuffer, - start, - loadToolHandlers, - }; - EOF_MCP_CORE - cat > /tmp/gh-aw/safe-inputs/mcp_server.cjs << 'EOF_MCP_SERVER' - class MCPServer { - constructor(serverInfo, options = {}) { - this.serverInfo = serverInfo; - this.capabilities = options.capabilities || { tools: {} }; - this.tools = new Map(); - this.transport = null; - this.initialized = false; - } - tool(name, description, inputSchema, handler) { - this.tools.set(name, { - name, - description, - inputSchema, - handler, - }); - } - async connect(transport) { - this.transport = transport; - transport.setServer(this); - await transport.start(); - } - handleInitialize(params) { - this.initialized = true; - return { - protocolVersion: params.protocolVersion || "2024-11-05", - serverInfo: this.serverInfo, - capabilities: this.capabilities, - }; - } - handleToolsList() { - const tools = Array.from(this.tools.values()).map(tool => ({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - })); - return { tools }; - } - async handleToolsCall(params) { - const tool = this.tools.get(params.name); - if (!tool) { - throw { - code: -32602, - message: `Tool '${params.name}' not found`, - }; - } 
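The stdio transport above frames messages as newline-delimited JSON: `ReadBuffer` splits stdin on `\n`, and `writeMessage` appends `\n` to every response written to stdout. A minimal client-side sketch of the handshake this implies (the tool name and arguments are illustrative; real tools are loaded from `tools.json`, and the server expects its usual environment variables):

```javascript
// Minimal newline-delimited JSON-RPC exchange against a stdio MCP server (sketch).
const { spawn } = require("child_process");
const readline = require("readline");

const child = spawn("node", ["/tmp/gh-aw/safeoutputs/mcp-server.cjs"], {
  stdio: ["pipe", "pipe", "inherit"],
});
const rl = readline.createInterface({ input: child.stdout });
rl.on("line", line => console.log("recv:", JSON.parse(line)));

function send(msg) {
  // One JSON object per line, matching ReadBuffer's "\n"-delimited framing.
  child.stdin.write(JSON.stringify(msg) + "\n");
}

send({ jsonrpc: "2.0", id: 1, method: "initialize", params: { clientInfo: { name: "probe" } } });
send({ jsonrpc: "2.0", id: 2, method: "tools/list" });
// tools/call dispatches to the registered handler after required-field validation;
// "create_issue" is illustrative here and depends on the configured safe outputs.
send({ jsonrpc: "2.0", id: 3, method: "tools/call", params: { name: "create_issue", arguments: { title: "test" } } });
```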
- try { - const result = await tool.handler(params.arguments || {}); - return result; - } catch (error) { - throw { - code: -32603, - message: error instanceof Error ? error.message : String(error), - }; - } - } - handlePing() { - return {}; - } - async handleRequest(request) { - const { id, method, params } = request; - try { - if (!("id" in request)) { - return null; - } - let result; - switch (method) { - case "initialize": - result = this.handleInitialize(params || {}); - break; - case "ping": - result = this.handlePing(); - break; - case "tools/list": - result = this.handleToolsList(); - break; - case "tools/call": - result = await this.handleToolsCall(params || {}); - break; - default: - throw { - code: -32601, - message: `Method '${method}' not found`, - }; - } - return { - jsonrpc: "2.0", - id, - result, - }; - } catch (error) { - return { - jsonrpc: "2.0", - id, - error: { - code: error.code || -32603, - message: error.message || "Internal error", - }, - }; - } - } - } - module.exports = { - MCPServer, - }; - EOF_MCP_SERVER - cat > /tmp/gh-aw/safe-inputs/mcp_http_transport.cjs << 'EOF_MCP_HTTP_TRANSPORT' - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer } = require("./mcp_server.cjs"); - class MCPHTTPTransport { - constructor(options = {}) { - this.sessionIdGenerator = options.sessionIdGenerator; - this.enableJsonResponse = options.enableJsonResponse !== false; - this.enableDnsRebindingProtection = options.enableDnsRebindingProtection || false; - this.server = null; - this.sessionId = null; - this.started = false; - } - setServer(server) { - this.server = server; - } - async start() { - if (this.started) { - throw new Error("Transport already started"); - } - this.started = true; - } - async handleRequest(req, res, parsedBody) { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept, Mcp-Session-Id"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = parsedBody; - if (!body) { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); - } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? 
JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; - } - } - if (!body) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Empty request body", - }, - id: null, - }) - ); - return; - } - if (!body.jsonrpc || body.jsonrpc !== "2.0") { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: jsonrpc must be '2.0'", - }, - id: body.id || null, - }) - ); - return; - } - if (this.sessionIdGenerator) { - if (body.method === "initialize") { - this.sessionId = this.sessionIdGenerator(); - } else { - const requestSessionId = req.headers["mcp-session-id"]; - if (!requestSessionId) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Missing Mcp-Session-Id header", - }, - id: body.id || null, - }) - ); - return; - } - if (requestSessionId !== this.sessionId) { - res.writeHead(404, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32001, - message: "Session not found", - }, - id: body.id || null, - }) - ); - return; - } - } - } - const response = await this.server.handleRequest(body); - if (response === null) { - res.writeHead(204); - res.end(); - return; - } - const headers = { "Content-Type": "application/json" }; - if (this.sessionId) { - headers["mcp-session-id"] = this.sessionId; - } - res.writeHead(200, headers); - res.end(JSON.stringify(response)); - } catch (error) { - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? error.message : String(error), - }, - id: null, - }) - ); - } - } - } - } - module.exports = { - MCPServer, - MCPHTTPTransport, - }; - EOF_MCP_HTTP_TRANSPORT - cat > /tmp/gh-aw/safe-inputs/mcp_logger.cjs << 'EOF_MCP_LOGGER' - function createLogger(serverName) { - const logger = { - debug: msg => { - const timestamp = new Date().toISOString(); - process.stderr.write(`[${timestamp}] [${serverName}] ${msg}\n`); - }, - debugError: (prefix, error) => { - const errorMessage = error instanceof Error ? 
error.message : String(error); - logger.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - logger.debug(`${prefix}Stack trace: ${error.stack}`); - } - }, - }; - return logger; - } - module.exports = { - createLogger, - }; - EOF_MCP_LOGGER - cat > /tmp/gh-aw/safe-inputs/mcp_handler_shell.cjs << 'EOF_HANDLER_SHELL' - const fs = require("fs"); - const path = require("path"); - const { execFile } = require("child_process"); - const os = require("os"); - function createShellHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking shell handler: ${scriptPath}`); - server.debug(` [${toolName}] Shell handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const env = { ...process.env }; - for (const [key, value] of Object.entries(args || {})) { - const envKey = `INPUT_${key.toUpperCase().replace(/-/g, "_")}`; - env[envKey] = String(value); - server.debug(` [${toolName}] Set env: ${envKey}=${String(value).substring(0, 100)}${String(value).length > 100 ? "..." : ""}`); - } - const outputFile = path.join(os.tmpdir(), `mcp-shell-output-${Date.now()}-${Math.random().toString(36).substring(2)}.txt`); - env.GITHUB_OUTPUT = outputFile; - server.debug(` [${toolName}] Output file: ${outputFile}`); - fs.writeFileSync(outputFile, ""); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing shell script...`); - execFile( - scriptPath, - [], - { - env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Shell script error: `, error); - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - reject(error); - return; - } - const outputs = {}; - try { - if (fs.existsSync(outputFile)) { - const outputContent = fs.readFileSync(outputFile, "utf-8"); - server.debug( - ` [${toolName}] Output file content: ${outputContent.substring(0, 500)}${outputContent.length > 500 ? "..." : ""}` - ); - const lines = outputContent.split("\n"); - for (const line of lines) { - const trimmed = line.trim(); - if (trimmed && trimmed.includes("=")) { - const eqIndex = trimmed.indexOf("="); - const key = trimmed.substring(0, eqIndex); - const value = trimmed.substring(eqIndex + 1); - outputs[key] = value; - server.debug(` [${toolName}] Parsed output: ${key}=${value.substring(0, 100)}${value.length > 100 ? "..." 
: ""}`); - } - } - } - } catch (readError) { - server.debugError(` [${toolName}] Error reading output file: `, readError); - } - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - const result = { - stdout: stdout || "", - stderr: stderr || "", - outputs, - }; - server.debug(` [${toolName}] Shell handler completed, outputs: ${Object.keys(outputs).join(", ") || "(none)"}`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - }); - }; - } - module.exports = { - createShellHandler, - }; - EOF_HANDLER_SHELL - cat > /tmp/gh-aw/safe-inputs/mcp_handler_python.cjs << 'EOF_HANDLER_PYTHON' - const { execFile } = require("child_process"); - function createPythonHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking Python handler: ${scriptPath}`); - server.debug(` [${toolName}] Python handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const inputJson = JSON.stringify(args || {}); - server.debug( - ` [${toolName}] Input JSON (${inputJson.length} bytes): ${inputJson.substring(0, 200)}${inputJson.length > 200 ? "..." : ""}` - ); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing Python script...`); - const child = execFile( - "python3", - [scriptPath], - { - env: process.env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Python script error: `, error); - reject(error); - return; - } - let result; - try { - if (stdout && stdout.trim()) { - result = JSON.parse(stdout.trim()); - } else { - result = { stdout: stdout || "", stderr: stderr || "" }; - } - } catch (parseError) { - server.debug(` [${toolName}] Output is not JSON, returning as text`); - result = { stdout: stdout || "", stderr: stderr || "" }; - } - server.debug(` [${toolName}] Python handler completed successfully`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - if (child.stdin) { - child.stdin.write(inputJson); - child.stdin.end(); - } - }); - }; - } - module.exports = { - createPythonHandler, - }; - EOF_HANDLER_PYTHON - cat > /tmp/gh-aw/safe-inputs/safe_inputs_config_loader.cjs << 'EOF_CONFIG_LOADER' - const fs = require("fs"); - function loadConfig(configPath) { - if (!fs.existsSync(configPath)) { - throw new Error(`Configuration file not found: ${configPath}`); - } - const configContent = fs.readFileSync(configPath, "utf-8"); - const config = JSON.parse(configContent); - if (!config.tools || !Array.isArray(config.tools)) { - throw new Error("Configuration must contain a 'tools' array"); - } - return config; - } - module.exports = { - loadConfig, - }; - EOF_CONFIG_LOADER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_tool_factory.cjs << 'EOF_TOOL_FACTORY' - function createToolConfig(name, description, inputSchema, handlerPath) { - return { - name, - description, - inputSchema, - handler: handlerPath, - }; - } - module.exports = { - createToolConfig, - }; - EOF_TOOL_FACTORY - cat > /tmp/gh-aw/safe-inputs/safe_inputs_validation.cjs << 'EOF_VALIDATION' - function validateRequiredFields(args, 
inputSchema) { - const requiredFields = inputSchema && Array.isArray(inputSchema.required) ? inputSchema.required : []; - if (!requiredFields.length) { - return []; - } - const missing = requiredFields.filter(f => { - const value = args[f]; - return value === undefined || value === null || (typeof value === "string" && value.trim() === ""); - }); - return missing; - } - module.exports = { - validateRequiredFields, - }; - EOF_VALIDATION - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server.cjs << 'EOF_SAFE_INPUTS_SERVER' - const path = require("path"); - const { createServer, registerTool, loadToolHandlers, start } = require("./mcp_server_core.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { createToolConfig } = require("./safe_inputs_tool_factory.cjs"); - function startSafeInputsServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const logDir = options.logDir || config.logDir || undefined; - const server = createServer({ name: serverName, version }, { logDir }); - server.debug(`Loading safe-inputs configuration from: ${configPath}`); - server.debug(`Base path for handlers: ${basePath}`); - server.debug(`Tools to load: ${config.tools.length}`); - const tools = loadToolHandlers(server, config.tools, basePath); - for (const tool of tools) { - registerTool(server, tool); - } - try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - server.debug(`Deleted configuration file: ${configPath}`); - } - } catch (error) { - server.debugError(`Warning: Could not delete configuration file: `, error); - } - start(server); - } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server.cjs <config-path> [--log-dir <dir>]"); - process.exit(1); - } - const configPath = args[0]; - const options = {}; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; - } - } - try { - startSafeInputsServer(configPath, options); - } catch (error) { - console.error(`Error starting safe-inputs server: ${error instanceof Error ?
error.message : String(error)}`); - process.exit(1); - } - } - module.exports = { - startSafeInputsServer, - loadConfig, - createToolConfig, - }; - EOF_SAFE_INPUTS_SERVER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server_http.cjs << 'EOF_SAFE_INPUTS_SERVER_HTTP' - const path = require("path"); - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer, MCPHTTPTransport } = require("./mcp_http_transport.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { loadToolHandlers } = require("./mcp_server_core.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const { createLogger } = require("./mcp_logger.cjs"); - function createMCPServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const server = new MCPServer( - { - name: serverName, - version: version, - }, - { - capabilities: { - tools: {}, - }, - } - ); - const logger = createLogger(serverName); - logger.debug(`=== Creating MCP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Loading safe-inputs configuration from: ${configPath}`); - logger.debug(`Base path for handlers: ${basePath}`); - logger.debug(`Server name: ${serverName}`); - logger.debug(`Server version: ${version}`); - logger.debug(`Tools to load: ${config.tools.length}`); - const tempServer = { debug: logger.debug, debugError: logger.debugError }; - const tools = loadToolHandlers(tempServer, config.tools, basePath); - logger.debug(`Registering tools with MCP server...`); - let registeredCount = 0; - let skippedCount = 0; - for (const tool of tools) { - if (!tool.handler) { - logger.debug(`Skipping tool ${tool.name} - no handler loaded`); - skippedCount++; - continue; - } - logger.debug(`Registering tool: ${tool.name}`); - server.tool(tool.name, tool.description || "", tool.inputSchema || { type: "object", properties: {} }, async args => { - logger.debug(`Calling handler for tool: ${tool.name}`); - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - throw new Error(`Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - } - const result = await Promise.resolve(tool.handler(args)); - logger.debug(`Handler returned for tool: ${tool.name}`); - const content = result && result.content ? result.content : []; - return { content, isError: false }; - }); - registeredCount++; - } - logger.debug(`Tool registration complete: ${registeredCount} registered, ${skippedCount} skipped`); - logger.debug(`=== MCP Server Creation Complete ===`); - try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - logger.debug(`Deleted configuration file: ${configPath}`); - } - } catch (error) { - logger.debugError(`Warning: Could not delete configuration file: `, error); - } - return { server, config, logger }; - } - async function startHttpServer(configPath, options = {}) { - const port = options.port || 3000; - const stateless = options.stateless || false; - const logger = createLogger("safe-inputs-startup"); - logger.debug(`=== Starting Safe Inputs MCP HTTP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Port: ${port}`); - logger.debug(`Mode: ${stateless ? 
"stateless" : "stateful"}`); - logger.debug(`Environment: NODE_VERSION=${process.version}, PLATFORM=${process.platform}`); - try { - const { server, config, logger: mcpLogger } = createMCPServer(configPath, { logDir: options.logDir }); - Object.assign(logger, mcpLogger); - logger.debug(`MCP server created successfully`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools configured: ${config.tools.length}`); - logger.debug(`Creating HTTP transport...`); - const transport = new MCPHTTPTransport({ - sessionIdGenerator: stateless ? undefined : () => randomUUID(), - enableJsonResponse: true, - enableDnsRebindingProtection: false, - }); - logger.debug(`HTTP transport created`); - logger.debug(`Connecting server to transport...`); - await server.connect(transport); - logger.debug(`Server connected to transport successfully`); - logger.debug(`Creating HTTP server...`); - const httpServer = http.createServer(async (req, res) => { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method === "GET" && req.url === "/health") { - res.writeHead(200, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - status: "ok", - server: config.serverName || "safeinputs", - version: config.version || "1.0.0", - tools: config.tools.length, - }) - ); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = null; - if (req.method === "POST") { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); - } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; - } - } - await transport.handleRequest(req, res, body); - } catch (error) { - logger.debugError("Error handling request: ", error); - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? error.message : String(error), - }, - id: null, - }) - ); - } - } - }); - logger.debug(`Attempting to bind to port ${port}...`); - httpServer.listen(port, () => { - logger.debug(`=== Safe Inputs MCP HTTP Server Started Successfully ===`); - logger.debug(`HTTP server listening on http://localhost:${port}`); - logger.debug(`MCP endpoint: POST http://localhost:${port}/`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools available: ${config.tools.length}`); - logger.debug(`Server is ready to accept requests`); - }); - httpServer.on("error", error => { - if (error.code === "EADDRINUSE") { - logger.debugError(`ERROR: Port ${port} is already in use. `, error); - } else if (error.code === "EACCES") { - logger.debugError(`ERROR: Permission denied to bind to port ${port}. 
`, error); - } else { - logger.debugError(`ERROR: Failed to start HTTP server: `, error); - } - process.exit(1); - }); - process.on("SIGINT", () => { - logger.debug("Received SIGINT, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - process.on("SIGTERM", () => { - logger.debug("Received SIGTERM, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - return httpServer; - } catch (error) { - const errorLogger = createLogger("safe-inputs-startup-error"); - errorLogger.debug(`=== FATAL ERROR: Failed to start Safe Inputs MCP HTTP Server ===`); - errorLogger.debug(`Error type: ${error.constructor.name}`); - errorLogger.debug(`Error message: ${error.message}`); - if (error.stack) { - errorLogger.debug(`Stack trace:\n${error.stack}`); - } - if (error.code) { - errorLogger.debug(`Error code: ${error.code}`); - } - errorLogger.debug(`Configuration file: ${configPath}`); - errorLogger.debug(`Port: ${port}`); - throw error; - } - } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server_http.cjs <config-path> [--port <port>] [--stateless] [--log-dir <dir>]"); - process.exit(1); - } - const configPath = args[0]; - const options = { - port: 3000, - stateless: false, - logDir: undefined, - }; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--port" && args[i + 1]) { - options.port = parseInt(args[i + 1], 10); - i++; - } else if (args[i] === "--stateless") { - options.stateless = true; - } else if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; - } - } - startHttpServer(configPath, options).catch(error => { - console.error(`Error starting HTTP server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); - }); - } - module.exports = { - startHttpServer, - createMCPServer, - }; - EOF_SAFE_INPUTS_SERVER_HTTP - cat > /tmp/gh-aw/safe-inputs/tools.json << 'EOF_TOOLS_JSON' - { - "serverName": "safeinputs", - "version": "1.0.0", - "logDir": "/tmp/gh-aw/safe-inputs/logs", - "tools": [ - { - "name": "gh", - "description": "Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", - "inputSchema": { - "properties": { - "args": { - "description": "Arguments to pass to gh CLI (without the 'gh' prefix).
Examples: 'pr list --limit 5', 'issue view 123', 'api repos/{owner}/{repo}'", - "type": "string" - } - }, - "required": [ - "args" - ], - "type": "object" - }, - "handler": "gh.sh", - "env": { - "GH_AW_GH_TOKEN": "GH_AW_GH_TOKEN" - }, - "timeout": 60 - } - ] - } - EOF_TOOLS_JSON - cat > /tmp/gh-aw/safe-inputs/mcp-server.cjs << 'EOFSI' - const path = require("path"); - const { startHttpServer } = require("./safe_inputs_mcp_server_http.cjs"); - const configPath = path.join(__dirname, "tools.json"); - const port = parseInt(process.env.GH_AW_SAFE_INPUTS_PORT || "3000", 10); - const apiKey = process.env.GH_AW_SAFE_INPUTS_API_KEY || ""; - startHttpServer(configPath, { - port: port, - stateless: false, - logDir: "/tmp/gh-aw/safe-inputs/logs" - }).catch(error => { - console.error("Failed to start safe-inputs HTTP server:", error); - process.exit(1); - }); - EOFSI - chmod +x /tmp/gh-aw/safe-inputs/mcp-server.cjs - - - name: Setup Safe Inputs Tool Files - run: | - cat > /tmp/gh-aw/safe-inputs/gh.sh << 'EOFSH_gh' - #!/bin/bash - # Auto-generated safe-input tool: gh - # Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh <args>. Use single quotes ' for complex args to avoid shell interpretation issues. - - set -euo pipefail - - # INPUT_ARGS is injected by the shell handler (mcp_handler_shell.cjs), which maps each tool argument onto an INPUT_<NAME> environment variable - GH_TOKEN=$GH_AW_GH_TOKEN gh $INPUT_ARGS - - EOFSH_gh - chmod +x /tmp/gh-aw/safe-inputs/gh.sh - - - name: Generate Safe Inputs MCP Server Config - id: safe-inputs-config - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 - with: - script: | - function generateSafeInputsConfig({ core, crypto }) { - const apiKeyBuffer = crypto.randomBytes(32); - const apiKey = apiKeyBuffer.toString("base64").replace(/[/+=]/g, ""); - const port = 3000; - core.setOutput("safe_inputs_api_key", apiKey); - core.setOutput("safe_inputs_port", port.toString()); - core.info(`Safe Inputs MCP server will run on port ${port}`); - return { apiKey, port }; - } - - // Execute the function - const crypto = require('crypto'); - generateSafeInputsConfig({ core, crypto }); - - - name: Start Safe Inputs MCP HTTP Server - id: safe-inputs-start - run: | - # Set environment variables for the server - export GH_AW_SAFE_INPUTS_PORT=${{ steps.safe-inputs-config.outputs.safe_inputs_port }} - export GH_AW_SAFE_INPUTS_API_KEY=${{ steps.safe-inputs-config.outputs.safe_inputs_api_key }} - - export GH_AW_GH_TOKEN="${GH_AW_GH_TOKEN}" - - cd /tmp/gh-aw/safe-inputs - # Verify required files exist - echo "Verifying safe-inputs setup..." - if [ ! -f mcp-server.cjs ]; then - echo "ERROR: mcp-server.cjs not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - if [ ! -f tools.json ]; then - echo "ERROR: tools.json not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - echo "Configuration files verified" - # Log environment configuration - echo "Server configuration:" - echo " Port: $GH_AW_SAFE_INPUTS_PORT" - echo " API Key: ${GH_AW_SAFE_INPUTS_API_KEY:0:8}..."
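# Once started below, the safe-inputs server speaks plain MCP JSON-RPC over HTTP.
# A hand-run probe might look like this (endpoints as defined in
# safe_inputs_mcp_server_http.cjs above; the exact payloads are illustrative):
#   curl -s "http://localhost:$GH_AW_SAFE_INPUTS_PORT/health"
#   curl -si -X POST "http://localhost:$GH_AW_SAFE_INPUTS_PORT/" \
#     -H 'Content-Type: application/json' \
#     -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05"}}'
# The initialize response returns an mcp-session-id header that stateful follow-up
# calls (tools/list, or tools/call for the 'gh' tool) must send back as Mcp-Session-Id.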
- echo " Working directory: $(pwd)" - # Ensure logs directory exists - mkdir -p /tmp/gh-aw/safe-inputs/logs - # Create initial server.log file for artifact upload - echo "Safe Inputs MCP Server Log" > /tmp/gh-aw/safe-inputs/logs/server.log - echo "Start time: $(date)" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "===========================================" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "" >> /tmp/gh-aw/safe-inputs/logs/server.log - # Start the HTTP server in the background - echo "Starting safe-inputs MCP HTTP server..." - node mcp-server.cjs >> /tmp/gh-aw/safe-inputs/logs/server.log 2>&1 & - SERVER_PID=$! - echo "Started safe-inputs MCP server with PID $SERVER_PID" - # Wait for server to be ready (max 10 seconds) - echo "Waiting for server to become ready..." - for i in {1..10}; do - # Check if process is still running - if ! kill -0 $SERVER_PID 2>/dev/null; then - echo "ERROR: Server process $SERVER_PID has died" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - exit 1 - fi - # Check if server is responding - if curl -s -f http://localhost:$GH_AW_SAFE_INPUTS_PORT/health > /dev/null 2>&1; then - echo "Safe Inputs MCP server is ready (attempt $i/10)" - break - fi - if [ $i -eq 10 ]; then - echo "ERROR: Safe Inputs MCP server failed to start after 10 seconds" - echo "Process status: $(ps aux | grep '[m]cp-server.cjs' || echo 'not running')" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - echo "Checking port availability:" - netstat -tuln | grep $GH_AW_SAFE_INPUTS_PORT || echo "Port $GH_AW_SAFE_INPUTS_PORT not listening" - exit 1 - fi - echo "Waiting for server... (attempt $i/10)" - sleep 1 - done - # Output the configuration for the MCP client - echo "port=$GH_AW_SAFE_INPUTS_PORT" >> $GITHUB_OUTPUT - echo "api_key=$GH_AW_SAFE_INPUTS_API_KEY" >> $GITHUB_OUTPUT - - - name: Setup MCPs - env: - GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} - GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_SAFE_INPUTS_PORT: ${{ steps.safe-inputs-start.outputs.port }} - GH_AW_SAFE_INPUTS_API_KEY: ${{ steps.safe-inputs-start.outputs.api_key }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - mkdir -p /tmp/gh-aw/mcp-config - mkdir -p /home/runner/.copilot - cat > /home/runner/.copilot/mcp-config.json << EOF - { - "mcpServers": { - "github": { - "type": "local", - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "-e", - "GITHUB_READ_ONLY=1", - "-e", - "GITHUB_TOOLSETS=context,repos,issues,pull_requests", - "ghcr.io/github/github-mcp-server:v0.24.0" - ], - "tools": ["*"], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "\${GITHUB_MCP_SERVER_TOKEN}" - } - }, - "playwright": { - "type": "local", - "command": "docker", - "args": ["run", "-i", "--rm", "--init", "mcr.microsoft.com/playwright/mcp", "--output-dir", "/tmp/gh-aw/mcp-logs/playwright", "--allowed-hosts", "localhost;localhost:*;127.0.0.1;127.0.0.1:*;github.com"], - "tools": ["*"] - }, - "safeinputs": { - "type": "http", - "url": "http://localhost:\${GH_AW_SAFE_INPUTS_PORT}", - "headers": { - "Authorization": "Bearer \${GH_AW_SAFE_INPUTS_API_KEY}" - }, - "tools": ["*"], - "env": { - "GH_AW_SAFE_INPUTS_PORT": "\${GH_AW_SAFE_INPUTS_PORT}", - "GH_AW_SAFE_INPUTS_API_KEY": "\${GH_AW_SAFE_INPUTS_API_KEY}", - "GH_AW_GH_TOKEN": "\${GH_AW_GH_TOKEN}" - } - }, - "safeoutputs": { - "type": "local", - "command": "node", - "args": 
["/tmp/gh-aw/safeoutputs/mcp-server.cjs"], - "tools": ["*"], - "env": { - "GH_AW_SAFE_OUTPUTS": "\${GH_AW_SAFE_OUTPUTS}", - "GH_AW_ASSETS_BRANCH": "\${GH_AW_ASSETS_BRANCH}", - "GH_AW_ASSETS_MAX_SIZE_KB": "\${GH_AW_ASSETS_MAX_SIZE_KB}", - "GH_AW_ASSETS_ALLOWED_EXTS": "\${GH_AW_ASSETS_ALLOWED_EXTS}", - "GITHUB_REPOSITORY": "\${GITHUB_REPOSITORY}", - "GITHUB_SERVER_URL": "\${GITHUB_SERVER_URL}", - "GITHUB_SHA": "\${GITHUB_SHA}", - "GITHUB_WORKSPACE": "\${GITHUB_WORKSPACE}", - "DEFAULT_BRANCH": "\${DEFAULT_BRANCH}" - } - }, - "serena": { - "type": "local", - "command": "uvx", - "args": ["--from", "git+https://github.com/oraios/serena", "serena", "start-mcp-server", "--context", "codex", "--project", "${{ github.workspace }}"], - "tools": ["*"] - } - } - } - EOF - echo "-------START MCP CONFIG-----------" - cat /home/runner/.copilot/mcp-config.json - echo "-------END MCP CONFIG-----------" - echo "-------/home/runner/.copilot-----------" - find /home/runner/.copilot - echo "HOME: $HOME" - echo "GITHUB_COPILOT_CLI_MODE: $GITHUB_COPILOT_CLI_MODE" - - name: Generate agentic run info - id: generate_aw_info - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 - with: - script: | - const fs = require('fs'); - - const awInfo = { - engine_id: "copilot", - engine_name: "GitHub Copilot CLI", - model: process.env.GH_AW_MODEL_AGENT_COPILOT || "", - version: "", - agent_version: "0.0.367", - workflow_name: "Smoke Copilot No Firewall", - experimental: false, - supports_tools_allowlist: true, - supports_http_transport: true, - run_id: context.runId, - run_number: context.runNumber, - run_attempt: process.env.GITHUB_RUN_ATTEMPT, - repository: context.repo.owner + '/' + context.repo.repo, - ref: context.ref, - sha: context.sha, - actor: context.actor, - event_name: context.eventName, - staged: false, - network_mode: "defaults", - allowed_domains: ["defaults","node","github","playwright"], - firewall_enabled: false, - firewall_version: "", - steps: { - firewall: "" - }, - created_at: new Date().toISOString() - }; - - // Write to /tmp/gh-aw directory to avoid inclusion in PR - const tmpPath = '/tmp/gh-aw/aw_info.json'; - fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); - console.log('Generated aw_info.json at:', tmpPath); - console.log(JSON.stringify(awInfo, null, 2)); - - // Set model as output for reuse in other steps/jobs - core.setOutput('model', awInfo.model); - - name: Generate workflow overview - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 - with: - script: | - const fs = require('fs'); - const awInfoPath = '/tmp/gh-aw/aw_info.json'; - - // Load aw_info.json - const awInfo = JSON.parse(fs.readFileSync(awInfoPath, 'utf8')); - - let networkDetails = ''; - if (awInfo.allowed_domains && awInfo.allowed_domains.length > 0) { - networkDetails = awInfo.allowed_domains.slice(0, 10).map(d => ` - ${d}`).join('\n'); - if (awInfo.allowed_domains.length > 10) { - networkDetails += `\n - ... 
and ${awInfo.allowed_domains.length - 10} more`; + + // Write to /tmp/gh-aw directory to avoid inclusion in PR + const tmpPath = '/tmp/gh-aw/aw_info.json'; + fs.writeFileSync(tmpPath, JSON.stringify(awInfo, null, 2)); + console.log('Generated aw_info.json at:', tmpPath); + console.log(JSON.stringify(awInfo, null, 2)); + + // Set model as output for reuse in other steps/jobs + core.setOutput('model', awInfo.model); + - name: Generate workflow overview + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 + with: + script: | + const fs = require('fs'); + const awInfoPath = '/tmp/gh-aw/aw_info.json'; + + // Load aw_info.json + const awInfo = JSON.parse(fs.readFileSync(awInfoPath, 'utf8')); + + let networkDetails = ''; + if (awInfo.allowed_domains && awInfo.allowed_domains.length > 0) { + networkDetails = awInfo.allowed_domains.slice(0, 10).map(d => ` - ${d}`).join('\n'); + if (awInfo.allowed_domains.length > 10) { + networkDetails += `\n - ... and ${awInfo.allowed_domains.length - 10} more`; } } @@ -4875,8 +3522,6 @@ jobs: PROMPT_DIR="$(dirname "$GH_AW_PROMPT")" mkdir -p "$PROMPT_DIR" cat << 'PROMPT_EOF' | envsubst > "$GH_AW_PROMPT" - - # Smoke Test: Copilot Engine Validation (No Firewall) **IMPORTANT: Keep all outputs extremely short and concise. Use single-line responses where possible. No verbose explanations.** @@ -4887,7 +3532,8 @@ jobs: 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-copilot-${GH_AW_GITHUB_RUN_ID}.txt` with content "Smoke test passed for Copilot at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" - 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues + 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${GH_AW_GITHUB_RUN_ID}.txt` with content "Cache memory test for run ${GH_AW_GITHUB_RUN_ID}" and verify it was created successfully + 6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -4962,6 +3608,31 @@ jobs: Do NOT attempt to edit files outside these directories as you do not have the necessary permissions. + PROMPT_EOF + - name: Append cache memory instructions to prompt + env: + GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt + run: | + cat << 'PROMPT_EOF' | envsubst >> "$GH_AW_PROMPT" + + --- + + ## Cache Folder Available + + You have access to a persistent cache folder at `/tmp/gh-aw/cache-memory/` where you can read and write files to create memories and store information. 
+ + - **Read/Write Access**: You can freely read from and write to any files in this folder + - **Persistence**: Files in this folder persist across workflow runs via GitHub Actions cache + - **Last Write Wins**: If multiple processes write to the same file, the last write will be preserved + - **File Share**: Use this as a simple file share - organize files as you see fit + + Examples of what you can store: + - `/tmp/gh-aw/cache-memory/notes.txt` - general notes and observations + - `/tmp/gh-aw/cache-memory/preferences.json` - user preferences and settings + - `/tmp/gh-aw/cache-memory/history.log` - activity history and logs + - `/tmp/gh-aw/cache-memory/state/` - organized state files in subdirectories + + Feel free to create, read, update, and organize files in this folder as needed for your tasks. PROMPT_EOF - name: Append safe outputs instructions to prompt env: @@ -5135,12 +3806,12 @@ jobs: mkdir -p /tmp/ mkdir -p /tmp/gh-aw/ mkdir -p /tmp/gh-aw/agent/ + mkdir -p /tmp/gh-aw/cache-memory/ mkdir -p /tmp/gh-aw/sandbox/agent/logs/ - copilot --add-dir /tmp/ --add-dir /tmp/gh-aw/ --add-dir /tmp/gh-aw/agent/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --allow-all-tools --allow-all-paths --prompt "$COPILOT_CLI_INSTRUCTION"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"} 2>&1 | tee /tmp/gh-aw/agent-stdio.log + copilot --add-dir /tmp/ --add-dir /tmp/gh-aw/ --add-dir /tmp/gh-aw/agent/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --disable-builtin-mcps --allow-all-tools --add-dir /tmp/gh-aw/cache-memory/ --allow-all-paths --prompt "$COPILOT_CLI_INSTRUCTION"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"} 2>&1 | tee /tmp/gh-aw/agent-stdio.log env: COPILOT_AGENT_RUNNER_TYPE: STANDALONE COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN || secrets.COPILOT_CLI_TOKEN }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_AW_MCP_CONFIG: /home/runner/.copilot/mcp-config.json GH_AW_MODEL_AGENT_COPILOT: ${{ vars.GH_AW_MODEL_AGENT_COPILOT || '' }} GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt @@ -6208,14 +4879,7 @@ jobs: uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 with: name: mcp-logs - path: /tmp/gh-aw/mcp-logs/ - if-no-files-found: ignore - - name: Upload SafeInputs logs - if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 - with: - name: safeinputs - path: /tmp/gh-aw/safe-inputs/logs/ + path: /tmp/gh-aw/mcp-logs/ if-no-files-found: ignore - name: Parse agent logs for step summary if: always() @@ -7733,6 +6397,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory - update_pull_request if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim @@ -9144,6 +7809,26 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + update_pull_request: needs: - agent @@ -9184,8 +7869,432 @@ jobs: with: github-token: ${{ 
secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} script: | - const { runUpdateWorkflow, createRenderStagedItem, createGetSummaryLine } = require("./update_runner.cjs"); - const { updatePRBody } = require("./update_pr_description_helpers.cjs"); + const fs = require("fs"); + const MAX_LOG_CONTENT_LENGTH = 10000; + function truncateForLogging(content) { + if (content.length <= MAX_LOG_CONTENT_LENGTH) { + return content; + } + return content.substring(0, MAX_LOG_CONTENT_LENGTH) + `\n... (truncated, total length: ${content.length})`; + } + function loadAgentOutput() { + const agentOutputFile = process.env.GH_AW_AGENT_OUTPUT; + if (!agentOutputFile) { + core.info("No GH_AW_AGENT_OUTPUT environment variable found"); + return { success: false }; + } + let outputContent; + try { + outputContent = fs.readFileSync(agentOutputFile, "utf8"); + } catch (error) { + const errorMessage = `Error reading agent output file: ${error instanceof Error ? error.message : String(error)}`; + core.error(errorMessage); + return { success: false, error: errorMessage }; + } + if (outputContent.trim() === "") { + core.info("Agent output content is empty"); + return { success: false }; + } + core.info(`Agent output content length: ${outputContent.length}`); + let validatedOutput; + try { + validatedOutput = JSON.parse(outputContent); + } catch (error) { + const errorMessage = `Error parsing agent output JSON: ${error instanceof Error ? error.message : String(error)}`; + core.error(errorMessage); + core.info(`Failed to parse content:\n${truncateForLogging(outputContent)}`); + return { success: false, error: errorMessage }; + } + if (!validatedOutput.items || !Array.isArray(validatedOutput.items)) { + core.info("No valid items found in agent output"); + core.info(`Parsed content: ${truncateForLogging(JSON.stringify(validatedOutput))}`); + return { success: false }; + } + return { success: true, items: validatedOutput.items }; + } + async function generateStagedPreview(options) { + const { title, description, items, renderItem } = options; + let summaryContent = `## 🎭 Staged Mode: ${title} Preview\n\n`; + summaryContent += `${description}\n\n`; + for (let i = 0; i < items.length; i++) { + const item = items[i]; + summaryContent += renderItem(item, i); + summaryContent += "---\n\n"; + } + try { + await core.summary.addRaw(summaryContent).write(); + core.info(summaryContent); + core.info(`πŸ“ ${title} preview written to step summary`); + } catch (error) { + core.setFailed(error instanceof Error ? 
error : String(error)); + } + } + function resolveTargetNumber(params) { + const { updateTarget, item, numberField, isValidContext, contextNumber, displayName } = params; + if (updateTarget === "*") { + const explicitNumber = item[numberField]; + if (explicitNumber) { + const parsed = parseInt(explicitNumber, 10); + if (isNaN(parsed) || parsed <= 0) { + return { success: false, error: `Invalid ${numberField} specified: ${explicitNumber}` }; + } + return { success: true, number: parsed }; + } else { + return { success: false, error: `Target is "*" but no ${numberField} specified in update item` }; + } + } else if (updateTarget && updateTarget !== "triggering") { + const parsed = parseInt(updateTarget, 10); + if (isNaN(parsed) || parsed <= 0) { + return { success: false, error: `Invalid ${displayName} number in target configuration: ${updateTarget}` }; + } + return { success: true, number: parsed }; + } else { + if (isValidContext && contextNumber) { + return { success: true, number: contextNumber }; + } + return { success: false, error: `Could not determine ${displayName} number` }; + } + } + function buildUpdateData(params) { + const { item, canUpdateStatus, canUpdateTitle, canUpdateBody, supportsStatus } = params; + const updateData = {}; + let hasUpdates = false; + const logMessages = []; + if (supportsStatus && canUpdateStatus && item.status !== undefined) { + if (item.status === "open" || item.status === "closed") { + updateData.state = item.status; + hasUpdates = true; + logMessages.push(`Will update status to: ${item.status}`); + } else { + logMessages.push(`Invalid status value: ${item.status}. Must be 'open' or 'closed'`); + } + } + if (canUpdateTitle && item.title !== undefined) { + const trimmedTitle = typeof item.title === "string" ? item.title.trim() : ""; + if (trimmedTitle.length > 0) { + updateData.title = trimmedTitle; + hasUpdates = true; + logMessages.push(`Will update title to: ${trimmedTitle}`); + } else { + logMessages.push("Invalid title value: must be a non-empty string"); + } + } + if (canUpdateBody && item.body !== undefined) { + if (typeof item.body === "string") { + updateData.body = item.body; + hasUpdates = true; + logMessages.push(`Will update body (length: ${item.body.length})`); + } else { + logMessages.push("Invalid body value: must be a string"); + } + } + return { hasUpdates, updateData, logMessages }; + } + async function runUpdateWorkflow(config) { + const { + itemType, + displayName, + displayNamePlural, + numberField, + outputNumberKey, + outputUrlKey, + isValidContext, + getContextNumber, + supportsStatus, + supportsOperation, + renderStagedItem, + executeUpdate, + getSummaryLine, + } = config; + const isStaged = process.env.GH_AW_SAFE_OUTPUTS_STAGED === "true"; + const result = loadAgentOutput(); + if (!result.success) { + return; + } + const updateItems = result.items.filter( item => item.type === itemType); + if (updateItems.length === 0) { + core.info(`No ${itemType} items found in agent output`); + return; + } + core.info(`Found ${updateItems.length} ${itemType} item(s)`); + if (isStaged) { + await generateStagedPreview({ + title: `Update ${displayNamePlural.charAt(0).toUpperCase() + displayNamePlural.slice(1)}`, + description: `The following ${displayName} updates would be applied if staged mode was disabled:`, + items: updateItems, + renderItem: renderStagedItem, + }); + return; + } + const updateTarget = process.env.GH_AW_UPDATE_TARGET || "triggering"; + const canUpdateStatus = process.env.GH_AW_UPDATE_STATUS === "true"; + const 
canUpdateTitle = process.env.GH_AW_UPDATE_TITLE === "true"; + const canUpdateBody = process.env.GH_AW_UPDATE_BODY === "true"; + core.info(`Update target configuration: ${updateTarget}`); + if (supportsStatus) { + core.info(`Can update status: ${canUpdateStatus}, title: ${canUpdateTitle}, body: ${canUpdateBody}`); + } else { + core.info(`Can update title: ${canUpdateTitle}, body: ${canUpdateBody}`); + } + const contextIsValid = isValidContext(context.eventName, context.payload); + const contextNumber = getContextNumber(context.payload); + if (updateTarget === "triggering" && !contextIsValid) { + core.info(`Target is "triggering" but not running in ${displayName} context, skipping ${displayName} update`); + return; + } + const updatedItems = []; + for (let i = 0; i < updateItems.length; i++) { + const updateItem = updateItems[i]; + core.info(`Processing ${itemType} item ${i + 1}/${updateItems.length}`); + const targetResult = resolveTargetNumber({ + updateTarget, + item: updateItem, + numberField, + isValidContext: contextIsValid, + contextNumber, + displayName, + }); + if (!targetResult.success) { + core.info(targetResult.error); + continue; + } + const targetNumber = targetResult.number; + core.info(`Updating ${displayName} #${targetNumber}`); + const { hasUpdates, updateData, logMessages } = buildUpdateData({ + item: updateItem, + canUpdateStatus, + canUpdateTitle, + canUpdateBody, + supportsStatus, + }); + for (const msg of logMessages) { + core.info(msg); + } + if (supportsOperation && canUpdateBody && updateItem.body !== undefined && typeof updateItem.body === "string") { + updateData._operation = updateItem.operation || "append"; + updateData._rawBody = updateItem.body; + } + if (!hasUpdates) { + core.info("No valid updates to apply for this item"); + continue; + } + try { + const updatedItem = await executeUpdate(github, context, targetNumber, updateData); + core.info(`Updated ${displayName} #${updatedItem.number}: ${updatedItem.html_url}`); + updatedItems.push(updatedItem); + if (i === updateItems.length - 1) { + core.setOutput(outputNumberKey, updatedItem.number); + core.setOutput(outputUrlKey, updatedItem.html_url); + } + } catch (error) { + core.error(`βœ— Failed to update ${displayName} #${targetNumber}: ${error instanceof Error ? 
error.message : String(error)}`); + throw error; + } + } + if (updatedItems.length > 0) { + let summaryContent = `\n\n## Updated ${displayNamePlural.charAt(0).toUpperCase() + displayNamePlural.slice(1)}\n`; + for (const item of updatedItems) { + summaryContent += getSummaryLine(item); + } + await core.summary.addRaw(summaryContent).write(); + } + core.info(`Successfully updated ${updatedItems.length} ${displayName}(s)`); + return updatedItems; + } + function createRenderStagedItem(config) { + const { entityName, numberField, targetLabel, currentTargetText, includeOperation = false } = config; + return function renderStagedItem(item, index) { + let content = `### ${entityName} Update ${index + 1}\n`; + if (item[numberField]) { + content += `**${targetLabel}** #${item[numberField]}\n\n`; + } else { + content += `**Target:** ${currentTargetText}\n\n`; + } + if (item.title !== undefined) { + content += `**New Title:** ${item.title}\n\n`; + } + if (item.body !== undefined) { + if (includeOperation) { + const operation = item.operation || "append"; + content += `**Operation:** ${operation}\n`; + content += `**Body Content:**\n${item.body}\n\n`; + } else { + content += `**New Body:**\n${item.body}\n\n`; + } + } + if (item.status !== undefined) { + content += `**New Status:** ${item.status}\n\n`; + } + return content; + }; + } + function createGetSummaryLine(config) { + const { entityPrefix } = config; + return function getSummaryLine(item) { + return `- ${entityPrefix} #${item.number}: [${item.title}](${item.html_url})\n`; + }; + } + function getMessages() { + const messagesEnv = process.env.GH_AW_SAFE_OUTPUT_MESSAGES; + if (!messagesEnv) { + return null; + } + try { + return JSON.parse(messagesEnv); + } catch (error) { + core.warning(`Failed to parse GH_AW_SAFE_OUTPUT_MESSAGES: ${error instanceof Error ? error.message : String(error)}`); + return null; + } + } + function renderTemplate(template, context) { + return template.replace(/\{(\w+)\}/g, (match, key) => { + const value = context[key]; + return value !== undefined && value !== null ? String(value) : match; + }); + } + function toSnakeCase(obj) { + const result = {}; + for (const [key, value] of Object.entries(obj)) { + const snakeKey = key.replace(/([A-Z])/g, "_$1").toLowerCase(); + result[snakeKey] = value; + result[key] = value; + } + return result; + } + function getFooterMessage(ctx) { + const messages = getMessages(); + const templateContext = toSnakeCase(ctx); + const defaultFooter = "> πŸ΄β€β˜ οΈ Ahoy! This treasure was crafted by [{workflow_name}]({run_url})"; + let footer = messages?.footer ? renderTemplate(messages.footer, templateContext) : renderTemplate(defaultFooter, templateContext); + if (ctx.triggeringNumber) { + footer += ` fer issue #{triggering_number} πŸ—ΊοΈ`.replace("{triggering_number}", String(ctx.triggeringNumber)); + } + return footer; + } + function getFooterInstallMessage(ctx) { + if (!ctx.workflowSource || !ctx.workflowSourceUrl) { + return ""; + } + const messages = getMessages(); + const templateContext = toSnakeCase(ctx); + const defaultInstall = + "> 🦜 Arr! To plunder this workflow fer yer own ship, run `gh aw add {workflow_source}`. Chart yer course at [{workflow_source_url}]({workflow_source_url})!"; + return messages?.footerInstall + ? 
renderTemplate(messages.footerInstall, templateContext) + : renderTemplate(defaultInstall, templateContext); + } + function generateXMLMarker(workflowName, runUrl) { + const engineId = process.env.GH_AW_ENGINE_ID || ""; + const engineVersion = process.env.GH_AW_ENGINE_VERSION || ""; + const engineModel = process.env.GH_AW_ENGINE_MODEL || ""; + const trackerId = process.env.GH_AW_TRACKER_ID || ""; + const parts = []; + parts.push(`agentic-workflow: ${workflowName}`); + if (trackerId) { + parts.push(`tracker-id: ${trackerId}`); + } + if (engineId) { + parts.push(`engine: ${engineId}`); + } + if (engineVersion) { + parts.push(`version: ${engineVersion}`); + } + if (engineModel) { + parts.push(`model: ${engineModel}`); + } + parts.push(`run: ${runUrl}`); + return `<!-- ${parts.join(", ")} -->`; + } + function generateFooterWithMessages( + workflowName, + runUrl, + workflowSource, + workflowSourceURL, + triggeringIssueNumber, + triggeringPRNumber, + triggeringDiscussionNumber + ) { + let triggeringNumber; + if (triggeringIssueNumber) { + triggeringNumber = triggeringIssueNumber; + } else if (triggeringPRNumber) { + triggeringNumber = triggeringPRNumber; + } else if (triggeringDiscussionNumber) { + triggeringNumber = `discussion #${triggeringDiscussionNumber}`; + } + const ctx = { + workflowName, + runUrl, + workflowSource, + workflowSourceUrl: workflowSourceURL, + triggeringNumber, + }; + let footer = "\n\n" + getFooterMessage(ctx); + const installMessage = getFooterInstallMessage(ctx); + if (installMessage) { + footer += "\n>\n" + installMessage; + } + footer += "\n\n" + generateXMLMarker(workflowName, runUrl); + footer += "\n"; + return footer; + } + function buildAIFooter(workflowName, runUrl) { + return "\n\n" + getFooterMessage({ workflowName, runUrl }); + } + // Run-scoped HTML comment markers delimit the island this run owns inside the PR body + function buildIslandStartMarker(runId) { + return `<!-- gh-aw-island-start: ${runId} -->`; + } + function buildIslandEndMarker(runId) { + return `<!-- gh-aw-island-end: ${runId} -->`; + } + function findIsland(body, runId) { + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const startIndex = body.indexOf(startMarker); + if (startIndex === -1) { + return { found: false, startIndex: -1, endIndex: -1 }; + } + const endIndex = body.indexOf(endMarker, startIndex); + if (endIndex === -1) { + return { found: false, startIndex: -1, endIndex: -1 }; + } + return { found: true, startIndex, endIndex: endIndex + endMarker.length }; + } + function updatePRBody(params) { + const { currentBody, newContent, operation, workflowName, runUrl, runId } = params; + const aiFooter = buildAIFooter(workflowName, runUrl); + if (operation === "replace") { + core.info("Operation: replace (full body replacement)"); + return newContent; + } + if (operation === "replace-island") { + const island = findIsland(currentBody, runId); + if (island.found) { + core.info(`Operation: replace-island (updating existing island for run ${runId})`); + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const islandContent = `${startMarker}\n${newContent}${aiFooter}\n${endMarker}`; + const before = currentBody.substring(0, island.startIndex); + const after = currentBody.substring(island.endIndex); + return before + islandContent + after; + } else { + core.info(`Operation: replace-island (island not found for run ${runId}, falling back to append)`); + const startMarker = buildIslandStartMarker(runId); + const endMarker = buildIslandEndMarker(runId); + const islandContent = `${startMarker}\n${newContent}${aiFooter}\n${endMarker}`; + const appendSection =
`\n\n---\n\n${islandContent}`; + return currentBody + appendSection; + } + } + if (operation === "prepend") { + core.info("Operation: prepend (add to start with separator)"); + const prependSection = `${newContent}${aiFooter}\n\n---\n\n`; + return prependSection + currentBody; + } + core.info("Operation: append (add to end with separator)"); + const appendSection = `\n\n---\n\n${newContent}${aiFooter}`; + return currentBody + appendSection; + } function isPRContext(eventName, payload) { const isPR = eventName === "pull_request" || diff --git a/.github/workflows/smoke-copilot-no-firewall.md b/.github/workflows/smoke-copilot-no-firewall.md index b84a9d0ebf..77470962e0 100644 --- a/.github/workflows/smoke-copilot-no-firewall.md +++ b/.github/workflows/smoke-copilot-no-firewall.md @@ -14,8 +14,6 @@ permissions: issues: read name: Smoke Copilot No Firewall engine: copilot -imports: - - shared/gh.md network: allowed: - defaults @@ -24,6 +22,7 @@ network: - playwright firewall: false tools: + cache-memory: true edit: bash: - "*" @@ -57,7 +56,8 @@ strict: false 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-copilot-${{ github.run_id }}.txt` with content "Smoke test passed for Copilot at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" -5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output diff --git a/.github/workflows/smoke-copilot-playwright.lock.yml b/.github/workflows/smoke-copilot-playwright.lock.yml index 9fa2b3bccf..433f23c37d 100644 --- a/.github/workflows/smoke-copilot-playwright.lock.yml +++ b/.github/workflows/smoke-copilot-playwright.lock.yml @@ -55,6 +55,7 @@ # firewall: # log-level: debug # Enable debug-level firewall logs # tools: +# cache-memory: true # edit: # bash: # - "*" @@ -146,6 +147,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -155,13 +157,16 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> add_comment # create_issue --> conclusion # detection --> add_comment # detection --> add_labels # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -172,7 +177,8 @@ # # ## Test Requirements # -# **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" +# 1. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" +# 2. 
**Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully # # **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues # @@ -186,6 +192,10 @@ # ``` # # Pinned GitHub Actions: +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd # - actions/download-artifact@v6 (018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) @@ -1921,6 +1931,26 @@ jobs: - name: Pre-flight Playwright MCP Test run: "echo \"πŸ§ͺ Testing Playwright MCP Docker container startup...\"\n\n# Pull the Playwright MCP Docker image\necho \"Pulling Playwright MCP Docker image...\"\ndocker pull mcr.microsoft.com/playwright/mcp\n\n# Test container startup with a simple healthcheck\necho \"Testing container startup...\"\ntimeout 30 docker run --rm -i mcr.microsoft.com/playwright/mcp --help || {\n echo \"❌ Playwright MCP container failed to start\"\n exit 1\n}\n\necho \"βœ… Playwright MCP container pre-flight check passed\"\n" + # Cache memory file share configuration from frontmatter processed below + - name: Create cache-memory directory + run: | + mkdir -p /tmp/gh-aw/cache-memory + echo "Cache memory directory created at /tmp/gh-aw/cache-memory" + echo "This folder provides persistent file storage across workflow runs" + echo "LLMs and agentic tools can freely read and write files in this directory" + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + restore-keys: | + memory-${{ github.workflow }}- + memory- + - name: Upload cache-memory data as artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory - name: Configure Git credentials env: REPO_NAME: ${{ github.repository }} @@ -4868,6 +4898,7 @@ jobs: env: GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} + GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} run: | PROMPT_DIR="$(dirname "$GH_AW_PROMPT")" mkdir -p "$PROMPT_DIR" @@ -4880,7 +4911,8 @@ jobs: ## Test Requirements - **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" + 1. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" + 2. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${GH_AW_GITHUB_RUN_ID}.txt` with content "Cache memory test for run ${GH_AW_GITHUB_RUN_ID}" and verify it was created successfully **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues @@ -4951,6 +4983,31 @@ jobs: Do NOT attempt to edit files outside these directories as you do not have the necessary permissions. 
+ PROMPT_EOF + - name: Append cache memory instructions to prompt + env: + GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt + run: | + cat << 'PROMPT_EOF' | envsubst >> "$GH_AW_PROMPT" + + --- + + ## Cache Folder Available + + You have access to a persistent cache folder at `/tmp/gh-aw/cache-memory/` where you can read and write files to create memories and store information. + + - **Read/Write Access**: You can freely read from and write to any files in this folder + - **Persistence**: Files in this folder persist across workflow runs via GitHub Actions cache + - **Last Write Wins**: If multiple processes write to the same file, the last write will be preserved + - **File Share**: Use this as a simple file share - organize files as you see fit + + Examples of what you can store: + - `/tmp/gh-aw/cache-memory/notes.txt` - general notes and observations + - `/tmp/gh-aw/cache-memory/preferences.json` - user preferences and settings + - `/tmp/gh-aw/cache-memory/history.log` - activity history and logs + - `/tmp/gh-aw/cache-memory/state/` - organized state files in subdirectories + + Feel free to create, read, update, and organize files in this folder as needed for your tasks. PROMPT_EOF - name: Append safe outputs instructions to prompt env: @@ -5014,6 +5071,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 env: GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt + GH_AW_GITHUB_RUN_ID: ${{ github.run_id }} with: script: | const fs = require("fs"); @@ -5119,7 +5177,7 @@ jobs: run: | set -o pipefail sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --mount /tmp:/tmp:rw --mount "${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE}:rw" --mount /usr/bin/date:/usr/bin/date:ro --mount /usr/bin/gh:/usr/bin/gh:ro --mount /usr/bin/yq:/usr/bin/yq:ro --allow-domains '*.githubusercontent.com,accounts.google.com,android.clients.google.com,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.npms.io,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,bun.sh,cdn.playwright.dev,clients2.google.com,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,deb.nodesource.com,deno.land,get.pnpm.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,nodejs.org,npm.pkg.github.com,npmjs.com,npmjs.org,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.bower.io,registry.npmjs.com,registry.npmjs.org,registry.yarnpkg.com,repo.yarnpkg.com,s.symcb.com,s.symcd.com,security.ubuntu.com,skimdb.npmjs.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.google.com,www.npmjs.com,www.npmjs.org,yarnpkg.com' --log-level debug --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs \ - -- npx -y @github/copilot@0.0.367 --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --allow-all-paths --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"} \ + -- npx -y @github/copilot@0.0.367 --add-dir 
/tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --add-dir /tmp/gh-aw/cache-memory/ --allow-all-paths --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"} \ 2>&1 | tee /tmp/gh-aw/agent-stdio.log env: COPILOT_AGENT_RUNNER_TYPE: STANDALONE @@ -8030,6 +8088,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -9441,3 +9500,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/smoke-copilot-playwright.md b/.github/workflows/smoke-copilot-playwright.md index e0b5dd8ec7..057f1b2ca6 100644 --- a/.github/workflows/smoke-copilot-playwright.md +++ b/.github/workflows/smoke-copilot-playwright.md @@ -32,6 +32,7 @@ network: firewall: log-level: debug # Enable debug-level firewall logs tools: + cache-memory: true edit: bash: - "*" @@ -114,7 +115,8 @@ post-steps: ## Test Requirements -**Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" +1. **Playwright MCP Testing**: Use playwright to navigate to https://github.com and verify the page title contains "GitHub" +2. 
**Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues diff --git a/.github/workflows/smoke-copilot.lock.yml b/.github/workflows/smoke-copilot.lock.yml index 53909708b6..bb6de0091f 100644 --- a/.github/workflows/smoke-copilot.lock.yml +++ b/.github/workflows/smoke-copilot.lock.yml @@ -37,8 +37,6 @@ # issues: read # name: Smoke Copilot # engine: copilot -# imports: -# - shared/gh.md # network: # allowed: # - defaults @@ -47,6 +45,7 @@ # firewall: # log-level: debug # Enable debug-level firewall logs # tools: +# cache-memory: true # edit: # bash: # - "*" @@ -65,10 +64,6 @@ # strict: true # ``` # -# Resolved workflow manifest: -# Imports: -# - shared/gh.md -# # Job Dependency Graph: # ```mermaid # graph LR @@ -80,6 +75,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -89,13 +85,16 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> add_comment # create_issue --> conclusion # detection --> add_comment # detection --> add_labels # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -110,7 +109,8 @@ # 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-copilot-${{ github.run_id }}.txt` with content "Smoke test passed for Copilot at $(date)" (create the directory if it doesn't exist) # 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) # 4. **GitHub MCP Default Toolset Testing**: Verify that the `get_me` tool is NOT available with default toolsets. Try to use it and confirm it fails with a tool not found error. -# 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +# 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +# 6. 
**Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues # # ## Output # @@ -123,6 +123,10 @@ # ``` # # Pinned GitHub Actions: +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd # - actions/download-artifact@v6 (018cc2cf5baa6db3ef3c5f8a56943fffe632ef53) @@ -1837,6 +1841,26 @@ jobs: mkdir -p /tmp/gh-aw/agent mkdir -p /tmp/gh-aw/sandbox/agent/logs echo "Created /tmp/gh-aw/agent directory for agentic workflow temporary files" + # Cache memory file share configuration from frontmatter processed below + - name: Create cache-memory directory + run: | + mkdir -p /tmp/gh-aw/cache-memory + echo "Cache memory directory created at /tmp/gh-aw/cache-memory" + echo "This folder provides persistent file storage across workflow runs" + echo "LLMs and agentic tools can freely read and write files in this directory" + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + restore-keys: | + memory-${{ github.workflow }}- + memory- + - name: Upload cache-memory data as artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory - name: Configure Git credentials env: REPO_NAME: ${{ github.repository }} @@ -3120,1504 +3144,140 @@ jobs: }), }, ], - }; - }; - return { - defaultHandler, - uploadAssetHandler, - createPullRequestHandler, - pushToPullRequestBranchHandler, - }; - } - function loadTools(server) { - const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; - let ALL_TOOLS = []; - server.debug(`Reading tools from file: ${toolsPath}`); - try { - if (fs.existsSync(toolsPath)) { - server.debug(`Tools file exists at: ${toolsPath}`); - const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); - server.debug(`Tools file content length: ${toolsFileContent.length} characters`); - server.debug(`Tools file read successfully, attempting to parse JSON`); - ALL_TOOLS = JSON.parse(toolsFileContent); - server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); - } else { - server.debug(`Tools file does not exist at: ${toolsPath}`); - server.debug(`Using empty tools array`); - ALL_TOOLS = []; - } - } catch (error) { - server.debug(`Error reading tools file: ${error instanceof Error ? 
error.message : String(error)}`); - server.debug(`Falling back to empty tools array`); - ALL_TOOLS = []; - } - return ALL_TOOLS; - } - function attachHandlers(tools, handlers) { - tools.forEach(tool => { - if (tool.name === "create_pull_request") { - tool.handler = handlers.createPullRequestHandler; - } else if (tool.name === "push_to_pull_request_branch") { - tool.handler = handlers.pushToPullRequestBranchHandler; - } else if (tool.name === "upload_asset") { - tool.handler = handlers.uploadAssetHandler; - } - }); - return tools; - } - function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { - tools.forEach(tool => { - if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { - registerTool(server, tool); - } - }); - } - function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { - Object.keys(config).forEach(configKey => { - const normalizedKey = normalizeTool(configKey); - if (server.tools[normalizedKey]) { - return; - } - if (!tools.find(t => t.name === normalizedKey)) { - const jobConfig = config[configKey]; - const dynamicTool = { - name: normalizedKey, - description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, - inputSchema: { - type: "object", - properties: {}, - additionalProperties: true, - }, - handler: args => { - const entry = { - type: normalizedKey, - ...args, - }; - const entryJSON = JSON.stringify(entry); - fs.appendFileSync(outputFile, entryJSON + "\n"); - const outputText = - jobConfig && jobConfig.output - ? jobConfig.output - : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; - return { - content: [ - { - type: "text", - text: JSON.stringify({ result: outputText }), - }, - ], - }; - }, - }; - if (jobConfig && jobConfig.inputs) { - dynamicTool.inputSchema.properties = {}; - dynamicTool.inputSchema.required = []; - Object.keys(jobConfig.inputs).forEach(inputName => { - const inputDef = jobConfig.inputs[inputName]; - const propSchema = { - type: inputDef.type || "string", - description: inputDef.description || `Input parameter: ${inputName}`, - }; - if (inputDef.options && Array.isArray(inputDef.options)) { - propSchema.enum = inputDef.options; - } - dynamicTool.inputSchema.properties[inputName] = propSchema; - if (inputDef.required) { - dynamicTool.inputSchema.required.push(inputName); - } - }); - } - registerTool(server, dynamicTool); - } - }); - } - const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; - const MCP_LOG_DIR = process.env.GH_AW_MCP_LOG_DIR; - const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); - const { config: safeOutputsConfig, outputFile } = loadConfig(server); - const appendSafeOutput = createAppendFunction(outputFile); - const handlers = createHandlers(server, appendSafeOutput); - const { defaultHandler } = handlers; - let ALL_TOOLS = loadTools(server); - ALL_TOOLS = attachHandlers(ALL_TOOLS, handlers); - server.debug(` output file: ${outputFile}`); - server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); - registerPredefinedTools(server, ALL_TOOLS, safeOutputsConfig, registerTool, normalizeTool); - registerDynamicTools(server, ALL_TOOLS, safeOutputsConfig, outputFile, registerTool, normalizeTool); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); - start(server, { defaultHandler }); - EOF - chmod +x 
/tmp/gh-aw/safeoutputs/mcp-server.cjs - - - name: Setup Safe Inputs JavaScript and Config - run: | - mkdir -p /tmp/gh-aw/safe-inputs/logs - cat > /tmp/gh-aw/safe-inputs/read_buffer.cjs << 'EOF_READ_BUFFER' - class ReadBuffer { - constructor() { - this._buffer = null; - } - append(chunk) { - this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; - } - readMessage() { - if (!this._buffer) { - return null; - } - const index = this._buffer.indexOf("\n"); - if (index === -1) { - return null; - } - const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, ""); - this._buffer = this._buffer.subarray(index + 1); - if (line.trim() === "") { - return this.readMessage(); - } - try { - return JSON.parse(line); - } catch (error) { - throw new Error(`Parse error: ${error instanceof Error ? error.message : String(error)}`); - } - } - } - module.exports = { - ReadBuffer, - }; - EOF_READ_BUFFER - cat > /tmp/gh-aw/safe-inputs/mcp_server_core.cjs << 'EOF_MCP_CORE' - const fs = require("fs"); - const path = require("path"); - const { ReadBuffer } = require("./read_buffer.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const encoder = new TextEncoder(); - function initLogFile(server) { - if (server.logFileInitialized || !server.logDir || !server.logFilePath) return; - try { - if (!fs.existsSync(server.logDir)) { - fs.mkdirSync(server.logDir, { recursive: true }); - } - const timestamp = new Date().toISOString(); - fs.writeFileSync( - server.logFilePath, - `# ${server.serverInfo.name} MCP Server Log\n# Started: ${timestamp}\n# Version: ${server.serverInfo.version}\n\n` - ); - server.logFileInitialized = true; - } catch { - } - } - function createDebugFunction(server) { - return msg => { - const timestamp = new Date().toISOString(); - const formattedMsg = `[${timestamp}] [${server.serverInfo.name}] ${msg}\n`; - process.stderr.write(formattedMsg); - if (server.logDir && server.logFilePath) { - if (!server.logFileInitialized) { - initLogFile(server); - } - if (server.logFileInitialized) { - try { - fs.appendFileSync(server.logFilePath, formattedMsg); - } catch { - } - } - } - }; - } - function createDebugErrorFunction(server) { - return (prefix, error) => { - const errorMessage = error instanceof Error ? error.message : String(error); - server.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - server.debug(`${prefix}Stack trace: ${error.stack}`); - } - }; - } - function createWriteMessageFunction(server) { - return obj => { - const json = JSON.stringify(obj); - server.debug(`send: ${json}`); - const message = json + "\n"; - const bytes = encoder.encode(message); - fs.writeSync(1, bytes); - }; - } - function createReplyResultFunction(server) { - return (id, result) => { - if (id === undefined || id === null) return; - const res = { jsonrpc: "2.0", id, result }; - server.writeMessage(res); - }; - } - function createReplyErrorFunction(server) { - return (id, code, message) => { - if (id === undefined || id === null) { - server.debug(`Error for notification: ${message}`); - return; - } - const error = { code, message }; - const res = { - jsonrpc: "2.0", - id, - error, - }; - server.writeMessage(res); - }; - } - function createServer(serverInfo, options = {}) { - const logDir = options.logDir || undefined; - const logFilePath = logDir ? 
path.join(logDir, "server.log") : undefined; - const server = { - serverInfo, - tools: {}, - debug: () => {}, - debugError: () => {}, - writeMessage: () => {}, - replyResult: () => {}, - replyError: () => {}, - readBuffer: new ReadBuffer(), - logDir, - logFilePath, - logFileInitialized: false, - }; - server.debug = createDebugFunction(server); - server.debugError = createDebugErrorFunction(server); - server.writeMessage = createWriteMessageFunction(server); - server.replyResult = createReplyResultFunction(server); - server.replyError = createReplyErrorFunction(server); - return server; - } - function createWrappedHandler(server, toolName, handlerFn) { - return async args => { - server.debug(` [${toolName}] Invoking handler with args: ${JSON.stringify(args)}`); - try { - const result = await Promise.resolve(handlerFn(args)); - server.debug(` [${toolName}] Handler returned result type: ${typeof result}`); - if (result && typeof result === "object" && Array.isArray(result.content)) { - server.debug(` [${toolName}] Result is already in MCP format`); - return result; - } - let serializedResult; - try { - serializedResult = JSON.stringify(result); - } catch (serializationError) { - server.debugError(` [${toolName}] Serialization error: `, serializationError); - serializedResult = String(result); - } - server.debug(` [${toolName}] Serialized result: ${serializedResult.substring(0, 200)}${serializedResult.length > 200 ? "..." : ""}`); - return { - content: [ - { - type: "text", - text: serializedResult, - }, - ], - }; - } catch (error) { - server.debugError(` [${toolName}] Handler threw error: `, error); - throw error; - } - }; - } - function loadToolHandlers(server, tools, basePath) { - server.debug(`Loading tool handlers...`); - server.debug(` Total tools to process: ${tools.length}`); - server.debug(` Base path: ${basePath || "(not specified)"}`); - let loadedCount = 0; - let skippedCount = 0; - let errorCount = 0; - for (const tool of tools) { - const toolName = tool.name || "(unnamed)"; - if (!tool.handler) { - server.debug(` [${toolName}] No handler path specified, skipping handler load`); - skippedCount++; - continue; - } - const handlerPath = tool.handler; - server.debug(` [${toolName}] Handler path specified: ${handlerPath}`); - let resolvedPath = handlerPath; - if (basePath && !path.isAbsolute(handlerPath)) { - resolvedPath = path.resolve(basePath, handlerPath); - server.debug(` [${toolName}] Resolved relative path to: ${resolvedPath}`); - const normalizedBase = path.resolve(basePath); - const normalizedResolved = path.resolve(resolvedPath); - if (!normalizedResolved.startsWith(normalizedBase + path.sep) && normalizedResolved !== normalizedBase) { - server.debug(` [${toolName}] ERROR: Handler path escapes base directory: ${resolvedPath} is not within ${basePath}`); - errorCount++; - continue; - } - } else if (path.isAbsolute(handlerPath)) { - server.debug(` [${toolName}] Using absolute path (bypasses basePath validation): ${handlerPath}`); - } - tool.handlerPath = handlerPath; - try { - server.debug(` [${toolName}] Loading handler from: ${resolvedPath}`); - if (!fs.existsSync(resolvedPath)) { - server.debug(` [${toolName}] ERROR: Handler file does not exist: ${resolvedPath}`); - errorCount++; - continue; - } - const ext = path.extname(resolvedPath).toLowerCase(); - server.debug(` [${toolName}] Handler file extension: ${ext}`); - if (ext === ".sh") { - server.debug(` [${toolName}] Detected shell script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` 
[${toolName}] Shell script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made shell script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make shell script executable: `, chmodError); - } - } - const { createShellHandler } = require("./mcp_handler_shell.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createShellHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Shell handler created successfully with timeout: ${timeout}s`); - } else if (ext === ".py") { - server.debug(` [${toolName}] Detected Python script handler`); - try { - fs.accessSync(resolvedPath, fs.constants.X_OK); - server.debug(` [${toolName}] Python script is executable`); - } catch { - try { - fs.chmodSync(resolvedPath, 0o755); - server.debug(` [${toolName}] Made Python script executable`); - } catch (chmodError) { - server.debugError(` [${toolName}] Warning: Could not make Python script executable: `, chmodError); - } - } - const { createPythonHandler } = require("./mcp_handler_python.cjs"); - const timeout = tool.timeout || 60; - tool.handler = createPythonHandler(server, toolName, resolvedPath, timeout); - loadedCount++; - server.debug(` [${toolName}] Python handler created successfully with timeout: ${timeout}s`); - } else { - server.debug(` [${toolName}] Loading JavaScript handler module`); - const handlerModule = require(resolvedPath); - server.debug(` [${toolName}] Handler module loaded successfully`); - server.debug(` [${toolName}] Module type: ${typeof handlerModule}`); - let handlerFn = handlerModule; - if (handlerModule && typeof handlerModule === "object" && typeof handlerModule.default === "function") { - handlerFn = handlerModule.default; - server.debug(` [${toolName}] Using module.default export`); - } - if (typeof handlerFn !== "function") { - server.debug(` [${toolName}] ERROR: Handler is not a function, got: ${typeof handlerFn}`); - server.debug(` [${toolName}] Module keys: ${Object.keys(handlerModule || {}).join(", ") || "(none)"}`); - errorCount++; - continue; - } - server.debug(` [${toolName}] Handler function validated successfully`); - server.debug(` [${toolName}] Handler function name: ${handlerFn.name || "(anonymous)"}`); - tool.handler = createWrappedHandler(server, toolName, handlerFn); - loadedCount++; - server.debug(` [${toolName}] JavaScript handler loaded and wrapped successfully`); - } - } catch (error) { - server.debugError(` [${toolName}] ERROR loading handler: `, error); - errorCount++; - } - } - server.debug(`Handler loading complete:`); - server.debug(` Loaded: ${loadedCount}`); - server.debug(` Skipped (no handler path): ${skippedCount}`); - server.debug(` Errors: ${errorCount}`); - return tools; - } - function registerTool(server, tool) { - const normalizedName = normalizeTool(tool.name); - server.tools[normalizedName] = { - ...tool, - name: normalizedName, - }; - server.debug(`Registered tool: ${normalizedName}`); - } - function normalizeTool(name) { - return name.replace(/-/g, "_").toLowerCase(); - } - async function handleMessage(server, req, defaultHandler) { - if (!req || typeof req !== "object") { - server.debug(`Invalid message: not an object`); - return; - } - if (req.jsonrpc !== "2.0") { - server.debug(`Invalid message: missing or invalid jsonrpc field`); - return; - } - const { id, method, params } = req; - if (!method || typeof method !== "string") { - server.replyError(id, -32600, "Invalid Request: method must be 
a string"); - return; - } - try { - if (method === "initialize") { - const clientInfo = params?.clientInfo ?? {}; - server.debug(`client info: ${JSON.stringify(clientInfo)}`); - const protocolVersion = params?.protocolVersion ?? undefined; - const result = { - serverInfo: server.serverInfo, - ...(protocolVersion ? { protocolVersion } : {}), - capabilities: { - tools: {}, - }, - }; - server.replyResult(id, result); - } else if (method === "tools/list") { - const list = []; - Object.values(server.tools).forEach(tool => { - const toolDef = { - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - }; - list.push(toolDef); - }); - server.replyResult(id, { tools: list }); - } else if (method === "tools/call") { - const name = params?.name; - const args = params?.arguments ?? {}; - if (!name || typeof name !== "string") { - server.replyError(id, -32602, "Invalid params: 'name' must be a string"); - return; - } - const tool = server.tools[normalizeTool(name)]; - if (!tool) { - server.replyError(id, -32601, `Tool not found: ${name} (${normalizeTool(name)})`); - return; - } - let handler = tool.handler; - if (!handler && defaultHandler) { - handler = defaultHandler(tool.name); - } - if (!handler) { - server.replyError(id, -32603, `No handler for tool: ${name}`); - return; - } - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - server.replyError(id, -32602, `Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - return; - } - server.debug(`Calling handler for tool: ${name}`); - const result = await Promise.resolve(handler(args)); - server.debug(`Handler returned for tool: ${name}`); - const content = result && result.content ? result.content : []; - server.replyResult(id, { content, isError: false }); - } else if (/^notifications\//.test(method)) { - server.debug(`ignore ${method}`); - } else { - server.replyError(id, -32601, `Method not found: ${method}`); - } - } catch (e) { - server.replyError(id, -32603, e instanceof Error ? e.message : String(e)); - } - } - async function processReadBuffer(server, defaultHandler) { - while (true) { - try { - const message = server.readBuffer.readMessage(); - if (!message) { - break; - } - server.debug(`recv: ${JSON.stringify(message)}`); - await handleMessage(server, message, defaultHandler); - } catch (error) { - server.debug(`Parse error: ${error instanceof Error ? 
error.message : String(error)}`); - } - } - } - function start(server, options = {}) { - const { defaultHandler } = options; - server.debug(`v${server.serverInfo.version} ready on stdio`); - server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); - if (!Object.keys(server.tools).length) { - throw new Error("No tools registered"); - } - const onData = async chunk => { - server.readBuffer.append(chunk); - await processReadBuffer(server, defaultHandler); - }; - process.stdin.on("data", onData); - process.stdin.on("error", err => server.debug(`stdin error: ${err}`)); - process.stdin.resume(); - server.debug(`listening...`); - } - module.exports = { - createServer, - registerTool, - normalizeTool, - handleMessage, - processReadBuffer, - start, - loadToolHandlers, - }; - EOF_MCP_CORE - cat > /tmp/gh-aw/safe-inputs/mcp_server.cjs << 'EOF_MCP_SERVER' - class MCPServer { - constructor(serverInfo, options = {}) { - this.serverInfo = serverInfo; - this.capabilities = options.capabilities || { tools: {} }; - this.tools = new Map(); - this.transport = null; - this.initialized = false; - } - tool(name, description, inputSchema, handler) { - this.tools.set(name, { - name, - description, - inputSchema, - handler, - }); - } - async connect(transport) { - this.transport = transport; - transport.setServer(this); - await transport.start(); - } - handleInitialize(params) { - this.initialized = true; - return { - protocolVersion: params.protocolVersion || "2024-11-05", - serverInfo: this.serverInfo, - capabilities: this.capabilities, - }; - } - handleToolsList() { - const tools = Array.from(this.tools.values()).map(tool => ({ - name: tool.name, - description: tool.description, - inputSchema: tool.inputSchema, - })); - return { tools }; - } - async handleToolsCall(params) { - const tool = this.tools.get(params.name); - if (!tool) { - throw { - code: -32602, - message: `Tool '${params.name}' not found`, - }; - } - try { - const result = await tool.handler(params.arguments || {}); - return result; - } catch (error) { - throw { - code: -32603, - message: error instanceof Error ? 
error.message : String(error), - }; - } - } - handlePing() { - return {}; - } - async handleRequest(request) { - const { id, method, params } = request; - try { - if (!("id" in request)) { - return null; - } - let result; - switch (method) { - case "initialize": - result = this.handleInitialize(params || {}); - break; - case "ping": - result = this.handlePing(); - break; - case "tools/list": - result = this.handleToolsList(); - break; - case "tools/call": - result = await this.handleToolsCall(params || {}); - break; - default: - throw { - code: -32601, - message: `Method '${method}' not found`, - }; - } - return { - jsonrpc: "2.0", - id, - result, - }; - } catch (error) { - return { - jsonrpc: "2.0", - id, - error: { - code: error.code || -32603, - message: error.message || "Internal error", - }, - }; - } - } - } - module.exports = { - MCPServer, - }; - EOF_MCP_SERVER - cat > /tmp/gh-aw/safe-inputs/mcp_http_transport.cjs << 'EOF_MCP_HTTP_TRANSPORT' - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer } = require("./mcp_server.cjs"); - class MCPHTTPTransport { - constructor(options = {}) { - this.sessionIdGenerator = options.sessionIdGenerator; - this.enableJsonResponse = options.enableJsonResponse !== false; - this.enableDnsRebindingProtection = options.enableDnsRebindingProtection || false; - this.server = null; - this.sessionId = null; - this.started = false; - } - setServer(server) { - this.server = server; - } - async start() { - if (this.started) { - throw new Error("Transport already started"); - } - this.started = true; - } - async handleRequest(req, res, parsedBody) { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept, Mcp-Session-Id"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = parsedBody; - if (!body) { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); - } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? 
JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; - } - } - if (!body) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Empty request body", - }, - id: null, - }) - ); - return; - } - if (!body.jsonrpc || body.jsonrpc !== "2.0") { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: jsonrpc must be '2.0'", - }, - id: body.id || null, - }) - ); - return; - } - if (this.sessionIdGenerator) { - if (body.method === "initialize") { - this.sessionId = this.sessionIdGenerator(); - } else { - const requestSessionId = req.headers["mcp-session-id"]; - if (!requestSessionId) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32600, - message: "Invalid Request: Missing Mcp-Session-Id header", - }, - id: body.id || null, - }) - ); - return; - } - if (requestSessionId !== this.sessionId) { - res.writeHead(404, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32001, - message: "Session not found", - }, - id: body.id || null, - }) - ); - return; - } - } - } - const response = await this.server.handleRequest(body); - if (response === null) { - res.writeHead(204); - res.end(); - return; - } - const headers = { "Content-Type": "application/json" }; - if (this.sessionId) { - headers["mcp-session-id"] = this.sessionId; - } - res.writeHead(200, headers); - res.end(JSON.stringify(response)); - } catch (error) { - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? error.message : String(error), - }, - id: null, - }) - ); - } - } - } - } - module.exports = { - MCPServer, - MCPHTTPTransport, - }; - EOF_MCP_HTTP_TRANSPORT - cat > /tmp/gh-aw/safe-inputs/mcp_logger.cjs << 'EOF_MCP_LOGGER' - function createLogger(serverName) { - const logger = { - debug: msg => { - const timestamp = new Date().toISOString(); - process.stderr.write(`[${timestamp}] [${serverName}] ${msg}\n`); - }, - debugError: (prefix, error) => { - const errorMessage = error instanceof Error ? 
error.message : String(error); - logger.debug(`${prefix}${errorMessage}`); - if (error instanceof Error && error.stack) { - logger.debug(`${prefix}Stack trace: ${error.stack}`); - } - }, - }; - return logger; - } - module.exports = { - createLogger, - }; - EOF_MCP_LOGGER - cat > /tmp/gh-aw/safe-inputs/mcp_handler_shell.cjs << 'EOF_HANDLER_SHELL' - const fs = require("fs"); - const path = require("path"); - const { execFile } = require("child_process"); - const os = require("os"); - function createShellHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking shell handler: ${scriptPath}`); - server.debug(` [${toolName}] Shell handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const env = { ...process.env }; - for (const [key, value] of Object.entries(args || {})) { - const envKey = `INPUT_${key.toUpperCase().replace(/-/g, "_")}`; - env[envKey] = String(value); - server.debug(` [${toolName}] Set env: ${envKey}=${String(value).substring(0, 100)}${String(value).length > 100 ? "..." : ""}`); - } - const outputFile = path.join(os.tmpdir(), `mcp-shell-output-${Date.now()}-${Math.random().toString(36).substring(2)}.txt`); - env.GITHUB_OUTPUT = outputFile; - server.debug(` [${toolName}] Output file: ${outputFile}`); - fs.writeFileSync(outputFile, ""); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing shell script...`); - execFile( - scriptPath, - [], - { - env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Shell script error: `, error); - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - reject(error); - return; - } - const outputs = {}; - try { - if (fs.existsSync(outputFile)) { - const outputContent = fs.readFileSync(outputFile, "utf-8"); - server.debug( - ` [${toolName}] Output file content: ${outputContent.substring(0, 500)}${outputContent.length > 500 ? "..." : ""}` - ); - const lines = outputContent.split("\n"); - for (const line of lines) { - const trimmed = line.trim(); - if (trimmed && trimmed.includes("=")) { - const eqIndex = trimmed.indexOf("="); - const key = trimmed.substring(0, eqIndex); - const value = trimmed.substring(eqIndex + 1); - outputs[key] = value; - server.debug(` [${toolName}] Parsed output: ${key}=${value.substring(0, 100)}${value.length > 100 ? "..." 
: ""}`); - } - } - } - } catch (readError) { - server.debugError(` [${toolName}] Error reading output file: `, readError); - } - try { - if (fs.existsSync(outputFile)) { - fs.unlinkSync(outputFile); - } - } catch { - } - const result = { - stdout: stdout || "", - stderr: stderr || "", - outputs, - }; - server.debug(` [${toolName}] Shell handler completed, outputs: ${Object.keys(outputs).join(", ") || "(none)"}`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - }); - }; - } - module.exports = { - createShellHandler, - }; - EOF_HANDLER_SHELL - cat > /tmp/gh-aw/safe-inputs/mcp_handler_python.cjs << 'EOF_HANDLER_PYTHON' - const { execFile } = require("child_process"); - function createPythonHandler(server, toolName, scriptPath, timeoutSeconds = 60) { - return async args => { - server.debug(` [${toolName}] Invoking Python handler: ${scriptPath}`); - server.debug(` [${toolName}] Python handler args: ${JSON.stringify(args)}`); - server.debug(` [${toolName}] Timeout: ${timeoutSeconds}s`); - const inputJson = JSON.stringify(args || {}); - server.debug( - ` [${toolName}] Input JSON (${inputJson.length} bytes): ${inputJson.substring(0, 200)}${inputJson.length > 200 ? "..." : ""}` - ); - return new Promise((resolve, reject) => { - server.debug(` [${toolName}] Executing Python script...`); - const child = execFile( - "python3", - [scriptPath], - { - env: process.env, - timeout: timeoutSeconds * 1000, - maxBuffer: 10 * 1024 * 1024, - }, - (error, stdout, stderr) => { - if (stdout) { - server.debug(` [${toolName}] stdout: ${stdout.substring(0, 500)}${stdout.length > 500 ? "..." : ""}`); - } - if (stderr) { - server.debug(` [${toolName}] stderr: ${stderr.substring(0, 500)}${stderr.length > 500 ? "..." : ""}`); - } - if (error) { - server.debugError(` [${toolName}] Python script error: `, error); - reject(error); - return; - } - let result; - try { - if (stdout && stdout.trim()) { - result = JSON.parse(stdout.trim()); - } else { - result = { stdout: stdout || "", stderr: stderr || "" }; - } - } catch (parseError) { - server.debug(` [${toolName}] Output is not JSON, returning as text`); - result = { stdout: stdout || "", stderr: stderr || "" }; - } - server.debug(` [${toolName}] Python handler completed successfully`); - resolve({ - content: [ - { - type: "text", - text: JSON.stringify(result), - }, - ], - }); - } - ); - if (child.stdin) { - child.stdin.write(inputJson); - child.stdin.end(); - } - }); + }; }; - } - module.exports = { - createPythonHandler, - }; - EOF_HANDLER_PYTHON - cat > /tmp/gh-aw/safe-inputs/safe_inputs_config_loader.cjs << 'EOF_CONFIG_LOADER' - const fs = require("fs"); - function loadConfig(configPath) { - if (!fs.existsSync(configPath)) { - throw new Error(`Configuration file not found: ${configPath}`); - } - const configContent = fs.readFileSync(configPath, "utf-8"); - const config = JSON.parse(configContent); - if (!config.tools || !Array.isArray(config.tools)) { - throw new Error("Configuration must contain a 'tools' array"); - } - return config; - } - module.exports = { - loadConfig, - }; - EOF_CONFIG_LOADER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_tool_factory.cjs << 'EOF_TOOL_FACTORY' - function createToolConfig(name, description, inputSchema, handlerPath) { return { - name, - description, - inputSchema, - handler: handlerPath, + defaultHandler, + uploadAssetHandler, + createPullRequestHandler, + pushToPullRequestBranchHandler, }; } - module.exports = { - createToolConfig, - }; - EOF_TOOL_FACTORY - cat > 
/tmp/gh-aw/safe-inputs/safe_inputs_validation.cjs << 'EOF_VALIDATION' - function validateRequiredFields(args, inputSchema) { - const requiredFields = inputSchema && Array.isArray(inputSchema.required) ? inputSchema.required : []; - if (!requiredFields.length) { - return []; - } - const missing = requiredFields.filter(f => { - const value = args[f]; - return value === undefined || value === null || (typeof value === "string" && value.trim() === ""); - }); - return missing; - } - module.exports = { - validateRequiredFields, - }; - EOF_VALIDATION - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server.cjs << 'EOF_SAFE_INPUTS_SERVER' - const path = require("path"); - const { createServer, registerTool, loadToolHandlers, start } = require("./mcp_server_core.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { createToolConfig } = require("./safe_inputs_tool_factory.cjs"); - function startSafeInputsServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const logDir = options.logDir || config.logDir || undefined; - const server = createServer({ name: serverName, version }, { logDir }); - server.debug(`Loading safe-inputs configuration from: ${configPath}`); - server.debug(`Base path for handlers: ${basePath}`); - server.debug(`Tools to load: ${config.tools.length}`); - const tools = loadToolHandlers(server, config.tools, basePath); - for (const tool of tools) { - registerTool(server, tool); - } + function loadTools(server) { + const toolsPath = process.env.GH_AW_SAFE_OUTPUTS_TOOLS_PATH || "/tmp/gh-aw/safeoutputs/tools.json"; + let ALL_TOOLS = []; + server.debug(`Reading tools from file: ${toolsPath}`); try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - server.debug(`Deleted configuration file: ${configPath}`); + if (fs.existsSync(toolsPath)) { + server.debug(`Tools file exists at: ${toolsPath}`); + const toolsFileContent = fs.readFileSync(toolsPath, "utf8"); + server.debug(`Tools file content length: ${toolsFileContent.length} characters`); + server.debug(`Tools file read successfully, attempting to parse JSON`); + ALL_TOOLS = JSON.parse(toolsFileContent); + server.debug(`Successfully parsed ${ALL_TOOLS.length} tools from file`); + } else { + server.debug(`Tools file does not exist at: ${toolsPath}`); + server.debug(`Using empty tools array`); + ALL_TOOLS = []; } } catch (error) { - server.debugError(`Warning: Could not delete configuration file: `, error); + server.debug(`Error reading tools file: ${error instanceof Error ? 
error.message : String(error)}`); + server.debug(`Falling back to empty tools array`); + ALL_TOOLS = []; } - start(server); + return ALL_TOOLS; } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server.cjs [--log-dir ]"); - process.exit(1); - } - const configPath = args[0]; - const options = {}; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; + function attachHandlers(tools, handlers) { + tools.forEach(tool => { + if (tool.name === "create_pull_request") { + tool.handler = handlers.createPullRequestHandler; + } else if (tool.name === "push_to_pull_request_branch") { + tool.handler = handlers.pushToPullRequestBranchHandler; + } else if (tool.name === "upload_asset") { + tool.handler = handlers.uploadAssetHandler; } - } - try { - startSafeInputsServer(configPath, options); - } catch (error) { - console.error(`Error starting safe-inputs server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); - } + }); + return tools; } - module.exports = { - startSafeInputsServer, - loadConfig, - createToolConfig, - }; - EOF_SAFE_INPUTS_SERVER - cat > /tmp/gh-aw/safe-inputs/safe_inputs_mcp_server_http.cjs << 'EOF_SAFE_INPUTS_SERVER_HTTP' - const path = require("path"); - const http = require("http"); - const { randomUUID } = require("crypto"); - const { MCPServer, MCPHTTPTransport } = require("./mcp_http_transport.cjs"); - const { loadConfig } = require("./safe_inputs_config_loader.cjs"); - const { loadToolHandlers } = require("./mcp_server_core.cjs"); - const { validateRequiredFields } = require("./safe_inputs_validation.cjs"); - const { createLogger } = require("./mcp_logger.cjs"); - function createMCPServer(configPath, options = {}) { - const config = loadConfig(configPath); - const basePath = path.dirname(configPath); - const serverName = config.serverName || "safeinputs"; - const version = config.version || "1.0.0"; - const server = new MCPServer( - { - name: serverName, - version: version, - }, - { - capabilities: { - tools: {}, - }, - } - ); - const logger = createLogger(serverName); - logger.debug(`=== Creating MCP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Loading safe-inputs configuration from: ${configPath}`); - logger.debug(`Base path for handlers: ${basePath}`); - logger.debug(`Server name: ${serverName}`); - logger.debug(`Server version: ${version}`); - logger.debug(`Tools to load: ${config.tools.length}`); - const tempServer = { debug: logger.debug, debugError: logger.debugError }; - const tools = loadToolHandlers(tempServer, config.tools, basePath); - logger.debug(`Registering tools with MCP server...`); - let registeredCount = 0; - let skippedCount = 0; - for (const tool of tools) { - if (!tool.handler) { - logger.debug(`Skipping tool ${tool.name} - no handler loaded`); - skippedCount++; - continue; + function registerPredefinedTools(server, tools, config, registerTool, normalizeTool) { + tools.forEach(tool => { + if (Object.keys(config).find(configKey => normalizeTool(configKey) === tool.name)) { + registerTool(server, tool); } - logger.debug(`Registering tool: ${tool.name}`); - server.tool(tool.name, tool.description || "", tool.inputSchema || { type: "object", properties: {} }, async args => { - logger.debug(`Calling handler for tool: ${tool.name}`); - const missing = validateRequiredFields(args, tool.inputSchema); - if (missing.length) { - throw 
new Error(`Invalid arguments: missing or empty ${missing.map(m => `'${m}'`).join(", ")}`); - } - const result = await Promise.resolve(tool.handler(args)); - logger.debug(`Handler returned for tool: ${tool.name}`); - const content = result && result.content ? result.content : []; - return { content, isError: false }; - }); - registeredCount++; - } - logger.debug(`Tool registration complete: ${registeredCount} registered, ${skippedCount} skipped`); - logger.debug(`=== MCP Server Creation Complete ===`); - try { - const fs = require("fs"); - if (fs.existsSync(configPath)) { - fs.unlinkSync(configPath); - logger.debug(`Deleted configuration file: ${configPath}`); + }); + } + function registerDynamicTools(server, tools, config, outputFile, registerTool, normalizeTool) { + Object.keys(config).forEach(configKey => { + const normalizedKey = normalizeTool(configKey); + if (server.tools[normalizedKey]) { + return; } - } catch (error) { - logger.debugError(`Warning: Could not delete configuration file: `, error); - } - return { server, config, logger }; - } - async function startHttpServer(configPath, options = {}) { - const port = options.port || 3000; - const stateless = options.stateless || false; - const logger = createLogger("safe-inputs-startup"); - logger.debug(`=== Starting Safe Inputs MCP HTTP Server ===`); - logger.debug(`Configuration file: ${configPath}`); - logger.debug(`Port: ${port}`); - logger.debug(`Mode: ${stateless ? "stateless" : "stateful"}`); - logger.debug(`Environment: NODE_VERSION=${process.version}, PLATFORM=${process.platform}`); - try { - const { server, config, logger: mcpLogger } = createMCPServer(configPath, { logDir: options.logDir }); - Object.assign(logger, mcpLogger); - logger.debug(`MCP server created successfully`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools configured: ${config.tools.length}`); - logger.debug(`Creating HTTP transport...`); - const transport = new MCPHTTPTransport({ - sessionIdGenerator: stateless ? 
undefined : () => randomUUID(), - enableJsonResponse: true, - enableDnsRebindingProtection: false, - }); - logger.debug(`HTTP transport created`); - logger.debug(`Connecting server to transport...`); - await server.connect(transport); - logger.debug(`Server connected to transport successfully`); - logger.debug(`Creating HTTP server...`); - const httpServer = http.createServer(async (req, res) => { - res.setHeader("Access-Control-Allow-Origin", "*"); - res.setHeader("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.setHeader("Access-Control-Allow-Headers", "Content-Type, Accept"); - if (req.method === "OPTIONS") { - res.writeHead(200); - res.end(); - return; - } - if (req.method === "GET" && req.url === "/health") { - res.writeHead(200, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - status: "ok", - server: config.serverName || "safeinputs", - version: config.version || "1.0.0", - tools: config.tools.length, - }) - ); - return; - } - if (req.method !== "POST") { - res.writeHead(405, { "Content-Type": "application/json" }); - res.end(JSON.stringify({ error: "Method not allowed" })); - return; - } - try { - let body = null; - if (req.method === "POST") { - const chunks = []; - for await (const chunk of req) { - chunks.push(chunk); + if (!tools.find(t => t.name === normalizedKey)) { + const jobConfig = config[configKey]; + const dynamicTool = { + name: normalizedKey, + description: jobConfig && jobConfig.description ? jobConfig.description : `Custom safe-job: ${configKey}`, + inputSchema: { + type: "object", + properties: {}, + additionalProperties: true, + }, + handler: args => { + const entry = { + type: normalizedKey, + ...args, + }; + const entryJSON = JSON.stringify(entry); + fs.appendFileSync(outputFile, entryJSON + "\n"); + const outputText = + jobConfig && jobConfig.output + ? jobConfig.output + : `Safe-job '${configKey}' executed successfully with arguments: ${JSON.stringify(args)}`; + return { + content: [ + { + type: "text", + text: JSON.stringify({ result: outputText }), + }, + ], + }; + }, + }; + if (jobConfig && jobConfig.inputs) { + dynamicTool.inputSchema.properties = {}; + dynamicTool.inputSchema.required = []; + Object.keys(jobConfig.inputs).forEach(inputName => { + const inputDef = jobConfig.inputs[inputName]; + const propSchema = { + type: inputDef.type || "string", + description: inputDef.description || `Input parameter: ${inputName}`, + }; + if (inputDef.options && Array.isArray(inputDef.options)) { + propSchema.enum = inputDef.options; } - const bodyStr = Buffer.concat(chunks).toString(); - try { - body = bodyStr ? JSON.parse(bodyStr) : null; - } catch (parseError) { - res.writeHead(400, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32700, - message: "Parse error: Invalid JSON in request body", - }, - id: null, - }) - ); - return; + dynamicTool.inputSchema.properties[inputName] = propSchema; + if (inputDef.required) { + dynamicTool.inputSchema.required.push(inputName); } - } - await transport.handleRequest(req, res, body); - } catch (error) { - logger.debugError("Error handling request: ", error); - if (!res.headersSent) { - res.writeHead(500, { "Content-Type": "application/json" }); - res.end( - JSON.stringify({ - jsonrpc: "2.0", - error: { - code: -32603, - message: error instanceof Error ? 
error.message : String(error), - }, - id: null, - }) - ); - } - } - }); - logger.debug(`Attempting to bind to port ${port}...`); - httpServer.listen(port, () => { - logger.debug(`=== Safe Inputs MCP HTTP Server Started Successfully ===`); - logger.debug(`HTTP server listening on http://localhost:${port}`); - logger.debug(`MCP endpoint: POST http://localhost:${port}/`); - logger.debug(`Server name: ${config.serverName || "safeinputs"}`); - logger.debug(`Server version: ${config.version || "1.0.0"}`); - logger.debug(`Tools available: ${config.tools.length}`); - logger.debug(`Server is ready to accept requests`); - }); - httpServer.on("error", error => { - if (error.code === "EADDRINUSE") { - logger.debugError(`ERROR: Port ${port} is already in use. `, error); - } else if (error.code === "EACCES") { - logger.debugError(`ERROR: Permission denied to bind to port ${port}. `, error); - } else { - logger.debugError(`ERROR: Failed to start HTTP server: `, error); + }); } - process.exit(1); - }); - process.on("SIGINT", () => { - logger.debug("Received SIGINT, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - process.on("SIGTERM", () => { - logger.debug("Received SIGTERM, shutting down..."); - httpServer.close(() => { - logger.debug("HTTP server closed"); - process.exit(0); - }); - }); - return httpServer; - } catch (error) { - const errorLogger = createLogger("safe-inputs-startup-error"); - errorLogger.debug(`=== FATAL ERROR: Failed to start Safe Inputs MCP HTTP Server ===`); - errorLogger.debug(`Error type: ${error.constructor.name}`); - errorLogger.debug(`Error message: ${error.message}`); - if (error.stack) { - errorLogger.debug(`Stack trace:\n${error.stack}`); - } - if (error.code) { - errorLogger.debug(`Error code: ${error.code}`); - } - errorLogger.debug(`Configuration file: ${configPath}`); - errorLogger.debug(`Port: ${port}`); - throw error; - } - } - if (require.main === module) { - const args = process.argv.slice(2); - if (args.length < 1) { - console.error("Usage: node safe_inputs_mcp_server_http.cjs [--port ] [--stateless] [--log-dir ]"); - process.exit(1); - } - const configPath = args[0]; - const options = { - port: 3000, - stateless: false, - logDir: undefined, - }; - for (let i = 1; i < args.length; i++) { - if (args[i] === "--port" && args[i + 1]) { - options.port = parseInt(args[i + 1], 10); - i++; - } else if (args[i] === "--stateless") { - options.stateless = true; - } else if (args[i] === "--log-dir" && args[i + 1]) { - options.logDir = args[i + 1]; - i++; - } - } - startHttpServer(configPath, options).catch(error => { - console.error(`Error starting HTTP server: ${error instanceof Error ? error.message : String(error)}`); - process.exit(1); + registerTool(server, dynamicTool); + } }); } - module.exports = { - startHttpServer, - createMCPServer, - }; - EOF_SAFE_INPUTS_SERVER_HTTP - cat > /tmp/gh-aw/safe-inputs/tools.json << 'EOF_TOOLS_JSON' - { - "serverName": "safeinputs", - "version": "1.0.0", - "logDir": "/tmp/gh-aw/safe-inputs/logs", - "tools": [ - { - "name": "gh", - "description": "Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh \u003cargs\u003e. Use single quotes ' for complex args to avoid shell interpretation issues.", - "inputSchema": { - "properties": { - "args": { - "description": "Arguments to pass to gh CLI (without the 'gh' prefix). 
Examples: 'pr list --limit 5', 'issue view 123', 'api repos/{owner}/{repo}'", - "type": "string" - } - }, - "required": [ - "args" - ], - "type": "object" - }, - "handler": "gh.sh", - "env": { - "GH_AW_GH_TOKEN": "GH_AW_GH_TOKEN" - }, - "timeout": 60 - } - ] - } - EOF_TOOLS_JSON - cat > /tmp/gh-aw/safe-inputs/mcp-server.cjs << 'EOFSI' - const path = require("path"); - const { startHttpServer } = require("./safe_inputs_mcp_server_http.cjs"); - const configPath = path.join(__dirname, "tools.json"); - const port = parseInt(process.env.GH_AW_SAFE_INPUTS_PORT || "3000", 10); - const apiKey = process.env.GH_AW_SAFE_INPUTS_API_KEY || ""; - startHttpServer(configPath, { - port: port, - stateless: false, - logDir: "/tmp/gh-aw/safe-inputs/logs" - }).catch(error => { - console.error("Failed to start safe-inputs HTTP server:", error); - process.exit(1); - }); - EOFSI - chmod +x /tmp/gh-aw/safe-inputs/mcp-server.cjs - - - name: Setup Safe Inputs Tool Files - run: | - cat > /tmp/gh-aw/safe-inputs/gh.sh << 'EOFSH_gh' - #!/bin/bash - # Auto-generated safe-input tool: gh - # Execute any gh CLI command. Provide the full command after 'gh' (e.g., args: 'pr list --limit 5'). The tool will run: gh . Use single quotes ' for complex args to avoid shell interpretation issues. - - set -euo pipefail - - GH_TOKEN=$GH_AW_GH_TOKEN gh $INPUT_ARGS - - EOFSH_gh - chmod +x /tmp/gh-aw/safe-inputs/gh.sh - - - name: Generate Safe Inputs MCP Server Config - id: safe-inputs-config - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 - with: - script: | - function generateSafeInputsConfig({ core, crypto }) { - const apiKeyBuffer = crypto.randomBytes(32); - const apiKey = apiKeyBuffer.toString("base64").replace(/[/+=]/g, ""); - const port = 3000; - core.setOutput("safe_inputs_api_key", apiKey); - core.setOutput("safe_inputs_port", port.toString()); - core.info(`Safe Inputs MCP server will run on port ${port}`); - return { apiKey, port }; - } - - // Execute the function - const crypto = require('crypto'); - generateSafeInputsConfig({ core, crypto }); - - - name: Start Safe Inputs MCP HTTP Server - id: safe-inputs-start - run: | - # Set environment variables for the server - export GH_AW_SAFE_INPUTS_PORT=${{ steps.safe-inputs-config.outputs.safe_inputs_port }} - export GH_AW_SAFE_INPUTS_API_KEY=${{ steps.safe-inputs-config.outputs.safe_inputs_api_key }} - - export GH_AW_GH_TOKEN="${GH_AW_GH_TOKEN}" - - cd /tmp/gh-aw/safe-inputs - # Verify required files exist - echo "Verifying safe-inputs setup..." - if [ ! -f mcp-server.cjs ]; then - echo "ERROR: mcp-server.cjs not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - if [ ! -f tools.json ]; then - echo "ERROR: tools.json not found in /tmp/gh-aw/safe-inputs" - ls -la /tmp/gh-aw/safe-inputs/ - exit 1 - fi - echo "Configuration files verified" - # Log environment configuration - echo "Server configuration:" - echo " Port: $GH_AW_SAFE_INPUTS_PORT" - echo " API Key: ${GH_AW_SAFE_INPUTS_API_KEY:0:8}..." 
- echo " Working directory: $(pwd)" - # Ensure logs directory exists - mkdir -p /tmp/gh-aw/safe-inputs/logs - # Create initial server.log file for artifact upload - echo "Safe Inputs MCP Server Log" > /tmp/gh-aw/safe-inputs/logs/server.log - echo "Start time: $(date)" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "===========================================" >> /tmp/gh-aw/safe-inputs/logs/server.log - echo "" >> /tmp/gh-aw/safe-inputs/logs/server.log - # Start the HTTP server in the background - echo "Starting safe-inputs MCP HTTP server..." - node mcp-server.cjs >> /tmp/gh-aw/safe-inputs/logs/server.log 2>&1 & - SERVER_PID=$! - echo "Started safe-inputs MCP server with PID $SERVER_PID" - # Wait for server to be ready (max 10 seconds) - echo "Waiting for server to become ready..." - for i in {1..10}; do - # Check if process is still running - if ! kill -0 $SERVER_PID 2>/dev/null; then - echo "ERROR: Server process $SERVER_PID has died" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - exit 1 - fi - # Check if server is responding - if curl -s -f http://localhost:$GH_AW_SAFE_INPUTS_PORT/health > /dev/null 2>&1; then - echo "Safe Inputs MCP server is ready (attempt $i/10)" - break - fi - if [ $i -eq 10 ]; then - echo "ERROR: Safe Inputs MCP server failed to start after 10 seconds" - echo "Process status: $(ps aux | grep '[m]cp-server.cjs' || echo 'not running')" - echo "Server log contents:" - cat /tmp/gh-aw/safe-inputs/logs/server.log - echo "Checking port availability:" - netstat -tuln | grep $GH_AW_SAFE_INPUTS_PORT || echo "Port $GH_AW_SAFE_INPUTS_PORT not listening" - exit 1 - fi - echo "Waiting for server... (attempt $i/10)" - sleep 1 - done - # Output the configuration for the MCP client - echo "port=$GH_AW_SAFE_INPUTS_PORT" >> $GITHUB_OUTPUT - echo "api_key=$GH_AW_SAFE_INPUTS_API_KEY" >> $GITHUB_OUTPUT + const SERVER_INFO = { name: "safeoutputs", version: "1.0.0" }; + const MCP_LOG_DIR = process.env.GH_AW_MCP_LOG_DIR; + const server = createServer(SERVER_INFO, { logDir: MCP_LOG_DIR }); + const { config: safeOutputsConfig, outputFile } = loadConfig(server); + const appendSafeOutput = createAppendFunction(outputFile); + const handlers = createHandlers(server, appendSafeOutput); + const { defaultHandler } = handlers; + let ALL_TOOLS = loadTools(server); + ALL_TOOLS = attachHandlers(ALL_TOOLS, handlers); + server.debug(` output file: ${outputFile}`); + server.debug(` config: ${JSON.stringify(safeOutputsConfig)}`); + registerPredefinedTools(server, ALL_TOOLS, safeOutputsConfig, registerTool, normalizeTool); + registerDynamicTools(server, ALL_TOOLS, safeOutputsConfig, outputFile, registerTool, normalizeTool); + server.debug(` tools: ${Object.keys(server.tools).join(", ")}`); + if (!Object.keys(server.tools).length) throw new Error("No tools enabled in configuration"); + start(server, { defaultHandler }); + EOF + chmod +x /tmp/gh-aw/safeoutputs/mcp-server.cjs - name: Setup MCPs env: GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN || secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }} GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_SAFE_INPUTS_PORT: ${{ steps.safe-inputs-start.outputs.port }} - GH_AW_SAFE_INPUTS_API_KEY: ${{ steps.safe-inputs-start.outputs.api_key }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | mkdir -p /tmp/gh-aw/mcp-config mkdir -p /home/runner/.copilot @@ -4644,19 +3304,6 @@ jobs: "GITHUB_PERSONAL_ACCESS_TOKEN": "\${GITHUB_MCP_SERVER_TOKEN}" } }, - "safeinputs": { - "type": "http", - "url": 
"http://localhost:\${GH_AW_SAFE_INPUTS_PORT}", - "headers": { - "Authorization": "Bearer \${GH_AW_SAFE_INPUTS_API_KEY}" - }, - "tools": ["*"], - "env": { - "GH_AW_SAFE_INPUTS_PORT": "\${GH_AW_SAFE_INPUTS_PORT}", - "GH_AW_SAFE_INPUTS_API_KEY": "\${GH_AW_SAFE_INPUTS_API_KEY}", - "GH_AW_GH_TOKEN": "\${GH_AW_GH_TOKEN}" - } - }, "safeoutputs": { "type": "local", "command": "node", @@ -4777,8 +3424,6 @@ jobs: PROMPT_DIR="$(dirname "$GH_AW_PROMPT")" mkdir -p "$PROMPT_DIR" cat << 'PROMPT_EOF' | envsubst > "$GH_AW_PROMPT" - - # Smoke Test: Copilot Engine Validation **IMPORTANT: Keep all outputs extremely short and concise. Use single-line responses where possible. No verbose explanations.** @@ -4789,7 +3434,8 @@ jobs: 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-copilot-${GH_AW_GITHUB_RUN_ID}.txt` with content "Smoke test passed for Copilot at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **GitHub MCP Default Toolset Testing**: Verify that the `get_me` tool is NOT available with default toolsets. Try to use it and confirm it fails with a tool not found error. - 5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues + 5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${GH_AW_GITHUB_RUN_ID}.txt` with content "Cache memory test for run ${GH_AW_GITHUB_RUN_ID}" and verify it was created successfully + 6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -4848,6 +3494,31 @@ jobs: Do NOT attempt to edit files outside these directories as you do not have the necessary permissions. + PROMPT_EOF + - name: Append cache memory instructions to prompt + env: + GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt + run: | + cat << 'PROMPT_EOF' | envsubst >> "$GH_AW_PROMPT" + + --- + + ## Cache Folder Available + + You have access to a persistent cache folder at `/tmp/gh-aw/cache-memory/` where you can read and write files to create memories and store information. + + - **Read/Write Access**: You can freely read from and write to any files in this folder + - **Persistence**: Files in this folder persist across workflow runs via GitHub Actions cache + - **Last Write Wins**: If multiple processes write to the same file, the last write will be preserved + - **File Share**: Use this as a simple file share - organize files as you see fit + + Examples of what you can store: + - `/tmp/gh-aw/cache-memory/notes.txt` - general notes and observations + - `/tmp/gh-aw/cache-memory/preferences.json` - user preferences and settings + - `/tmp/gh-aw/cache-memory/history.log` - activity history and logs + - `/tmp/gh-aw/cache-memory/state/` - organized state files in subdirectories + + Feel free to create, read, update, and organize files in this folder as needed for your tasks. 
PROMPT_EOF - name: Append safe outputs instructions to prompt env: @@ -5018,12 +3689,11 @@ jobs: run: | set -o pipefail sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --mount /tmp:/tmp:rw --mount "${GITHUB_WORKSPACE}:${GITHUB_WORKSPACE}:rw" --mount /usr/bin/date:/usr/bin/date:ro --mount /usr/bin/gh:/usr/bin/gh:ro --mount /usr/bin/yq:/usr/bin/yq:ro --allow-domains '*.githubusercontent.com,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.npms.io,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,bun.sh,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,deb.nodesource.com,deno.land,get.pnpm.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,nodejs.org,npm.pkg.github.com,npmjs.com,npmjs.org,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,registry.bower.io,registry.npmjs.com,registry.npmjs.org,registry.yarnpkg.com,repo.yarnpkg.com,s.symcb.com,s.symcd.com,security.ubuntu.com,skimdb.npmjs.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com,www.npmjs.com,www.npmjs.org,yarnpkg.com' --log-level debug --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs \ - -- npx -y @github/copilot@0.0.367 --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --allow-all-paths --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"} \ + -- npx -y @github/copilot@0.0.367 --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --add-dir /tmp/gh-aw/cache-memory/ --allow-all-paths --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"} \ 2>&1 | tee /tmp/gh-aw/agent-stdio.log env: COPILOT_AGENT_RUNNER_TYPE: STANDALONE COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN || secrets.COPILOT_CLI_TOKEN }} - GH_AW_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_AW_MCP_CONFIG: /home/runner/.copilot/mcp-config.json GH_AW_MODEL_AGENT_COPILOT: ${{ vars.GH_AW_MODEL_AGENT_COPILOT || '' }} GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt @@ -6093,13 +4763,6 @@ jobs: name: mcp-logs path: /tmp/gh-aw/mcp-logs/ if-no-files-found: ignore - - name: Upload SafeInputs logs - if: always() - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5 - with: - name: safeinputs - path: /tmp/gh-aw/safe-inputs/logs/ - if-no-files-found: ignore - name: Parse agent logs for step summary if: always() uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 @@ -7916,6 +6579,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -9326,3 +7990,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + 
steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/smoke-copilot.md b/.github/workflows/smoke-copilot.md index a210011b81..4701d19e43 100644 --- a/.github/workflows/smoke-copilot.md +++ b/.github/workflows/smoke-copilot.md @@ -14,8 +14,6 @@ permissions: issues: read name: Smoke Copilot engine: copilot -imports: - - shared/gh.md network: allowed: - defaults @@ -24,6 +22,7 @@ network: firewall: log-level: debug # Enable debug-level firewall logs tools: + cache-memory: true edit: bash: - "*" @@ -52,7 +51,8 @@ strict: true 2. **File Writing Testing**: Create a test file `/tmp/gh-aw/agent/smoke-test-copilot-${{ github.run_id }}.txt` with content "Smoke test passed for Copilot at $(date)" (create the directory if it doesn't exist) 3. **Bash Tool Testing**: Execute bash commands to verify file creation was successful (use `cat` to read the file back) 4. **GitHub MCP Default Toolset Testing**: Verify that the `get_me` tool is NOT available with default toolsets. Try to use it and confirm it fails with a tool not found error. -5. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues +5. **Cache Memory Testing**: Write a test file to `/tmp/gh-aw/cache-memory/smoke-test-${{ github.run_id }}.txt` with content "Cache memory test for run ${{ github.run_id }}" and verify it was created successfully +6. **Safe Input gh Tool Testing**: Use the `gh` safe-input tool to run "gh issues list --limit 3" to verify the tool can access GitHub issues ## Output @@ -61,4 +61,4 @@ Add a **very brief** comment (max 5-10 lines) to the current pull request with: - βœ… or ❌ for each test result - Overall status: PASS or FAIL -If all tests pass, add the label `smoke-copilot` to the pull request. \ No newline at end of file +If all tests pass, add the label `smoke-copilot` to the pull request. 
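Worth noting about the `if: always() && needs.detection.outputs.success == 'true'` gate used by every `update_cache_memory` job in this patch: a job whose `needs` include a failed or cancelled job is skipped before its `if` expression is ever evaluated, unless the expression contains a status check function such as `always()`. The `always()` forces evaluation, and the explicit `needs.detection.outputs.success == 'true'` comparison then ensures the cache save runs only when threat detection reported success. A minimal, runnable Go sketch of how such a gate expression can be composed; `gateOnJobSuccess` is an illustrative helper, not a function in gh-aw:

```go
package main

import "fmt"

// gateOnJobSuccess builds a GitHub Actions `if:` expression that is still
// evaluated when upstream jobs fail (always()) but only passes when the
// named job exported success == 'true' as a job output.
func gateOnJobSuccess(job string) string {
	return fmt.Sprintf("always() && needs.%s.outputs.success == 'true'", job)
}

func main() {
	// Matches the condition on the update_cache_memory jobs above.
	fmt.Println(gateOnJobSuccess("detection"))
}
```

The same expression appears verbatim as `jobCondition` in `buildUpdateCacheMemoryJob` further down in this diff.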
diff --git a/.github/workflows/smoke-detector.lock.yml b/.github/workflows/smoke-detector.lock.yml index def806df1e..a7eabf487e 100644 --- a/.github/workflows/smoke-detector.lock.yml +++ b/.github/workflows/smoke-detector.lock.yml @@ -95,6 +95,7 @@ # create_issue["create_issue"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # add_comment --> conclusion @@ -102,12 +103,15 @@ # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> add_comment # create_issue --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # pre_activation --> activation +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -436,7 +440,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1632,8 +1638,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6059,6 +6065,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim permissions: @@ -7478,3 +7485,23 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/spec-kit-executor.lock.yml b/.github/workflows/spec-kit-executor.lock.yml index ae2d6e496e..f74b31b880 100644 --- a/.github/workflows/spec-kit-executor.lock.yml +++ b/.github/workflows/spec-kit-executor.lock.yml @@ -86,6 +86,7 @@ # create_pull_request["create_pull_request"] # detection["detection"] # push_repo_memory["push_repo_memory"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> create_pull_request @@ -93,11 +94,14 @@ # agent --> create_pull_request # agent --> detection # agent --> push_repo_memory +# agent --> update_cache_memory # create_pull_request 
--> conclusion # detection --> conclusion # detection --> create_pull_request # detection --> push_repo_memory +# detection --> update_cache_memory # push_repo_memory --> conclusion +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -316,7 +320,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -476,8 +482,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5416,6 +5422,7 @@ jobs: - create_pull_request - detection - push_repo_memory + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -7012,3 +7019,23 @@ jobs: core.setFailed(`Unexpected error: ${error instanceof Error ? error.message : String(error)}`); }); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/stale-repo-identifier.lock.yml b/.github/workflows/stale-repo-identifier.lock.yml index aa7e4a165f..87b98ddaec 100644 --- a/.github/workflows/stale-repo-identifier.lock.yml +++ b/.github/workflows/stale-repo-identifier.lock.yml @@ -120,17 +120,21 @@ # conclusion["conclusion"] # create_issue["create_issue"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_issue --> conclusion # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -980,7 +984,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # 
https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1214,8 +1220,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: trending-data-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6741,6 +6747,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -8060,6 +8067,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: trending-data-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/static-analysis-report.lock.yml b/.github/workflows/static-analysis-report.lock.yml index e899d98a63..28f92ea471 100644 --- a/.github/workflows/static-analysis-report.lock.yml +++ b/.github/workflows/static-analysis-report.lock.yml @@ -65,14 +65,18 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # create_discussion --> conclusion # detection --> conclusion # detection --> create_discussion +# detection --> update_cache_memory +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -498,7 +502,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -676,8 +682,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: 
actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5131,6 +5137,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6547,3 +6554,23 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/super-linter.lock.yml b/.github/workflows/super-linter.lock.yml index a33ba0971e..2fc6df0089 100644 --- a/.github/workflows/super-linter.lock.yml +++ b/.github/workflows/super-linter.lock.yml @@ -115,16 +115,20 @@ # create_issue["create_issue"] # detection["detection"] # super_linter["super_linter"] +# update_cache_memory["update_cache_memory"] # activation --> agent # activation --> conclusion # activation --> super_linter # agent --> conclusion # agent --> create_issue # agent --> detection +# agent --> update_cache_memory # create_issue --> conclusion # detection --> conclusion # detection --> create_issue +# detection --> update_cache_memory # super_linter --> agent +# update_cache_memory --> conclusion # ``` # # Original Prompt: @@ -324,7 +328,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -496,8 +502,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -5325,6 +5331,7 @@ jobs: - agent - create_issue - detection + - update_cache_memory if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim permissions: @@ -6685,3 +6692,23 @@ jobs: path: super-linter.log retention-days: 7 + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: 
cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + diff --git a/.github/workflows/technical-doc-writer.lock.yml b/.github/workflows/technical-doc-writer.lock.yml index 2b650368e4..105fb4ecb0 100644 --- a/.github/workflows/technical-doc-writer.lock.yml +++ b/.github/workflows/technical-doc-writer.lock.yml @@ -107,6 +107,7 @@ # conclusion["conclusion"] # create_pull_request["create_pull_request"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion @@ -116,13 +117,16 @@ # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_pull_request --> add_comment # create_pull_request --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -524,7 +528,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1303,8 +1309,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6403,6 +6409,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory - upload_assets if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim @@ -7805,6 +7812,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/unbloat-docs.lock.yml b/.github/workflows/unbloat-docs.lock.yml index f831dffec0..83e7c1ea32 100644 --- a/.github/workflows/unbloat-docs.lock.yml +++ 
b/.github/workflows/unbloat-docs.lock.yml @@ -142,6 +142,7 @@ # create_pull_request["create_pull_request"] # detection["detection"] # pre_activation["pre_activation"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion @@ -151,14 +152,17 @@ # agent --> conclusion # agent --> create_pull_request # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_pull_request --> add_comment # create_pull_request --> conclusion # detection --> add_comment # detection --> conclusion # detection --> create_pull_request +# detection --> update_cache_memory # detection --> upload_assets # pre_activation --> activation +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -479,7 +483,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -1658,8 +1664,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6203,6 +6209,7 @@ jobs: - agent - create_pull_request - detection + - update_cache_memory - upload_assets if: ((always()) && (needs.agent.result != 'skipped')) && (!(needs.add_comment.outputs.comment_id)) runs-on: ubuntu-slim @@ -7714,6 +7721,26 @@ jobs: } await main(); + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/.github/workflows/weekly-issue-summary.lock.yml b/.github/workflows/weekly-issue-summary.lock.yml index 853455787c..e3554c4790 100644 --- a/.github/workflows/weekly-issue-summary.lock.yml +++ b/.github/workflows/weekly-issue-summary.lock.yml @@ -71,17 +71,21 @@ # conclusion["conclusion"] # create_discussion["create_discussion"] # detection["detection"] +# update_cache_memory["update_cache_memory"] # upload_assets["upload_assets"] # activation --> agent # activation --> conclusion # agent --> conclusion # agent --> create_discussion # agent --> detection +# agent --> update_cache_memory # agent --> upload_assets # create_discussion --> conclusion # detection --> conclusion # 
detection --> create_discussion +# detection --> update_cache_memory # detection --> upload_assets +# update_cache_memory --> conclusion # upload_assets --> conclusion # ``` # @@ -700,7 +704,9 @@ # ``` # # Pinned GitHub Actions: -# - actions/cache@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# - actions/cache/restore@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) +# https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 +# - actions/cache/save@v4 (0057852bfaa89a56745cba8c7296529d2fc39830) # https://github.com/actions/cache/commit/0057852bfaa89a56745cba8c7296529d2fc39830 # - actions/checkout@v5 (93cb6efe18208431cddfb8368fd83d5badbf9bfd) # https://github.com/actions/checkout/commit/93cb6efe18208431cddfb8368fd83d5badbf9bfd @@ -878,8 +884,8 @@ jobs: echo "Cache memory directory created at /tmp/gh-aw/cache-memory" echo "This folder provides persistent file storage across workflow runs" echo "LLMs and agentic tools can freely read and write files in this directory" - - name: Cache memory file share data - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + - name: Restore cache memory file share data + uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: key: memory-${{ github.workflow }}-${{ github.run_id }} path: /tmp/gh-aw/cache-memory @@ -6106,6 +6112,7 @@ jobs: - agent - create_discussion - detection + - update_cache_memory - upload_assets if: (always()) && (needs.agent.result != 'skipped') runs-on: ubuntu-slim @@ -7520,6 +7527,26 @@ jobs: path: /tmp/gh-aw/threat-detection/detection.log if-no-files-found: ignore + update_cache_memory: + needs: + - agent + - detection + if: always() && needs.detection.outputs.success == 'true' + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Download cache-memory artifact (default) + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6 + continue-on-error: true + with: + name: cache-memory + path: /tmp/gh-aw/cache-memory + - name: Save cache-memory to cache (default) + uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 + with: + key: memory-${{ github.workflow }}-${{ github.run_id }} + path: /tmp/gh-aw/cache-memory + upload_assets: needs: - agent diff --git a/docs/src/content/docs/reference/memory.md b/docs/src/content/docs/reference/memory.md index 66b233c5b7..f818c6df9c 100644 --- a/docs/src/content/docs/reference/memory.md +++ b/docs/src/content/docs/reference/memory.md @@ -183,6 +183,16 @@ Cache Memory leverages GitHub Actions cache with 7-day retention, 10GB per repos **File Security**: Files use standard GitHub Actions runner permissions. The cache directory is temporary and cleaned between runs, with no external access. +**Delayed Cache Updates with Threat Detection**: When [threat detection](/gh-aw/reference/safe-outputs/#threat-detection) is enabled, cache-memory updates are deferred until after the detection job validates the agent's output. This prevents potentially malicious cache content from being persisted before security scanning. During agent execution, cache-memory files are: + +1. Restored from previous runs using `actions/cache/restore` +2. Modified by the AI agent during workflow execution +3. Uploaded as artifacts for inspection +4. Validated by the threat detection job +5. Saved back to cache using `actions/cache/save` only if detection succeeds + +This ensures that cache memory changes are only persisted after validation, providing an additional security layer for agentic workflows. 
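The restore/save split documented above is driven by a single compiler-side check. A minimal sketch of that decision, distilled from the `pkg/workflow/cache.go` hunk later in this diff (the surrounding types are abbreviated here; only the field names and the boolean logic match the real code):

```go
package main

import "fmt"

// CacheMemoryEntry abbreviates the compiler's cache-memory config entry;
// only the fields relevant to this decision are shown.
type CacheMemoryEntry struct {
	ID          string
	RestoreOnly bool
}

// useRestoreOnly reports whether the agent job should emit
// actions/cache/restore (save deferred to the update_cache_memory job)
// instead of actions/cache, whose post-action would save the cache
// before threat detection has run.
func useRestoreOnly(c CacheMemoryEntry, threatDetectionEnabled bool) bool {
	return c.RestoreOnly || threatDetectionEnabled
}

func main() {
	c := CacheMemoryEntry{ID: "default"}
	fmt.Println(useRestoreOnly(c, true))  // true: save deferred until after detection
	fmt.Println(useRestoreOnly(c, false)) // false: actions/cache auto-saves
}
```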
If threat detection is not enabled, cache updates occur automatically via the standard `actions/cache` post-action behavior. + ## Examples ### Basic File Storage diff --git a/pkg/workflow/action_pins_test.go b/pkg/workflow/action_pins_test.go index bfc16de226..d3381fab63 100644 --- a/pkg/workflow/action_pins_test.go +++ b/pkg/workflow/action_pins_test.go @@ -345,9 +345,9 @@ func TestApplyActionPinToStep(t *testing.T) { func TestGetActionPinsSorting(t *testing.T) { pins := getActionPins() - // Verify we got all the pins (should be 25 after adding more actions) - if len(pins) != 25 { - t.Errorf("getActionPins() returned %d pins, expected 25", len(pins)) + // Verify we got all the pins (should be 26 after adding cache/save action) + if len(pins) != 26 { + t.Errorf("getActionPins() returned %d pins, expected 26", len(pins)) } // Verify they are sorted by version (descending) then by repository name (ascending) diff --git a/pkg/workflow/cache.go b/pkg/workflow/cache.go index bd2f0f8157..903ff1e359 100644 --- a/pkg/workflow/cache.go +++ b/pkg/workflow/cache.go @@ -368,9 +368,14 @@ func generateCacheMemorySteps(builder *strings.Builder, data *WorkflowData) { } // Step name and action - // Use actions/cache/restore for restore-only caches, actions/cache for normal caches + // Use actions/cache/restore for restore-only caches or when threat detection is enabled + // When threat detection is enabled, we only restore the cache and defer saving to a separate job after detection + // Use actions/cache for normal caches (which auto-saves via post-action) + threatDetectionEnabled := data.SafeOutputs != nil && data.SafeOutputs.ThreatDetection != nil + useRestoreOnly := cache.RestoreOnly || threatDetectionEnabled + var actionName string - if cache.RestoreOnly { + if useRestoreOnly { actionName = "Restore cache memory file share data" } else { actionName = "Cache memory file share data" @@ -382,8 +387,9 @@ func generateCacheMemorySteps(builder *strings.Builder, data *WorkflowData) { builder.WriteString(fmt.Sprintf(" - name: %s (%s)\n", actionName, cache.ID)) } - // Use actions/cache/restore@v4 for restore-only, actions/cache@v4 for normal - if cache.RestoreOnly { + // Use actions/cache/restore@v4 when restore-only or threat detection enabled + // Use actions/cache@v4 for normal caches + if useRestoreOnly { builder.WriteString(fmt.Sprintf(" uses: %s\n", GetActionPin("actions/cache/restore"))) } else { builder.WriteString(fmt.Sprintf(" uses: %s\n", GetActionPin("actions/cache"))) @@ -499,3 +505,94 @@ func generateCacheMemoryPromptSection(yaml *strings.Builder, config *CacheMemory yaml.WriteString(" Feel free to create, read, update, and organize files in these folders as needed for your tasks.\n") } } + +// buildUpdateCacheMemoryJob builds a job that updates cache-memory after detection passes +// This job downloads cache-memory artifacts and saves them to GitHub Actions cache +func (c *Compiler) buildUpdateCacheMemoryJob(data *WorkflowData, threatDetectionEnabled bool) (*Job, error) { + if data.CacheMemoryConfig == nil || len(data.CacheMemoryConfig.Caches) == 0 { + return nil, nil + } + + // Only create this job if threat detection is enabled + // Otherwise, cache is updated automatically by actions/cache post-action + if !threatDetectionEnabled { + return nil, nil + } + + cacheLog.Printf("Building update_cache_memory job for %d caches (threatDetectionEnabled=%v)", len(data.CacheMemoryConfig.Caches), threatDetectionEnabled) + + var steps []string + + // Build steps for each cache + for _, cache := range 
data.CacheMemoryConfig.Caches { + // Skip restore-only caches + if cache.RestoreOnly { + continue + } + + // Determine artifact name and cache directory + var artifactName, cacheDir string + if cache.ID == "default" { + artifactName = "cache-memory" + cacheDir = "/tmp/gh-aw/cache-memory" + } else { + artifactName = fmt.Sprintf("cache-memory-%s", cache.ID) + cacheDir = fmt.Sprintf("/tmp/gh-aw/cache-memory-%s", cache.ID) + } + + // Download artifact step + var downloadStep strings.Builder + downloadStep.WriteString(fmt.Sprintf(" - name: Download cache-memory artifact (%s)\n", cache.ID)) + downloadStep.WriteString(fmt.Sprintf(" uses: %s\n", GetActionPin("actions/download-artifact"))) + downloadStep.WriteString(" continue-on-error: true\n") + downloadStep.WriteString(" with:\n") + downloadStep.WriteString(fmt.Sprintf(" name: %s\n", artifactName)) + downloadStep.WriteString(fmt.Sprintf(" path: %s\n", cacheDir)) + steps = append(steps, downloadStep.String()) + + // Generate cache key (same logic as in generateCacheMemorySteps) + cacheKey := cache.Key + if cacheKey == "" { + if cache.ID == "default" { + cacheKey = "memory-${{ github.workflow }}-${{ github.run_id }}" + } else { + cacheKey = fmt.Sprintf("memory-%s-${{ github.workflow }}-${{ github.run_id }}", cache.ID) + } + } + + // Automatically append -${{ github.run_id }} if the key doesn't already end with it + runIdSuffix := "-${{ github.run_id }}" + if !strings.HasSuffix(cacheKey, runIdSuffix) { + cacheKey = cacheKey + runIdSuffix + } + + // Save to cache step + var saveStep strings.Builder + saveStep.WriteString(fmt.Sprintf(" - name: Save cache-memory to cache (%s)\n", cache.ID)) + saveStep.WriteString(fmt.Sprintf(" uses: %s\n", GetActionPin("actions/cache/save"))) + saveStep.WriteString(" with:\n") + saveStep.WriteString(fmt.Sprintf(" key: %s\n", cacheKey)) + saveStep.WriteString(fmt.Sprintf(" path: %s\n", cacheDir)) + steps = append(steps, saveStep.String()) + } + + // If no writable caches, return nil + if len(steps) == 0 { + return nil, nil + } + + // Job condition: only run if detection passed + jobCondition := "always() && needs.detection.outputs.success == 'true'" + + job := &Job{ + Name: "update_cache_memory", + DisplayName: "", // No display name - job ID is sufficient + RunsOn: "runs-on: ubuntu-latest", + If: jobCondition, + Permissions: NewPermissionsEmpty().RenderToYAML(), // No special permissions needed + Needs: []string{"agent", "detection"}, + Steps: steps, + } + + return job, nil +} diff --git a/pkg/workflow/cache_memory_threat_detection_test.go b/pkg/workflow/cache_memory_threat_detection_test.go new file mode 100644 index 0000000000..92dd7482e0 --- /dev/null +++ b/pkg/workflow/cache_memory_threat_detection_test.go @@ -0,0 +1,221 @@ +package workflow + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +// TestCacheMemoryWithThreatDetection verifies that when threat detection is enabled, +// cache-memory uses actions/cache/restore instead of actions/cache and creates +// an update_cache_memory job to save the cache after detection succeeds +func TestCacheMemoryWithThreatDetection(t *testing.T) { + tests := []struct { + name string + frontmatter string + expectedInLock []string + notExpectedInLock []string + }{ + { + name: "cache-memory with threat detection enabled", + frontmatter: `--- +name: Test Cache Memory with Threat Detection +on: workflow_dispatch +permissions: + contents: read + issues: read + pull-requests: read +engine: claude +tools: + cache-memory: true + github: + allowed: [get_repository] 
+safe-outputs: + create-issue: + threat-detection: true +--- + +Test workflow with cache-memory and threat detection enabled.`, + expectedInLock: []string{ + // In agent job, should use actions/cache/restore instead of actions/cache + "- name: Restore cache memory file share data", + "uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830", + "key: memory-${{ github.workflow }}-${{ github.run_id }}", + // Should still upload artifact + "- name: Upload cache-memory data as artifact", + "name: cache-memory", + // Should have update_cache_memory job + "update_cache_memory:", + "- agent", + "- detection", + "if: always() && needs.detection.outputs.success == 'true'", + "- name: Download cache-memory artifact (default)", + "- name: Save cache-memory to cache (default)", + "uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830", + }, + notExpectedInLock: []string{ + // Should NOT use regular actions/cache in agent job + "- name: Cache memory file share data\n uses: actions/cache@", + }, + }, + { + name: "cache-memory without threat detection", + frontmatter: `--- +name: Test Cache Memory without Threat Detection +on: workflow_dispatch +permissions: + contents: read + issues: read + pull-requests: read +engine: claude +tools: + cache-memory: true + github: + allowed: [get_repository] +--- + +Test workflow with cache-memory but no threat detection.`, + expectedInLock: []string{ + // Without threat detection, should use regular actions/cache + "- name: Cache memory file share data", + "uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830", + "key: memory-${{ github.workflow }}-${{ github.run_id }}", + // Should still upload artifact + "- name: Upload cache-memory data as artifact", + "name: cache-memory", + }, + notExpectedInLock: []string{ + // Should NOT have update_cache_memory job + "update_cache_memory:", + // Should NOT use restore action + "uses: actions/cache/restore@", + }, + }, + { + name: "multiple cache-memory with threat detection", + frontmatter: `--- +name: Test Multiple Cache Memory with Threat Detection +on: workflow_dispatch +permissions: + contents: read + issues: read + pull-requests: read +engine: claude +tools: + cache-memory: + - id: default + key: memory-default + - id: session + key: memory-session + github: + allowed: [get_repository] +safe-outputs: + create-issue: + threat-detection: true +--- + +Test workflow with multiple cache-memory and threat detection enabled.`, + expectedInLock: []string{ + // Both caches should use restore + "- name: Restore cache memory file share data (default)", + "uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830", + "key: memory-default-${{ github.run_id }}", + "- name: Restore cache memory file share data (session)", + "key: memory-session-${{ github.run_id }}", + // Should upload both artifacts + "- name: Upload cache-memory data as artifact (default)", + "name: cache-memory-default", + "- name: Upload cache-memory data as artifact (session)", + "name: cache-memory-session", + // Should have update_cache_memory job with both caches + "update_cache_memory:", + "- name: Download cache-memory artifact (default)", + "- name: Save cache-memory to cache (default)", + "- name: Download cache-memory artifact (session)", + "- name: Save cache-memory to cache (session)", + }, + notExpectedInLock: []string{ + // Should NOT use regular actions/cache + "- name: Cache memory file share data (default)", + "- name: Cache memory file share data (session)", + }, + }, + { + name: "restore-only cache-memory 
with threat detection", + frontmatter: `--- +name: Test Restore-Only Cache Memory with Threat Detection +on: workflow_dispatch +permissions: + contents: read + issues: read + pull-requests: read +engine: claude +tools: + cache-memory: + - id: default + key: memory-restore-only + restore-only: true + github: + allowed: [get_repository] +safe-outputs: + create-issue: + threat-detection: true +--- + +Test workflow with restore-only cache-memory and threat detection enabled.`, + expectedInLock: []string{ + // Should use restore for restore-only cache (no ID suffix for single default cache) + "- name: Restore cache memory file share data", + "uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830", + }, + notExpectedInLock: []string{ + // Should NOT upload artifact for restore-only + "- name: Upload cache-memory data as artifact", + // Should NOT have update_cache_memory job for restore-only + "update_cache_memory:", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a temporary directory for the test + tmpDir := t.TempDir() + mdPath := filepath.Join(tmpDir, "test.md") + lockPath := filepath.Join(tmpDir, "test.lock.yml") + + // Write markdown to temp file + if err := os.WriteFile(mdPath, []byte(tt.frontmatter), 0644); err != nil { + t.Fatalf("Failed to write markdown file: %v", err) + } + + // Compile the workflow + compiler := NewCompiler(false, "", "test") + if err := compiler.CompileWorkflow(mdPath); err != nil { + t.Fatalf("Failed to compile workflow: %v", err) + } + + // Read the generated lock file + lockYAML, err := os.ReadFile(lockPath) + if err != nil { + t.Fatalf("Failed to read lock file: %v", err) + } + lockContent := string(lockYAML) + + // Check expected strings + for _, expected := range tt.expectedInLock { + if !strings.Contains(lockContent, expected) { + t.Errorf("Expected lock YAML to contain %q, but it didn't.\nGenerated YAML:\n%s", expected, lockContent) + } + } + + // Check not expected strings + for _, notExpected := range tt.notExpectedInLock { + if strings.Contains(lockContent, notExpected) { + t.Errorf("Expected lock YAML NOT to contain %q, but it did.\nGenerated YAML:\n%s", notExpected, lockContent) + } + } + }) + } +} diff --git a/pkg/workflow/compiler_jobs.go b/pkg/workflow/compiler_jobs.go index 0053bc64c6..50162994eb 100644 --- a/pkg/workflow/compiler_jobs.go +++ b/pkg/workflow/compiler_jobs.go @@ -219,6 +219,36 @@ func (c *Compiler) buildJobs(data *WorkflowData, markdownPath string) error { } } + // Build update_cache_memory job if cache-memory is configured and threat detection is enabled + // This job downloads cache-memory artifacts and saves them to GitHub Actions cache + // It runs after detection job completes successfully + var updateCacheMemoryJobName string + if data.CacheMemoryConfig != nil && len(data.CacheMemoryConfig.Caches) > 0 { + threatDetectionEnabledForSafeJobs := data.SafeOutputs != nil && data.SafeOutputs.ThreatDetection != nil + if threatDetectionEnabledForSafeJobs { + compilerJobsLog.Print("Building update_cache_memory job") + updateCacheMemoryJob, err := c.buildUpdateCacheMemoryJob(data, threatDetectionEnabledForSafeJobs) + if err != nil { + return fmt.Errorf("failed to build update_cache_memory job: %w", err) + } + if updateCacheMemoryJob != nil { + if err := c.jobManager.AddJob(updateCacheMemoryJob); err != nil { + return fmt.Errorf("failed to add update_cache_memory job: %w", err) + } + updateCacheMemoryJobName = updateCacheMemoryJob.Name + compilerJobsLog.Printf("Successfully 
added update_cache_memory job: %s", updateCacheMemoryJobName) + } + } + } + + // Update conclusion job to depend on update_cache_memory if it exists + if updateCacheMemoryJobName != "" { + if conclusionJob, exists := c.jobManager.GetJob("conclusion"); exists { + conclusionJob.Needs = append(conclusionJob.Needs, updateCacheMemoryJobName) + compilerJobsLog.Printf("Added update_cache_memory dependency to conclusion job") + } + } + compilerJobsLog.Print("Successfully built all jobs for workflow") return nil } diff --git a/pkg/workflow/data/action_pins.json b/pkg/workflow/data/action_pins.json index 13156845b2..ad52ade1a1 100644 --- a/pkg/workflow/data/action_pins.json +++ b/pkg/workflow/data/action_pins.json @@ -10,6 +10,11 @@ "version": "v4", "sha": "0057852bfaa89a56745cba8c7296529d2fc39830" }, + "actions/cache/save@v4": { + "repo": "actions/cache/save", + "version": "v4", + "sha": "0057852bfaa89a56745cba8c7296529d2fc39830" + }, "actions/cache@v4": { "repo": "actions/cache", "version": "v4", diff --git a/pkg/workflow/js.go b/pkg/workflow/js.go index 33a54b54fd..49616c4e73 100644 --- a/pkg/workflow/js.go +++ b/pkg/workflow/js.go @@ -204,6 +204,9 @@ var temporaryIdScript string //go:embed js/update_runner.cjs var updateRunnerScript string +//go:embed js/update_pr_description_helpers.cjs +var updatePRDescriptionHelpersScript string + //go:embed js/read_buffer.cjs var readBufferScript string @@ -286,6 +289,7 @@ func GetJavaScriptSources() map[string]string { "get_base_branch.cjs": getBaseBranchScript, "generate_git_patch.cjs": generateGitPatchJSScript, "update_runner.cjs": updateRunnerScript, + "update_pr_description_helpers.cjs": updatePRDescriptionHelpersScript, "read_buffer.cjs": readBufferScript, "mcp_server_core.cjs": mcpServerCoreScript, "mcp_server.cjs": mcpServerScriptSource,
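One detail of `buildUpdateCacheMemoryJob` worth calling out: save keys are normalized so every run writes a fresh cache entry. A runnable distillation of that rule (the helper name `normalizeCacheKey` is mine; the suffix logic is copied from the hunk above):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeCacheKey appends -${{ github.run_id }} to a configured cache key
// unless it is already run-scoped, mirroring the suffix logic in
// buildUpdateCacheMemoryJob.
func normalizeCacheKey(key string) string {
	const runIdSuffix = "-${{ github.run_id }}"
	if !strings.HasSuffix(key, runIdSuffix) {
		key += runIdSuffix
	}
	return key
}

func main() {
	fmt.Println(normalizeCacheKey("memory-session"))
	// memory-session-${{ github.run_id }}
	fmt.Println(normalizeCacheKey("memory-${{ github.workflow }}-${{ github.run_id }}"))
	// already run-scoped, unchanged
}
```

This is the behavior the new test asserts when it expects `key: memory-default-${{ github.run_id }}` for a configured key of `memory-default`. Since GitHub Actions cache entries are immutable (a key can only be written once), run-scoped keys guarantee the `actions/cache/save` step never collides with an existing entry.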