diff --git a/.github/workflows/agentics-maintenance.yml b/.github/workflows/agentics-maintenance.yml
index 378b52c38fa..8c142f3ab1a 100644
--- a/.github/workflows/agentics-maintenance.yml
+++ b/.github/workflows/agentics-maintenance.yml
@@ -50,6 +50,7 @@ on:
- 'upgrade'
- 'safe_outputs'
- 'create_labels'
+ - 'clean_cache_memories'
- 'validate'
run_url:
description: 'Run URL or run ID to replay safe outputs from (e.g. https://github.com/owner/repo/actions/runs/12345 or 12345). Required when operation is safe_outputs.'
@@ -107,8 +108,35 @@ jobs:
const { main } = require('${{ runner.temp }}/gh-aw/actions/close_expired_pull_requests.cjs');
await main();
+ cleanup-cache-memory:
+ if: ${{ (!(github.event.repository.fork)) && (github.event_name != 'workflow_dispatch' || github.event.inputs.operation == '' || github.event.inputs.operation == 'clean_cache_memories') }}
+ runs-on: ubuntu-slim
+ permissions:
+ actions: write
+ steps:
+ - name: Checkout actions folder
+ uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+ with:
+ sparse-checkout: |
+ actions
+ persist-credentials: false
+
+ - name: Setup Scripts
+ uses: ./actions/setup
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+
+ - name: Cleanup outdated cache-memory entries
+ uses: actions/github-script@373c709c69115d41ff229c7e5df9f8788daa9553 # v9
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/cleanup_cache_memory.cjs');
+ await main();
+
run_operation:
- if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.operation != '' && github.event.inputs.operation != 'safe_outputs' && github.event.inputs.operation != 'create_labels' && github.event.inputs.operation != 'validate' && !github.event.repository.fork }}
+ if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.operation != '' && github.event.inputs.operation != 'safe_outputs' && github.event.inputs.operation != 'create_labels' && github.event.inputs.operation != 'clean_cache_memories' && github.event.inputs.operation != 'validate' && (!(github.event.repository.fork)) }}
runs-on: ubuntu-slim
permissions:
actions: write
diff --git a/actions/setup/js/cleanup_cache_memory.cjs b/actions/setup/js/cleanup_cache_memory.cjs
new file mode 100644
index 00000000000..0872161b2ce
--- /dev/null
+++ b/actions/setup/js/cleanup_cache_memory.cjs
@@ -0,0 +1,310 @@
+// @ts-check
+///
+
+const { getErrorMessage } = require("./error_helpers.cjs");
+const { delay } = require("./expired_entity_cleanup_helpers.cjs");
+const { checkRateLimit, MIN_RATE_LIMIT_REMAINING } = require("./rate_limit_helpers.cjs");
+const { fetchAndLogRateLimit } = require("./github_rate_limit_logger.cjs");
+
+/**
+ * Default delay in ms between delete operations to avoid throttling.
+ */
+const DELETE_DELAY_MS = 250;
+
+/**
+ * Default delay in ms between list pages to avoid throttling.
+ */
+const LIST_DELAY_MS = 100;
+
+/**
+ * Maximum number of pages to fetch when listing caches.
+ * At 100 caches per page this allows up to 5000 caches.
+ */
+const MAX_LIST_PAGES = 50;
+
+/**
+ * Parse a cache key to extract the run ID and group key in a single pass.
+ * Cache keys follow the pattern: memory-{parts}-{runID}
+ * where runID is the last purely numeric segment.
+ *
+ * @param {string} key - Cache key string
+ * @returns {{ runId: number | null, groupKey: string }}
+ */
+function parseCacheKey(key) {
+  const segments = key.split("-");
+  // Scan from the right: the last all-digit segment is the run ID.
+  for (let idx = segments.length - 1; idx >= 0; idx--) {
+    const segment = segments[idx];
+    if (/^\d+$/.test(segment)) {
+      const groupKey = segments.slice(0, idx).join("-");
+      return { runId: Number.parseInt(segment, 10), groupKey };
+    }
+  }
+  return { runId: null, groupKey: key };
+}
+
+/**
+ * @typedef {Object} CacheEntry
+ * @property {number} id - Cache ID for deletion
+ * @property {string} key - Full cache key
+ * @property {number | null} runId - Extracted run ID
+ * @property {string} groupKey - Group key (key without run ID)
+ */
+
+/**
+ * List all caches starting with "memory-" prefix, handling pagination.
+ * Results are sorted newest-first by last_accessed_at from the API.
+ *
+ * @param {any} github - GitHub REST client
+ * @param {string} owner - Repository owner
+ * @param {string} repo - Repository name
+ * @param {number} [listDelayMs] - Delay between list pages in ms
+ * @returns {Promise<CacheEntry[]>} List of cache entries
+ */
+async function listMemoryCaches(github, owner, repo, listDelayMs = LIST_DELAY_MS) {
+  /** @type {CacheEntry[]} */
+  const caches = [];
+  let page = 1;
+  const perPage = 100;
+
+  while (page <= MAX_LIST_PAGES) {
+    core.info(`  Fetching cache list page ${page}...`);
+    const response = await github.rest.actions.getActionsCacheList({
+      owner,
+      repo,
+      key: "memory-",
+      per_page: perPage,
+      page,
+      sort: "last_accessed_at",
+      direction: "desc",
+    });
+
+    const actionsCaches = response.data.actions_caches;
+    if (!actionsCaches || actionsCaches.length === 0) {
+      break;
+    }
+
+    for (const cache of actionsCaches) {
+      if (!cache.key || !cache.key.startsWith("memory-")) {
+        continue;
+      }
+      const { runId, groupKey } = parseCacheKey(cache.key);
+      caches.push({ id: cache.id, key: cache.key, runId, groupKey });
+    }
+
+    core.info(`  Page ${page}: ${actionsCaches.length} cache(s) fetched (${caches.length} total)`);
+
+    if (actionsCaches.length < perPage) {
+      break;
+    }
+
+    page++;
+    // Throttle between list pages
+    await delay(listDelayMs);
+  }
+
+  if (page > MAX_LIST_PAGES) {
+    core.warning(`⚠️ Reached maximum page limit (${MAX_LIST_PAGES}). Some caches may not have been listed.`);
+  }
+
+  return caches;
+}
+
+/**
+ * Group caches by their group key (everything except run ID),
+ * then for each group keep only the entry with the highest run ID
+ * and return the rest for deletion.
+ *
+ * @param {CacheEntry[]} caches - List of cache entries
+ * @returns {{ toDelete: CacheEntry[], kept: CacheEntry[] }}
+ */
+function identifyCachesToDelete(caches) {
+  /** @type {Map<string, CacheEntry[]>} */
+  const groups = new Map();
+
+  for (const cache of caches) {
+    if (cache.runId === null) {
+      // Skip caches without a recognizable run ID
+      continue;
+    }
+    const group = groups.get(cache.groupKey) || [];
+    group.push(cache);
+    groups.set(cache.groupKey, group);
+  }
+
+  /** @type {CacheEntry[]} */
+  const toDelete = [];
+  /** @type {CacheEntry[]} */
+  const kept = [];
+
+  for (const [, group] of groups) {
+    if (group.length <= 1) {
+      // Only one entry in this group, nothing to clean up
+      if (group.length === 1) {
+        kept.push(group[0]);
+      }
+      continue;
+    }
+
+    // Sort by run ID descending (highest first = latest)
+    group.sort((a, b) => (b.runId ?? 0) - (a.runId ?? 0));
+
+    // Keep the first (latest), mark the rest for deletion
+    kept.push(group[0]);
+    toDelete.push(...group.slice(1));
+  }
+
+  return { toDelete, kept };
+}
+
+/**
+ * Main entry point: cleanup outdated cache-memory caches.
+ *
+ * Lists all caches with "memory-" prefix, groups them by key prefix,
+ * keeps the latest run ID per group, and deletes the rest.
+ * Includes timeouts to avoid GitHub API throttling and skips
+ * if rate limiting is too high.
+ *
+ * @param {Object} [options] - Optional configuration for testing
+ * @param {number} [options.deleteDelayMs] - Delay between deletions (default: DELETE_DELAY_MS)
+ * @param {number} [options.listDelayMs] - Delay between list pages (default: LIST_DELAY_MS)
+ */
+async function main(options = {}) {
+  const deleteDelayMs = options.deleteDelayMs ?? DELETE_DELAY_MS;
+  const listDelayMs = options.listDelayMs ?? LIST_DELAY_MS;
+
+  const owner = context.repo.owner;
+  const repo = context.repo.repo;
+
+  core.info("🧹 Starting cache-memory cleanup");
+  core.info(`  Repository: ${owner}/${repo}`);
+
+  // Log initial rate limit snapshot for observability
+  await fetchAndLogRateLimit(github, "cleanup_cache_memory_start");
+
+  // Check rate limit before starting
+  const { ok: rateLimitOk, remaining: initialRemaining } = await checkRateLimit(github, "cleanup_cache_memory_initial");
+  if (!rateLimitOk) {
+    core.warning(`⚠️ Rate limit too low (${initialRemaining} remaining, minimum: ${MIN_RATE_LIMIT_REMAINING}). Skipping cache cleanup.`);
+    core.summary.addRaw(`## Cache Memory Cleanup\n\n⚠️ Skipped: Rate limit too low (${initialRemaining} remaining, minimum required: ${MIN_RATE_LIMIT_REMAINING})\n`);
+    await core.summary.write();
+    return;
+  }
+
+  core.info(`  Rate limit remaining: ${initialRemaining === -1 ? "unknown" : initialRemaining}`);
+
+  // List all memory caches
+  core.info("🔍 Listing caches with 'memory-' prefix...");
+  let caches;
+  try {
+    caches = await listMemoryCaches(github, owner, repo, listDelayMs);
+  } catch (error) {
+    core.error(`❌ Failed to list caches: ${getErrorMessage(error)}`);
+    core.summary.addRaw(`## Cache Memory Cleanup\n\n❌ Failed to list caches: ${getErrorMessage(error)}\n`);
+    await core.summary.write();
+    return;
+  }
+
+  core.info(`  Found ${caches.length} cache(s) with 'memory-' prefix`);
+
+  if (caches.length === 0) {
+    core.info("✅ No memory caches found. Nothing to clean up.");
+    core.summary.addRaw("## Cache Memory Cleanup\n\n✅ No memory caches found. Nothing to clean up.\n");
+    await core.summary.write();
+    return;
+  }
+
+  // Identify which caches to delete
+  const { toDelete, kept } = identifyCachesToDelete(caches);
+
+  core.info(`  Groups with latest entries kept: ${kept.length}`);
+  for (const entry of kept) {
+    core.info(`    ✓ Keeping: ${entry.key} (run ID: ${entry.runId})`);
+  }
+  core.info(`  Outdated entries to delete: ${toDelete.length}`);
+
+  if (toDelete.length === 0) {
+    core.info("✅ No outdated caches to clean up. All entries are current.");
+    core.summary.addRaw(`## Cache Memory Cleanup\n\n✅ No outdated caches to clean up.\n- Total memory caches: ${caches.length}\n- Groups: ${kept.length}\n`);
+    await core.summary.write();
+    return;
+  }
+
+  // Delete outdated caches with throttling
+  core.info(`🗑️ Deleting ${toDelete.length} outdated cache(s)...`);
+  let deletedCount = 0;
+  let failedCount = 0;
+  /** @type {string[]} */
+  const errors = [];
+
+  for (const cache of toDelete) {
+    // Check rate limit periodically (every 10 deletions)
+    if (deletedCount > 0 && deletedCount % 10 === 0) {
+      const { ok, remaining } = await checkRateLimit(github, "cleanup_cache_memory_periodic");
+      if (!ok) {
+        core.warning(`⚠️ Rate limit getting low (${remaining} remaining). Stopping deletion early.`);
+        core.warning(`  Deleted ${deletedCount} of ${toDelete.length} caches before stopping.`);
+        break;
+      }
+      core.info(`  Rate limit check: ${remaining} remaining`);
+    }
+
+    try {
+      await github.rest.actions.deleteActionsCacheById({
+        owner,
+        repo,
+        cache_id: cache.id,
+      });
+      deletedCount++;
+      core.info(`  ✓ Deleted cache: ${cache.key} (run ID: ${cache.runId})`);
+    } catch (error) {
+      failedCount++;
+      const msg = `Failed to delete cache ${cache.key}: ${getErrorMessage(error)}`;
+      errors.push(msg);
+      core.warning(`  ✗ ${msg}`);
+    }
+
+    // Throttle between deletions
+    await delay(deleteDelayMs);
+  }
+
+  // Log final rate limit snapshot for observability
+  await fetchAndLogRateLimit(github, "cleanup_cache_memory_end");
+
+  // Summary
+  core.info(`\n📊 Cache cleanup complete:`);
+  core.info(`  Total memory caches found: ${caches.length}`);
+  core.info(`  Groups (latest kept): ${kept.length}`);
+  core.info(`  Outdated deleted: ${deletedCount}`);
+  if (failedCount > 0) {
+    core.info(`  Failed to delete: ${failedCount}`);
+  }
+
+  // Write job summary
+  let summary = `## Cache Memory Cleanup\n\n`;
+  summary += `| Metric | Count |\n|--------|-------|\n`;
+  summary += `| Total memory caches | ${caches.length} |\n`;
+  summary += `| Groups (latest kept) | ${kept.length} |\n`;
+  summary += `| Outdated deleted | ${deletedCount} |\n`;
+  if (failedCount > 0) {
+    summary += `| Failed to delete | ${failedCount} |\n`;
+  }
+  if (errors.length > 0) {
+    summary += `\n### Errors\n\n`;
+    for (const err of errors) {
+      summary += `- ${err}\n`;
+    }
+  }
+  core.summary.addRaw(summary);
+  await core.summary.write();
+
+  core.info("✅ Cache memory cleanup finished");
+}
+
+module.exports = {
+ main,
+ parseCacheKey,
+ identifyCachesToDelete,
+ listMemoryCaches,
+ MAX_LIST_PAGES,
+};
diff --git a/actions/setup/js/cleanup_cache_memory.test.cjs b/actions/setup/js/cleanup_cache_memory.test.cjs
new file mode 100644
index 00000000000..1c1c17a4a3e
--- /dev/null
+++ b/actions/setup/js/cleanup_cache_memory.test.cjs
@@ -0,0 +1,442 @@
+// @ts-check
+import { describe, it, expect, beforeEach, vi } from "vitest";
+
+// Mock core and context globals
+const mockCore = {
+ info: vi.fn(),
+ warning: vi.fn(),
+ error: vi.fn(),
+ summary: {
+ addRaw: vi.fn().mockReturnThis(),
+ write: vi.fn(),
+ },
+ setOutput: vi.fn(),
+};
+
+const mockContext = {
+ repo: {
+ owner: "testowner",
+ repo: "testrepo",
+ },
+};
+
+global.core = mockCore;
+global.context = mockContext;
+
+describe("cleanup_cache_memory", () => {
+ let mockGithub;
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+ mockGithub = {
+ rest: {
+ actions: {
+ getActionsCacheList: vi.fn(),
+ deleteActionsCacheById: vi.fn(),
+ },
+ rateLimit: {
+ get: vi.fn().mockResolvedValue({
+ data: {
+ rate: { remaining: 5000, limit: 5000, used: 0 },
+ resources: {},
+ },
+ }),
+ },
+ },
+ };
+ global.github = mockGithub;
+ });
+
+ describe("parseCacheKey", () => {
+ it("should extract run ID and group key from standard cache key", async () => {
+ const { parseCacheKey } = await import("./cleanup_cache_memory.cjs");
+ const result = parseCacheKey("memory-none-nopolicy-workflow-12345");
+ expect(result.runId).toBe(12345);
+ expect(result.groupKey).toBe("memory-none-nopolicy-workflow");
+ });
+
+ it("should extract run ID and group key from integrity-aware cache key", async () => {
+ const { parseCacheKey } = await import("./cleanup_cache_memory.cjs");
+ const result = parseCacheKey("memory-unapproved-7e4d9f12-session-workflow-67890");
+ expect(result.runId).toBe(67890);
+ expect(result.groupKey).toBe("memory-unapproved-7e4d9f12-session-workflow");
+ });
+
+ it("should return null runId when no numeric segment exists", async () => {
+ const { parseCacheKey } = await import("./cleanup_cache_memory.cjs");
+ const result = parseCacheKey("memory-none-nopolicy-workflow");
+ expect(result.runId).toBeNull();
+ expect(result.groupKey).toBe("memory-none-nopolicy-workflow");
+ });
+
+ it("should handle cache key with only run ID as numeric part", async () => {
+ const { parseCacheKey } = await import("./cleanup_cache_memory.cjs");
+ const result = parseCacheKey("memory-abc-def-99999");
+ expect(result.runId).toBe(99999);
+ expect(result.groupKey).toBe("memory-abc-def");
+ });
+ });
+
+ describe("identifyCachesToDelete", () => {
+ it("should keep latest run ID and mark older ones for deletion", async () => {
+ const { identifyCachesToDelete } = await import("./cleanup_cache_memory.cjs");
+
+ const caches = [
+ { id: 1, key: "memory-none-nopolicy-workflow-100", runId: 100, groupKey: "memory-none-nopolicy-workflow" },
+ { id: 2, key: "memory-none-nopolicy-workflow-200", runId: 200, groupKey: "memory-none-nopolicy-workflow" },
+ { id: 3, key: "memory-none-nopolicy-workflow-150", runId: 150, groupKey: "memory-none-nopolicy-workflow" },
+ ];
+
+ const { toDelete, kept } = identifyCachesToDelete(caches);
+
+ expect(kept).toHaveLength(1);
+ expect(kept[0].runId).toBe(200);
+ expect(toDelete).toHaveLength(2);
+ expect(toDelete.map(c => c.runId).sort((a, b) => a - b)).toEqual([100, 150]);
+ });
+
+ it("should handle multiple groups independently", async () => {
+ const { identifyCachesToDelete } = await import("./cleanup_cache_memory.cjs");
+
+ const caches = [
+ { id: 1, key: "memory-none-nopolicy-wf1-100", runId: 100, groupKey: "memory-none-nopolicy-wf1" },
+ { id: 2, key: "memory-none-nopolicy-wf1-200", runId: 200, groupKey: "memory-none-nopolicy-wf1" },
+ { id: 3, key: "memory-none-nopolicy-wf2-50", runId: 50, groupKey: "memory-none-nopolicy-wf2" },
+ { id: 4, key: "memory-none-nopolicy-wf2-75", runId: 75, groupKey: "memory-none-nopolicy-wf2" },
+ ];
+
+ const { toDelete, kept } = identifyCachesToDelete(caches);
+
+ expect(kept).toHaveLength(2);
+ expect(kept.map(c => c.runId).sort((a, b) => a - b)).toEqual([75, 200]);
+ expect(toDelete).toHaveLength(2);
+ expect(toDelete.map(c => c.runId).sort((a, b) => a - b)).toEqual([50, 100]);
+ });
+
+ it("should not delete when only one entry per group", async () => {
+ const { identifyCachesToDelete } = await import("./cleanup_cache_memory.cjs");
+
+ const caches = [
+ { id: 1, key: "memory-none-nopolicy-wf1-100", runId: 100, groupKey: "memory-none-nopolicy-wf1" },
+ { id: 2, key: "memory-none-nopolicy-wf2-200", runId: 200, groupKey: "memory-none-nopolicy-wf2" },
+ ];
+
+ const { toDelete, kept } = identifyCachesToDelete(caches);
+
+ expect(kept).toHaveLength(2);
+ expect(toDelete).toHaveLength(0);
+ });
+
+ it("should skip entries with null run ID", async () => {
+ const { identifyCachesToDelete } = await import("./cleanup_cache_memory.cjs");
+
+ const caches = [
+ { id: 1, key: "memory-none-nopolicy-workflow", runId: null, groupKey: "memory-none-nopolicy-workflow" },
+ { id: 2, key: "memory-none-nopolicy-wf-100", runId: 100, groupKey: "memory-none-nopolicy-wf" },
+ ];
+
+ const { toDelete, kept } = identifyCachesToDelete(caches);
+
+ expect(kept).toHaveLength(1);
+ expect(kept[0].runId).toBe(100);
+ expect(toDelete).toHaveLength(0);
+ });
+
+ it("should handle empty input", async () => {
+ const { identifyCachesToDelete } = await import("./cleanup_cache_memory.cjs");
+
+ const { toDelete, kept } = identifyCachesToDelete([]);
+
+ expect(kept).toHaveLength(0);
+ expect(toDelete).toHaveLength(0);
+ });
+ });
+
+ describe("main - no caches found", () => {
+ it("should handle case when no memory caches exist", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: {
+ total_count: 0,
+ actions_caches: [],
+ },
+ });
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ expect(mockCore.info).toHaveBeenCalledWith(expect.stringContaining("No memory caches found"));
+ expect(mockCore.summary.addRaw).toHaveBeenCalledWith(expect.stringContaining("No memory caches found"));
+ expect(mockCore.summary.write).toHaveBeenCalled();
+ });
+ });
+
+ describe("main - rate limit too low", () => {
+ it("should skip cleanup when rate limit is below threshold", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.rateLimit.get.mockResolvedValue({
+ data: {
+ rate: { remaining: 50, limit: 5000, used: 4950 },
+ resources: {},
+ },
+ });
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ expect(mockCore.warning).toHaveBeenCalledWith(expect.stringContaining("Rate limit too low"));
+ expect(mockGithub.rest.actions.getActionsCacheList).not.toHaveBeenCalled();
+ expect(mockCore.summary.addRaw).toHaveBeenCalledWith(expect.stringContaining("Skipped: Rate limit too low"));
+ expect(mockCore.summary.write).toHaveBeenCalled();
+ });
+ });
+
+ describe("main - deletes outdated caches", () => {
+ it("should delete older caches and keep latest per group", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: {
+ total_count: 3,
+ actions_caches: [
+ { id: 1, key: "memory-none-nopolicy-workflow-100" },
+ { id: 2, key: "memory-none-nopolicy-workflow-200" },
+ { id: 3, key: "memory-none-nopolicy-workflow-150" },
+ ],
+ },
+ });
+
+ mockGithub.rest.actions.deleteActionsCacheById.mockResolvedValue({});
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ // Should have deleted 2 caches (run IDs 100 and 150)
+ expect(mockGithub.rest.actions.deleteActionsCacheById).toHaveBeenCalledTimes(2);
+ expect(mockGithub.rest.actions.deleteActionsCacheById).toHaveBeenCalledWith(expect.objectContaining({ owner: "testowner", repo: "testrepo" }));
+
+ expect(mockCore.info).toHaveBeenCalledWith(expect.stringContaining("Cache memory cleanup finished"));
+ expect(mockCore.summary.write).toHaveBeenCalled();
+ });
+ });
+
+ describe("main - all caches are current", () => {
+ it("should not delete when each group has only one entry", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: {
+ total_count: 2,
+ actions_caches: [
+ { id: 1, key: "memory-none-nopolicy-wf1-100" },
+ { id: 2, key: "memory-none-nopolicy-wf2-200" },
+ ],
+ },
+ });
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ expect(mockGithub.rest.actions.deleteActionsCacheById).not.toHaveBeenCalled();
+ expect(mockCore.info).toHaveBeenCalledWith(expect.stringContaining("No outdated caches to clean up"));
+ });
+ });
+
+ describe("main - handles delete errors gracefully", () => {
+ it("should continue deleting after individual failures", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: {
+ total_count: 3,
+ actions_caches: [
+ { id: 1, key: "memory-none-nopolicy-wf-100" },
+ { id: 2, key: "memory-none-nopolicy-wf-200" },
+ { id: 3, key: "memory-none-nopolicy-wf-300" },
+ ],
+ },
+ });
+
+ // First delete fails, second succeeds
+ mockGithub.rest.actions.deleteActionsCacheById.mockRejectedValueOnce(new Error("API error")).mockResolvedValueOnce({});
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ // Should have attempted to delete 2 caches (IDs 100 and 200, keeping 300)
+ expect(mockGithub.rest.actions.deleteActionsCacheById).toHaveBeenCalledTimes(2);
+ expect(mockCore.warning).toHaveBeenCalledWith(expect.stringContaining("Failed to delete cache"));
+ expect(mockCore.summary.addRaw).toHaveBeenCalledWith(expect.stringContaining("Errors"));
+ });
+ });
+
+ describe("main - handles list error", () => {
+ it("should handle errors when listing caches", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockRejectedValueOnce(new Error("API error"));
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ expect(mockCore.error).toHaveBeenCalledWith(expect.stringContaining("Failed to list caches"));
+ expect(mockCore.summary.write).toHaveBeenCalled();
+ });
+ });
+
+ describe("main - pagination", () => {
+ it("should handle paginated cache list results", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ // First page - full page of 100 items
+ const page1Caches = [];
+ for (let i = 0; i < 100; i++) {
+ page1Caches.push({ id: i + 1, key: `memory-none-nopolicy-wf-${1000 + i}` });
+ }
+
+ // Second page - partial page
+ const page2Caches = [{ id: 101, key: "memory-none-nopolicy-wf-2000" }];
+
+ mockGithub.rest.actions.getActionsCacheList
+ .mockResolvedValueOnce({
+ data: { total_count: 101, actions_caches: page1Caches },
+ })
+ .mockResolvedValueOnce({
+ data: { total_count: 101, actions_caches: page2Caches },
+ });
+
+ mockGithub.rest.actions.deleteActionsCacheById.mockResolvedValue({});
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ // Should have fetched 2 pages
+ expect(mockGithub.rest.actions.getActionsCacheList).toHaveBeenCalledTimes(2);
+
+ // Should delete 100 caches (keep only run 2000, delete 1000-1099)
+ expect(mockGithub.rest.actions.deleteActionsCacheById).toHaveBeenCalledTimes(100);
+ });
+ });
+
+ describe("main - rate limit stops early", () => {
+ it("should stop deleting when rate limit drops below threshold", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ // Create 15 caches in same group (need >10 to trigger the rate limit check)
+ const caches = [];
+ for (let i = 0; i < 15; i++) {
+ caches.push({ id: i + 1, key: `memory-none-nopolicy-wf-${100 + i}` });
+ }
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: { total_count: 15, actions_caches: caches },
+ });
+
+ // Rate limit calls:
+ // 1. fetchAndLogRateLimit at start of main โ rateLimit.get
+ // 2-3. checkRateLimit (initial) โ fetchAndLogRateLimit + rateLimit.get
+ // ... 10 deletions ...
+ // 4-5. checkRateLimit (periodic) โ fetchAndLogRateLimit + rateLimit.get
+ // We want call 4 or 5 to return low rate limit to trigger early stop
+ let callCount = 0;
+ mockGithub.rest.rateLimit.get.mockImplementation(() => {
+ callCount++;
+ // Return low rate limit starting from the periodic check (call 4+)
+ if (callCount >= 4) {
+ return Promise.resolve({
+ data: {
+ rate: { remaining: 50, limit: 5000, used: 4950 },
+ resources: {},
+ },
+ });
+ }
+ return Promise.resolve({
+ data: {
+ rate: { remaining: 5000, limit: 5000, used: 0 },
+ resources: {},
+ },
+ });
+ });
+
+ mockGithub.rest.actions.deleteActionsCacheById.mockResolvedValue({});
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ // Should have stopped after 10 deletions (checked rate limit, it was low)
+ expect(mockGithub.rest.actions.deleteActionsCacheById).toHaveBeenCalledTimes(10);
+ expect(mockCore.warning).toHaveBeenCalledWith(expect.stringContaining("Rate limit getting low"));
+ });
+ });
+
+ describe("main - logging", () => {
+ it("should log kept entries with their keys", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: {
+ total_count: 2,
+ actions_caches: [
+ { id: 1, key: "memory-none-nopolicy-wf-100" },
+ { id: 2, key: "memory-none-nopolicy-wf-200" },
+ ],
+ },
+ });
+
+ mockGithub.rest.actions.deleteActionsCacheById.mockResolvedValue({});
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ expect(mockCore.info).toHaveBeenCalledWith(expect.stringContaining("Keeping: memory-none-nopolicy-wf-200"));
+ });
+
+ it("should log repository info", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: { total_count: 0, actions_caches: [] },
+ });
+
+ await module.main({ deleteDelayMs: 0, listDelayMs: 0 });
+
+ expect(mockCore.info).toHaveBeenCalledWith(expect.stringContaining("Repository: testowner/testrepo"));
+ });
+ });
+
+ describe("listMemoryCaches - sort order", () => {
+ it("should request caches sorted by last_accessed_at descending", async () => {
+ const module = await import("./cleanup_cache_memory.cjs");
+
+ mockGithub.rest.actions.getActionsCacheList.mockResolvedValueOnce({
+ data: { total_count: 0, actions_caches: [] },
+ });
+
+ await module.listMemoryCaches(mockGithub, "testowner", "testrepo", 0);
+
+ expect(mockGithub.rest.actions.getActionsCacheList).toHaveBeenCalledWith(
+ expect.objectContaining({
+ sort: "last_accessed_at",
+ direction: "desc",
+ })
+ );
+ });
+ });
+
+ describe("listMemoryCaches - upper bound", () => {
+ it("should respect MAX_LIST_PAGES limit", async () => {
+ const { listMemoryCaches, MAX_LIST_PAGES } = await import("./cleanup_cache_memory.cjs");
+
+ // Return full pages forever
+ mockGithub.rest.actions.getActionsCacheList.mockImplementation(({ page }) => {
+ const caches = [];
+ for (let i = 0; i < 100; i++) {
+ caches.push({ id: page * 100 + i, key: `memory-none-nopolicy-wf-${page * 1000 + i}` });
+ }
+ return Promise.resolve({
+ data: { total_count: 10000, actions_caches: caches },
+ });
+ });
+
+ const result = await listMemoryCaches(mockGithub, "testowner", "testrepo", 0);
+
+ // Should stop at MAX_LIST_PAGES
+ expect(mockGithub.rest.actions.getActionsCacheList).toHaveBeenCalledTimes(MAX_LIST_PAGES);
+ expect(result.length).toBe(MAX_LIST_PAGES * 100);
+ expect(mockCore.warning).toHaveBeenCalledWith(expect.stringContaining("maximum page limit"));
+ });
+ });
+});
diff --git a/actions/setup/js/rate_limit_helpers.cjs b/actions/setup/js/rate_limit_helpers.cjs
new file mode 100644
index 00000000000..52c74e73723
--- /dev/null
+++ b/actions/setup/js/rate_limit_helpers.cjs
@@ -0,0 +1,51 @@
+// @ts-check
+///
+
+const { fetchAndLogRateLimit } = require("./github_rate_limit_logger.cjs");
+
+/**
+ * Minimum rate limit remaining before we skip further operations.
+ * This reserves capacity for other workflow jobs and API consumers.
+ */
+const MIN_RATE_LIMIT_REMAINING = 100;
+
+/**
+ * Check the current rate limit and determine if we should continue.
+ * Returns the remaining requests count, or -1 if we couldn't check.
+ * Also logs the rate limit snapshot for observability (the log plus the read cost two rateLimit API calls).
+ *
+ * @param {any} github - GitHub REST client
+ * @param {string} [operation="rate_limit_check"] - Label for the log entry
+ * @returns {Promise<number>} Remaining requests, or -1 on error
+ */
+async function getRateLimitRemaining(github, operation = "rate_limit_check") {
+  try {
+    await fetchAndLogRateLimit(github, operation);
+    const { data } = await github.rest.rateLimit.get();
+    return data.rate.remaining;
+  } catch {
+    return -1;
+  }
+}
+
+/**
+ * Check if the current rate limit is sufficient for operations.
+ * Returns ok:false when the remaining count is below MIN_RATE_LIMIT_REMAINING; callers are expected to log and stop.
+ *
+ * @param {any} github - GitHub REST client
+ * @param {string} [operation="rate_limit_check"] - Label for the log entry
+ * @returns {Promise<{ok: boolean, remaining: number}>}
+ */
+async function checkRateLimit(github, operation = "rate_limit_check") {
+  const remaining = await getRateLimitRemaining(github, operation);
+  if (remaining !== -1 && remaining < MIN_RATE_LIMIT_REMAINING) {
+    return { ok: false, remaining };
+  }
+  return { ok: true, remaining }; // remaining === -1 (check failed) deliberately falls through: fail open
+}
+
+module.exports = {
+ MIN_RATE_LIMIT_REMAINING,
+ getRateLimitRemaining,
+ checkRateLimit,
+};
diff --git a/actions/setup/js/rate_limit_helpers.test.cjs b/actions/setup/js/rate_limit_helpers.test.cjs
new file mode 100644
index 00000000000..61511e2c564
--- /dev/null
+++ b/actions/setup/js/rate_limit_helpers.test.cjs
@@ -0,0 +1,83 @@
+// @ts-check
+import { describe, it, expect, beforeEach, vi } from "vitest";
+
+// Mock core global (needed by github_rate_limit_logger.cjs)
+const mockCore = {
+ info: vi.fn(),
+ warning: vi.fn(),
+ error: vi.fn(),
+};
+
+global.core = mockCore;
+
+describe("rate_limit_helpers", () => {
+ let mockGithub;
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+ mockGithub = {
+ rest: {
+ rateLimit: {
+ get: vi.fn().mockResolvedValue({
+ data: {
+ rate: { remaining: 5000, limit: 5000, used: 0 },
+ resources: {},
+ },
+ }),
+ },
+ },
+ };
+ });
+
+ describe("getRateLimitRemaining", () => {
+ it("should return remaining rate limit", async () => {
+ const { getRateLimitRemaining } = await import("./rate_limit_helpers.cjs");
+ const remaining = await getRateLimitRemaining(mockGithub, "test");
+ expect(remaining).toBe(5000);
+ });
+
+ it("should return -1 on error", async () => {
+ const { getRateLimitRemaining } = await import("./rate_limit_helpers.cjs");
+ mockGithub.rest.rateLimit.get.mockRejectedValueOnce(new Error("API error")).mockRejectedValueOnce(new Error("API error"));
+ const remaining = await getRateLimitRemaining(mockGithub, "test");
+ expect(remaining).toBe(-1);
+ });
+ });
+
+ describe("checkRateLimit", () => {
+ it("should return ok when rate limit is sufficient", async () => {
+ const { checkRateLimit } = await import("./rate_limit_helpers.cjs");
+ const result = await checkRateLimit(mockGithub, "test");
+ expect(result.ok).toBe(true);
+ expect(result.remaining).toBe(5000);
+ });
+
+ it("should return not ok when rate limit is too low", async () => {
+ const { checkRateLimit } = await import("./rate_limit_helpers.cjs");
+ mockGithub.rest.rateLimit.get.mockResolvedValue({
+ data: {
+ rate: { remaining: 50, limit: 5000, used: 4950 },
+ resources: {},
+ },
+ });
+ const result = await checkRateLimit(mockGithub, "test");
+ expect(result.ok).toBe(false);
+ expect(result.remaining).toBe(50);
+ });
+
+ it("should return ok when rate limit check fails", async () => {
+ const { checkRateLimit } = await import("./rate_limit_helpers.cjs");
+ mockGithub.rest.rateLimit.get.mockRejectedValue(new Error("API error"));
+ const result = await checkRateLimit(mockGithub, "test");
+ expect(result.ok).toBe(true);
+ expect(result.remaining).toBe(-1);
+ });
+ });
+
+ describe("MIN_RATE_LIMIT_REMAINING", () => {
+ it("should be 100", async () => {
+ const { MIN_RATE_LIMIT_REMAINING } = await import("./rate_limit_helpers.cjs");
+ expect(MIN_RATE_LIMIT_REMAINING).toBe(100);
+ });
+ });
+});
diff --git a/pkg/workflow/maintenance_workflow.go b/pkg/workflow/maintenance_workflow.go
index 748fbdc012b..c7c2b1fb2ee 100644
--- a/pkg/workflow/maintenance_workflow.go
+++ b/pkg/workflow/maintenance_workflow.go
@@ -262,6 +262,7 @@ on:
- 'upgrade'
- 'safe_outputs'
- 'create_labels'
+ - 'clean_cache_memories'
- 'validate'
run_url:
description: 'Run URL or run ID to replay safe outputs from (e.g. https://github.com/owner/repo/actions/runs/12345 or 12345). Required when operation is safe_outputs.'
@@ -343,10 +344,49 @@ jobs:
await main();
`)
- // Add unified run_operation job for all dispatch operations except safe_outputs, create_labels, and validate
+ // Add cleanup-cache-memory job for scheduled runs and clean_cache_memories operation
+ // This job lists all caches starting with "memory-", groups them by key prefix,
+ // keeps the latest run ID per group, and deletes the rest.
+ cleanupCacheCondition := buildNotForkAndScheduledOrOperation("clean_cache_memories")
+ yaml.WriteString(`
+ cleanup-cache-memory:
+ if: ${{ ` + RenderCondition(cleanupCacheCondition) + ` }}
+ runs-on: ` + runsOnValue + `
+ permissions:
+ actions: write
+ steps:
+`)
+
+ // Add checkout step only in dev/script mode (for local action paths)
+ if actionMode == ActionModeDev || actionMode == ActionModeScript {
+ yaml.WriteString(" - name: Checkout actions folder\n")
+ yaml.WriteString(" uses: " + GetActionPin("actions/checkout") + "\n")
+ yaml.WriteString(" with:\n")
+ yaml.WriteString(" sparse-checkout: |\n")
+ yaml.WriteString(" actions\n")
+ yaml.WriteString(" persist-credentials: false\n\n")
+ }
+
+ yaml.WriteString(` - name: Setup Scripts
+ uses: ` + setupActionRef + `
+ with:
+ destination: ${{ runner.temp }}/gh-aw/actions
+
+ - name: Cleanup outdated cache-memory entries
+ uses: ` + GetActionPin("actions/github-script") + `
+ with:
+ script: |
+ const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
+ setupGlobals(core, github, context, exec, io, getOctokit);
+ const { main } = require('${{ runner.temp }}/gh-aw/actions/cleanup_cache_memory.cjs');
+ await main();
+`)
+
+ // Add unified run_operation job for all dispatch operations except those with dedicated jobs (safe_outputs, create_labels, clean_cache_memories, validate)
+ runOperationCondition := buildRunOperationCondition("safe_outputs", "create_labels", "clean_cache_memories", "validate")
yaml.WriteString(`
run_operation:
- if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.operation != '' && github.event.inputs.operation != 'safe_outputs' && github.event.inputs.operation != 'create_labels' && github.event.inputs.operation != 'validate' && !github.event.repository.fork }}
+ if: ${{ ` + RenderCondition(runOperationCondition) + ` }}
runs-on: ` + runsOnValue + `
permissions:
actions: write
@@ -672,3 +712,69 @@ jobs:
maintenanceLog.Print("Maintenance workflow generated successfully")
return nil
}
+
+// buildNotForkCondition creates a condition to check the repository is not a fork.
+func buildNotForkCondition() ConditionNode {
+ return &NotNode{
+ Child: BuildPropertyAccess("github.event.repository.fork"),
+ }
+}
+
+// buildNotDispatchOrEmptyOperation creates a condition that is true when the event
+// is not a workflow_dispatch or the operation input is empty.
+func buildNotDispatchOrEmptyOperation() ConditionNode {
+ return BuildOr(
+ BuildNotEquals(
+ BuildPropertyAccess("github.event_name"),
+ BuildStringLiteral("workflow_dispatch"),
+ ),
+ BuildEquals(
+ BuildPropertyAccess("github.event.inputs.operation"),
+ BuildStringLiteral(""),
+ ),
+ )
+}
+
+// buildNotForkAndScheduledOrOperation creates a condition for jobs that run on
+// schedule (or with an empty operation) OR when the given operation is selected.
+// Condition: !fork && (not_dispatch || operation == '' || operation == op)
+func buildNotForkAndScheduledOrOperation(operation string) ConditionNode {
+ return BuildAnd(
+ buildNotForkCondition(),
+ BuildOr(
+ buildNotDispatchOrEmptyOperation(),
+ BuildEquals(
+ BuildPropertyAccess("github.event.inputs.operation"),
+ BuildStringLiteral(operation),
+ ),
+ ),
+ )
+}
+
+// buildRunOperationCondition creates the condition for the unified run_operation
+// job that handles all dispatch operations except the ones with dedicated jobs.
+// Condition: dispatch && operation != '' && operation != each excluded && !fork.
+func buildRunOperationCondition(excludedOperations ...string) ConditionNode {
+ // Start with: event is workflow_dispatch AND operation is not empty
+ condition := BuildAnd(
+ BuildEventTypeEquals("workflow_dispatch"),
+ BuildNotEquals(
+ BuildPropertyAccess("github.event.inputs.operation"),
+ BuildStringLiteral(""),
+ ),
+ )
+
+ // Exclude each dedicated operation
+ for _, op := range excludedOperations {
+ condition = BuildAnd(
+ condition,
+ BuildNotEquals(
+ BuildPropertyAccess("github.event.inputs.operation"),
+ BuildStringLiteral(op),
+ ),
+ )
+ }
+
+ // AND not a fork
+ return BuildAnd(condition, buildNotForkCondition())
+}
diff --git a/pkg/workflow/maintenance_workflow_test.go b/pkg/workflow/maintenance_workflow_test.go
index ad750f80d46..7348cb6bd38 100644
--- a/pkg/workflow/maintenance_workflow_test.go
+++ b/pkg/workflow/maintenance_workflow_test.go
@@ -281,15 +281,16 @@ func TestGenerateMaintenanceWorkflow_OperationJobConditions(t *testing.T) {
yaml := string(content)
operationSkipCondition := `github.event_name != 'workflow_dispatch' || github.event.inputs.operation == ''`
- operationRunCondition := `github.event_name == 'workflow_dispatch' && github.event.inputs.operation != '' && github.event.inputs.operation != 'safe_outputs' && github.event.inputs.operation != 'create_labels' && github.event.inputs.operation != 'validate'`
+ operationRunCondition := `github.event_name == 'workflow_dispatch' && github.event.inputs.operation != '' && github.event.inputs.operation != 'safe_outputs' && github.event.inputs.operation != 'create_labels' && github.event.inputs.operation != 'clean_cache_memories' && github.event.inputs.operation != 'validate'`
applySafeOutputsCondition := `github.event_name == 'workflow_dispatch' && github.event.inputs.operation == 'safe_outputs'`
createLabelsCondition := `github.event_name == 'workflow_dispatch' && github.event.inputs.operation == 'create_labels'`
+ cleanCacheMemoriesCondition := `github.event_name != 'workflow_dispatch' || github.event.inputs.operation == '' || github.event.inputs.operation == 'clean_cache_memories'`
const jobSectionSearchRange = 300
- const runOpSectionSearchRange = 400
+ const runOpSectionSearchRange = 500
- // Jobs that should be disabled when operation is set
- disabledJobs := []string{"close-expired-entities:", "compile-workflows:", "zizmor-scan:", "secret-validation:"}
+ // Jobs that should be disabled when any non-dedicated operation is set (cleanup-cache-memory has its own dedicated operation)
+ disabledJobs := []string{"close-expired-entities:", "compile-workflows:", "secret-validation:", "zizmor-scan:"}
for _, job := range disabledJobs {
// Find the if: condition for each job
jobIdx := strings.Index(yaml, "\n "+job)
@@ -304,6 +305,17 @@ func TestGenerateMaintenanceWorkflow_OperationJobConditions(t *testing.T) {
}
}
+ // cleanup-cache-memory job should run on schedule, empty operation, or clean_cache_memories operation
+ cleanupCacheIdx := strings.Index(yaml, "\n cleanup-cache-memory:")
+ if cleanupCacheIdx == -1 {
+ t.Errorf("Job cleanup-cache-memory not found in generated workflow")
+ } else {
+ cleanupCacheSection := yaml[cleanupCacheIdx : cleanupCacheIdx+jobSectionSearchRange]
+ if !strings.Contains(cleanupCacheSection, cleanCacheMemoriesCondition) {
+ t.Errorf("Job cleanup-cache-memory should have the clean_cache_memories condition %q in:\n%s", cleanCacheMemoriesCondition, cleanupCacheSection)
+ }
+ }
+
// run_operation job should NOT have the skip condition but should have its own activation condition
// and should exclude safe_outputs
runOpIdx := strings.Index(yaml, "\n run_operation:")
@@ -363,6 +375,11 @@ func TestGenerateMaintenanceWorkflow_OperationJobConditions(t *testing.T) {
t.Error("workflow_dispatch operation choices should include 'safe_outputs'")
}
+ // Verify clean_cache_memories is an option in the operation choices
+ if !strings.Contains(yaml, "- 'clean_cache_memories'") {
+ t.Error("workflow_dispatch operation choices should include 'clean_cache_memories'")
+ }
+
// Verify validate is an option in the operation choices
if !strings.Contains(yaml, "- 'validate'") {
t.Error("workflow_dispatch operation choices should include 'validate'")