diff --git a/apps/bot/src/features/automation/system/executor.ts b/apps/bot/src/features/automation/system/executor.ts index 47c3aad..6857738 100644 --- a/apps/bot/src/features/automation/system/executor.ts +++ b/apps/bot/src/features/automation/system/executor.ts @@ -12,20 +12,28 @@ import type { StepConditionConfig, } from "@fluxcore/systems/actions/types"; -// --- Per-guild rate limiting --- +// --- Per-(guild, eventType) rate limiting --- +// +// Each event type gets its own 60/min bucket so a noisy event class +// (e.g. messageCreate) cannot starve quieter ones (e.g. memberJoin). const RATE_LIMIT_WINDOW_MS = 60_000; // 1 minute -const RATE_LIMIT_MAX_EXECUTIONS = 60; // max 60 action executions per guild per minute +const RATE_LIMIT_MAX_EXECUTIONS = 60; // max 60 action executions per (guild, eventType) per minute -const guildExecutionCounts = new Map(); +const eventBuckets = new Map(); -function checkRateLimit(guildId: string): boolean { +function bucketKey(guildId: string, eventType: string): string { + return `${guildId}\u0000${eventType}`; +} + +function checkRateLimit(guildId: string, eventType: string): boolean { + const key = bucketKey(guildId, eventType); const now = Date.now(); - let entry = guildExecutionCounts.get(guildId); + let entry = eventBuckets.get(key); if (!entry || now >= entry.resetAt) { entry = { count: 0, resetAt: now + RATE_LIMIT_WINDOW_MS }; - guildExecutionCounts.set(guildId, entry); + eventBuckets.set(key, entry); } if (entry.count >= RATE_LIMIT_MAX_EXECUTIONS) { @@ -173,7 +181,7 @@ async function executeSteps( switch (step.type) { case "action": { - if (!checkRateLimit(context.guildId)) { + if (!checkRateLimit(context.guildId, context.eventType)) { logger.warn(`Rate limit hit for guild ${context.guildId} during rule "${rule.name}"`); return; } @@ -271,7 +279,7 @@ export async function processEvent( // V1: linear actions if (!rule.actions?.length) continue; for (const actionConfig of rule.actions) { - if 
(!checkRateLimit(context.guildId)) { + if (!checkRateLimit(context.guildId, context.eventType)) { logger.warn(`Rate limit hit for guild ${context.guildId} during rule "${rule.name}"`); return; } diff --git a/apps/bot/tests/features/automation/system/executor-rate-limit.test.ts b/apps/bot/tests/features/automation/system/executor-rate-limit.test.ts new file mode 100644 index 0000000..299ca97 --- /dev/null +++ b/apps/bot/tests/features/automation/system/executor-rate-limit.test.ts @@ -0,0 +1,126 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { + token: "test-token", + clientId: "test-client-id", + guildId: undefined, + logLevel: "info", + }, +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +const { getRulesForEventMock, executorMock, logExecutionMock } = vi.hoisted( + () => ({ + getRulesForEventMock: vi.fn(), + executorMock: vi.fn().mockResolvedValue(undefined), + logExecutionMock: vi.fn().mockResolvedValue(undefined), + }), +); + +vi.mock("@fluxcore/systems/actions/cache", () => ({ + getRulesForEvent: (...args: unknown[]) => getRulesForEventMock(...args), +})); + +vi.mock("@fluxcore/systems/actions/config", () => ({ + getGuildSettingsOrDefault: () => ({ + globalEnabled: true, + maxRules: 25, + logChannelId: null, + }), +})); + +vi.mock("@fluxcore/systems/actions/persistence", () => ({ + logExecution: (...args: unknown[]) => logExecutionMock(...args), +})); + +vi.mock("../../../../src/features/automation/system/registry.js", () => ({ + getExecutor: () => executorMock, +})); + +import type { Client } from "discord.js"; + +const { processEvent } = await import( + "../../../../src/features/automation/system/executor.js" +); + +type TestEventContext = { + eventType: string; + guildId: string; + timestamp: string; +}; + +const fakeClient = {} as Client; + +function makeRule(eventType: string, id = 1) { + return { + id, + 
guildId: "g-rl", + name: `r-${eventType}-${id}`, + enabled: true, + eventType, + actions: [{ type: "addRole", roleId: "r" }], + conditions: {}, + priority: 0, + createdBy: "u", + }; +} + +function ctx(guildId: string, eventType: string): TestEventContext { + return { + eventType, + guildId, + timestamp: new Date().toISOString(), + }; +} + +describe("processEvent rate limiting", () => { + beforeEach(() => { + executorMock.mockClear(); + getRulesForEventMock.mockReset(); + logExecutionMock.mockClear(); + }); + + it("does not let one event type starve another", async () => { + const guildId = `guild-starve-${Date.now()}`; + getRulesForEventMock.mockImplementation((_g: string, ev: string) => [ + makeRule(ev), + ]); + + // Burn through the messageCreate budget + for (let i = 0; i < 80; i++) { + await processEvent(fakeClient, ctx(guildId, "messageCreated") as never); + } + + const messageCreateExecCount = executorMock.mock.calls.length; + expect(messageCreateExecCount).toBeLessThanOrEqual(60); + expect(messageCreateExecCount).toBeGreaterThan(0); + + // Now memberJoin must still be allowed: separate bucket + executorMock.mockClear(); + await processEvent(fakeClient, ctx(guildId, "memberJoin") as never); + + expect(executorMock).toHaveBeenCalledTimes(1); + }); + + it("enforces the per-(guild, eventType) cap independently per guild", async () => { + const a = `guild-a-${Date.now()}`; + const b = `guild-b-${Date.now()}`; + getRulesForEventMock.mockImplementation((_g: string, ev: string) => [ + makeRule(ev), + ]); + + for (let i = 0; i < 70; i++) { + await processEvent(fakeClient, ctx(a, "messageCreated")); + } + const aCount = executorMock.mock.calls.length; + expect(aCount).toBeLessThanOrEqual(60); + + executorMock.mockClear(); + await processEvent(fakeClient, ctx(b, "messageCreated")); + expect(executorMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/apps/dashboard/src/server/features/permissions/roles-routes.ts 
b/apps/dashboard/src/server/features/permissions/roles-routes.ts index 89d1101..33093b3 100644 --- a/apps/dashboard/src/server/features/permissions/roles-routes.ts +++ b/apps/dashboard/src/server/features/permissions/roles-routes.ts @@ -10,6 +10,7 @@ import { createDashboardAuditLog, invalidatePermissionCache, } from "../../shared/permissions.js"; +import { deleteDashboardRoleWithAudit } from "../../shared/dashboardRoleDelete.js"; const MAX_ROLES_PER_GUILD = 25; const MAX_PERMISSIONS_PER_ROLE = 100; @@ -303,8 +304,12 @@ export function registerDashboardRoleRoutes(app: FastifyInstance): void { return; } - await prisma.dashboardRole.delete({ where: { id: roleId } }); - invalidatePermissionCache(guildId); + await deleteDashboardRoleWithAudit({ + guildId, + roleId, + actorId: session.userId, + actorUsername: session.username, + }); await createDashboardAuditLog({ guildId, diff --git a/apps/dashboard/src/server/features/permissions/routes.ts b/apps/dashboard/src/server/features/permissions/routes.ts index 0ac2d25..288facc 100644 --- a/apps/dashboard/src/server/features/permissions/routes.ts +++ b/apps/dashboard/src/server/features/permissions/routes.ts @@ -326,7 +326,7 @@ export function registerDashboardPermissionRoutes(app: FastifyInstance): void { action: e.action, targetType: e.targetType, targetId: e.targetId, - details: JSON.parse(e.details), + details: e.details, createdAt: e.createdAt, })), total, diff --git a/apps/dashboard/src/server/shared/crypto.ts b/apps/dashboard/src/server/shared/crypto.ts index 60efc05..253dc71 100644 --- a/apps/dashboard/src/server/shared/crypto.ts +++ b/apps/dashboard/src/server/shared/crypto.ts @@ -42,3 +42,29 @@ export function decrypt(encoded: string): string { decipher.final(), ]).toString("utf8"); } + +const MIN_ENCRYPTED_BYTES = IV_LENGTH + AUTH_TAG_LENGTH + 1; + +/** + * Best-effort check whether a stored string was produced by `encrypt()`. + * Used to detect legacy plaintext rows during the encryption backfill. 
+ */ +export function isEncrypted(value: string): boolean { + if (!value) return false; + // Base64 alphabet check (allow padding) + if (!/^[A-Za-z0-9+/]+={0,2}$/.test(value)) return false; + let buf: Buffer; + try { + buf = Buffer.from(value, "base64"); + } catch { + return false; + } + if (buf.length < MIN_ENCRYPTED_BYTES) return false; + // Verify by attempting decryption with the active key + try { + decrypt(value); + return true; + } catch { + return false; + } +} diff --git a/apps/dashboard/src/server/shared/dashboardRoleDelete.ts b/apps/dashboard/src/server/shared/dashboardRoleDelete.ts new file mode 100644 index 0000000..3842dd9 --- /dev/null +++ b/apps/dashboard/src/server/shared/dashboardRoleDelete.ts @@ -0,0 +1,64 @@ +import { getPrisma } from "@fluxcore/database"; +import { logger } from "@fluxcore/utils"; +import { invalidatePermissionCache } from "./permissions.js"; + +export interface DeleteDashboardRoleArgs { + guildId: string; + roleId: string; + actorId: string; + actorUsername: string; +} + +/** + * Safely delete a DashboardRole: enumerate assignments, write an audit + * entry per unassignment, invalidate per-user permission caches, and + * only then delete the role itself. The schema's onDelete: Restrict + * guarantees that this is the ONLY safe path to remove a role. 
+ */ +export async function deleteDashboardRoleWithAudit( + args: DeleteDashboardRoleArgs, +): Promise { + const prisma = getPrisma(); + + const assignedUserIds: string[] = await prisma.$transaction(async (tx) => { + const assignments = await tx.dashboardRoleAssignment.findMany({ + where: { guildId: args.guildId, roleId: args.roleId }, + }); + + for (const a of assignments) { + await tx.dashboardRoleAssignment.delete({ where: { id: a.id } }); + await tx.dashboardAuditLog.create({ + data: { + guildId: args.guildId, + userId: args.actorId, + username: args.actorUsername, + action: "role.unassign", + targetType: "user", + targetId: a.userId, + details: { roleId: args.roleId, reason: "role-delete" }, + }, + }); + } + + await tx.dashboardRole.delete({ where: { id: args.roleId } }); + await tx.dashboardAuditLog.create({ + data: { + guildId: args.guildId, + userId: args.actorId, + username: args.actorUsername, + action: "role.delete", + targetType: "role", + targetId: args.roleId, + details: { unassignedUsers: assignments.map((a) => a.userId) }, + }, + }); + + return assignments.map((a) => a.userId); + }); + + // Invalidate cached permissions for every previously-assigned user + invalidatePermissionCache(args.guildId); + logger.info( + `Dashboard role ${args.roleId} deleted in guild ${args.guildId} by ${args.actorUsername} (${assignedUserIds.length} unassigned)`, + ); +} diff --git a/apps/dashboard/src/server/shared/permissions.ts b/apps/dashboard/src/server/shared/permissions.ts index 1c81a6a..4a6efa5 100644 --- a/apps/dashboard/src/server/shared/permissions.ts +++ b/apps/dashboard/src/server/shared/permissions.ts @@ -182,7 +182,7 @@ export async function createDashboardAuditLog( action: entry.action, targetType: entry.targetType ?? null, targetId: entry.targetId ?? null, - details: JSON.stringify(entry.details ?? {}), + details: (entry.details ?? 
{}) as object, }, }); } catch (err) { diff --git a/apps/dashboard/src/server/shared/session.ts b/apps/dashboard/src/server/shared/session.ts index a3c7246..6f93a10 100644 --- a/apps/dashboard/src/server/shared/session.ts +++ b/apps/dashboard/src/server/shared/session.ts @@ -1,10 +1,34 @@ -import { randomUUID } from "node:crypto"; +import { randomUUID, timingSafeEqual } from "node:crypto"; import type { FastifyReply } from "fastify"; import { getPrisma } from "@fluxcore/database"; -import { encrypt, decrypt } from "./crypto.js"; +import { encrypt, decrypt, isEncrypted } from "./crypto.js"; import { fetchGuilds } from "./auth.js"; import { logger } from "@fluxcore/utils"; +/** + * Encrypt a Discord OAuth access token for storage. + * ALL writes to DashboardSession.accessToken MUST go through this helper. + */ +function encryptAccessToken(token: string): string { + if (!token) throw new Error("encryptAccessToken: empty token"); + return encrypt(token); +} + +/** + * Decrypt a stored access token, with a defensive fallback for the + * (now-illegal) case of legacy plaintext rows: if the value does not + * decrypt cleanly, log a security warning and refuse to use it. + */ +function decryptAccessToken(stored: string): string { + if (!isEncrypted(stored)) { + logger.error( + "DashboardSession.accessToken is not encrypted; refusing to use. 
Run scripts/migrate-encrypt-session-tokens.ts", + ); + throw new Error("Session token is not encrypted"); + } + return decrypt(stored); +} + const isProduction = process.env.NODE_ENV === "production"; function safeJsonParse(json: string, fallback: T): T { @@ -58,7 +82,7 @@ export async function createSession( userId: data.userId, username: data.username, avatar: data.avatar, - accessToken: encrypt(data.accessToken), + accessToken: encryptAccessToken(data.accessToken), guilds: JSON.stringify(data.guilds), guildsRefreshedAt: now, createdAt: now, @@ -81,10 +105,30 @@ export async function getSession(id: string): Promise { where: { id }, }); - if (!row) return null; + // Defense-in-depth: constant-time compare the supplied cookie id + // against the stored row id, and perform a dummy decryption on the + // not-found path so the timing of "missing", "tampered", and + // "expired" converges. + const suppliedBuf = Buffer.from(id, "utf8"); + const storedBuf = Buffer.from(row?.id ?? id, "utf8"); + const idsMatch = + row !== null && + suppliedBuf.length === storedBuf.length && + timingSafeEqual(suppliedBuf, storedBuf); + + if (!row || !idsMatch) { + // Equalise work with the success path: perform a throwaway + // decryption so timing does not branch on row presence. 
+ try { + decrypt(encrypt("dummy")); + } catch { + /* ignore */ + } + return null; + } if (row.expiresAt < new Date()) { - await prisma.dashboardSession.deleteMany({ where: { id } }); + await prisma.dashboardSession.deleteMany({ where: { id: row.id } }); return null; } @@ -92,7 +136,7 @@ export async function getSession(id: string): Promise { userId: row.userId, username: row.username, avatar: row.avatar, - accessToken: decrypt(row.accessToken), + accessToken: decryptAccessToken(row.accessToken), guilds: safeJsonParse(row.guilds, []), createdAt: row.createdAt.getTime(), }; diff --git a/apps/dashboard/tests/server/features/permissions/dashboardRoles.test.ts b/apps/dashboard/tests/server/features/permissions/dashboardRoles.test.ts index 826fa67..34e939f 100644 --- a/apps/dashboard/tests/server/features/permissions/dashboardRoles.test.ts +++ b/apps/dashboard/tests/server/features/permissions/dashboardRoles.test.ts @@ -52,6 +52,10 @@ const mockPrisma = { create: vi.fn(), delete: vi.fn(), }, + dashboardAuditLog: { + create: vi.fn().mockResolvedValue(undefined), + }, + $transaction: vi.fn(async (cb: (tx: unknown) => unknown) => cb(mockPrisma)), }; vi.mock("@fluxcore/database", () => ({ diff --git a/apps/dashboard/tests/server/shared/crypto.test.ts b/apps/dashboard/tests/server/shared/crypto.test.ts new file mode 100644 index 0000000..90903be --- /dev/null +++ b/apps/dashboard/tests/server/shared/crypto.test.ts @@ -0,0 +1,34 @@ +import { describe, it, expect, vi } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +const { encrypt, decrypt, isEncrypted } = await import( + "../../../src/server/shared/crypto.js" +); + +describe("crypto helpers", () => { + it("encrypt -> decrypt round-trips", () => { + const plaintext = "discord_oauth_access_token_value"; + const encoded = encrypt(plaintext); + expect(encoded).not.toBe(plaintext); + expect(decrypt(encoded)).toBe(plaintext); + }); + 
+ it("isEncrypted returns true for output of encrypt()", () => { + expect(isEncrypted(encrypt("hello"))).toBe(true); + }); + + it("isEncrypted returns false for arbitrary plaintext", () => { + expect(isEncrypted("plain-bearer-token")).toBe(false); + expect(isEncrypted("")).toBe(false); + expect(isEncrypted("not-base64!!!")).toBe(false); + }); + + it("decrypt throws on tampered ciphertext", () => { + const enc = encrypt("payload"); + const tampered = enc.slice(0, -4) + "AAAA"; + expect(() => decrypt(tampered)).toThrow(); + }); +}); diff --git a/apps/dashboard/tests/server/shared/session-encryption.test.ts b/apps/dashboard/tests/server/shared/session-encryption.test.ts new file mode 100644 index 0000000..f49120d --- /dev/null +++ b/apps/dashboard/tests/server/shared/session-encryption.test.ts @@ -0,0 +1,55 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +const createMock = vi.fn(); +const findUniqueMock = vi.fn(); +const updateMock = vi.fn(); +const deleteManyMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({ + dashboardSession: { + create: createMock, + findUnique: findUniqueMock, + update: updateMock, + deleteMany: deleteManyMock, + }, + }), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("../../../src/server/shared/auth.js", () => ({ + fetchGuilds: vi.fn().mockResolvedValue([]), +})); + +const { createSession } = await import("../../../src/server/shared/session.js"); +const { isEncrypted } = await import("../../../src/server/shared/crypto.js"); + +describe("createSession", () => { + beforeEach(() => { + createMock.mockReset(); + createMock.mockResolvedValue(undefined); + }); + + it("never persists the access token in plaintext", async () => { + const plaintextToken = "ya29.PLAINTEXT_BEARER_TOKEN"; + 
await createSession({ + userId: "1", + username: "u", + avatar: null, + accessToken: plaintextToken, + guilds: [], + }); + + expect(createMock).toHaveBeenCalledTimes(1); + const stored = createMock.mock.calls[0][0].data.accessToken as string; + expect(stored).not.toBe(plaintextToken); + expect(isEncrypted(stored)).toBe(true); + }); +}); diff --git a/apps/dashboard/tests/server/shared/session-timing.test.ts b/apps/dashboard/tests/server/shared/session-timing.test.ts new file mode 100644 index 0000000..2250685 --- /dev/null +++ b/apps/dashboard/tests/server/shared/session-timing.test.ts @@ -0,0 +1,75 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +const findUniqueMock = vi.fn(); +const deleteManyMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({ + dashboardSession: { + findUnique: findUniqueMock, + deleteMany: deleteManyMock, + update: vi.fn(), + }, + }), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("../../../src/server/shared/auth.js", () => ({ + fetchGuilds: vi.fn().mockResolvedValue([]), +})); + +const { getSession } = await import("../../../src/server/shared/session.js"); +const { encrypt } = await import("../../../src/server/shared/crypto.js"); + +describe("getSession constant-time behaviour", () => { + beforeEach(() => { + findUniqueMock.mockReset(); + deleteManyMock.mockReset(); + }); + + it("returns null when row is missing without leaking via Prisma id comparator", async () => { + findUniqueMock.mockResolvedValue(null); + const result = await getSession("00000000-0000-0000-0000-000000000000"); + expect(result).toBeNull(); + }); + + it("returns null when stored id does not constant-time match", async () => { + findUniqueMock.mockResolvedValue({ + id: "11111111-1111-1111-1111-111111111111", + 
userId: "u", + username: "u", + avatar: null, + accessToken: encrypt("tok"), + guilds: "[]", + guildsRefreshedAt: new Date(), + createdAt: new Date(), + expiresAt: new Date(Date.now() + 60_000), + }); + const result = await getSession("22222222-2222-2222-2222-222222222222"); + expect(result).toBeNull(); + }); + + it("returns the session on a valid id", async () => { + const id = "33333333-3333-3333-3333-333333333333"; + findUniqueMock.mockResolvedValue({ + id, + userId: "u", + username: "name", + avatar: null, + accessToken: encrypt("tok"), + guilds: "[]", + guildsRefreshedAt: new Date(), + createdAt: new Date(), + expiresAt: new Date(Date.now() + 60_000), + }); + const result = await getSession(id); + expect(result?.userId).toBe("u"); + }); +}); diff --git a/docs/superpowers/plans/2026-04-07-sec-data-01-encrypt-access-token-audit.md b/docs/superpowers/plans/2026-04-07-sec-data-01-encrypt-access-token-audit.md new file mode 100644 index 0000000..6d80e50 --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-01-encrypt-access-token-audit.md @@ -0,0 +1,386 @@ +# DashboardSession Access Token Encryption Audit — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** CRITICAL +**Goal:** Guarantee that `DashboardSession.accessToken` is always written encrypted (AES-256-GCM) and decrypted on every read, that the column type/comment in Prisma reflects the encrypted contract, and that any pre-existing plaintext rows are migrated to ciphertext. +**Architecture:** `apps/dashboard/src/server/shared/crypto.ts` already implements AES-256-GCM (`encrypt`/`decrypt`). `apps/dashboard/src/server/shared/session.ts` calls `encrypt(data.accessToken)` on `createSession` and `decrypt(row.accessToken)` on `getSession`. 
We will (1) add a typed wrapper module so the contract is enforced via the type system, (2) sweep the codebase for any other write/read site that bypasses the wrapper, (3) add a one-shot data-migration script that detects rows whose value is not valid base64-of-(iv+tag+ct) and re-encrypts them, and (4) lock the contract with unit + integration tests. +**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`packages/database/prisma/schema.prisma:109` declares `accessToken String` with no indication that the value is encrypted. Today the only writer (`createSession` in `apps/dashboard/src/server/shared/session.ts:61`) wraps the token in `encrypt(...)`, but there is no compile-time guarantee that future writers do the same. Any contributor that calls `prisma.dashboardSession.create/update` directly will silently store the OAuth bearer token in plaintext, which (a) leaks Discord account access if the DB is dumped and (b) is invisible because reads use `decrypt()` and would either succeed (returning garbage) or throw at the next request. Additionally, environments that pre-date the encryption helper may still have plaintext rows. 
+ +## Files +- Read: `apps/dashboard/src/server/shared/crypto.ts` +- Read: `apps/dashboard/src/server/shared/session.ts` +- Read: `packages/database/prisma/schema.prisma` +- Modify: `apps/dashboard/src/server/shared/crypto.ts` (add `isEncrypted` helper + `ENCRYPTED_PREFIX`) +- Modify: `apps/dashboard/src/server/shared/session.ts` (wrap all writes through a typed helper, add defensive read) +- Modify: `packages/database/prisma/schema.prisma` (add `/// @encrypted` doc comment) +- Create: `packages/database/prisma/migrations/20260407120000_encrypt_dashboard_session_tokens/migration.sql` +- Create: `scripts/migrate-encrypt-session-tokens.ts` (data backfill) +- Create: `apps/dashboard/tests/server/shared/crypto.test.ts` +- Create: `apps/dashboard/tests/server/shared/session-encryption.test.ts` + +## Tasks + +### Task 1: Add `isEncrypted` detection helper to crypto module + +- [ ] **Step 1: Write the failing test** + +Create `apps/dashboard/tests/server/shared/crypto.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +import { encrypt, decrypt, isEncrypted } from "../../../src/server/shared/crypto.js"; + +describe("crypto helpers", () => { + it("encrypt -> decrypt round-trips", () => { + const plaintext = "discord_oauth_access_token_value"; + const encoded = encrypt(plaintext); + expect(encoded).not.toBe(plaintext); + expect(decrypt(encoded)).toBe(plaintext); + }); + + it("isEncrypted returns true for output of encrypt()", () => { + expect(isEncrypted(encrypt("hello"))).toBe(true); + }); + + it("isEncrypted returns false for arbitrary plaintext", () => { + expect(isEncrypted("plain-bearer-token")).toBe(false); + expect(isEncrypted("")).toBe(false); + expect(isEncrypted("not-base64!!!")).toBe(false); + }); + + it("decrypt throws on tampered ciphertext", () => { + const enc = encrypt("payload"); + const 
tampered = enc.slice(0, -4) + "AAAA"; + expect(() => decrypt(tampered)).toThrow(); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/crypto.test.ts +``` + +Expect: `isEncrypted is not a function`. + +- [ ] **Step 3: Implement `isEncrypted` in `apps/dashboard/src/server/shared/crypto.ts`** + +Append to the file: + +```typescript +const MIN_ENCRYPTED_BYTES = IV_LENGTH + AUTH_TAG_LENGTH + 1; + +/** + * Best-effort check whether a stored string was produced by `encrypt()`. + * Used to detect legacy plaintext rows during the encryption backfill. + */ +export function isEncrypted(value: string): boolean { + if (!value) return false; + // Base64 alphabet check (allow padding) + if (!/^[A-Za-z0-9+/]+={0,2}$/.test(value)) return false; + let buf: Buffer; + try { + buf = Buffer.from(value, "base64"); + } catch { + return false; + } + if (buf.length < MIN_ENCRYPTED_BYTES) return false; + // Verify by attempting decryption with the active key + try { + decrypt(value); + return true; + } catch { + return false; + } +} +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/crypto.test.ts +``` + +- [ ] **Step 5: Commit** + +```bash +git add apps/dashboard/src/server/shared/crypto.ts apps/dashboard/tests/server/shared/crypto.test.ts +git commit -m "feat(crypto): add isEncrypted helper for session token migration" +``` + +### Task 2: Funnel all DashboardSession writes through a typed helper + +- [ ] **Step 1: Write the failing test** + +Create `apps/dashboard/tests/server/shared/session-encryption.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +const 
createMock = vi.fn(); +const findUniqueMock = vi.fn(); +const updateMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({ + dashboardSession: { + create: createMock, + findUnique: findUniqueMock, + update: updateMock, + deleteMany: vi.fn(), + }, + }), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("../../../src/server/shared/auth.js", () => ({ + fetchGuilds: vi.fn().mockResolvedValue([]), +})); + +import { createSession } from "../../../src/server/shared/session.js"; +import { isEncrypted } from "../../../src/server/shared/crypto.js"; + +describe("createSession", () => { + beforeEach(() => { + createMock.mockReset(); + createMock.mockResolvedValue(undefined); + }); + + it("never persists the access token in plaintext", async () => { + const plaintextToken = "ya29.PLAINTEXT_BEARER_TOKEN"; + await createSession({ + userId: "1", + username: "u", + avatar: null, + accessToken: plaintextToken, + guilds: [], + }); + + expect(createMock).toHaveBeenCalledTimes(1); + const stored = createMock.mock.calls[0][0].data.accessToken as string; + expect(stored).not.toBe(plaintextToken); + expect(isEncrypted(stored)).toBe(true); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it pass for createSession (already encrypted) — but extend to assert update sites** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/session-encryption.test.ts +``` + +The first test should pass. Now add a second `it()` block that verifies any future `update({ data: { accessToken } })` callsite is also wrapped. Search the codebase to confirm there are no other writers: + +```bash +docker compose run --rm dashboard sh -lc 'grep -rn "dashboardSession\." 
apps/dashboard/src packages | grep -E "create|update"' +``` + +If results contain only `session.ts:55` (`create`) and `session.ts:133/166` (`update` for `expiresAt`/`guilds`, **not** `accessToken`), the audit is clean. Document this in a code comment. + +- [ ] **Step 3: Add the type-level guard in `apps/dashboard/src/server/shared/session.ts`** + +Replace the inline `accessToken: encrypt(data.accessToken),` with a dedicated helper at the top of the file: + +```typescript +import { encrypt, decrypt, isEncrypted } from "./crypto.js"; + +/** + * Encrypt a Discord OAuth access token for storage. + * ALL writes to DashboardSession.accessToken MUST go through this helper. + */ +function encryptAccessToken(token: string): string { + if (!token) throw new Error("encryptAccessToken: empty token"); + return encrypt(token); +} + +/** + * Decrypt a stored access token, with a defensive fallback for the + * (now-illegal) case of legacy plaintext rows: if the value does not + * decrypt cleanly, log a security warning and refuse to use it. + */ +function decryptAccessToken(stored: string): string { + if (!isEncrypted(stored)) { + logger.error( + "DashboardSession.accessToken is not encrypted; refusing to use. Run scripts/migrate-encrypt-session-tokens.ts", + ); + throw new Error("Session token is not encrypted"); + } + return decrypt(stored); +} +``` + +Update `createSession` to call `encryptAccessToken(data.accessToken)` and `getSession` to call `decryptAccessToken(row.accessToken)`. 
+ +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/session-encryption.test.ts +docker compose run --rm dashboard pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add apps/dashboard/src/server/shared/session.ts apps/dashboard/tests/server/shared/session-encryption.test.ts +git commit -m "fix(session): funnel access-token writes through typed encrypt helper" +``` + +### Task 3: Schema annotation + data migration for legacy plaintext rows + +- [ ] **Step 1: Write the failing migration script test** + +Create `apps/dashboard/tests/server/shared/session-backfill.test.ts`: + +```typescript +import { describe, it, expect, vi } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +const findManyMock = vi.fn(); +const updateMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({ + dashboardSession: { findMany: findManyMock, update: updateMock }, + }), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, +})); + +import { backfillEncryptSessionTokens } from "../../../../scripts/migrate-encrypt-session-tokens.js"; +import { encrypt, isEncrypted } from "../../../src/server/shared/crypto.js"; + +describe("backfillEncryptSessionTokens", () => { + it("re-encrypts plaintext rows and skips already-encrypted rows", async () => { + findManyMock.mockResolvedValue([ + { id: "a", accessToken: "plain-token-1" }, + { id: "b", accessToken: encrypt("already-ciphertext") }, + { id: "c", accessToken: "plain-token-2" }, + ]); + updateMock.mockResolvedValue(undefined); + + const summary = await backfillEncryptSessionTokens(); + + expect(summary.encrypted).toBe(2); + expect(summary.skipped).toBe(1); + const encryptedCalls = updateMock.mock.calls.filter( + (c) => c[0].where.id === "a" || 
c[0].where.id === "c", + ); + expect(encryptedCalls).toHaveLength(2); + for (const call of encryptedCalls) { + expect(isEncrypted(call[0].data.accessToken)).toBe(true); + } + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/session-backfill.test.ts +``` + +Expect: cannot find module `scripts/migrate-encrypt-session-tokens.js`. + +- [ ] **Step 3: Implement migration script and Prisma annotation** + +Create `scripts/migrate-encrypt-session-tokens.ts`: + +```typescript +import { getPrisma } from "@fluxcore/database"; +import { logger } from "@fluxcore/utils"; +import { encrypt, isEncrypted } from "../apps/dashboard/src/server/shared/crypto.js"; + +export interface BackfillSummary { + encrypted: number; + skipped: number; +} + +export async function backfillEncryptSessionTokens(): Promise<BackfillSummary> { + const prisma = getPrisma(); + const rows = await prisma.dashboardSession.findMany({ + select: { id: true, accessToken: true }, + }); + + let encrypted = 0; + let skipped = 0; + + for (const row of rows) { + if (isEncrypted(row.accessToken)) { + skipped++; + continue; + } + await prisma.dashboardSession.update({ + where: { id: row.id }, + data: { accessToken: encrypt(row.accessToken) }, + }); + encrypted++; + } + + logger.info( + `DashboardSession backfill complete: encrypted=${encrypted} skipped=${skipped}`, + ); + return { encrypted, skipped }; +} + +if (import.meta.url === `file://${process.argv[1]}`) { + backfillEncryptSessionTokens() + .then(() => process.exit(0)) + .catch((err) => { + logger.error("Backfill failed", err); + process.exit(1); + }); +} +``` + +Edit `packages/database/prisma/schema.prisma` line 109 to add a doc comment: + +```prisma + /// Encrypted with AES-256-GCM via apps/dashboard/src/server/shared/crypto.ts + /// MUST be written through encryptAccessToken() in session.ts + accessToken String +``` + +Then create the (no-op
SQL) migration to lock the schema state: + +```bash +docker compose run --rm dashboard pnpm db:migrate --name encrypt_dashboard_session_tokens +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/session-backfill.test.ts +docker compose run --rm dashboard pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add scripts/migrate-encrypt-session-tokens.ts packages/database/prisma/schema.prisma packages/database/prisma/migrations/ apps/dashboard/tests/server/shared/session-backfill.test.ts +git commit -m "fix(session): backfill script + schema annotation for encrypted access tokens" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-02-actions-cache-atomic-swap.md b/docs/superpowers/plans/2026-04-07-sec-data-02-actions-cache-atomic-swap.md new file mode 100644 index 0000000..b13b851 --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-02-actions-cache-atomic-swap.md @@ -0,0 +1,192 @@ +# Actions Cache Reload Atomic Swap — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** HIGH +**Goal:** Eliminate the empty-cache window in `reloadGuild()` so that events arriving during a guild reload always see either the previous rule set or the new one — never an empty set. +**Architecture:** `packages/systems/src/actions/cache.ts` keeps a `guildId -> eventType -> ActionRule[]` Map. The current `reloadGuild` loads new rules then calls `invalidateGuild(guildId)` (deleting the old map) and rebuilds in-place via `addToInternalCache`. Between the `delete` and the loop's first `set`, `getRulesForEvent` returns `[]`. The fix: build a fresh `Map` locally, then atomically swap it into `ruleCache.set(guildId, newMap)`. 
Map writes are synchronous in Node so the swap is observably atomic with respect to other JS turns. +**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`packages/systems/src/actions/cacheSync.ts:65-72` (the `reloadGuild` it imports from `cache.ts:65-72`) does: + +```ts +const rules = await getRulesByGuild(guildId); +invalidateGuild(guildId); // <-- deletes the old map +for (const rule of rules) { + addToInternalCache(rule); // <-- rebuilds slot by slot +} +``` + +If an event handler synchronously calls `getRulesForEvent(guildId, ev)` after `invalidateGuild` but before its event slot has been re-added, it sees `[]` and silently no-ops. Although JS is single-threaded, microtask boundaries inside `addToInternalCache` (none today, but trivial to introduce) and the more important risk — the comment promises "minimize the window" but the window is non-zero — make this a race that should be eliminated by construction. + +## Files +- Read: `packages/systems/src/actions/cache.ts` +- Read: `packages/systems/src/actions/cacheSync.ts` +- Read: `packages/systems/src/actions/persistence.ts` +- Modify: `packages/systems/src/actions/cache.ts` +- Create: `packages/systems/tests/unit/actions-cache-atomic.test.ts` + +## Tasks + +### Task 1: Replace in-place rebuild with build-then-swap + +- [ ] **Step 1: Write the failing test** + +Create `packages/systems/tests/unit/actions-cache-atomic.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach } from "vitest"; + +const getRulesByGuildMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({}), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("../../src/actions/persistence.js", () => ({ + getRulesByGuild: getRulesByGuildMock, + rowToRule: (r: unknown) => r, +})); + +import { + reloadGuild, + getRulesForEvent, + addRuleToCache, +} from "../../src/actions/cache.js"; +import 
type { ActionRule } from "../../src/actions/types.js"; + +function rule(id: number, eventType: string): ActionRule { + return { + id, + guildId: "g1", + name: `r${id}`, + enabled: true, + eventType: eventType as ActionRule["eventType"], + actions: [], + conditions: {}, + priority: 0, + createdBy: "u1", + }; +} + +describe("reloadGuild atomicity", () => { + beforeEach(() => { + getRulesByGuildMock.mockReset(); + }); + + it("never exposes an empty rule set during reload", async () => { + // Seed the cache with an existing rule + addRuleToCache(rule(1, "memberJoin")); + expect(getRulesForEvent("g1", "memberJoin")).toHaveLength(1); + + // Make getRulesByGuild observe the cache mid-reload + const observed: number[] = []; + getRulesByGuildMock.mockImplementation(async () => { + // Simulate an event firing while the new rules are being fetched + observed.push(getRulesForEvent("g1", "memberJoin").length); + return [rule(2, "memberJoin"), rule(3, "memberJoin")]; + }); + + await reloadGuild("g1"); + + // During the fetch, the old rules MUST still be visible + expect(observed).toEqual([1]); + // After reload completes, the new rules are visible + expect(getRulesForEvent("g1", "memberJoin")).toHaveLength(2); + }); + + it("swaps the entire eventType map in one assignment", async () => { + addRuleToCache(rule(10, "messageDelete")); + getRulesByGuildMock.mockResolvedValue([rule(11, "messageEdit")]); + + await reloadGuild("g1"); + + // Old eventType bucket must be replaced, not merged + expect(getRulesForEvent("g1", "messageDelete")).toHaveLength(0); + expect(getRulesForEvent("g1", "messageEdit")).toHaveLength(1); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/systems test packages/systems/tests/unit/actions-cache-atomic.test.ts +``` + +The first test will currently pass (because `getRulesByGuild` resolves before `invalidateGuild`), but the second is unlikely to highlight the bug. 
Add a third assertion that wraps the issue precisely. + +Append to the test file: + +```typescript + it("preserves visibility across microtask boundaries", async () => { + addRuleToCache(rule(20, "voiceJoin")); + getRulesByGuildMock.mockImplementation(async () => { + await Promise.resolve(); // microtask boundary + return [rule(21, "voiceJoin")]; + }); + + const reloadPromise = reloadGuild("g1"); + // Yield once: cache should still show the old rule + await Promise.resolve(); + expect(getRulesForEvent("g1", "voiceJoin").length).toBeGreaterThan(0); + await reloadPromise; + expect(getRulesForEvent("g1", "voiceJoin")).toHaveLength(1); + }); +``` + +Re-run; the third test should fail on `main`. + +- [ ] **Step 3: Implement build-then-swap** + +Edit `packages/systems/src/actions/cache.ts`. Replace `reloadGuild`: + +```typescript +export async function reloadGuild(guildId: string): Promise<void> { + const rules = await getRulesByGuild(guildId); + + // Build the new per-event map locally first; only after it is fully + // populated do we atomically swap it into the global cache. This + // guarantees that any concurrent getRulesForEvent() call observes + // EITHER the complete previous rule set OR the complete new one.
+ const newGuildMap = new Map<ActionRule["eventType"], ActionRule[]>(); + for (const rule of rules) { + let bucket = newGuildMap.get(rule.eventType); + if (!bucket) { + bucket = []; + newGuildMap.set(rule.eventType, bucket); + } + bucket.push(rule); + } + for (const bucket of newGuildMap.values()) { + bucket.sort((a, b) => b.priority - a.priority); + } + + if (newGuildMap.size === 0) { + ruleCache.delete(guildId); + } else { + ruleCache.set(guildId, newGuildMap); + } +} +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/systems test packages/systems/tests/unit/actions-cache-atomic.test.ts +docker compose run --rm bot pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add packages/systems/src/actions/cache.ts packages/systems/tests/unit/actions-cache-atomic.test.ts +git commit -m "fix(actions): atomic-swap reload to eliminate empty-cache race window" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-03-session-lookup-constant-time.md b/docs/superpowers/plans/2026-04-07-sec-data-03-session-lookup-constant-time.md new file mode 100644 index 0000000..7b8d5ff --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-03-session-lookup-constant-time.md @@ -0,0 +1,197 @@ +# Session Lookup Timing Side-Channel — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** HIGH +**Goal:** Make session validation observably constant-time with respect to the *content* of the supplied session id, so an attacker cannot distinguish "session row exists but is wrong" from "session row does not exist" via response timing. +**Architecture:** `apps/dashboard/src/server/shared/session.ts` `getSession()` does a Prisma `findUnique({ where: { id } })`.
Postgres B-tree lookup time depends on the supplied bytes (early-out on first mismatch in the index page), and our subsequent `if (!row) return null` short-circuits before hitting the cache, expiry check, JSON parse, decryption, etc. Even though the session id is a 128-bit UUID (so practical brute force is infeasible), we still want defense-in-depth: (1) compare the cookie-supplied id against the row id with `crypto.timingSafeEqual`, (2) ensure the not-found path performs the same dummy decryption work the found path performs, and (3) ensure error timing for "expired", "not found", and "tampered" all converge. +**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`apps/dashboard/src/server/shared/session.ts:72-114` (`getSession`) returns `null` immediately when `findUnique` returns nothing, without performing the equivalent decryption and parse work the success path does. The function also relies on Prisma equality semantics for the id, which doesn't use `crypto.timingSafeEqual`. Combined, the wall-clock time of the not-found path differs measurably from the found-but-expired and found-and-valid paths, leaking enumeration signal under high-volume probing. Although UUID v4 makes brute force impractical, the project's threat model treats this as defense-in-depth and constant-time comparison is cheap. 
+ +## Files +- Read: `apps/dashboard/src/server/shared/session.ts` +- Modify: `apps/dashboard/src/server/shared/session.ts` +- Create: `apps/dashboard/tests/server/shared/session-timing.test.ts` + +## Tasks + +### Task 1: Constant-time session id verification + path equalisation + +- [ ] **Step 1: Write the failing test** + +Create `apps/dashboard/tests/server/shared/session-timing.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { dashboardSessionSecret: "test-secret-do-not-use-in-prod-1234567890" }, +})); + +const findUniqueMock = vi.fn(); +const deleteManyMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({ + dashboardSession: { findUnique: findUniqueMock, deleteMany: deleteManyMock }, + }), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("../../../src/server/shared/auth.js", () => ({ + fetchGuilds: vi.fn().mockResolvedValue([]), +})); + +import { getSession } from "../../../src/server/shared/session.js"; +import { encrypt } from "../../../src/server/shared/crypto.js"; + +describe("getSession constant-time behaviour", () => { + beforeEach(() => { + findUniqueMock.mockReset(); + deleteManyMock.mockReset(); + }); + + it("returns null when row is missing without leaking via Prisma id comparator", async () => { + findUniqueMock.mockResolvedValue(null); + const result = await getSession("00000000-0000-0000-0000-000000000000"); + expect(result).toBeNull(); + }); + + it("returns null when stored id does not constant-time match", async () => { + findUniqueMock.mockResolvedValue({ + id: "11111111-1111-1111-1111-111111111111", + userId: "u", + username: "u", + avatar: null, + accessToken: encrypt("tok"), + guilds: "[]", + guildsRefreshedAt: new Date(), + createdAt: new Date(), + expiresAt: new Date(Date.now() + 60_000), + }); + const result = await 
getSession("22222222-2222-2222-2222-222222222222"); + expect(result).toBeNull(); + }); + + it("returns the session on a valid id", async () => { + const id = "33333333-3333-3333-3333-333333333333"; + findUniqueMock.mockResolvedValue({ + id, + userId: "u", + username: "name", + avatar: null, + accessToken: encrypt("tok"), + guilds: "[]", + guildsRefreshedAt: new Date(), + createdAt: new Date(), + expiresAt: new Date(Date.now() + 60_000), + }); + const result = await getSession(id); + expect(result?.userId).toBe("u"); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/session-timing.test.ts +``` + +The "constant-time match" test will fail because today the function returns whatever Prisma found regardless of supplied id bytes (Prisma's `findUnique` will still match — but the test verifies our explicit guard by mocking Prisma to return a row whose `id` differs from the input). + +- [ ] **Step 3: Implement constant-time verification** + +Edit `apps/dashboard/src/server/shared/session.ts`. Add at the top: + +```typescript +import { randomUUID, timingSafeEqual } from "node:crypto"; +``` + +Replace `getSession` body: + +```typescript +export async function getSession(id: string): Promise<Session | null> { + const cached = sessionCache.get(id); + if (cached && cached.cacheExpiresAt > Date.now()) { + return cached.session; + } + sessionCache.delete(id); + + const prisma = getPrisma(); + const row = await prisma.dashboardSession.findUnique({ where: { id } }); + + // Defense-in-depth: constant-time compare the supplied cookie id + // against the stored row id, and perform a dummy decryption on the + // not-found path so the timing of "missing", "tampered", and + // "expired" converges. + const suppliedBuf = Buffer.from(id, "utf8"); + const storedBuf = Buffer.from(row?.id ??
id, "utf8"); + const idsMatch = + row !== null && + suppliedBuf.length === storedBuf.length && + timingSafeEqual(suppliedBuf, storedBuf); + + if (!row || !idsMatch) { + // Equalise work with the success path: perform a throwaway + // decryption so timing does not branch on row presence. + try { + decrypt(encrypt("dummy")); + } catch { + /* ignore */ + } + return null; + } + + if (row.expiresAt < new Date()) { + await prisma.dashboardSession.deleteMany({ where: { id: row.id } }); + return null; + } + + const session: Session = { + userId: row.userId, + username: row.username, + avatar: row.avatar, + accessToken: decryptAccessToken(row.accessToken), + guilds: safeJsonParse(row.guilds, []), + createdAt: row.createdAt.getTime(), + }; + + const cachedEntry: CachedSession = { + session, + cacheExpiresAt: Date.now() + CACHE_TTL, + sessionExpiresAt: row.expiresAt.getTime(), + guildsRefreshedAt: row.guildsRefreshedAt.getTime(), + }; + sessionCache.set(id, cachedEntry); + + if (Date.now() - cachedEntry.guildsRefreshedAt > GUILD_REFRESH_INTERVAL) { + refreshSessionGuilds(id, session.accessToken, cachedEntry).catch(() => {}); + } + + return session; +} +``` + +(The reference to `decryptAccessToken` assumes plan `sec-data-01` has landed; if running this plan independently, inline `decrypt(row.accessToken)` instead.) 
+ +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm --filter @fluxcore/dashboard test apps/dashboard/tests/server/shared/session-timing.test.ts +docker compose run --rm dashboard pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add apps/dashboard/src/server/shared/session.ts apps/dashboard/tests/server/shared/session-timing.test.ts +git commit -m "fix(session): constant-time id check and equalise not-found timing" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-04-dashboard-role-restrict-delete.md b/docs/superpowers/plans/2026-04-07-sec-data-04-dashboard-role-restrict-delete.md new file mode 100644 index 0000000..5b71c36 --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-04-dashboard-role-restrict-delete.md @@ -0,0 +1,252 @@ +# DashboardRole Restrict Delete + Audit — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** HIGH +**Goal:** Prevent silent privilege loss when a `DashboardRole` is deleted: require all `DashboardRoleAssignment` rows for the role to be removed first (with explicit audit entries) before the role itself can be deleted, and add an audit entry for the role deletion. +**Architecture:** `packages/database/prisma/schema.prisma:564-575` declares `DashboardRoleAssignment.role @relation(... onDelete: Cascade)`. Cascade is convenient but means a single `DELETE FROM "DashboardRole" WHERE id = ?` silently revokes the role from every user with no audit trail. We will (1) change the FK to `onDelete: Restrict`, (2) add a delete-role route helper that explicitly enumerates and audit-logs each unassignment, and (3) write an integration test against the real test DB that proves the restrict semantics. 
+**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`packages/database/prisma/schema.prisma:569` uses `onDelete: Cascade` on the role assignment FK. Combined with the dashboard's "delete role" endpoint (which today calls `prisma.dashboardRole.delete`), an admin can wipe out every user's permissions for a role with a single click and no audit trail in `DashboardAuditLog`. Worse, the cache invalidation path in `permissions.ts` only invalidates per-user when called explicitly — a role deletion does not enumerate affected users, so cached permission sets continue to grant access until TTL expiry (60s). + +## Files +- Read: `packages/database/prisma/schema.prisma` +- Read: `apps/dashboard/src/server/shared/permissions.ts` +- Modify: `packages/database/prisma/schema.prisma` +- Create: `packages/database/prisma/migrations/20260407130000_restrict_dashboard_role_delete/migration.sql` +- Modify (or create) `apps/dashboard/src/server/features/permissions/routes.ts` (delete-role handler — verify path with grep first) +- Create: `apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts` + +## Tasks + +### Task 1: Switch FK to Restrict + add migration + +- [ ] **Step 1: Write the failing test** + +Create `apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts`: + +```typescript +import { describe, it, expect, beforeEach } from "vitest"; +import { getPrisma } from "@fluxcore/database"; +import { setupTestDatabase, cleanTestData, teardownTestDatabase } from "../../../../packages/systems/tests/helpers/db.js"; + +describe("DashboardRole delete semantics", () => { + beforeEach(async () => { + await cleanTestData(); + }); + + it("refuses to delete a role with active assignments", async () => { + const prisma = getPrisma(); + const role = await prisma.dashboardRole.create({ + data: { guildId: "g1", name: "mods", permissions: "[\"moderation.warn\"]" }, + }); + await prisma.dashboardRoleAssignment.create({ + data: { guildId: "g1", 
userId: "u1", roleId: role.id, assignedBy: "admin" }, + }); + + await expect( + prisma.dashboardRole.delete({ where: { id: role.id } }), + ).rejects.toThrow(); + }); + + it("permits deletion after all assignments are removed", async () => { + const prisma = getPrisma(); + const role = await prisma.dashboardRole.create({ + data: { guildId: "g1", name: "mods", permissions: "[]" }, + }); + await prisma.dashboardRoleAssignment.create({ + data: { guildId: "g1", userId: "u1", roleId: role.id, assignedBy: "admin" }, + }); + await prisma.dashboardRoleAssignment.deleteMany({ where: { roleId: role.id } }); + + await expect( + prisma.dashboardRole.delete({ where: { id: role.id } }), + ).resolves.toMatchObject({ id: role.id }); + }); +}); +``` + +(If `tests/helpers/db.js` does not exist in the test runner working dir, replace with the project's existing integration setup helper — search with `grep -rn setupTestDatabase packages/systems/tests`.) + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm dashboard pnpm test:integration apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts +``` + +The first test will FAIL because the current `Cascade` allows the delete to succeed. 
+ +- [ ] **Step 3: Update schema and migrate** + +Edit `packages/database/prisma/schema.prisma` line 569: + +```prisma + role DashboardRole @relation(fields: [roleId], references: [id], onDelete: Restrict) +``` + +Then generate and apply the migration: + +```bash +docker compose run --rm dashboard pnpm db:migrate --name restrict_dashboard_role_delete +docker compose run --rm dashboard pnpm db:generate +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm test:integration apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts +docker compose run --rm dashboard pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add packages/database/prisma/schema.prisma packages/database/prisma/migrations/ apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts +git commit -m "fix(permissions): restrict DashboardRole delete when assignments exist" +``` + +### Task 2: Audit-logged unassign-then-delete helper + cache invalidation + +- [ ] **Step 1: Write the failing test** + +Append to `apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts`: + +```typescript +import { deleteDashboardRoleWithAudit } from "../../../src/server/shared/dashboardRoleDelete.js"; + +describe("deleteDashboardRoleWithAudit", () => { + beforeEach(async () => { + await cleanTestData(); + }); + + it("unassigns members, writes audit entries, then deletes the role", async () => { + const prisma = getPrisma(); + const role = await prisma.dashboardRole.create({ + data: { guildId: "g1", name: "mods", permissions: "[]" }, + }); + await prisma.dashboardRoleAssignment.createMany({ + data: [ + { guildId: "g1", userId: "u1", roleId: role.id, assignedBy: "a" }, + { guildId: "g1", userId: "u2", roleId: role.id, assignedBy: "a" }, + ], + }); + + await deleteDashboardRoleWithAudit({ + guildId: "g1", + roleId: role.id, + actorId: "admin", + actorUsername: "admin#0", + }); + + const remaining = await prisma.dashboardRole.findUnique({ 
where: { id: role.id } }); + expect(remaining).toBeNull(); + + const auditEntries = await prisma.dashboardAuditLog.findMany({ + where: { guildId: "g1", action: { in: ["role.unassign", "role.delete"] } }, + orderBy: { createdAt: "asc" }, + }); + expect(auditEntries.map((e) => e.action)).toEqual([ + "role.unassign", + "role.unassign", + "role.delete", + ]); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm dashboard pnpm test:integration apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts +``` + +Expect: cannot find module `dashboardRoleDelete.js`. + +- [ ] **Step 3: Implement the helper** + +Create `apps/dashboard/src/server/shared/dashboardRoleDelete.ts`: + +```typescript +import { getPrisma } from "@fluxcore/database"; +import { logger } from "@fluxcore/utils"; +import { + createDashboardAuditLog, + invalidatePermissionCache, +} from "./permissions.js"; + +export interface DeleteDashboardRoleArgs { + guildId: string; + roleId: string; + actorId: string; + actorUsername: string; +} + +/** + * Safely delete a DashboardRole: enumerate assignments, write an audit + * entry per unassignment, invalidate per-user permission caches, and + * only then delete the role itself. The schema's onDelete: Restrict + * guarantees that this is the ONLY safe path to remove a role. 
+ */ +export async function deleteDashboardRoleWithAudit( + args: DeleteDashboardRoleArgs, +): Promise<void> { + const prisma = getPrisma(); + + await prisma.$transaction(async (tx) => { + const assignments = await tx.dashboardRoleAssignment.findMany({ + where: { guildId: args.guildId, roleId: args.roleId }, + }); + + for (const a of assignments) { + await tx.dashboardRoleAssignment.delete({ where: { id: a.id } }); + await tx.dashboardAuditLog.create({ + data: { + guildId: args.guildId, + userId: args.actorId, + username: args.actorUsername, + action: "role.unassign", + targetType: "user", + targetId: a.userId, + details: JSON.stringify({ roleId: args.roleId, reason: "role-delete" }), + }, + }); + } + + await tx.dashboardRole.delete({ where: { id: args.roleId } }); + await tx.dashboardAuditLog.create({ + data: { + guildId: args.guildId, + userId: args.actorId, + username: args.actorUsername, + action: "role.delete", + targetType: "role", + targetId: args.roleId, + details: JSON.stringify({ unassignedUsers: assignments.map((a) => a.userId) }), + }, + }); + }); + + // Invalidate cached permissions for every previously-assigned user + invalidatePermissionCache(args.guildId); + logger.info( + `Dashboard role ${args.roleId} deleted in guild ${args.guildId} by ${args.actorUsername}`, + ); +} +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm test:integration apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts +docker compose run --rm dashboard pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add apps/dashboard/src/server/shared/dashboardRoleDelete.ts apps/dashboard/tests/server/shared/dashboard-role-delete.test.ts +git commit -m "feat(permissions): audit-logged unassign-then-delete helper for DashboardRole" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-05-logger-redaction.md b/docs/superpowers/plans/2026-04-07-sec-data-05-logger-redaction.md new file mode 100644 index
0000000..80ac58e --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-05-logger-redaction.md @@ -0,0 +1,232 @@ +# Logger Sensitive Value Redaction — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** MEDIUM +**Goal:** Prevent the shared `@fluxcore/utils` logger from emitting bearer tokens, Discord bot tokens, webhook URLs, and OAuth code/secret query parameters to stdout/log files. +**Architecture:** `packages/utils/src/logger.ts` exposes `logger.{debug,info,warn,error}` which currently format the message via `console.log` with no transformation. We will add a pure `redactSensitive(value: string): string` helper, run every formatted log line through it before printing, and unit-test the regexes against representative inputs (Bearer tokens, bot tokens, Discord webhook URLs, query strings with `?token=` / `?code=` / `?client_secret=`). The error stack is also redacted. +**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`packages/utils/src/logger.ts:46-49` directly `console.log`s the caller-supplied message and any error stack. Several call sites today (e.g. `apps/dashboard/src/server/shared/auth.ts`, action executors making outbound HTTPS, music player URL handling) can inadvertently include OAuth bearer tokens, Discord bot tokens (`MTAxxxxx.G...`), webhook URLs (`https://discord.com/api/webhooks/<id>/<token>`), or full request URLs containing `?code=`, `?token=`, `?client_secret=`, `?api_key=`. Anyone with stdout access (Docker logs, log aggregator, screen-share during incident response) sees the secret. Logs are also commonly archived for longer than the secrets are valid.
+ +## Files +- Read: `packages/utils/src/logger.ts` +- Modify: `packages/utils/src/logger.ts` +- Create: `packages/utils/tests/logger.test.ts` + +## Tasks + +### Task 1: Add `redactSensitive` and route all log writes through it + +- [ ] **Step 1: Write the failing test** + +Create `packages/utils/tests/logger.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { logLevel: "debug" }, +})); + +import { logger, redactSensitive } from "../src/logger.js"; + +describe("redactSensitive", () => { + it("redacts Bearer tokens", () => { + expect(redactSensitive("Authorization: Bearer abc.def.ghi")).toBe( + "Authorization: Bearer [REDACTED]", + ); + }); + + it("redacts Discord bot tokens (MFA-format)", () => { + const tok = "MTAxNzg5NjU0MzIxMDk4NzY1NA.GabCde.fghIjkLmnOpqrStuvwxyz0123456789ABCDEF"; + expect(redactSensitive(`Logging in with ${tok}`)).toContain("[REDACTED]"); + expect(redactSensitive(`Logging in with ${tok}`)).not.toContain(tok); + }); + + it("redacts Discord webhook URLs", () => { + const url = "https://discord.com/api/webhooks/1234567890/abcdefgHIJKLMNOPqrstuvwxyz"; + expect(redactSensitive(`POST to ${url}`)).toBe( + "POST to https://discord.com/api/webhooks/[REDACTED]", + ); + }); + + it("redacts query parameters that look secret", () => { + expect( + redactSensitive("https://api.example.com/x?code=abc123&keep=ok"), + ).toBe("https://api.example.com/x?code=[REDACTED]&keep=ok"); + expect( + redactSensitive("https://api.example.com/x?client_secret=xyz"), + ).toBe("https://api.example.com/x?client_secret=[REDACTED]"); + expect( + redactSensitive("https://api.example.com/x?token=abc&api_key=def"), + ).toBe("https://api.example.com/x?token=[REDACTED]&api_key=[REDACTED]"); + }); + + it("leaves benign strings alone", () => { + expect(redactSensitive("hello world")).toBe("hello world"); + }); +}); + +describe("logger redaction", () => { + let logSpy: ReturnType<typeof vi.spyOn>; + 
beforeEach(() => { + logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + }); + + afterEach(() => { + logSpy.mockRestore(); + }); + + it("redacts secrets in info messages", () => { + logger.info("token=Bearer secret123"); + const printed = logSpy.mock.calls.map((c) => c.join(" ")).join("\n"); + expect(printed).not.toContain("secret123"); + expect(printed).toContain("[REDACTED]"); + }); + + it("redacts secrets in error stacks", () => { + const err = new Error("failed"); + err.stack = "Error: failed\n at fn (https://x?token=leak)"; + logger.error("oops", err); + const printed = logSpy.mock.calls.map((c) => c.join(" ")).join("\n"); + expect(printed).not.toContain("leak"); + expect(printed).toContain("[REDACTED]"); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/utils test packages/utils/tests/logger.test.ts +``` + +Expect: `redactSensitive is not exported`. + +- [ ] **Step 3: Implement redaction** + +Replace `packages/utils/src/logger.ts` content (preserving the existing public API): + +```typescript +import { config } from "@fluxcore/config"; + +enum LogLevel { + DEBUG = 0, + INFO = 1, + WARN = 2, + ERROR = 3, +} + +const LEVEL_MAP: Record<string, LogLevel> = { + debug: LogLevel.DEBUG, + info: LogLevel.INFO, + warn: LogLevel.WARN, + error: LogLevel.ERROR, +}; + +const COLORS = { + reset: "\x1b[0m", + gray: "\x1b[90m", + cyan: "\x1b[36m", + yellow: "\x1b[33m", + red: "\x1b[31m", +}; + +const REDACTED = "[REDACTED]"; + +// Order matters: webhook URLs before generic query-param redaction. 
+const REDACTION_PATTERNS: Array<{ pattern: RegExp; replacement: string }> = [ + // Bearer / token / Basic Authorization headers + { pattern: /(Bearer\s+)[A-Za-z0-9._\-+/=]+/gi, replacement: `$1${REDACTED}` }, + { pattern: /(Basic\s+)[A-Za-z0-9+/=]+/gi, replacement: `$1${REDACTED}` }, + // Discord webhook URLs: keep the path prefix but drop id+token + { + pattern: /https:\/\/(?:[a-z]+\.)?discord(?:app)?\.com\/api(?:\/v\d+)?\/webhooks\/[^\s"'<>]+/gi, + replacement: `https://discord.com/api/webhooks/${REDACTED}`, + }, + // Discord bot tokens: 3 base64url segments separated by `.` + { + pattern: /[MN][A-Za-z\d]{23,28}\.[\w-]{6,7}\.[\w-]{27,}/g, + replacement: REDACTED, + }, + // Sensitive query parameters + { + pattern: /([?&](?:code|token|access_token|refresh_token|client_secret|api[_-]?key|secret|password)=)[^&\s"'<>]+/gi, + replacement: `$1${REDACTED}`, + }, +]; + +export function redactSensitive(value: string): string { + if (!value) return value; + let out = value; + for (const { pattern, replacement } of REDACTION_PATTERNS) { + out = out.replace(pattern, replacement); + } + return out; +} + +class Logger { + private level: LogLevel; + + constructor() { + this.level = LEVEL_MAP[config.logLevel] ?? 
LogLevel.INFO; + } + + private timestamp(): string { + return new Date().toISOString(); + } + + private log( + level: LogLevel, + label: string, + color: string, + message: string, + error?: Error, + ): void { + if (level < this.level) return; + const ts = `${COLORS.gray}${this.timestamp()}${COLORS.reset}`; + const tag = `${color}[${label}]${COLORS.reset}`; + console.log(`${ts} ${tag} ${redactSensitive(message)}`); + if (error?.stack) { + console.log(`${COLORS.gray}${redactSensitive(error.stack)}${COLORS.reset}`); + } + } + + debug(message: string): void { + this.log(LogLevel.DEBUG, "DEBUG", COLORS.gray, message); + } + + info(message: string): void { + this.log(LogLevel.INFO, "INFO", COLORS.cyan, message); + } + + warn(message: string): void { + this.log(LogLevel.WARN, "WARN", COLORS.yellow, message); + } + + error(message: string, error?: Error): void { + this.log(LogLevel.ERROR, "ERROR", COLORS.red, message, error); + } +} + +export const logger = new Logger(); +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/utils test packages/utils/tests/logger.test.ts +docker compose run --rm bot pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add packages/utils/src/logger.ts packages/utils/tests/logger.test.ts +git commit -m "feat(logger): redact bearer tokens, webhooks, and secret query params" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-06-audit-details-json-column.md b/docs/superpowers/plans/2026-04-07-sec-data-06-audit-details-json-column.md new file mode 100644 index 0000000..594761e --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-06-audit-details-json-column.md @@ -0,0 +1,147 @@ +# Dashboard Audit Details JSON Column — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. 
+ +**Severity:** MEDIUM +**Goal:** Replace the `DashboardAuditLog.details String @default("{}")` field with a real `Json` column so audit details are stored typed, queryable, and free of double-encoding bugs. +**Architecture:** Today `apps/dashboard/src/server/shared/permissions.ts:185` does `details: JSON.stringify(entry.details ?? {})`, which (a) stores escaped JSON inside a `String`, (b) prevents Postgres-side JSONB queries (e.g. searching for an audit entry by `targetType.before`), and (c) silently masks malformed payloads. Migrating to Prisma's `Json` type maps to PostgreSQL `jsonb`, allowing GIN indexes, structured filters, and direct round-tripping with no manual stringify. +**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`packages/database/prisma/schema.prisma:597` declares `details String @default("{}")` and `apps/dashboard/src/server/shared/permissions.ts:185` writes `JSON.stringify(entry.details ?? {})`. Any read site that forgets to `JSON.parse` returns the raw escaped string. Beyond the data-quality issue, this prevents querying audit logs by their structured fields (we cannot ask "find all role.update entries where details.before.permissions contained X"), undermining the audit log's purpose. 
+ +## Files +- Read: `packages/database/prisma/schema.prisma` +- Read: `apps/dashboard/src/server/shared/permissions.ts` +- Modify: `packages/database/prisma/schema.prisma` +- Modify: `apps/dashboard/src/server/shared/permissions.ts` +- Create: `packages/database/prisma/migrations/20260407140000_dashboard_audit_details_jsonb/migration.sql` +- Create: `apps/dashboard/tests/server/shared/audit-log-json.test.ts` + +## Tasks + +### Task 1: Migrate column + write helper + +- [ ] **Step 1: Write the failing test** + +Create `apps/dashboard/tests/server/shared/audit-log-json.test.ts`: + +```typescript +import { describe, it, expect, beforeEach } from "vitest"; +import { getPrisma } from "@fluxcore/database"; +import { createDashboardAuditLog } from "../../../src/server/shared/permissions.js"; + +describe("createDashboardAuditLog (JSONB)", () => { + beforeEach(async () => { + await getPrisma().dashboardAuditLog.deleteMany({}); + }); + + it("stores details as a structured object, not a string", async () => { + await createDashboardAuditLog({ + guildId: "g1", + userId: "u1", + username: "user#0", + action: "role.update", + targetType: "role", + targetId: "role-1", + details: { before: { permissions: ["a"] }, after: { permissions: ["a", "b"] } }, + }); + + const row = await getPrisma().dashboardAuditLog.findFirst({ + where: { guildId: "g1" }, + }); + expect(row).not.toBeNull(); + // After migration, details is an object — NOT a JSON string + expect(typeof row!.details).toBe("object"); + expect((row!.details as { before: { permissions: string[] } }).before.permissions).toEqual(["a"]); + }); + + it("supports JSONB filtering", async () => { + await createDashboardAuditLog({ + guildId: "g2", + userId: "u", + username: "u", + action: "role.update", + details: { reason: "promotion" }, + }); + + const rows = await getPrisma().$queryRaw>` + SELECT id FROM "DashboardAuditLog" + WHERE "guildId" = 'g2' AND details->>'reason' = 'promotion' + `; + expect(rows.length).toBe(1); + }); +}); 
+``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm dashboard pnpm test:integration apps/dashboard/tests/server/shared/audit-log-json.test.ts +``` + +The first assertion fails because `details` is currently `string`. The JSONB query also fails because `details` is `text`. + +- [ ] **Step 3: Update schema, helper, and migrate** + +Edit `packages/database/prisma/schema.prisma` line 597: + +```prisma + details Json @default("{}") +``` + +Edit `apps/dashboard/src/server/shared/permissions.ts:185`. Replace the `details: JSON.stringify(entry.details ?? {})` line with: + +```typescript + details: (entry.details ?? {}) as object, +``` + +Generate a migration. Because Prisma's auto-generated migration would `DROP` and re-add the column (losing data), write the SQL by hand. Run: + +```bash +docker compose run --rm dashboard pnpm db:migrate --create-only --name dashboard_audit_details_jsonb +``` + +Then edit the generated `migration.sql` to: + +```sql +-- Convert DashboardAuditLog.details from text to jsonb, parsing existing rows. +ALTER TABLE "DashboardAuditLog" + ALTER COLUMN "details" DROP DEFAULT, + ALTER COLUMN "details" TYPE jsonb USING ( + CASE + WHEN "details" IS NULL OR "details" = '' THEN '{}'::jsonb + ELSE "details"::jsonb + END + ), + ALTER COLUMN "details" SET DEFAULT '{}'::jsonb; +``` + +Then apply: + +```bash +docker compose run --rm dashboard pnpm db:migrate +docker compose run --rm dashboard pnpm db:generate +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm dashboard pnpm test:integration apps/dashboard/tests/server/shared/audit-log-json.test.ts +docker compose run --rm dashboard pnpm typecheck +``` + +If `pnpm typecheck` flags any read site that previously did `JSON.parse(row.details)`, fix it to use the value directly. 
Search: + +```bash +docker compose run --rm dashboard sh -lc 'grep -rn "dashboardAuditLog" apps/dashboard/src packages | grep -i details' +``` + +- [ ] **Step 5: Commit** + +```bash +git add packages/database/prisma/schema.prisma apps/dashboard/src/server/shared/permissions.ts packages/database/prisma/migrations/ apps/dashboard/tests/server/shared/audit-log-json.test.ts +git commit -m "fix(audit): store DashboardAuditLog.details as jsonb instead of stringified JSON" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-07-safejsonparse-warn.md b/docs/superpowers/plans/2026-04-07-sec-data-07-safejsonparse-warn.md new file mode 100644 index 0000000..0ec25d2 --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-07-safejsonparse-warn.md @@ -0,0 +1,181 @@ +# safeJsonParse Silent Fallback Warning — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** LOW +**Goal:** Make `safeJsonParse` in `packages/systems/src/actions/persistence.ts` log a warning (with identifying context) whenever it falls back, so corrupt rule rows surface in the logs instead of silently degrading to empty defaults. +**Architecture:** `persistence.ts:13-18` defines a generic `safeJsonParse(json, fallback)` used by `parseActionsColumn` and `rowToRule`. When a row's `actions`/`conditions` column contains invalid JSON, callers silently get `[]` / `{}` and the rule no-ops. We will (a) extend the helper to accept an optional `context` label, (b) call `logger.warn` on parse failure with the context, and (c) thread sensible context strings through every call site so logs are actionable. +**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`packages/systems/src/actions/persistence.ts:13-18` swallows `JSON.parse` errors and returns the fallback. 
If a malformed row sneaks into the DB (manual edit, failed migration, partial write from a crashed worker), the affected rule will silently behave as if it had no actions/conditions. There is no telemetry to detect this — the rule appears in the dashboard, looks enabled, and produces zero events. Adding a `logger.warn(...)` with the row id and column name turns a silent class of bug into a loud, actionable one. + +## Files +- Read: `packages/systems/src/actions/persistence.ts` +- Modify: `packages/systems/src/actions/persistence.ts` +- Create: `packages/systems/tests/unit/persistence-safejsonparse.test.ts` + +## Tasks + +### Task 1: Add context-aware fallback logging + +- [ ] **Step 1: Write the failing test** + +Create `packages/systems/tests/unit/persistence-safejsonparse.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach } from "vitest"; + +const warnMock = vi.fn(); + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: warnMock, error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({}), +})); + +import { rowToRule } from "../../src/actions/persistence.js"; + +describe("rowToRule fallback logging", () => { + beforeEach(() => warnMock.mockReset()); + + it("logs a warning when actions JSON is malformed", () => { + rowToRule({ + id: 42, + guildId: "g1", + name: "broken", + enabled: true, + eventType: "memberJoin", + actions: "{not-json", + conditions: "{}", + priority: 0, + createdBy: "u1", + }); + expect(warnMock).toHaveBeenCalledTimes(1); + const msg = warnMock.mock.calls[0][0] as string; + expect(msg).toContain("ActionRule"); + expect(msg).toContain("id=42"); + expect(msg).toContain("actions"); + }); + + it("logs a warning when conditions JSON is malformed", () => { + rowToRule({ + id: 7, + guildId: "g1", + name: "broken", + enabled: true, + eventType: "memberJoin", + actions: "[]", + conditions: "{bad", + priority: 0, + createdBy: "u1", + }); + 
expect(warnMock).toHaveBeenCalledTimes(1); + expect(warnMock.mock.calls[0][0]).toContain("conditions"); + }); + + it("does NOT warn on valid JSON", () => { + rowToRule({ + id: 1, + guildId: "g1", + name: "ok", + enabled: true, + eventType: "memberJoin", + actions: "[]", + conditions: "{}", + priority: 0, + createdBy: "u1", + }); + expect(warnMock).not.toHaveBeenCalled(); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/systems test packages/systems/tests/unit/persistence-safejsonparse.test.ts +``` + +Expect: `expected "spy" to be called`. The current code does not call `logger.warn`. + +- [ ] **Step 3: Implement the warning + thread context** + +Edit `packages/systems/src/actions/persistence.ts`. Replace `safeJsonParse`: + +```typescript +function safeJsonParse<T>( + json: string, + fallback: T, + context?: string, +): T { + try { + return JSON.parse(json) as T; + } catch (err) { + if (context) { + logger.warn( + `safeJsonParse fallback for ${context}: ${err instanceof Error ? err.message : String(err)}`, + ); + } + return fallback; + } +} +``` + +Then update `parseActionsColumn` and `rowToRule` to thread context: + +```typescript +function parseActionsColumn( + raw: string, + context?: string, +): { + actions: ActionConfig[]; + steps?: RuleStep[]; + entryStepId?: string; +} { + const parsed = safeJsonParse(raw, [], context); + // ... unchanged ... +} + +export function rowToRule(row: { /* ...unchanged signature... */ }): ActionRule { + const ctxBase = `ActionRule id=${row.id} guildId=${row.guildId}`; + const { actions, steps, entryStepId } = parseActionsColumn( + row.actions, + `${ctxBase} column=actions`, + ); + return { + id: row.id, + guildId: row.guildId, + name: row.name, + enabled: row.enabled, + eventType: row.eventType as ActionEventType, + actions, + ...(steps ? 
{ steps, entryStepId } : {}), + conditions: safeJsonParse( + row.conditions, + {}, + `${ctxBase} column=conditions`, + ), + priority: row.priority, + createdBy: row.createdBy, + }; +} +``` + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/systems test packages/systems/tests/unit/persistence-safejsonparse.test.ts +docker compose run --rm bot pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add packages/systems/src/actions/persistence.ts packages/systems/tests/unit/persistence-safejsonparse.test.ts +git commit -m "fix(actions): warn on safeJsonParse fallback with row context" +``` diff --git a/docs/superpowers/plans/2026-04-07-sec-data-08-action-rate-limit-per-event-type.md b/docs/superpowers/plans/2026-04-07-sec-data-08-action-rate-limit-per-event-type.md new file mode 100644 index 0000000..e7ea98f --- /dev/null +++ b/docs/superpowers/plans/2026-04-07-sec-data-08-action-rate-limit-per-event-type.md @@ -0,0 +1,168 @@ +# Action Executor Per-Event-Type Rate Limit — Fix Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Severity:** LOW +**Goal:** Replace the single per-guild rate-limit bucket in the action executor with a per-(guild, eventType) bucket so a noisy event class (e.g. `messageCreate`) cannot starve quieter, higher-value events (e.g. `memberJoin`). +**Architecture:** `apps/bot/src/features/automation/system/executor.ts:15-37` keeps `guildExecutionCounts: Map` capped at 60 executions/min/guild. Today a single high-volume event type can fill the bucket and cause `processEvent` to drop unrelated events for the rest of the window. We will key the map on `${guildId}:${eventType}` and surface the active eventType into `checkRateLimit`. The 60/min cap stays per bucket. 
+**Tech Stack:** Prisma 7, PostgreSQL 18, TypeScript, Vitest + +--- + +## Vulnerability +`apps/bot/src/features/automation/system/executor.ts:15-37` shares one 60-execution/minute bucket across all event types in a guild. If `messageCreate` rules fire 60 times in 30 seconds, every `memberJoin` / `voiceStateUpdate` / `roleCreate` event for the next 30 seconds is silently dropped, regardless of how critical it is. This is both a fairness issue (the warn-on-spam rule starves the welcome rule) and a soft-DoS vector (a single chatty channel can effectively disable moderation for the rest of the window). + +## Files +- Read: `apps/bot/src/features/automation/system/executor.ts` +- Modify: `apps/bot/src/features/automation/system/executor.ts` +- Create: `apps/bot/tests/features/automation/system/executor-rate-limit.test.ts` + +## Tasks + +### Task 1: Per-(guild, eventType) bucket + +- [ ] **Step 1: Write the failing test** + +Create `apps/bot/tests/features/automation/system/executor-rate-limit.test.ts`: + +```typescript +import { describe, it, expect, vi, beforeEach } from "vitest"; + +vi.mock("@fluxcore/utils", () => ({ + logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +const getRulesForEventMock = vi.fn(); +const getGuildSettingsOrDefaultMock = vi.fn(() => ({ globalEnabled: true })); +const logExecutionMock = vi.fn().mockResolvedValue(undefined); +const executorMock = vi.fn().mockResolvedValue(undefined); + +vi.mock("@fluxcore/systems/actions/cache", () => ({ + getRulesForEvent: getRulesForEventMock, +})); + +vi.mock("@fluxcore/systems/actions/config", () => ({ + getGuildSettingsOrDefault: getGuildSettingsOrDefaultMock, +})); + +vi.mock("@fluxcore/systems/actions/persistence", () => ({ + logExecution: logExecutionMock, +})); + +vi.mock("../../../../src/features/automation/system/registry.js", () => ({ + getExecutor: () => executorMock, +})); + +import { processEvent } from "../../../../src/features/automation/system/executor.js"; 
+import type { Client } from "discord.js"; + +const fakeClient = {} as Client; + +function makeRule(eventType: string) { + return { + id: 1, + guildId: "g1", + name: `r-${eventType}`, + enabled: true, + eventType, + actions: [{ type: "addRole", roleId: "r" }], + conditions: {}, + priority: 0, + createdBy: "u", + }; +} + +describe("processEvent rate limiting", () => { + beforeEach(() => { + executorMock.mockClear(); + getRulesForEventMock.mockReset(); + }); + + it("does not let one event type starve another", async () => { + getRulesForEventMock.mockImplementation((_g: string, ev: string) => [makeRule(ev)]); + + // Burn through the messageCreate budget + for (let i = 0; i < 80; i++) { + await processEvent(fakeClient, { + guildId: "g1", + eventType: "messageCreate", + channelId: "c1", + } as never); + } + + const messageCreateExecCount = executorMock.mock.calls.length; + expect(messageCreateExecCount).toBeLessThanOrEqual(60); + + // Now memberJoin must still be allowed + executorMock.mockClear(); + await processEvent(fakeClient, { + guildId: "g1", + eventType: "memberJoin", + } as never); + + expect(executorMock).toHaveBeenCalledTimes(1); + }); +}); +``` + +- [ ] **Step 2: Run the test and watch it fail** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/bot test apps/bot/tests/features/automation/system/executor-rate-limit.test.ts +``` + +The final assertion fails because the shared bucket is exhausted by `messageCreate`. + +- [ ] **Step 3: Implement per-event-type buckets** + +Edit `apps/bot/src/features/automation/system/executor.ts`. Replace the rate-limit block: + +```typescript +// --- Per-(guild, eventType) rate limiting --- +// +// Each event type gets its own 60/min bucket so a noisy event class +// (e.g. messageCreate) cannot starve quieter ones (e.g. memberJoin). 
+ +const RATE_LIMIT_WINDOW_MS = 60_000; +const RATE_LIMIT_MAX_EXECUTIONS = 60; + +const eventBuckets = new Map<string, { count: number; resetAt: number }>(); + +function bucketKey(guildId: string, eventType: string): string { + return `${guildId}\u0000${eventType}`; +} + +function checkRateLimit(guildId: string, eventType: string): boolean { + const key = bucketKey(guildId, eventType); + const now = Date.now(); + let entry = eventBuckets.get(key); + + if (!entry || now >= entry.resetAt) { + entry = { count: 0, resetAt: now + RATE_LIMIT_WINDOW_MS }; + eventBuckets.set(key, entry); + } + + if (entry.count >= RATE_LIMIT_MAX_EXECUTIONS) { + return false; + } + + entry.count++; + return true; +} +``` + +Then update both call sites in `executeSteps` and `processEvent` from `checkRateLimit(context.guildId)` to `checkRateLimit(context.guildId, context.eventType)`. + +- [ ] **Step 4: Run the test and watch it pass** + +```bash +docker compose run --rm bot pnpm --filter @fluxcore/bot test apps/bot/tests/features/automation/system/executor-rate-limit.test.ts +docker compose run --rm bot pnpm typecheck +``` + +- [ ] **Step 5: Commit** + +```bash +git add apps/bot/src/features/automation/system/executor.ts apps/bot/tests/features/automation/system/executor-rate-limit.test.ts +git commit -m "fix(automation): per-event-type rate limit buckets to prevent starvation" +``` diff --git a/packages/database/prisma/migrations/20260407120000_encrypt_dashboard_session_tokens/migration.sql b/packages/database/prisma/migrations/20260407120000_encrypt_dashboard_session_tokens/migration.sql new file mode 100644 index 0000000..a3b653e --- /dev/null +++ b/packages/database/prisma/migrations/20260407120000_encrypt_dashboard_session_tokens/migration.sql @@ -0,0 +1,6 @@ +-- Annotation-only migration: DashboardSession.accessToken is now contractually +-- required to be AES-256-GCM ciphertext (see apps/dashboard/src/server/shared/crypto.ts). 
+-- The schema /// @encrypted doc-comment locks the contract; existing plaintext rows +-- (if any) must be backfilled via scripts/migrate-encrypt-session-tokens.ts. +-- No DDL is required because the column type is unchanged. +SELECT 1; diff --git a/packages/database/prisma/migrations/20260407130000_restrict_dashboard_role_delete/migration.sql b/packages/database/prisma/migrations/20260407130000_restrict_dashboard_role_delete/migration.sql new file mode 100644 index 0000000..ad6e49b --- /dev/null +++ b/packages/database/prisma/migrations/20260407130000_restrict_dashboard_role_delete/migration.sql @@ -0,0 +1,13 @@ +-- Switch DashboardRoleAssignment.role FK from ON DELETE CASCADE to ON DELETE RESTRICT. +-- This prevents silent privilege loss: deleting a DashboardRole that still has +-- active assignments now fails, forcing callers to go through the audited +-- deleteDashboardRoleWithAudit() helper which enumerates and audit-logs each +-- unassignment before removing the role. + +ALTER TABLE "DashboardRoleAssignment" + DROP CONSTRAINT "DashboardRoleAssignment_roleId_fkey"; + +ALTER TABLE "DashboardRoleAssignment" + ADD CONSTRAINT "DashboardRoleAssignment_roleId_fkey" + FOREIGN KEY ("roleId") REFERENCES "DashboardRole"("id") + ON DELETE RESTRICT ON UPDATE CASCADE; diff --git a/packages/database/prisma/migrations/20260407140000_dashboard_audit_details_jsonb/migration.sql b/packages/database/prisma/migrations/20260407140000_dashboard_audit_details_jsonb/migration.sql new file mode 100644 index 0000000..89e0aa9 --- /dev/null +++ b/packages/database/prisma/migrations/20260407140000_dashboard_audit_details_jsonb/migration.sql @@ -0,0 +1,13 @@ +-- Convert DashboardAuditLog.details from text to jsonb, parsing existing rows. +-- This enables structured queries (e.g. details->>'reason') and removes the +-- double-encoding bug where readers had to JSON.parse the column. 
+ +ALTER TABLE "DashboardAuditLog" + ALTER COLUMN "details" DROP DEFAULT, + ALTER COLUMN "details" TYPE jsonb USING ( + CASE + WHEN "details" IS NULL OR "details" = '' THEN '{}'::jsonb + ELSE "details"::jsonb + END + ), + ALTER COLUMN "details" SET DEFAULT '{}'::jsonb; diff --git a/packages/database/prisma/schema.prisma b/packages/database/prisma/schema.prisma index f370d8c..2c52f91 100644 --- a/packages/database/prisma/schema.prisma +++ b/packages/database/prisma/schema.prisma @@ -106,6 +106,8 @@ model DashboardSession { userId String username String avatar String? + /// Encrypted with AES-256-GCM via apps/dashboard/src/server/shared/crypto.ts + /// MUST be written through encryptAccessToken() in session.ts accessToken String guilds String @default("[]") guildsRefreshedAt DateTime @default(now()) @@ -566,7 +568,7 @@ model DashboardRoleAssignment { guildId String userId String roleId String - role DashboardRole @relation(fields: [roleId], references: [id], onDelete: Cascade) + role DashboardRole @relation(fields: [roleId], references: [id], onDelete: Restrict) assignedBy String createdAt DateTime @default(now()) @@ -594,7 +596,7 @@ model DashboardAuditLog { action String // Permission key or action descriptor targetType String? // "case", "rule", "role", "user", "settings" targetId String? 
- details String @default("{}") // JSON — before/after, metadata + details Json @default("{}") // JSONB — before/after, metadata createdAt DateTime @default(now()) @@index([guildId, createdAt]) diff --git a/packages/systems/src/actions/cache.ts b/packages/systems/src/actions/cache.ts index dc01731..1467c93 100644 --- a/packages/systems/src/actions/cache.ts +++ b/packages/systems/src/actions/cache.ts @@ -63,11 +63,30 @@ export function invalidateGuild(guildId: string): void { } export async function reloadGuild(guildId: string): Promise { - // Load new rules first, then swap to minimize the window where cache is empty const rules = await getRulesByGuild(guildId); - invalidateGuild(guildId); + + // Build the new per-event map locally first; only after it is fully + // populated do we atomically swap it into the global cache. This + // guarantees that any concurrent getRulesForEvent() call observes + // EITHER the complete previous rule set OR the complete new one — never + // an empty intermediate state. 
+ const newGuildMap = new Map(); for (const rule of rules) { - addToInternalCache(rule); + let bucket = newGuildMap.get(rule.eventType); + if (!bucket) { + bucket = []; + newGuildMap.set(rule.eventType, bucket); + } + bucket.push(rule); + } + for (const bucket of newGuildMap.values()) { + bucket.sort((a, b) => b.priority - a.priority); + } + + if (newGuildMap.size === 0) { + ruleCache.delete(guildId); + } else { + ruleCache.set(guildId, newGuildMap); } } diff --git a/packages/systems/src/actions/persistence.ts b/packages/systems/src/actions/persistence.ts index 9bfdb61..c487917 100644 --- a/packages/systems/src/actions/persistence.ts +++ b/packages/systems/src/actions/persistence.ts @@ -10,20 +10,34 @@ import type { StepsPayload, } from "./types.js"; -function safeJsonParse(json: string, fallback: T): T { +function safeJsonParse(json: string, fallback: T, context?: string): T { try { return JSON.parse(json) as T; - } catch { + } catch (err) { + if (context) { + logger.warn( + `safeJsonParse fallback for ${context}: ${ + err instanceof Error ? 
err.message : String(err) + }`, + ); + } return fallback; } } -function parseActionsColumn(raw: string): { +function parseActionsColumn( + raw: string, + context?: string, +): { actions: ActionConfig[]; steps?: RuleStep[]; entryStepId?: string; } { - const parsed = safeJsonParse(raw, []); + const parsed = safeJsonParse( + raw, + [], + context, + ); // V2 format: object with _v: 2 if (parsed && !Array.isArray(parsed) && (parsed as StepsPayload)._v === 2) { const payload = parsed as StepsPayload; @@ -68,7 +82,11 @@ export function rowToRule(row: { priority: number; createdBy: string; }): ActionRule { - const { actions, steps, entryStepId } = parseActionsColumn(row.actions); + const ctxBase = `ActionRule id=${row.id} guildId=${row.guildId}`; + const { actions, steps, entryStepId } = parseActionsColumn( + row.actions, + `${ctxBase} column=actions`, + ); return { id: row.id, guildId: row.guildId, @@ -77,7 +95,11 @@ export function rowToRule(row: { eventType: row.eventType as ActionEventType, actions, ...(steps ? { steps, entryStepId } : {}), - conditions: safeJsonParse(row.conditions, {}), + conditions: safeJsonParse( + row.conditions, + {}, + `${ctxBase} column=conditions`, + ), priority: row.priority, createdBy: row.createdBy, }; diff --git a/packages/systems/tests/integration/audit-log-json.test.ts b/packages/systems/tests/integration/audit-log-json.test.ts new file mode 100644 index 0000000..b44775b --- /dev/null +++ b/packages/systems/tests/integration/audit-log-json.test.ts @@ -0,0 +1,70 @@ +/** + * Integration tests: DashboardAuditLog.details JSONB column. + * + * Verifies the migrated jsonb column type and that + * createDashboardAuditLog stores structured objects (not stringified JSON). 
+ */ + +import { describe, it, expect, beforeAll, beforeEach, afterAll } from "vitest"; +import { getPrisma } from "@fluxcore/database"; +import { setupTestDatabase, teardownTestDatabase } from "../helpers/db.js"; +import { createDashboardAuditLog } from "../../../../apps/dashboard/src/server/shared/permissions.js"; + +async function cleanAuditTables(): Promise { + const prisma = getPrisma(); + await prisma.$executeRawUnsafe(`TRUNCATE TABLE "DashboardAuditLog" CASCADE`); +} + +describe("createDashboardAuditLog (JSONB)", () => { + beforeAll(async () => { + await setupTestDatabase(); + }); + beforeEach(async () => { + await cleanAuditTables(); + }); + afterAll(async () => { + await teardownTestDatabase(); + }); + + it("stores details as a structured object, not a string", async () => { + await createDashboardAuditLog({ + guildId: "g1", + userId: "u1", + username: "user#0", + action: "role.update", + targetType: "role", + targetId: "role-1", + details: { + before: { permissions: ["a"] }, + after: { permissions: ["a", "b"] }, + }, + }); + + const row = await getPrisma().dashboardAuditLog.findFirst({ + where: { guildId: "g1" }, + }); + expect(row).not.toBeNull(); + // After migration, details is an object — NOT a JSON string + expect(typeof row!.details).toBe("object"); + const details = row!.details as { + before: { permissions: string[] }; + }; + expect(details.before.permissions).toEqual(["a"]); + }); + + it("supports JSONB filtering", async () => { + await createDashboardAuditLog({ + guildId: "g2", + userId: "u", + username: "u", + action: "role.update", + details: { reason: "promotion" }, + }); + + const rows = await getPrisma().$queryRaw>` + SELECT id FROM "DashboardAuditLog" + WHERE "guildId" = 'g2' AND details->>'reason' = 'promotion' + `; + expect(rows.length).toBe(1); + }); +}); diff --git a/packages/systems/tests/integration/dashboard-role-delete.test.ts b/packages/systems/tests/integration/dashboard-role-delete.test.ts new file mode 100644 index 
0000000..a64ea4f --- /dev/null +++ b/packages/systems/tests/integration/dashboard-role-delete.test.ts @@ -0,0 +1,112 @@ +/** + * Integration tests: DashboardRole delete semantics + audited helper. + * + * Verifies the schema's onDelete: Restrict guard and the + * deleteDashboardRoleWithAudit() helper. Uses a REAL PostgreSQL test + * database — no DB mocks. + */ + +import { describe, it, expect, beforeAll, beforeEach, afterAll } from "vitest"; +import { getPrisma } from "@fluxcore/database"; +import { setupTestDatabase, teardownTestDatabase } from "../helpers/db.js"; +import { deleteDashboardRoleWithAudit } from "../../../../apps/dashboard/src/server/shared/dashboardRoleDelete.js"; + +async function cleanDashboardRoleTables(): Promise<void> { + const prisma = getPrisma(); + await prisma.$executeRawUnsafe(` + TRUNCATE TABLE + "DashboardAuditLog", + "DashboardRoleAssignment", + "DashboardUserPermission", + "DashboardRole" + CASCADE + `); +} + +describe("DashboardRole delete semantics", () => { + beforeAll(async () => { + await setupTestDatabase(); + }); + beforeEach(async () => { + await cleanDashboardRoleTables(); + }); + afterAll(async () => { + await teardownTestDatabase(); + }); + + it("refuses to delete a role with active assignments", async () => { + const prisma = getPrisma(); + const role = await prisma.dashboardRole.create({ + data: { guildId: "g1", name: "mods", permissions: '["moderation.warn"]' }, + }); + await prisma.dashboardRoleAssignment.create({ + data: { guildId: "g1", userId: "u1", roleId: role.id, assignedBy: "admin" }, + }); + + await expect( + prisma.dashboardRole.delete({ where: { id: role.id } }), + ).rejects.toThrow(); + }); + + it("permits deletion after all assignments are removed", async () => { + const prisma = getPrisma(); + const role = await prisma.dashboardRole.create({ + data: { guildId: "g1", name: "mods", permissions: "[]" }, + }); + await prisma.dashboardRoleAssignment.create({ + data: { guildId: "g1", userId: "u1", roleId: role.id,
assignedBy: "admin" }, + }); + await prisma.dashboardRoleAssignment.deleteMany({ where: { roleId: role.id } }); + + await expect( + prisma.dashboardRole.delete({ where: { id: role.id } }), + ).resolves.toMatchObject({ id: role.id }); + }); +}); + +describe("deleteDashboardRoleWithAudit", () => { + beforeAll(async () => { + await setupTestDatabase(); + }); + beforeEach(async () => { + await cleanDashboardRoleTables(); + }); + afterAll(async () => { + await teardownTestDatabase(); + }); + + it("unassigns members, writes audit entries, then deletes the role", async () => { + const prisma = getPrisma(); + const role = await prisma.dashboardRole.create({ + data: { guildId: "g1", name: "mods", permissions: "[]" }, + }); + await prisma.dashboardRoleAssignment.createMany({ + data: [ + { guildId: "g1", userId: "u1", roleId: role.id, assignedBy: "a" }, + { guildId: "g1", userId: "u2", roleId: role.id, assignedBy: "a" }, + ], + }); + + await deleteDashboardRoleWithAudit({ + guildId: "g1", + roleId: role.id, + actorId: "admin", + actorUsername: "admin#0", + }); + + const remaining = await prisma.dashboardRole.findUnique({ + where: { id: role.id }, + }); + expect(remaining).toBeNull(); + + const auditEntries = await prisma.dashboardAuditLog.findMany({ + where: { guildId: "g1", action: { in: ["role.unassign", "role.delete"] } }, + orderBy: { createdAt: "asc" }, + }); + expect(auditEntries.map((e) => e.action)).toEqual([ + "role.unassign", + "role.unassign", + "role.delete", + ]); + }); +}); diff --git a/packages/systems/tests/unit/actions-cache-atomic.test.ts b/packages/systems/tests/unit/actions-cache-atomic.test.ts new file mode 100644 index 0000000..694060a --- /dev/null +++ b/packages/systems/tests/unit/actions-cache-atomic.test.ts @@ -0,0 +1,107 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; + +const getRulesByGuildMock = vi.fn(); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({}), +})); + +vi.mock("@fluxcore/utils", () => ({ + logger: { 
info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() }, +})); + +vi.mock("../../src/actions/persistence.js", () => ({ + getRulesByGuild: (...args: unknown[]) => getRulesByGuildMock(...args), + rowToRule: (r: unknown) => r, +})); + +import { + reloadGuild, + getRulesForEvent, + addRuleToCache, + invalidateGuild, +} from "../../src/actions/cache.js"; +import type { ActionRule, ActionEventType } from "../../src/actions/types.js"; + +function rule(id: number, eventType: string): ActionRule { + return { + id, + guildId: "g1", + name: `r${id}`, + enabled: true, + eventType: eventType as ActionEventType, + actions: [], + conditions: {}, + priority: 0, + createdBy: "u1", + }; +} + +describe("reloadGuild atomicity", () => { + beforeEach(() => { + invalidateGuild("g1"); + getRulesByGuildMock.mockReset(); + }); + + it("never exposes an empty rule set during reload (synchronous fetch)", async () => { + addRuleToCache(rule(1, "memberJoin")); + expect(getRulesForEvent("g1", "memberJoin")).toHaveLength(1); + + const observed: number[] = []; + getRulesByGuildMock.mockImplementation(async () => { + observed.push(getRulesForEvent("g1", "memberJoin").length); + return [rule(2, "memberJoin"), rule(3, "memberJoin")]; + }); + + await reloadGuild("g1"); + + expect(observed).toEqual([1]); + expect(getRulesForEvent("g1", "memberJoin")).toHaveLength(2); + }); + + it("swaps the entire eventType map in one assignment", async () => { + addRuleToCache(rule(10, "messageDeleted")); + getRulesByGuildMock.mockResolvedValue([rule(11, "messageCreated")]); + + await reloadGuild("g1"); + + expect(getRulesForEvent("g1", "messageDeleted")).toHaveLength(0); + expect(getRulesForEvent("g1", "messageCreated")).toHaveLength(1); + }); + + it("preserves visibility across microtask boundaries", async () => { + addRuleToCache(rule(20, "voiceJoin")); + getRulesByGuildMock.mockImplementation(async () => { + await Promise.resolve(); + return [rule(21, "voiceJoin")]; + }); + + const reloadPromise = 
reloadGuild("g1"); + await Promise.resolve(); + expect(getRulesForEvent("g1", "voiceJoin").length).toBeGreaterThan(0); + await reloadPromise; + expect(getRulesForEvent("g1", "voiceJoin")).toHaveLength(1); + }); + + it("clears the guild cache when reload returns no rules", async () => { + addRuleToCache(rule(30, "memberJoin")); + getRulesByGuildMock.mockResolvedValue([]); + + await reloadGuild("g1"); + + expect(getRulesForEvent("g1", "memberJoin")).toHaveLength(0); + }); + + it("sorts each event bucket by priority desc after reload", async () => { + getRulesByGuildMock.mockResolvedValue([ + { ...rule(40, "memberJoin"), priority: 1 }, + { ...rule(41, "memberJoin"), priority: 5 }, + { ...rule(42, "memberJoin"), priority: 3 }, + ]); + + await reloadGuild("g1"); + + const result = getRulesForEvent("g1", "memberJoin"); + expect(result.map((r) => r.id)).toEqual([41, 42, 40]); + }); +}); diff --git a/packages/systems/tests/unit/logger-redaction.test.ts b/packages/systems/tests/unit/logger-redaction.test.ts new file mode 100644 index 0000000..5d681c2 --- /dev/null +++ b/packages/systems/tests/unit/logger-redaction.test.ts @@ -0,0 +1,110 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; + +vi.mock("@fluxcore/config", () => ({ + config: { logLevel: "debug" }, +})); + +import { logger, redactSensitive } from "../../../utils/src/logger.js"; + +describe("redactSensitive", () => { + it("redacts Bearer tokens", () => { + expect(redactSensitive("Authorization: Bearer abc.def.ghi")).toBe( + "Authorization: Bearer [REDACTED]", + ); + }); + + it("redacts Basic auth headers", () => { + expect(redactSensitive("Authorization: Basic dXNlcjpwYXNz")).toBe( + "Authorization: Basic [REDACTED]", + ); + }); + + it("redacts Discord bot tokens (MFA-format)", () => { + const tok = + "MTAxNzg5NjU0MzIxMDk4NzY1NA.GabCde.fghIjkLmnOpqrStuvwxyz0123456789ABCDEF"; + const out = redactSensitive(`Logging in with ${tok}`); + expect(out).toContain("[REDACTED]"); + 
expect(out).not.toContain(tok); + }); + + it("redacts Discord webhook URLs", () => { + const url = + "https://discord.com/api/webhooks/1234567890/abcdefgHIJKLMNOPqrstuvwxyz"; + expect(redactSensitive(`POST to ${url}`)).toBe( + "POST to https://discord.com/api/webhooks/[REDACTED]", + ); + }); + + it("redacts versioned Discord webhook URLs", () => { + const url = + "https://discord.com/api/v10/webhooks/1234567890/abcdefgHIJKLMNOPqrstuvwxyz"; + expect(redactSensitive(url)).toBe( + "https://discord.com/api/webhooks/[REDACTED]", + ); + }); + + it("redacts query parameters that look secret", () => { + expect( + redactSensitive("https://api.example.com/x?code=abc123&keep=ok"), + ).toBe("https://api.example.com/x?code=[REDACTED]&keep=ok"); + expect( + redactSensitive("https://api.example.com/x?client_secret=xyz"), + ).toBe("https://api.example.com/x?client_secret=[REDACTED]"); + expect( + redactSensitive("https://api.example.com/x?token=abc&api_key=def"), + ).toBe( + "https://api.example.com/x?token=[REDACTED]&api_key=[REDACTED]", + ); + }); + + it("leaves benign strings alone", () => { + expect(redactSensitive("hello world")).toBe("hello world"); + }); + + it("returns empty input unchanged", () => { + expect(redactSensitive("")).toBe(""); + }); +}); + +describe("logger redaction", () => { + let logSpy: ReturnType<typeof vi.spyOn>; + + beforeEach(() => { + logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + }); + + afterEach(() => { + logSpy.mockRestore(); + }); + + it("redacts secrets in info messages", () => { + logger.info("Authorization: Bearer secret123"); + const printed = logSpy.mock.calls + .map((c: unknown[]) => c.join(" ")) + .join("\n"); + expect(printed).not.toContain("secret123"); + expect(printed).toContain("[REDACTED]"); + }); + + it("redacts secrets in error stacks", () => { + const err = new Error("failed"); + err.stack = "Error: failed\n at fn (https://x.example/?token=leak)"; + logger.error("oops", err); + const printed = logSpy.mock.calls + .map((c:
unknown[]) => c.join(" ")) + .join("\n"); + expect(printed).not.toContain("leak"); + expect(printed).toContain("[REDACTED]"); + }); + + it("redacts webhook URLs in warn messages", () => { + logger.warn( + "fired https://discord.com/api/webhooks/111/aaaBBBcccDDDeee", + ); + const printed = logSpy.mock.calls + .map((c: unknown[]) => c.join(" ")) + .join("\n"); + expect(printed).not.toContain("aaaBBBcccDDDeee"); + expect(printed).toContain("[REDACTED]"); + }); +}); diff --git a/packages/systems/tests/unit/persistence-safejsonparse.test.ts b/packages/systems/tests/unit/persistence-safejsonparse.test.ts new file mode 100644 index 0000000..eb6927c --- /dev/null +++ b/packages/systems/tests/unit/persistence-safejsonparse.test.ts @@ -0,0 +1,94 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; + +const { warnMock } = vi.hoisted(() => ({ warnMock: vi.fn() })); + +vi.mock("@fluxcore/utils", () => ({ + logger: { + info: vi.fn(), + warn: warnMock, + error: vi.fn(), + debug: vi.fn(), + }, +})); + +vi.mock("@fluxcore/database", () => ({ + getPrisma: () => ({}), +})); + +vi.mock("@fluxcore/config", () => ({ + config: { logLevel: "info" }, +})); + +import { rowToRule } from "../../src/actions/persistence.js"; + +describe("rowToRule fallback logging", () => { + beforeEach(() => warnMock.mockReset()); + + it("logs a warning when actions JSON is malformed", () => { + rowToRule({ + id: 42, + guildId: "g1", + name: "broken", + enabled: true, + eventType: "memberJoin", + actions: "{not-json", + conditions: "{}", + priority: 0, + createdBy: "u1", + }); + expect(warnMock).toHaveBeenCalledTimes(1); + const msg = warnMock.mock.calls[0]?.[0] as string; + expect(msg).toContain("ActionRule"); + expect(msg).toContain("id=42"); + expect(msg).toContain("actions"); + }); + + it("logs a warning when conditions JSON is malformed", () => { + rowToRule({ + id: 7, + guildId: "g1", + name: "broken", + enabled: true, + eventType: "memberJoin", + actions: "[]", + conditions: "{bad", + 
priority: 0, + createdBy: "u1", + }); + expect(warnMock).toHaveBeenCalledTimes(1); + expect(warnMock.mock.calls[0]?.[0]).toContain("conditions"); + expect(warnMock.mock.calls[0]?.[0]).toContain("id=7"); + }); + + it("does NOT warn on valid JSON", () => { + rowToRule({ + id: 1, + guildId: "g1", + name: "ok", + enabled: true, + eventType: "memberJoin", + actions: "[]", + conditions: "{}", + priority: 0, + createdBy: "u1", + }); + expect(warnMock).not.toHaveBeenCalled(); + }); + + it("falls back to empty defaults when both columns are malformed", () => { + const rule = rowToRule({ + id: 99, + guildId: "g9", + name: "broken", + enabled: true, + eventType: "memberJoin", + actions: "not-json", + conditions: "also-not-json", + priority: 0, + createdBy: "u1", + }); + expect(rule.actions).toEqual([]); + expect(rule.conditions).toEqual({}); + expect(warnMock).toHaveBeenCalledTimes(2); + }); +}); diff --git a/packages/utils/src/index.ts b/packages/utils/src/index.ts index e96fca1..5a939de 100644 --- a/packages/utils/src/index.ts +++ b/packages/utils/src/index.ts @@ -1,4 +1,4 @@ -export { logger } from "./logger.js"; +export { logger, redactSensitive } from "./logger.js"; export { successEmbed, errorEmbed, infoEmbed, warnEmbed } from "./embeds.js"; export { checkPermissions, diff --git a/packages/utils/src/logger.ts b/packages/utils/src/logger.ts index 6671d5d..5730480 100644 --- a/packages/utils/src/logger.ts +++ b/packages/utils/src/logger.ts @@ -22,6 +22,52 @@ const COLORS = { red: "\x1b[31m", }; +const REDACTED = "[REDACTED]"; + +// Order matters: webhook URLs and structured tokens before generic +// query-param redaction so we don't double-mangle. 
+const REDACTION_PATTERNS: ReadonlyArray<{ + pattern: RegExp; + replacement: string; +}> = [ + // Bearer tokens (Authorization header style) + { + pattern: /(Bearer\s+)[A-Za-z0-9._\-+/=]+/gi, + replacement: `$1${REDACTED}`, + }, + // Basic auth headers + { + pattern: /(Basic\s+)[A-Za-z0-9+/=]+/gi, + replacement: `$1${REDACTED}`, + }, + // Discord webhook URLs: keep prefix, drop the id+token suffix + { + pattern: + /https:\/\/(?:[a-z]+\.)?discord(?:app)?\.com\/api(?:\/v\d+)?\/webhooks\/[^\s"'<>]+/gi, + replacement: `https://discord.com/api/webhooks/${REDACTED}`, + }, + // Discord bot tokens: 3 base64url segments separated by `.` + { + pattern: /[MN][A-Za-z\d]{23,28}\.[\w-]{6,7}\.[\w-]{27,}/g, + replacement: REDACTED, + }, + // Sensitive query parameters + { + pattern: + /([?&](?:code|token|access_token|refresh_token|client_secret|api[_-]?key|secret|password)=)[^&\s"'<>]+/gi, + replacement: `$1${REDACTED}`, + }, +]; + +export function redactSensitive(value: string): string { + if (!value) return value; + let out = value; + for (const { pattern, replacement } of REDACTION_PATTERNS) { + out = out.replace(pattern, replacement); + } + return out; +} + class Logger { private level: LogLevel; @@ -43,9 +89,9 @@ class Logger { if (level < this.level) return; const ts = `${COLORS.gray}${this.timestamp()}${COLORS.reset}`; const tag = `${color}[${label}]${COLORS.reset}`; - console.log(`${ts} ${tag} ${message}`); + console.log(`${ts} ${tag} ${redactSensitive(message)}`); if (error?.stack) { - console.log(`${COLORS.gray}${error.stack}${COLORS.reset}`); + console.log(`${COLORS.gray}${redactSensitive(error.stack)}${COLORS.reset}`); } } diff --git a/scripts/migrate-encrypt-session-tokens.ts b/scripts/migrate-encrypt-session-tokens.ts new file mode 100644 index 0000000..80d48df --- /dev/null +++ b/scripts/migrate-encrypt-session-tokens.ts @@ -0,0 +1,49 @@ +import { getPrisma } from "@fluxcore/database"; +import { logger } from "@fluxcore/utils"; +import { encrypt, isEncrypted } from 
"../apps/dashboard/src/server/shared/crypto.js"; + +export interface BackfillSummary { + encrypted: number; + skipped: number; +} + +/** + * One-shot backfill: re-encrypt any DashboardSession.accessToken rows that + * are still stored as plaintext (legacy environments pre-dating the + * encryption helper). Idempotent — already-encrypted rows are skipped. + */ +export async function backfillEncryptSessionTokens(): Promise<BackfillSummary> { + const prisma = getPrisma(); + const rows = await prisma.dashboardSession.findMany({ + select: { id: true, accessToken: true }, + }); + + let encrypted = 0; + let skipped = 0; + + for (const row of rows) { + if (isEncrypted(row.accessToken)) { + skipped++; + continue; + } + await prisma.dashboardSession.update({ + where: { id: row.id }, + data: { accessToken: encrypt(row.accessToken) }, + }); + encrypted++; + } + + logger.info( + `DashboardSession backfill complete: encrypted=${encrypted} skipped=${skipped}`, + ); + return { encrypted, skipped }; +} + +if (import.meta.url === `file://${process.argv[1]}`) { + backfillEncryptSessionTokens() + .then(() => process.exit(0)) + .catch((err) => { + logger.error("Backfill failed", err as Error); + process.exit(1); + }); +}