diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 13c3f89d0..3ca6a129d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -95,9 +95,14 @@ jobs: - name: Run tests with coverage run: npm run test:coverage - - name: Security audit - run: npm audit --audit-level=high || true - continue-on-error: true + # 🔐 SECURITY GATE — bloqueia merge quando aparecem vulnerabilidades + # NOVAS ≥ high (omit=dev) comparadas com .security/npm-audit-baseline.json. + # Para aceitar um CVE conhecido sem patch: revisar, justificar no PR e + # rodar `UPDATE_BASELINE=1 node scripts/check-npm-audit.mjs` localmente. + - name: 🔐 npm audit gate (no new vulnerabilities) + env: + AUDIT_THRESHOLD: high + run: node scripts/check-npm-audit.mjs # Gate crítico: funções SECURITY DEFINER em public NÃO podem ser # executáveis por PUBLIC/anon (fora da whitelist público-intencional) @@ -122,6 +127,12 @@ jobs: - name: 📜 Edge structured-logging gate run: node scripts/check-edge-structured-logging.mjs + # Garante que TODA invocação de edge nas rotas críticas (auth/quote/mcp/ + # magicUp/comparison/connections) use createClientLogger + log.headers() + # para correlação request_id ↔ edge logs. Allowlist congelada 2026-04-27. + - name: 📜 Client structured-logging gate (rotas críticas) + run: node scripts/check-client-structured-logging.mjs + - name: RLS policy tests (optional) if: env.TEST_SELLER_PASSWORD != '' && env.TEST_ADMIN_PASSWORD != '' env: @@ -292,7 +303,12 @@ jobs: if-no-files-found: ignore edge-functions-typecheck: - name: Edge Functions — Deno typecheck + # 🔐 SECURITY GATE — falha o CI imediatamente quando qualquer edge function + # quebra o typecheck Deno após mudanças de segurança (CORS, authz, JWT, + # RLS bridge, secrets). Roda em paralelo ao `smoke` e NÃO depende de + # `quality` (sem `npm ci`) para dar feedback em <2min, antes da bateria + # pesada de testes. Ver scripts/typecheck-edge-functions.mjs. 
+ name: 🔐 Edge Functions — Deno typecheck (security gate) runs-on: ubuntu-latest timeout-minutes: 15 @@ -310,7 +326,24 @@ jobs: # Roda `deno check` em cada supabase/functions//*.ts. # Falha o job se qualquer função tiver erro de tipos - # (ex: cast inseguro de GenericStringError, SupabaseClient com schema errado). - - name: Typecheck all edge functions + # (ex: cast inseguro de GenericStringError, SupabaseClient com schema + # errado, corsHeaders fora de escopo, getClaims sem null-check). + # Após mudanças de segurança (CORS allowlist, authz manifest, JWT, + # _shared/*), este gate falha ANTES de qualquer deploy. + - name: Typecheck all edge functions (fail-fast on security regressions) run: node scripts/typecheck-edge-functions.mjs + # Resumo amigável: quais funções falharam, para PRs de segurança. + - name: Summarize failures (PR comment-friendly) + if: failure() + run: | + echo "::error title=Edge typecheck failed::Uma ou mais edge functions quebraram o typecheck Deno após mudanças de segurança. Rode localmente: node scripts/typecheck-edge-functions.mjs" + echo "## ❌ Edge Functions typecheck falhou" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "Mudanças de segurança (CORS, authz, JWT, RLS) introduziram regressão de tipos em pelo menos uma edge function." 
>> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "**Reproduzir localmente:**" >> "$GITHUB_STEP_SUMMARY" + echo '```bash' >> "$GITHUB_STEP_SUMMARY" + echo "node scripts/typecheck-edge-functions.mjs" >> "$GITHUB_STEP_SUMMARY" + echo '```' >> "$GITHUB_STEP_SUMMARY" + diff --git a/.security/npm-audit-baseline.json b/.security/npm-audit-baseline.json new file mode 100644 index 000000000..234ffa2dd --- /dev/null +++ b/.security/npm-audit-baseline.json @@ -0,0 +1 @@ +{"$schema":"./baseline.schema.json","generatedAt":null,"threshold":"high","knownAdvisories":[],"notes":"Atualize com UPDATE_BASELINE=1 node scripts/check-npm-audit.mjs após revisar cada CVE."} diff --git a/docs/observability/webhook-alerts-spec.sql b/docs/observability/webhook-alerts-spec.sql new file mode 100644 index 000000000..b8ca1af55 --- /dev/null +++ b/docs/observability/webhook-alerts-spec.sql @@ -0,0 +1,114 @@ +-- ============================================================================ +-- Webhook Alerts Spec — Detection Queries (read-only) +-- ---------------------------------------------------------------------------- +-- SSOT das queries que a edge function `webhook-alerts-monitor` executa para +-- decidir se dispara um alerta no Sentry. Nenhuma destas queries faz INSERT/ +-- UPDATE — todas leem `public.webhook_delivery_metrics` (schema definido em +-- supabase/migrations/20260427122230_*.sql). 
+-- +-- Colunas usadas: +-- occurred_at timestamptz — quando a tentativa ocorreu +-- source text — ex: 'bitrix', 'product-webhook', 'stripe' +-- direction text — 'inbound' | 'outbound' +-- http_status int — código HTTP (NULL para erro de transporte) +-- success boolean — sucesso final (após retries) +-- request_id text — correlation id +-- +-- Janelas e thresholds (perfil CONSERVADOR — confirmado pelo usuário): +-- * delivery_failure_total : >=3 falhas CONSECUTIVAS por (source,direction) +-- * spike_5xx : >=5 respostas 5xx em 5min OU >20% das requests +-- (mín 10 amostras na janela) +-- * spike_4xx : >40% de respostas 4xx em 5min (mín 10 amostras) +-- +-- Cada query devolve UMA linha por (source,direction) que ATINGE o threshold. +-- A edge monitor mapeia cada linha para um evento Sentry com tags: +-- alert=, source=, direction=, severity=warning|error +-- ============================================================================ + +-- :window_minutes -> default 5 +-- :min_samples -> default 10 +-- :rate_5xx -> default 0.20 +-- :rate_4xx -> default 0.40 +-- :abs_5xx -> default 5 +-- :consecutive -> default 3 + +-- ---------------------------------------------------------------------------- +-- 1) delivery_failure_total +-- >= N falhas CONSECUTIVAS (mais recentes) por (source,direction). 
+-- ---------------------------------------------------------------------------- +with recent as ( + select + source, + direction, + success, + occurred_at, + row_number() over (partition by source, direction order by occurred_at desc) as rn + from public.webhook_delivery_metrics + where occurred_at > now() - (:window_minutes || ' minutes')::interval +) +select + source, + direction, + count(*) filter (where success = false) as failures, + max(occurred_at) as last_failure_at, + 'delivery_failure_total' as alert_id, + 'error' as severity +from recent +where rn <= :consecutive +group by source, direction +having count(*) filter (where success = false) >= :consecutive; + +-- ---------------------------------------------------------------------------- +-- 2) spike_5xx +-- >= :abs_5xx respostas 5xx OU >:rate_5xx das requests na janela. +-- ---------------------------------------------------------------------------- +with bucket as ( + select + source, + direction, + count(*) as total, + count(*) filter (where http_status between 500 and 599) as count_5xx + from public.webhook_delivery_metrics + where occurred_at > now() - (:window_minutes || ' minutes')::interval + group by source, direction +) +select + source, + direction, + total, + count_5xx, + round(100.0 * count_5xx / nullif(total,0), 2) as pct_5xx, + 'spike_5xx' as alert_id, + 'error' as severity +from bucket +where total >= :min_samples + and ( + count_5xx >= :abs_5xx + or (count_5xx::float / nullif(total,0)) > :rate_5xx + ); + +-- ---------------------------------------------------------------------------- +-- 3) spike_4xx +-- > :rate_4xx das requests com http_status 4xx. 
+-- ---------------------------------------------------------------------------- +with bucket as ( + select + source, + direction, + count(*) as total, + count(*) filter (where http_status between 400 and 499) as count_4xx + from public.webhook_delivery_metrics + where occurred_at > now() - (:window_minutes || ' minutes')::interval + group by source, direction +) +select + source, + direction, + total, + count_4xx, + round(100.0 * count_4xx / nullif(total,0), 2) as pct_4xx, + 'spike_4xx' as alert_id, + 'warning' as severity +from bucket +where total >= :min_samples + and (count_4xx::float / nullif(total,0)) > :rate_4xx; diff --git a/docs/observability/webhook-alerts.md b/docs/observability/webhook-alerts.md new file mode 100644 index 000000000..e8d8a6a25 --- /dev/null +++ b/docs/observability/webhook-alerts.md @@ -0,0 +1,71 @@ +# Webhook Alerts → Sentry + +Monitor read-only de `public.webhook_delivery_metrics` que dispara alertas no +Sentry quando há falhas de delivery ou spikes de 4xx/5xx por `(source, +direction)`. + +## Componentes + +| Arquivo | Papel | +|---|---| +| `docs/observability/webhook-alerts-spec.sql` | SSOT das queries de detecção (parametrizadas, read-only). | +| `supabase/functions/webhook-alerts-monitor/index.ts` | Cron edge function: detecta + envia eventos ao Sentry via envelope API. | + +## Thresholds (perfil conservador) + +Janela: **5 minutos**. Mínimo de **10 amostras** para spikes (evita falso-positivo em baixo volume). + +| Alert | Critério | Severity | +|---|---|---| +| `delivery_failure_total` | ≥3 falhas **consecutivas** por (source,direction) | `error` | +| `spike_5xx` | ≥5 respostas 5xx **OU** >20% das requests | `error` | +| `spike_4xx` | >40% das requests com 4xx | `warning` | + +Para alterar thresholds, edite as constantes no topo de +`webhook-alerts-monitor/index.ts` (SSOT do runtime) e os defaults no +spec SQL. 
+ +## Tags Sentry (para filtragem/dashboards) + +Cada evento sai com: +- `alert` = `delivery_failure_total | spike_5xx | spike_4xx` +- `source` = ex. `bitrix24`, `product-webhook`, `inbound:n8n` +- `direction` = `inbound | outbound` +- `severity` = `warning | error` + +E `fingerprint = ["webhook-alert", alert, source, direction]` — o Sentry agrupa +o mesmo alerta na mesma issue, evitando flood. + +## Configuração + +1. **Secret**: cadastrar `SENTRY_DSN_SERVER` (DSN do projeto Sentry usado em + server-side). Sem o secret, o monitor roda em modo "dry" e apenas loga + `alert_skipped_no_sink`. +2. **Tabela**: `public.webhook_delivery_metrics` já existe (migration + `20260427122230_*`) — colunas usadas: `occurred_at`, `source`, `direction`, + `http_status`, `success`, `request_id`. +3. **Cron**: registrar via `supabase--insert` em `cron.schedule`: + ```sql + select cron.schedule( + 'webhook-alerts-monitor', + '*/1 * * * *', + $$ + select net.http_post( + url := '/functions/v1/webhook-alerts-monitor', + headers := jsonb_build_object( + 'Content-Type','application/json', + 'Authorization','Bearer ' + ), + body := '{}'::jsonb + ); + $$ + ); + ``` + +## Próximos passos (fora deste escopo) + +- Criar a migration de `webhook_delivery_metrics` + índices. +- Instrumentar `webhook-inbound`, `webhook-dispatcher`, `product-webhook` para + gravar uma linha por delivery. +- Adicionar Sentry Alert Rule no projeto: "create issue when tag `alert` matches + `spike_5xx|delivery_failure_total`" → notifica Slack/email. 
diff --git a/scripts/check-client-structured-logging.mjs b/scripts/check-client-structured-logging.mjs new file mode 100755 index 000000000..6e4c70f07 --- /dev/null +++ b/scripts/check-client-structured-logging.mjs @@ -0,0 +1,125 @@ +#!/usr/bin/env node +/** + * 📜 Client Structured-Logging Gate (rotas críticas) + * ---------------------------------------------------------------- + * Garante que toda invocação de edge function (`supabase.functions.invoke(...)`) + * em arquivos das rotas críticas (auth, quote, mcp, magicUp, comparison, + * connections) use `createClientLogger` E propague `headers: log.headers()` + * para correlacionar request_id entre client e edge logs. + * + * Critério: arquivo monitorado contém `supabase.functions.invoke` → + * DEVE conter `createClientLogger` E `log.headers()` (ou equivalente + * via REQUEST_ID_HEADER) em pelo menos uma chamada. + * + * Allowlist: arquivos legados podem ser registrados em ALLOWLIST com data e + * ticket — não pode crescer (snapshot 2026-04-27). + * + * Saída: + * exit 0 → todos os arquivos monitorados conformes + * exit 1 → arquivos faltando logger estruturado + */ +import { readFileSync, statSync } from 'node:fs'; +import { resolve, dirname, relative } from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { execSync } from 'node:child_process'; + +const __filename = fileURLToPath(import.meta.url); +const ROOT = resolve(dirname(__filename), '..'); + +// Domínios críticos: glob patterns relativos a src/. 
+const MONITORED_GLOBS = [ + 'src/contexts/AuthContext.tsx', + 'src/pages/Auth.tsx', + 'src/hooks/useStepUpAuth.ts', + // quote + 'src/hooks/useQuotes.ts', + 'src/pages/public-approval/usePublicQuoteApproval.ts', + 'src/pages/quote-view/QuoteActionHandlers.ts', + 'src/pages/quote-view/QuoteBitrixSync.ts', + // mcp + 'src/components/admin/security/keys/useMcpKeys.ts', + 'src/components/admin/security/keys/UpdateMcpKeyDialog.tsx', + 'src/components/admin/security/keys/diagnostics/FullOpDiagnosticsPanel.tsx', + // magic up + 'src/hooks/useMagicUpGeneration.ts', + // comparison + 'src/pages/PublicComparisonPage.tsx', + // connections + 'src/hooks/useConnectionTester.ts', + 'src/hooks/useSecretsManager.ts', +]; + +// Snapshot 2026-04-27 — arquivos legados pendentes de instrumentação. +// NÃO PODE CRESCER. Cada entrada deve ter ticket no roadmap. +const ALLOWLIST = new Set([ + 'src/pages/Auth.tsx', + 'src/pages/quote-view/QuoteActionHandlers.ts', + 'src/pages/quote-view/QuoteBitrixSync.ts', + 'src/components/admin/security/keys/UpdateMcpKeyDialog.tsx', + 'src/components/admin/security/keys/diagnostics/FullOpDiagnosticsPanel.tsx', +]); + +const violations = []; +const missingFiles = []; + +for (const rel of MONITORED_GLOBS) { + const abs = resolve(ROOT, rel); + let content; + try { + statSync(abs); + content = readFileSync(abs, 'utf-8'); + } catch { + missingFiles.push(rel); + continue; + } + + const invokesEdge = /supabase\.functions\.invoke\s*\(/.test(content); + if (!invokesEdge) continue; // arquivo não chama edge → fora de escopo + + const usesLogger = /createClientLogger\s*\(/.test(content); + const propagatesHeaders = + /log\.headers\s*\(\s*\)/.test(content) || + /REQUEST_ID_HEADER/.test(content); + + if (usesLogger && propagatesHeaders) continue; // ✅ + + if (ALLOWLIST.has(rel)) continue; // legado conhecido + + violations.push({ + file: rel, + missing: [ + !usesLogger && 'createClientLogger', + !propagatesHeaders && 'log.headers() / REQUEST_ID_HEADER', + 
].filter(Boolean).join(' + '), + }); +} + +console.log(`📜 Client structured-logging gate — ${MONITORED_GLOBS.length} arquivo(s) monitorado(s)`); +console.log(` Allowlist legada: ${ALLOWLIST.size} | Não-encontrados: ${missingFiles.length}`); + +if (missingFiles.length > 0) { + console.warn('\n⚠️ Arquivos monitorados ausentes (atualize MONITORED_GLOBS):'); + for (const f of missingFiles) console.warn(` - ${f}`); +} + +if (violations.length === 0) { + console.log('\n✅ Todas as rotas críticas instrumentadas com logger estruturado.'); + process.exit(0); +} + +console.error(`\n❌ ${violations.length} arquivo(s) sem logger estruturado:\n`); +for (const v of violations) { + console.error(` • ${v.file}`); + console.error(` Falta: ${v.missing}`); +} +console.error(` +Como corrigir: + import { createClientLogger } from "@/lib/telemetry/structuredLogger"; + const log = createClientLogger("", { base: { ... } }); + log.info("_start", { ... }); + await supabase.functions.invoke("", { body, headers: log.headers() }); + log.info("_ok"); // ou log.error("_failed", { err }) + +Padrão de event names: _start | _ok | _failed | _denied | _invalid +`); +process.exit(1); diff --git a/scripts/check-npm-audit.mjs b/scripts/check-npm-audit.mjs new file mode 100755 index 000000000..7ff2ef224 --- /dev/null +++ b/scripts/check-npm-audit.mjs @@ -0,0 +1,159 @@ +#!/usr/bin/env node +/** + * 🔐 npm audit CI gate + * + * Falha o build quando aparecem vulnerabilidades NOVAS acima do threshold + * configurado, comparando com .security/npm-audit-baseline.json. 
+ * + * Uso: + * node scripts/check-npm-audit.mjs # gate (CI) + * UPDATE_BASELINE=1 node scripts/check-npm-audit.mjs # atualiza baseline + * AUDIT_THRESHOLD=critical node scripts/check-npm-audit.mjs + * + * Saída: + * exit 0 → sem vulns novas acima do threshold + * exit 1 → vulns novas detectadas (CI deve falhar) + * exit 2 → erro inesperado (npm audit falhou de forma irrecuperável) + * + * Tolerâncias: + * - Se `npm audit` retornar 404/operation not supported (proxy de sandbox), + * o gate emite warning e termina com exit 0 — o ambiente real do GH Actions + * usa o registry público e roda normalmente. + */ +import { spawnSync } from 'node:child_process'; +import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'node:fs'; +import { dirname, resolve } from 'node:path'; +import { fileURLToPath } from 'node:url'; + +const __filename = fileURLToPath(import.meta.url); +const ROOT = resolve(dirname(__filename), '..'); +const BASELINE_PATH = resolve(ROOT, '.security/npm-audit-baseline.json'); + +const SEVERITY_ORDER = ['info', 'low', 'moderate', 'high', 'critical']; +const THRESHOLD = (process.env.AUDIT_THRESHOLD || 'high').toLowerCase(); +const UPDATE_BASELINE = process.env.UPDATE_BASELINE === '1'; + +function severityRank(sev) { + const i = SEVERITY_ORDER.indexOf((sev || '').toLowerCase()); + return i === -1 ? 
-1 : i; +} + +function loadBaseline() { + if (!existsSync(BASELINE_PATH)) { + return { knownAdvisories: [], threshold: THRESHOLD }; + } + try { + return JSON.parse(readFileSync(BASELINE_PATH, 'utf-8')); + } catch (e) { + console.error(`❌ Baseline inválido em ${BASELINE_PATH}: ${e.message}`); + process.exit(2); + } +} + +function saveBaseline(advisories) { + mkdirSync(dirname(BASELINE_PATH), { recursive: true }); + const payload = { + $schema: './baseline.schema.json', + generatedAt: new Date().toISOString(), + threshold: THRESHOLD, + knownAdvisories: advisories.sort((a, b) => String(a.id).localeCompare(String(b.id))), + notes: 'Atualize com UPDATE_BASELINE=1 node scripts/check-npm-audit.mjs após revisar cada CVE.', + }; + writeFileSync(BASELINE_PATH, JSON.stringify(payload, null, 2) + '\n'); +} + +function runNpmAudit() { + const res = spawnSync('npm', ['audit', '--json', '--omit=dev'], { + cwd: ROOT, + encoding: 'utf-8', + maxBuffer: 50 * 1024 * 1024, + }); + // npm audit retorna exit !=0 quando há vulns; ainda assim o JSON sai em stdout. + if (!res.stdout || !res.stdout.trim()) { + return { ok: false, reason: `npm audit não produziu saída (exit=${res.status}): ${res.stderr?.slice(0, 200)}` }; + } + let parsed; + try { + parsed = JSON.parse(res.stdout); + } catch (e) { + return { ok: false, reason: `JSON inválido de npm audit: ${e.message}` }; + } + if (parsed && parsed.error && parsed.message?.includes('operation is not supported')) { + return { ok: false, reason: 'sandbox-unsupported', sandboxUnsupported: true }; + } + return { ok: true, data: parsed }; +} + +function extractAdvisories(audit) { + // Suporta o formato moderno (npm v7+): audit.vulnerabilities = { name: { via: [...] } } + const out = new Map(); + const vulns = audit.vulnerabilities || {}; + for (const pkg of Object.values(vulns)) { + const via = Array.isArray(pkg.via) ? 
pkg.via : []; + for (const v of via) { + if (typeof v !== 'object' || !v) continue; + const id = v.source || v.url || v.title || `${v.name}@${v.range}`; + out.set(String(id), { + id: String(id), + name: v.name || pkg.name, + title: v.title || '', + severity: v.severity || pkg.severity, + url: v.url || null, + range: v.range || null, + }); + } + } + return [...out.values()]; +} + +// ---------- Main ---------- +const result = runNpmAudit(); +if (!result.ok) { + if (result.sandboxUnsupported) { + console.warn('⚠️ npm audit não suportado neste registry (sandbox). Gate ignorado — CI público (GitHub Actions) executará normalmente.'); + process.exit(0); + } + console.error(`❌ npm audit falhou: ${result.reason}`); + process.exit(2); +} + +const advisories = extractAdvisories(result.data); +const minRank = severityRank(THRESHOLD); +const filtered = advisories.filter((a) => severityRank(a.severity) >= minRank); + +if (UPDATE_BASELINE) { + saveBaseline(filtered); + console.log(`✅ Baseline atualizado em .security/npm-audit-baseline.json (${filtered.length} advisories ≥ ${THRESHOLD}).`); + process.exit(0); +} + +const baseline = loadBaseline(); +const known = new Set((baseline.knownAdvisories || []).map((a) => String(a.id))); +const newOnes = filtered.filter((a) => !known.has(String(a.id))); +const fixed = (baseline.knownAdvisories || []).filter((a) => !filtered.some((c) => String(c.id) === String(a.id))); + +console.log(`🔍 npm audit (omit=dev, threshold=${THRESHOLD})`); +console.log(` Total ≥ ${THRESHOLD}: ${filtered.length} | conhecidos: ${filtered.length - newOnes.length} | novos: ${newOnes.length} | corrigidos: ${fixed.length}`); + +if (fixed.length > 0) { + console.log(`\n✅ ${fixed.length} advisory(ies) do baseline foram corrigidos. 
Rode UPDATE_BASELINE=1 para limpar.`); + for (const a of fixed) console.log(` - ${a.severity}\t${a.name}\t${a.id}`); +} + +if (newOnes.length === 0) { + console.log('\n✅ Nenhuma vulnerabilidade nova acima do threshold.'); + process.exit(0); +} + +console.error(`\n❌ ${newOnes.length} vulnerabilidade(s) NOVA(s) ≥ ${THRESHOLD} detectada(s):\n`); +for (const a of newOnes) { + console.error(` • [${a.severity.toUpperCase()}] ${a.name}${a.range ? ` (${a.range})` : ''}`); + if (a.title) console.error(` ${a.title}`); + if (a.url) console.error(` ${a.url}`); +} +console.error('\nAções:'); +console.error(' 1. Atualize o pacote vulnerável (`npm update ` ou bump em package.json).'); +console.error(' 2. Se não houver patch disponível e o risco for aceito, adicione ao baseline:'); +console.error(' UPDATE_BASELINE=1 node scripts/check-npm-audit.mjs'); +console.error(' → SEMPRE registre justificativa no PR.\n'); +process.exit(1); diff --git a/src/hooks/useSecretsManager.ts b/src/hooks/useSecretsManager.ts index dbf218bd6..84071c43c 100644 --- a/src/hooks/useSecretsManager.ts +++ b/src/hooks/useSecretsManager.ts @@ -3,6 +3,7 @@ import { supabase } from "@/integrations/supabase/client"; import { toast } from "sonner"; import { newRequestId, REQUEST_ID_HEADER } from "@/lib/telemetry/requestId"; import { recordSecretsManagerCall } from "@/lib/telemetry/secretsManagerCallMetrics"; +import { createClientLogger } from "@/lib/telemetry/structuredLogger"; export interface SecretStatus { name: string; @@ -83,6 +84,11 @@ async function invokeSecretsManager(body: InvokeBody): Promise<{ status?: number; }> { const requestId = newRequestId(); + const log = createClientLogger(`connections.secretsManager.${body.action}`, { + requestId, + base: { target: body.name }, + }); + log.info("secrets_manager_call_start"); const startedAt = performance.now(); const { data, error } = await supabase.functions.invoke("secrets-manager", { body, @@ -91,19 +97,21 @@ async function invokeSecretsManager(body: 
InvokeBody): Promise<{ const durationMs = performance.now() - startedAt; const ctx = (error as { context?: Response } | null)?.context; const status = ctx?.status; + const dataOkFalse = data && (data as { ok?: boolean }).ok === false; + const ok = !error && !dataOkFalse; recordSecretsManagerCall({ action: body.action, target: body.name, durationMs, - ok: !error && !(data && (data as { ok?: boolean }).ok === false), + ok, status, - errorMessage: error?.message ?? (data && (data as { ok?: boolean }).ok === false + errorMessage: error?.message ?? (dataOkFalse ? (typeof (data as { error?: { message?: string } }).error?.message === "string" ? (data as { error: { message: string } }).error.message : undefined) : undefined), - errorCode: data && (data as { ok?: boolean }).ok === false + errorCode: dataOkFalse ? (typeof (data as { error?: { code?: string } }).error?.code === "string" ? (data as { error: { code: string } }).error.code : undefined) @@ -111,6 +119,14 @@ async function invokeSecretsManager(body: InvokeBody): Promise<{ requestId, }); + if (ok) { + log.info("secrets_manager_call_ok", { duration_ms: Math.round(durationMs), status }); + } else if (error) { + log.error("secrets_manager_call_failed", { duration_ms: Math.round(durationMs), status, err: error }); + } else { + log.warn("secrets_manager_call_denied", { duration_ms: Math.round(durationMs), status, code: (data as { error?: { code?: string } })?.error?.code }); + } + return { data: data as Record | null, error: error as { message: string; context?: Response } | null, diff --git a/src/hooks/useStepUpAuth.ts b/src/hooks/useStepUpAuth.ts index 86d1070db..af44648c0 100644 --- a/src/hooks/useStepUpAuth.ts +++ b/src/hooks/useStepUpAuth.ts @@ -1,6 +1,7 @@ import { useState, useCallback, useRef } from "react"; import { supabase } from "@/integrations/supabase/client"; import { sanitizeError, SAFE_MESSAGES } from "@/lib/security/sanitize-error"; +import { createClientLogger } from 
"@/lib/telemetry/structuredLogger"; export type StepUpAction = | "promote_dev" @@ -58,6 +59,8 @@ export function useStepUpAuth() { targetRef: targetRef ?? null, actionLabel: actionLabel ?? null, }; + const log = createClientLogger("auth.stepUp.request", { base: { action, target_ref: targetRef ?? null } }); + log.info("step_up_request_start"); setState((s) => ({ ...s, loading: true, error: null })); const { data, error } = await supabase.functions.invoke("step-up-verify", { body: { @@ -66,11 +69,14 @@ export function useStepUpAuth() { target_ref: targetRef ?? null, action_label: actionLabel ?? null, }, + headers: log.headers(), }); if (error || data?.error) { + log.error("step_up_request_failed", { err: error ?? data?.error }); setState((s) => ({ ...s, loading: false, error: sanitizeError(data ?? error) })); return false; } + log.info("step_up_request_ok", { challenge_id: data.challenge_id }); setState((s) => ({ ...s, loading: false, challengeId: data.challenge_id, expiresAt: data.expires_at })); return true; }, []); @@ -78,6 +84,8 @@ export function useStepUpAuth() { const verifyPassword = useCallback(async (password: string) => { setState((s) => ({ ...s, loading: true, error: null })); const { action, targetRef, actionLabel } = ctxRef.current; + const log = createClientLogger("auth.stepUp.password", { base: { action, target_ref: targetRef } }); + log.info("step_up_password_start", { challenge_id: state.challengeId }); const { data, error } = await supabase.functions.invoke("step-up-verify", { body: { step: "verify_password", @@ -87,12 +95,15 @@ export function useStepUpAuth() { target_ref: targetRef, action_label: actionLabel, }, + headers: log.headers(), }); if (error || data?.error) { // Mensagem genérica — não diferencia "senha errada" de outros erros (anti-enumeration) + log.warn("step_up_password_denied"); // não logar mensagens diferenciadas (anti-enumeration) setState((s) => ({ ...s, loading: false, error: SAFE_MESSAGES.AUTH_GENERIC })); return false; } + 
log.info("step_up_password_ok"); setState((s) => ({ ...s, loading: false, passwordVerified: true })); return true; }, [state.challengeId]); @@ -100,6 +111,8 @@ export function useStepUpAuth() { const verifyOtp = useCallback(async (otp: string) => { setState((s) => ({ ...s, loading: true, error: null })); const { action, targetRef, actionLabel } = ctxRef.current; + const log = createClientLogger("auth.stepUp.otp", { base: { action, target_ref: targetRef } }); + log.info("step_up_otp_start", { challenge_id: state.challengeId }); const { data, error } = await supabase.functions.invoke("step-up-verify", { body: { step: "verify_otp", @@ -109,11 +122,14 @@ export function useStepUpAuth() { target_ref: targetRef, action_label: actionLabel, }, + headers: log.headers(), }); if (error || data?.error) { + log.warn("step_up_otp_failed", { err: error ?? data?.error }); setState((s) => ({ ...s, loading: false, error: SAFE_MESSAGES.STEP_UP_FAILED })); return null; } + log.info("step_up_otp_ok"); setState((s) => ({ ...s, loading: false, token: data.token, expiresAt: data.expires_at })); return data.token as string; }, [state.challengeId]); @@ -127,6 +143,8 @@ export function useStepUpAuth() { // Só registra se houve um challenge ativo OU pelo menos um contexto definido, // para evitar logs vazios em re-renders. 
if (!action && !state.challengeId) return; + const log = createClientLogger("auth.stepUp.cancel", { base: { action, target_ref: targetRef, reason } }); + log.info("step_up_cancel_start", { challenge_id: state.challengeId }); try { await supabase.functions.invoke("step-up-verify", { body: { @@ -137,9 +155,12 @@ export function useStepUpAuth() { action_label: actionLabel, cancel_reason: reason, }, + headers: log.headers(), }); - } catch (_) { - // Audit best-effort + log.info("step_up_cancel_ok"); + } catch (err) { + // Audit best-effort — não propaga, mas registra + log.warn("step_up_cancel_failed", { err }); } }, [state.challengeId]); diff --git a/src/pages/public-approval/usePublicQuoteApproval.ts b/src/pages/public-approval/usePublicQuoteApproval.ts index a5c25fae6..2a0fdd13e 100644 --- a/src/pages/public-approval/usePublicQuoteApproval.ts +++ b/src/pages/public-approval/usePublicQuoteApproval.ts @@ -3,6 +3,7 @@ */ import { useState, useEffect, useCallback } from "react"; import { supabase } from "@/integrations/supabase/client"; +import { createClientLogger } from "@/lib/telemetry/structuredLogger"; export interface QuoteData { quote: any; @@ -77,29 +78,38 @@ export function usePublicQuoteApproval(token?: string): PublicQuoteApprovalState const fetchQuote = async () => { setIsLoading(true); + const log = createClientLogger("quote.publicApproval.fetch", { base: { token: token?.slice(0, 8) } }); + log.info("public_quote_fetch_start"); try { const { data: result, error: fnError } = await supabase.functions.invoke( "quote-public-view", - { body: { action: "get_quote", token } } + { body: { action: "get_quote", token }, headers: log.headers() } ); if (fnError && !result) throw new Error(fnError.message); if (result?.error) { - if (result.expired) setIsExpired(true); - else setError(result.error); + if (result.expired) { + log.warn("public_quote_fetch_expired"); + setIsExpired(true); + } else { + log.warn("public_quote_fetch_denied", { reason: result.error }); + 
setError(result.error); + } return; } if (result.already_responded) { + log.info("public_quote_fetch_already_responded"); setAlreadyResponded(result); return; } + log.info("public_quote_fetch_ok"); setData(result); } catch (err) { + log.error("public_quote_fetch_failed", { err }); setError("Erro ao carregar proposta"); - console.error(err); } finally { setIsLoading(false); } @@ -111,20 +121,26 @@ export function usePublicQuoteApproval(token?: string): PublicQuoteApprovalState const handleResponse = useCallback(async (response: "approved" | "rejected") => { if (!token) return; setSignatureError(null); + const log = createClientLogger("quote.publicApproval.respond", { + base: { token: token.slice(0, 8), response }, + }); // Client-side validation for approval if (response === "approved") { if (signerName.trim().length < 3) { + log.warn("public_quote_respond_invalid", { reason: "signer_name_too_short" }); setSignatureError("Informe seu nome completo para assinar a aprovação."); return; } const docDigits = signerDocumentRaw.replace(/\D/g, ""); if (docDigits.length !== 11 && docDigits.length !== 14) { + log.warn("public_quote_respond_invalid", { reason: "signer_document_invalid_length" }); setSignatureError("Informe um CPF (11 dígitos) ou CNPJ (14 dígitos) válido."); return; } } + log.info("public_quote_respond_start"); setIsSubmitting(true); try { const { data: result, error: fnError } = await supabase.functions.invoke( @@ -138,6 +154,7 @@ export function usePublicQuoteApproval(token?: string): PublicQuoteApprovalState signer_name: response === "approved" ? signerName.trim() : undefined, signer_document: response === "approved" ? 
signerDocumentRaw.replace(/\D/g, "") : undefined, }, + headers: log.headers(), } ); @@ -145,10 +162,11 @@ export function usePublicQuoteApproval(token?: string): PublicQuoteApprovalState if (result?.error) throw new Error(result.error); if (result?.signature) setSignatureReceipt(result.signature); + log.info("public_quote_respond_ok", { has_signature: !!result?.signature }); setSubmitted(response); } catch (err: any) { + log.error("public_quote_respond_failed", { err }); setSignatureError(err?.message || "Erro ao enviar resposta"); - console.error(err); } finally { setIsSubmitting(false); } diff --git a/supabase/functions/_shared/authorize.ts b/supabase/functions/_shared/authorize.ts index 606b8e118..6a3bc32be 100644 --- a/supabase/functions/_shared/authorize.ts +++ b/supabase/functions/_shared/authorize.ts @@ -24,11 +24,34 @@ import { getCorsHeaders } from "./cors.ts"; export type AppRole = "dev" | "supervisor" | "agente"; +/** + * MFA enforcement options. Quando definido, `authorize()` exige UMA das duas + * provas de "fator forte" antes de retornar `ok`: + * + * 1. AAL2 nativo da Supabase Auth — JWT com claim `aal === "aal2"` + * (TOTP enrollado e verificado na sessão). + * + * 2. Step-up token de curta validade — emitido por `step-up-verify` após + * reautenticação senha+OTP. Lido de `X-Step-Up-Token` (preferido) ou + * body field `step_up_token`. Validado via RPC `consume_step_up_token` + * com `_expected_action` e `_expected_target`. + */ +export interface MfaRequirement { + /** Action esperada no consume_step_up_token (ex.: "mcp_full_issue"). */ + action: string; + /** Target opcional (ex.: user_id alvo de uma promoção). */ + target?: string | null; + /** Se true, AAL2 sozinho NÃO basta — exige step-up token (ações destrutivas). */ + requireStepUpToken?: boolean; +} + export interface AuthorizeOptions { /** Mínimo exigido (hierárquico). Omita para apenas exigir authenticated. 
*/
   requireRole?: AppRole;
   /** Forçar verificação adicional via has_role() RPC (server-side, RLS-safe). */
   enforceServerSide?: boolean;
+  /** Exigir MFA (AAL2 ou step-up token). Obrigatório para dev/supervisor em mutações. */
+  requireMfa?: MfaRequirement;
 }
 
 export type AuthorizeResult =
@@ -37,6 +60,8 @@ export type AuthorizeResult =
       user: { id: string; email?: string };
       role: AppRole | null;
       token: string;
+      /** Como o MFA foi satisfeito (se exigido): "aal2" | "step_up" | null. */
+      mfaSource: "aal2" | "step_up" | null;
       supabaseUser: SupabaseClient;
       supabaseAdmin: SupabaseClient;
     }
@@ -169,14 +194,17 @@ export async function authorize(
         _role: "dev",
       });
       if (isDev === true) {
-        return {
-          ok: true,
-          user: { id: userId, email: userEmail },
-          role: highestRole,
+        return await finalize({
+          userId,
+          userEmail,
           token,
+          role: highestRole,
           supabaseUser,
           supabaseAdmin,
-        };
+          opts,
+          req,
+          corsHeaders,
+        });
       }
     }
     return {
@@ -191,12 +219,159 @@
     }
   }
 
+  return await finalize({
+    userId,
+    userEmail,
+    token,
+    role: highestRole,
+    supabaseUser,
+    supabaseAdmin,
+    opts,
+    req,
+    corsHeaders,
+  });
+}
+
+// --------------------------------------------------------------
+// MFA finalize: aplica requireMfa antes de devolver ok=true.
+// --------------------------------------------------------------
+
+interface FinalizeArgs {
+  userId: string;
+  userEmail?: string;
+  token: string;
+  role: AppRole | null;
+  supabaseUser: SupabaseClient;
+  supabaseAdmin: SupabaseClient;
+  opts: AuthorizeOptions;
+  req: Request;
+  corsHeaders: Record<string, string>;
+}
+
+/** Decodifica payload de JWT sem verificar assinatura — só para ler claim `aal`/`amr`.
+ * Exportado para testes unitários do enforcement de MFA.
*/
+export function decodeJwtClaims(jwt: string): Record<string, unknown> | null {
+  try {
+    const parts = jwt.split(".");
+    if (parts.length !== 3) return null;
+    const payload = parts[1].replace(/-/g, "+").replace(/_/g, "/");
+    const padded = payload + "=".repeat((4 - (payload.length % 4)) % 4);
+    return JSON.parse(atob(padded));
+  } catch {
+    return null;
+  }
+}
+
+async function finalize(args: FinalizeArgs): Promise<AuthorizeResult> {
+  const { userId, userEmail, token, role, supabaseUser, supabaseAdmin, opts, req, corsHeaders } = args;
+
+  let mfaSource: "aal2" | "step_up" | null = null;
+
+  if (opts.requireMfa) {
+    const verify = await verifyMfa({
+      token,
+      req,
+      requirement: opts.requireMfa,
+      consumeStepUpToken: async (input) => {
+        const { data, error } = await supabaseUser.rpc("consume_step_up_token", input);
+        return { data, error };
+      },
+    });
+    if (!verify.ok) {
+      return {
+        ok: false,
+        response: jsonResponse(
+          {
+            error: "mfa_required",
+            message: opts.requireMfa.requireStepUpToken
+              ? "Confirme sua identidade (senha + código por e-mail) antes de prosseguir."
+              : "Esta operação exige autenticação de dois fatores (TOTP) ou confirmação senha + OTP.",
+            required_action: opts.requireMfa.action,
+            mfa_failure_reason: verify.reason,
+          },
+          403,
+          corsHeaders,
+        ),
+      };
+    }
+    mfaSource = verify.source;
+  }
+
   return {
     ok: true,
     user: { id: userId, email: userEmail },
-    role: highestRole,
+    role,
     token,
+    mfaSource,
     supabaseUser,
     supabaseAdmin,
   };
 }
+
+// --------------------------------------------------------------
+// Pure MFA verifier — exportado para testes unitários.
+// --------------------------------------------------------------
+
+export interface ConsumeStepUpTokenInput {
+  _token: string;
+  _expected_action: string;
+  _expected_target: string | null;
+}
+export type ConsumeStepUpTokenFn = (input: ConsumeStepUpTokenInput) => Promise<{ data: unknown; error: unknown }>;
+
+export interface VerifyMfaArgs {
+  token: string;
+  req: Request;
+  requirement: MfaRequirement;
+  consumeStepUpToken: ConsumeStepUpTokenFn;
+}
+
+export type VerifyMfaResult =
+  | { ok: true; source: "aal2" | "step_up" }
+  | { ok: false; reason: "no_aal2_no_step_up" | "step_up_invalid" | "step_up_token_required" };
+
+export async function verifyMfa(args: VerifyMfaArgs): Promise<VerifyMfaResult> {
+  const { token, req, requirement, consumeStepUpToken } = args;
+  const { action, target = null, requireStepUpToken = false } = requirement;
+
+  // Caminho 1: AAL2 — basta a sessão estar elevada via TOTP, exceto se requireStepUpToken.
+  if (!requireStepUpToken) {
+    const claims = decodeJwtClaims(token);
+    const amrTotp = claims && Array.isArray(claims.amr)
+      && (claims.amr as Array<{ method?: string }>).some((e) => e?.method === "totp");
+    if (claims && (claims.aal === "aal2" || amrTotp)) {
+      return { ok: true, source: "aal2" };
+    }
+  }
+
+  // Caminho 2: step-up token (header preferido, fallback body).
+  let stepUpToken = req.headers.get("X-Step-Up-Token") || req.headers.get("x-step-up-token") || "";
+  if (!stepUpToken) {
+    try {
+      const cloned = req.clone();
+      const ct = cloned.headers.get("content-type") || "";
+      if (ct.includes("application/json")) {
+        const body = await cloned.json().catch(() => null) as { step_up_token?: unknown } | null;
+        if (body && typeof body.step_up_token === "string") {
+          stepUpToken = body.step_up_token;
+        }
+      }
+    } catch {
+      // body já consumido — header é o caminho preferido
+    }
+  }
+
+  if (!stepUpToken) {
+    return { ok: false, reason: requireStepUpToken ?
"step_up_token_required" : "no_aal2_no_step_up" };
+  }
+
+  const { data, error } = await consumeStepUpToken({
+    _token: stepUpToken,
+    _expected_action: action,
+    _expected_target: target,
+  });
+  if (error || data !== true) {
+    return { ok: false, reason: "step_up_invalid" };
+  }
+  return { ok: true, source: "step_up" };
+}
diff --git a/supabase/functions/tests/authorize-mfa_test.ts b/supabase/functions/tests/authorize-mfa_test.ts
new file mode 100644
index 000000000..aaf0720f5
--- /dev/null
+++ b/supabase/functions/tests/authorize-mfa_test.ts
@@ -0,0 +1,198 @@
+// supabase/functions/tests/authorize-mfa_test.ts
+// --------------------------------------------------------------
+// Testes do enforcement de MFA do helper SSOT _shared/authorize.ts.
+//
+// Cobre os 7 cenários críticos do gate MFA para roles dev/admin:
+// 1. Sem AAL2 e sem step-up token → bloqueia (no_aal2_no_step_up).
+// 2. JWT com aal=aal2 → libera com source="aal2".
+// 3. JWT com amr.method=totp → libera com source="aal2".
+// 4. Header X-Step-Up-Token + RPC ok → libera com source="step_up".
+// 5. Body { step_up_token } + RPC ok → libera com source="step_up".
+// 6. requireStepUpToken=true ignora AAL2 e ainda exige token.
+// 7. Step-up token presente mas RPC retorna false → step_up_invalid.
+//
+// Não depende de Supabase real — injeta consumeStepUpToken como callback.
+// --------------------------------------------------------------
+
+import { assertEquals } from "https://deno.land/std@0.224.0/assert/mod.ts";
+import { decodeJwtClaims, verifyMfa, type ConsumeStepUpTokenFn } from "../_shared/authorize.ts";
+
+// Helper: monta JWT com header/payload/sig falsos (assinatura é ignorada por decodeJwtClaims).
+function makeJwt(payload: Record<string, unknown>): string {
+  const b64 = (obj: Record<string, unknown>) =>
+    btoa(JSON.stringify(obj)).replace(/=+$/, "").replace(/\+/g, "-").replace(/\//g, "_");
+  return `${b64({ alg: "HS256", typ: "JWT" })}.${b64(payload)}.sig`;
+}
+
+// Helper: cria Request com headers/body opcionais.
+function makeReq(opts: { headers?: Record<string, string>; body?: unknown } = {}): Request {
+  const headers = new Headers(opts.headers);
+  let body: BodyInit | undefined;
+  if (opts.body !== undefined) {
+    body = JSON.stringify(opts.body);
+    if (!headers.has("content-type")) headers.set("content-type", "application/json");
+  }
+  return new Request("https://example.com/test", { method: "POST", headers, body });
+}
+
+const consumeOk: ConsumeStepUpTokenFn = async () => ({ data: true, error: null });
+const consumeFail: ConsumeStepUpTokenFn = async () => ({ data: false, error: null });
+const consumeError: ConsumeStepUpTokenFn = async () => ({ data: null, error: { message: "expired" } });
+
+// --------------------------------------------------------------
+// decodeJwtClaims
+// --------------------------------------------------------------
+Deno.test("decodeJwtClaims: extrai payload válido", () => {
+  const jwt = makeJwt({ sub: "u1", aal: "aal2" });
+  const claims = decodeJwtClaims(jwt);
+  assertEquals(claims?.sub, "u1");
+  assertEquals(claims?.aal, "aal2");
+});
+
+Deno.test("decodeJwtClaims: retorna null para JWT malformado", () => {
+  assertEquals(decodeJwtClaims("not.a.jwt.at.all"), null);
+  assertEquals(decodeJwtClaims("nope"), null);
+  assertEquals(decodeJwtClaims(""), null);
+});
+
+// --------------------------------------------------------------
+// verifyMfa — cenários de bloqueio
+// --------------------------------------------------------------
+Deno.test("verifyMfa: bloqueia sem AAL2 e sem step-up token", async () => {
+  const jwt = makeJwt({ sub: "u1", aal: "aal1" });
+  const result = await verifyMfa({
+    token: jwt,
+    req: makeReq(),
+    requirement: { action: "admin_action" },
+    consumeStepUpToken: consumeOk, // não deve ser chamado
+  });
+  assertEquals(result.ok, false);
+  if (!result.ok) assertEquals(result.reason, "no_aal2_no_step_up");
+});
+
+Deno.test("verifyMfa: bloqueia quando step-up token RPC retorna false", async () => {
+  const jwt = makeJwt({ sub: "u1",
aal: "aal1" }); + const result = await verifyMfa({ + token: jwt, + req: makeReq({ headers: { "X-Step-Up-Token": "tok-bad" } }), + requirement: { action: "admin_action" }, + consumeStepUpToken: consumeFail, + }); + assertEquals(result.ok, false); + if (!result.ok) assertEquals(result.reason, "step_up_invalid"); +}); + +Deno.test("verifyMfa: bloqueia quando step-up RPC retorna error (token expirado)", async () => { + const jwt = makeJwt({ sub: "u1", aal: "aal1" }); + const result = await verifyMfa({ + token: jwt, + req: makeReq({ headers: { "X-Step-Up-Token": "tok-expired" } }), + requirement: { action: "admin_action" }, + consumeStepUpToken: consumeError, + }); + assertEquals(result.ok, false); + if (!result.ok) assertEquals(result.reason, "step_up_invalid"); +}); + +Deno.test("verifyMfa: requireStepUpToken=true ignora AAL2 e exige token", async () => { + const jwt = makeJwt({ sub: "u1", aal: "aal2" }); // tem AAL2 + const result = await verifyMfa({ + token: jwt, + req: makeReq(), // mas sem token + requirement: { action: "destructive_action", requireStepUpToken: true }, + consumeStepUpToken: consumeOk, + }); + assertEquals(result.ok, false); + if (!result.ok) assertEquals(result.reason, "step_up_token_required"); +}); + +// -------------------------------------------------------------- +// verifyMfa — cenários de liberação +// -------------------------------------------------------------- +Deno.test("verifyMfa: libera com aal=aal2 no JWT", async () => { + const jwt = makeJwt({ sub: "u1", aal: "aal2" }); + const result = await verifyMfa({ + token: jwt, + req: makeReq(), + requirement: { action: "admin_action" }, + consumeStepUpToken: consumeFail, + }); + assertEquals(result.ok, true); + if (result.ok) assertEquals(result.source, "aal2"); +}); + +Deno.test("verifyMfa: libera com amr.method=totp no JWT", async () => { + const jwt = makeJwt({ sub: "u1", amr: [{ method: "totp", timestamp: Date.now() }] }); + const result = await verifyMfa({ + token: jwt, + req: 
makeReq(), + requirement: { action: "admin_action" }, + consumeStepUpToken: consumeFail, + }); + assertEquals(result.ok, true); + if (result.ok) assertEquals(result.source, "aal2"); +}); + +Deno.test("verifyMfa: libera com X-Step-Up-Token header válido", async () => { + const jwt = makeJwt({ sub: "u1", aal: "aal1" }); + let calledWith: unknown = null; + const result = await verifyMfa({ + token: jwt, + req: makeReq({ headers: { "X-Step-Up-Token": "tok-good" } }), + requirement: { action: "mcp_full_issue", target: "user-42" }, + consumeStepUpToken: async (input) => { + calledWith = input; + return { data: true, error: null }; + }, + }); + assertEquals(result.ok, true); + if (result.ok) assertEquals(result.source, "step_up"); + assertEquals(calledWith, { + _token: "tok-good", + _expected_action: "mcp_full_issue", + _expected_target: "user-42", + }); +}); + +Deno.test("verifyMfa: libera com step_up_token no body JSON", async () => { + const jwt = makeJwt({ sub: "u1", aal: "aal1" }); + const result = await verifyMfa({ + token: jwt, + req: makeReq({ body: { step_up_token: "tok-body", other: "data" } }), + requirement: { action: "admin_action" }, + consumeStepUpToken: consumeOk, + }); + assertEquals(result.ok, true); + if (result.ok) assertEquals(result.source, "step_up"); +}); + +Deno.test("verifyMfa: header tem precedência sobre body", async () => { + const jwt = makeJwt({ sub: "u1", aal: "aal1" }); + let receivedToken = ""; + const result = await verifyMfa({ + token: jwt, + req: makeReq({ + headers: { "X-Step-Up-Token": "from-header" }, + body: { step_up_token: "from-body" }, + }), + requirement: { action: "admin_action" }, + consumeStepUpToken: async (input) => { + receivedToken = input._token; + return { data: true, error: null }; + }, + }); + assertEquals(result.ok, true); + assertEquals(receivedToken, "from-header"); +}); + +Deno.test("verifyMfa: AAL2 + requireStepUpToken=true + token válido → libera via step_up", async () => { + const jwt = makeJwt({ sub: "u1", 
aal: "aal2" }); + const result = await verifyMfa({ + token: jwt, + req: makeReq({ headers: { "X-Step-Up-Token": "tok-good" } }), + requirement: { action: "destructive", requireStepUpToken: true }, + consumeStepUpToken: consumeOk, + }); + assertEquals(result.ok, true); + if (result.ok) assertEquals(result.source, "step_up"); // não aal2, porque requireStepUpToken +}); diff --git a/supabase/functions/webhook-alerts-monitor/index.ts b/supabase/functions/webhook-alerts-monitor/index.ts new file mode 100644 index 000000000..cf8bde514 --- /dev/null +++ b/supabase/functions/webhook-alerts-monitor/index.ts @@ -0,0 +1,264 @@ +/** + * webhook-alerts-monitor + * ---------------------------------------------------------------- + * Cron-driven monitor que executa as queries de detecção definidas em + * docs/observability/webhook-alerts-spec.sql sobre `webhook_delivery_metrics` + * e dispara eventos Sentry quando um threshold é atingido. + * + * Configuração: + * - SENTRY_DSN_SERVER (secret) → DSN server-side do Sentry (envelope API) + * - SUPABASE_URL / SUPABASE_SERVICE_ROLE_KEY (default) + * + * Schedule sugerido (pg_cron, a cadastrar via supabase--insert): + * '*\/1 * * * *' → roda 1x por minuto, janela de detecção 5min + * + * Cada alerta é deduplicado por (alert_id|source|direction|bucket_minute) num + * Map em memória do request — para deduplicação cross-run, o Sentry agrupa + * por fingerprint (alertId|source|direction). 
+ */
+
+import { createClient } from "https://esm.sh/@supabase/supabase-js@2.95.0";
+import { getCorsHeaders, handleCorsPreflightIfNeeded } from "../_shared/cors.ts";
+import { getOrCreateRequestId } from "../_shared/request-id.ts";
+import { createStructuredLogger } from "../_shared/structured-logger.ts";
+
+// ---------- Thresholds (perfil conservador) ---------------------------------
+const WINDOW_MINUTES = 5;
+const MIN_SAMPLES = 10;
+const RATE_5XX = 0.20;
+const RATE_4XX = 0.40;
+const ABS_5XX = 5;
+const CONSECUTIVE = 3;
+
+// ---------- Tipos -----------------------------------------------------------
+interface DetectedAlert {
+  alert_id: "delivery_failure_total" | "spike_5xx" | "spike_4xx";
+  severity: "warning" | "error";
+  source: string;
+  direction: "inbound" | "outbound";
+  details: Record<string, unknown>;
+}
+
+// ---------- Sentry envelope (server-side, sem SDK) --------------------------
+async function sendToSentry(alert: DetectedAlert, requestId: string): Promise<boolean> {
+  const dsn = Deno.env.get("SENTRY_DSN_SERVER");
+  if (!dsn) return false;
+
+  // Parse DSN: https://<publicKey>@<host>/<projectId> (ex.: o<org>.ingest.sentry.io)
+  const match = dsn.match(/^https:\/\/([^@]+)@([^/]+)\/(\d+)$/);
+  if (!match) return false;
+  const [, publicKey, host, projectId] = match;
+
+  const eventId = crypto.randomUUID().replace(/-/g, "");
+  const ts = new Date().toISOString();
+
+  const event = {
+    event_id: eventId,
+    timestamp: ts,
+    level: alert.severity,
+    logger: "webhook-alerts-monitor",
+    platform: "javascript",
+    environment: Deno.env.get("SUPABASE_ENV") ??
"production",
+    message: {
+      formatted: `[webhook] ${alert.alert_id} on ${alert.source}/${alert.direction}`,
+    },
+    fingerprint: ["webhook-alert", alert.alert_id, alert.source, alert.direction],
+    tags: {
+      alert: alert.alert_id,
+      source: alert.source,
+      direction: alert.direction,
+      severity: alert.severity,
+    },
+    extra: { ...alert.details, request_id: requestId },
+  };
+
+  const envelopeHeader = JSON.stringify({ event_id: eventId, sent_at: ts, dsn });
+  const itemHeader = JSON.stringify({ type: "event", content_type: "application/json" });
+  const body = `${envelopeHeader}\n${itemHeader}\n${JSON.stringify(event)}\n`;
+
+  const url = `https://${host}/api/${projectId}/envelope/?sentry_key=${publicKey}&sentry_version=7`;
+  const res = await fetch(url, {
+    method: "POST",
+    headers: { "Content-Type": "application/x-sentry-envelope" },
+    body,
+  });
+  return res.ok;
+}
+
+// ---------- Detecções (executam SQL via supabase-js) ------------------------
+interface MetricRow {
+  source: string;
+  direction: "inbound" | "outbound";
+  success: boolean;
+  http_status: number | null;
+  occurred_at: string;
+}
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+async function detect(supabase: any): Promise<DetectedAlert[]> {
+  const since = new Date(Date.now() - WINDOW_MINUTES * 60_000).toISOString();
+  const alerts: DetectedAlert[] = [];
+
+  const { data, error } = await supabase
+    .from("webhook_delivery_metrics")
+    .select("source,direction,success,http_status,occurred_at")
+    .gte("occurred_at", since);
+
+  if (error || !data) return alerts;
+  const rows = data as MetricRow[];
+
+  // Agrupa por (source,direction)
+  type Bucket = {
+    total: number;
+    count_5xx: number;
+    count_4xx: number;
+    failures_recent: boolean[]; // ordem cronológica desc
+    last_failure_at?: string;
+  };
+  const groups = new Map<string, Bucket>();
+
+  // Ordena desc para identificar falhas consecutivas a partir da mais recente
+  const sorted = [...rows].sort((a, b) =>
String(b.occurred_at).localeCompare(String(a.occurred_at)) + ); + + for (const r of sorted) { + const key = `${r.source}|${r.direction}`; + let g = groups.get(key); + if (!g) { + g = { total: 0, count_5xx: 0, count_4xx: 0, failures_recent: [] }; + groups.set(key, g); + } + g.total++; + const sc = Number(r.http_status) || 0; + if (sc >= 500 && sc < 600) g.count_5xx++; + if (sc >= 400 && sc < 500) g.count_4xx++; + if (g.failures_recent.length < CONSECUTIVE) { + g.failures_recent.push(!r.success); + if (!r.success && !g.last_failure_at) g.last_failure_at = String(r.occurred_at); + } + } + + for (const [key, g] of groups.entries()) { + const [source, direction] = key.split("|") as [string, "inbound" | "outbound"]; + + // 1) delivery_failure_total + if ( + g.failures_recent.length >= CONSECUTIVE && + g.failures_recent.every(Boolean) + ) { + alerts.push({ + alert_id: "delivery_failure_total", + severity: "error", + source, + direction, + details: { + consecutive_failures: CONSECUTIVE, + last_failure_at: g.last_failure_at, + window_minutes: WINDOW_MINUTES, + }, + }); + } + + // 2) spike_5xx + if ( + g.total >= MIN_SAMPLES && + (g.count_5xx >= ABS_5XX || g.count_5xx / g.total > RATE_5XX) + ) { + alerts.push({ + alert_id: "spike_5xx", + severity: "error", + source, + direction, + details: { + total: g.total, + count_5xx: g.count_5xx, + pct_5xx: +((100 * g.count_5xx) / g.total).toFixed(2), + window_minutes: WINDOW_MINUTES, + }, + }); + } + + // 3) spike_4xx + if (g.total >= MIN_SAMPLES && g.count_4xx / g.total > RATE_4XX) { + alerts.push({ + alert_id: "spike_4xx", + severity: "warning", + source, + direction, + details: { + total: g.total, + count_4xx: g.count_4xx, + pct_4xx: +((100 * g.count_4xx) / g.total).toFixed(2), + window_minutes: WINDOW_MINUTES, + }, + }); + } + } + + return alerts; +} + +// ---------- HTTP handler ---------------------------------------------------- +Deno.serve(async (req) => { + const preflight = handleCorsPreflightIfNeeded(req); + if (preflight) 
return preflight; + const cors = getCorsHeaders(req); + + const requestId = getOrCreateRequestId(req); + const log = createStructuredLogger({ fn: "webhook-alerts-monitor", requestId, req }); + log.info("monitor_start"); + + const supabaseUrl = Deno.env.get("SUPABASE_URL"); + const serviceKey = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY"); + if (!supabaseUrl || !serviceKey) { + log.error("monitor_misconfigured", { reason: "missing_service_role" }); + return new Response(JSON.stringify({ error: "misconfigured" }), { + status: 500, + headers: { ...cors, "Content-Type": "application/json" }, + }); + } + + const supabase = createClient(supabaseUrl, serviceKey, { + auth: { persistSession: false }, + }); + + let alerts: DetectedAlert[] = []; + try { + alerts = await detect(supabase); + } catch (err) { + log.error("monitor_detect_failed", { err: String(err) }); + return new Response(JSON.stringify({ error: "detect_failed" }), { + status: 500, + headers: { ...cors, "Content-Type": "application/json" }, + }); + } + + let sent = 0; + let skipped = 0; + for (const a of alerts) { + const ok = await sendToSentry(a, requestId); + if (ok) { + sent++; + log.warn("alert_dispatched", { + alert: a.alert_id, + source: a.source, + direction: a.direction, + severity: a.severity, + }); + } else { + skipped++; + log.warn("alert_skipped_no_sink", { + alert: a.alert_id, + source: a.source, + direction: a.direction, + }); + } + } + + log.info("monitor_ok", { detected: alerts.length, sent, skipped }); + return new Response( + JSON.stringify({ detected: alerts.length, sent, skipped, alerts }), + { status: 200, headers: { ...cors, "Content-Type": "application/json" } }, + ); +});