diff --git a/.gitignore b/.gitignore index 0cc937a..111b95f 100644 --- a/.gitignore +++ b/.gitignore @@ -47,6 +47,12 @@ private-key.pem dist/ build/ +# Local Claude scratch +.claude/ + +# SQLite-backed task store and review log +data/ + # OS generated files .DS_Store .DS_Store? diff --git a/CHANGELOG.md b/CHANGELOG.md index 964875d..a1c60b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,21 +8,44 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ### Added -- Branch protection feature -- Issue/PR templates management -- CODEOWNERS file management -- Web dashboard for monitoring -- Advanced error handling +- **Repository Rulesets** support (`src/rulesets.js`). Branch protection now uses + the modern Rulesets API (`POST /repos/{owner}/{repo}/rulesets`) which targets + `~DEFAULT_BRANCH` instead of a specific branch. Eliminates the `repository.created` + race condition on empty repos. Translates the existing `branch_protection.default` + config block into a ruleset payload — no config rewrite required. +- **Issue-driven repo provisioning** (Cut A: Temper as the brain). New + `issues.opened` handler watches a configured controller repo. When an issue + with the `repo-request` label is filed, Temper parses the issue-form body, + validates it, and enqueues a `provision-repo` task that creates the repo + (template-based when requested), applies full configuration, and replies on + the source issue. See `docs/controller-repo-template/` for the issue form + starter. +- **Persistent task scheduler** wired from `registerApp` (was exported but + never imported). The scheduler now uses an installation-token factory so each + tick gets a fresh, installation-scoped Octokit. New task handlers: + `revert-merge-settings`, `reconcile-repo`, `provision-repo`. +- **`docs/controller-repo-template/`** — starter for the controller repo with + an issue-form schema (`new-repo.yml`). Operators fill in pulseengine-specific + fields (license list, custom properties, default CODEOWNERS team). ### Changed -- Improved logging system -- Better error messages -- Enhanced configuration options +- **Empty-repo bootstrap** no longer skips configuration when the default branch + is missing. Rulesets, merge settings, and labels are applied immediately; + branch-scoped work (templates, codeowners, dependabot) is deferred to a + `reconcile-repo` task that fires after the first push. +- **Idempotency and AI review rate limits** moved from in-memory `Map` to + SQLite-backed KV (`src/persistent-kv.js`). Survives PM2 restarts — webhooks + are no longer re-processed and PRs no longer re-reviewed after a deploy. +- **`handleSignedCommitMerge`** uses the persistent task store (with `delayMs`) + for the 1-hour revert instead of `setTimeout`. The revert now survives a + restart, eliminating the audit-violation case where the repo was left in + the wrong merge mode after a process crash. +- `config.yml` gains `rulesets:` and `controller_repo:` sections (both opt-in + via `enabled`). ### Fixed -- Various bug fixes -- Performance improvements -- Memory leak fixes +- 5×2s retry race on `repository.created` for empty repositories. +- AI review rate limits and webhook idempotency no longer reset on restart. 
## [1.0.0] - 2026-01-24 diff --git a/__tests__/integration/app.test.js b/__tests__/integration/app.test.js index 9ae72eb..92cd4fc 100644 --- a/__tests__/integration/app.test.js +++ b/__tests__/integration/app.test.js @@ -228,6 +228,92 @@ describe('app', () => { _setConfigForTesting({}); }); + // ========================================================================= + // issues.opened — controller repo / issue-driven provisioning + // ========================================================================= + describe('issues.opened (controller-repo provisioning)', () => { + function createIssueOpenedContext(overrides = {}) { + const octokit = createMockOctokit(); + return { + id: overrides.id ?? 'delivery-issues-1', + log: { info: jest.fn(), warn: jest.fn(), error: jest.fn() }, + octokit, + payload: { + issue: { + number: 42, + body: + '### Repository name\n\nnew-svc\n\n' + + '### Visibility\n\nprivate\n\n' + + '### Description\n\nA new service\n', + labels: [{ name: 'repo-request' }], + ...(overrides.issue || {}) + }, + repository: { + full_name: 'pulseengine/repo-requests', + name: 'repo-requests', + owner: { login: 'pulseengine' }, + ...(overrides.repository || {}) + }, + ...(overrides.payload || {}) + } + }; + } + + it('ignores issues when controller_repo is disabled', async () => { + _setConfigForTesting({}); + const { handlers } = setupApp(); + const context = createIssueOpenedContext(); + await handlers['issues.opened'](context); + expect(context.octokit.issues.createComment).not.toHaveBeenCalled(); + }); + + it('ignores issues filed in a non-controller repo', async () => { + _setConfigForTesting({ + controller_repo: { enabled: true, repo: 'pulseengine/repo-requests', label: 'repo-request' } + }); + const { handlers } = setupApp(); + const context = createIssueOpenedContext({ + repository: { full_name: 'pulseengine/some-other-repo', name: 'some-other-repo' } + }); + await handlers['issues.opened'](context); + expect(context.octokit.issues.createComment).not.toHaveBeenCalled(); + }); + + it('ignores issues that lack the configured label', async () => { + _setConfigForTesting({ + controller_repo: { enabled: true, repo: 'pulseengine/repo-requests', label: 'repo-request' } + }); + const { handlers } = setupApp(); + const context = createIssueOpenedContext({ issue: { labels: [{ name: 'bug' }] } }); + await handlers['issues.opened'](context); + expect(context.octokit.issues.createComment).not.toHaveBeenCalled(); + }); + + it('rejects invalid form bodies with a comment', async () => { + _setConfigForTesting({ + controller_repo: { enabled: true, repo: 'pulseengine/repo-requests', label: 'repo-request' } + }); + const { handlers } = setupApp(); + const context = createIssueOpenedContext({ + issue: { body: '### Repository name\n\n' /* missing */, labels: [{ name: 'repo-request' }] } + }); + await handlers['issues.opened'](context); + const body = context.octokit.issues.createComment.mock.calls[0][0].body; + expect(body).toMatch(/rejected/i); + }); + + it('warns when task store is unavailable (instead of silently dropping the request)', async () => { + _setConfigForTesting({ + controller_repo: { enabled: true, repo: 'pulseengine/repo-requests', label: 'repo-request' } + }); + const { handlers } = setupApp(); + const context = createIssueOpenedContext(); + await handlers['issues.opened'](context); + const body = context.octokit.issues.createComment.mock.calls[0][0].body; + expect(body).toMatch(/task store offline/i); + }); + }); + // 
========================================================================= // mapLegacyEnvVars // ========================================================================= @@ -321,7 +407,9 @@ describe('app', () => { it('works without getRouter option', () => { const { app } = setupApp({ skipRouter: true }); - expect(app.on).toHaveBeenCalledTimes(5); + // 6 events: repository.created, issues.opened, issue_comment.created, + // pull_request.opened, pull_request.closed, push + expect(app.on).toHaveBeenCalledTimes(6); expect(app.onError).toHaveBeenCalledTimes(1); }); @@ -537,6 +625,25 @@ describe('app', () => { expect(configureRepository).not.toHaveBeenCalled(); }); + it('passes skipBranchScopedWork=true when default branch never appears', async () => { + _setConfigForTesting({ organization: 'pulseengine' }); + configureRepository.mockResolvedValue({ success: true, partial: true }); + const { handlers } = setupApp(); + const context = createRepoCreatedContext(); + // Make getBranch always 404 — empty repo + const err404 = Object.assign(new Error('Not Found'), { status: 404 }); + context.octokit.repos.getBranch.mockRejectedValue(err404); + + await handlers['repository.created'](context); + + expect(configureRepository).toHaveBeenCalledWith( + context.octokit, + context.payload.repository, + undefined, + { enqueueTask: null, skipBranchScopedWork: true } + ); + }); + it('configures matching org repo and creates success issue', async () => { _setConfigForTesting({ organization: 'pulseengine' }); configureRepository.mockResolvedValue({ success: true }); @@ -545,7 +652,10 @@ describe('app', () => { await handlers['repository.created'](context); expect(configureRepository).toHaveBeenCalledWith( - context.octokit, context.payload.repository, undefined, { enqueueTask: null } + context.octokit, + context.payload.repository, + undefined, + { enqueueTask: null, skipBranchScopedWork: false } ); expect(context.octokit.issues.create).toHaveBeenCalledWith( expect.objectContaining({ @@ -1218,7 +1328,13 @@ describe('app', () => { const context = createIssueCommentContext('/allow-merge-commit'); await handlers['issue_comment.created'](context); - expect(handleSignedCommitMerge).toHaveBeenCalledWith(context.octokit, 'myorg', 'myrepo', 7); + expect(handleSignedCommitMerge).toHaveBeenCalledWith( + context.octokit, + 'myorg', + 'myrepo', + 7, + { enqueueTask: null } + ); const body = context.octokit.issues.createComment.mock.calls[0][0].body; expect(body).toContain('Merge commits temporarily allowed.'); expect(body).toContain('merge commit strategy to preserve signed commits'); diff --git a/__tests__/integration/provisioning.test.js b/__tests__/integration/provisioning.test.js new file mode 100644 index 0000000..7f330a8 --- /dev/null +++ b/__tests__/integration/provisioning.test.js @@ -0,0 +1,224 @@ +jest.mock('../../src/repository.js', () => ({ + configureRepository: jest.fn().mockResolvedValue({ success: true }) +})); + +jest.mock('../../src/logger.js', () => { + const log = { info: jest.fn(), warn: jest.fn(), error: jest.fn() }; + return { getLogger: jest.fn(() => log) }; +}); + +import { + parseIssueFormBody, + validateProvisionRequest, + provisionRepo +} from '../../src/provisioning.js'; +import { configureRepository } from '../../src/repository.js'; +import { _setConfigForTesting } from '../../src/config.js'; + +describe('parseIssueFormBody', () => { + it('extracts headings and content', () => { + const body = `### Repository name\n\nmy-new-repo\n\n### Visibility\n\nprivate\n`; + 
expect(parseIssueFormBody(body)).toEqual({ + 'repository name': 'my-new-repo', + visibility: 'private' + }); + }); + + it('drops _No response_ entries', () => { + const body = `### Description\n\n_No response_\n\n### Topics\n\nweb, async`; + const parsed = parseIssueFormBody(body); + expect(parsed.description).toBeUndefined(); + expect(parsed.topics).toBe('web, async'); + }); + + it('preserves multi-line content', () => { + const body = `### Description\n\nLine one\nLine two\n\n### Visibility\n\nprivate`; + const parsed = parseIssueFormBody(body); + expect(parsed.description).toBe('Line one\nLine two'); + }); + + it('returns empty object for non-string input', () => { + expect(parseIssueFormBody(undefined)).toEqual({}); + expect(parseIssueFormBody(null)).toEqual({}); + }); + + it('lowercases keys for case-insensitive lookup', () => { + const body = `### Repository Name\n\nfoo`; + expect(parseIssueFormBody(body)['repository name']).toBe('foo'); + }); +}); + +describe('validateProvisionRequest', () => { + it('rejects when name missing', () => { + expect(validateProvisionRequest({}).valid).toBe(false); + }); + + it('rejects names with disallowed characters', () => { + expect(validateProvisionRequest({ 'repository name': 'has space' }).valid).toBe(false); + expect(validateProvisionRequest({ 'repository name': 'no/slash' }).valid).toBe(false); + }); + + it('rejects unknown visibility', () => { + const r = validateProvisionRequest({ + 'repository name': 'ok', + visibility: 'top-secret' + }); + expect(r.valid).toBe(false); + }); + + it('defaults visibility to private', () => { + const r = validateProvisionRequest({ 'repository name': 'ok' }); + expect(r.valid).toBe(true); + expect(r.params.visibility).toBe('private'); + }); + + it('parses topics CSV', () => { + const r = validateProvisionRequest({ + 'repository name': 'ok', + topics: ' webassembly, safety-critical , Formal' + }); + expect(r.params.topics).toEqual(['webassembly', 'safety-critical', 'formal']); + }); + + it('accepts internal visibility', () => { + const r = validateProvisionRequest({ + 'repository name': 'ok', + visibility: 'internal' + }); + expect(r.valid).toBe(true); + expect(r.params.visibility).toBe('internal'); + }); +}); + +describe('provisionRepo', () => { + beforeEach(() => { + jest.clearAllMocks(); + _setConfigForTesting({ organization: 'pulseengine' }); + }); + + afterEach(() => { + _setConfigForTesting({}); + }); + + function makeOctokit(createdRepo) { + return { + request: jest.fn().mockImplementation((route) => { + if (route === 'POST /orgs/{org}/repos') { + return Promise.resolve({ data: createdRepo }); + } + if (route === 'POST /repos/{template_owner}/{template_repo}/generate') { + return Promise.resolve({ data: createdRepo }); + } + return Promise.resolve({ data: {} }); + }) + }; + } + + it('creates a plain repo when no template provided, applies config, comments, closes issue', async () => { + const created = { + name: 'new-svc', + html_url: 'https://github.com/pulseengine/new-svc', + default_branch: 'main' + }; + const octokit = makeOctokit(created); + + await provisionRepo( + { + sourceOwner: 'pulseengine', + sourceRepo: 'repo-requests', + sourceIssue: 42, + params: { name: 'new-svc', visibility: 'private', description: 'svc', topics: [] } + }, + { octokit } + ); + + expect(octokit.request).toHaveBeenCalledWith( + 'POST /orgs/{org}/repos', + expect.objectContaining({ org: 'pulseengine', name: 'new-svc' }) + ); + expect(configureRepository).toHaveBeenCalledWith( + octokit, + created, + undefined, + { 
skipBranchScopedWork: false } + ); + // Confirmation comment + close issue + expect(octokit.request).toHaveBeenCalledWith( + 'POST /repos/{owner}/{repo}/issues/{issue_number}/comments', + expect.objectContaining({ issue_number: 42 }) + ); + expect(octokit.request).toHaveBeenCalledWith( + 'PATCH /repos/{owner}/{repo}/issues/{issue_number}', + expect.objectContaining({ state: 'closed', state_reason: 'completed' }) + ); + }); + + it('uses template generation when templateRepo is set', async () => { + const created = { + name: 'from-tpl', + html_url: 'x', + default_branch: 'main' + }; + const octokit = makeOctokit(created); + await provisionRepo( + { + sourceOwner: 'pulseengine', + sourceRepo: 'repo-requests', + sourceIssue: 1, + params: { + name: 'from-tpl', + visibility: 'private', + description: 'd', + templateRepo: 'pulseengine/template-rust', + topics: ['rust'] + } + }, + { octokit } + ); + expect(octokit.request).toHaveBeenCalledWith( + 'POST /repos/{template_owner}/{template_repo}/generate', + expect.objectContaining({ + template_owner: 'pulseengine', + template_repo: 'template-rust', + owner: 'pulseengine', + name: 'from-tpl' + }) + ); + // Topics applied + expect(octokit.request).toHaveBeenCalledWith( + 'PUT /repos/{owner}/{repo}/topics', + expect.objectContaining({ names: ['rust'] }) + ); + }); + + it('comments and rethrows when repo creation fails', async () => { + const octokit = { + request: jest.fn().mockImplementation((route) => { + if (route === 'POST /orgs/{org}/repos') { + const err = new Error('name already exists'); + err.status = 422; + return Promise.reject(err); + } + return Promise.resolve({ data: {} }); + }) + }; + + await expect( + provisionRepo( + { + sourceOwner: 'pulseengine', + sourceRepo: 'repo-requests', + sourceIssue: 99, + params: { name: 'taken', visibility: 'private', description: 'd', topics: [] } + }, + { octokit } + ) + ).rejects.toThrow(/name already exists/); + + // Failure comment posted before rethrow + expect(octokit.request).toHaveBeenCalledWith( + 'POST /repos/{owner}/{repo}/issues/{issue_number}/comments', + expect.objectContaining({ issue_number: 99 }) + ); + }); +}); diff --git a/__tests__/integration/repository.test.js b/__tests__/integration/repository.test.js index bef2fd7..954159d 100644 --- a/__tests__/integration/repository.test.js +++ b/__tests__/integration/repository.test.js @@ -67,6 +67,33 @@ describe('configureRepository', () => { ); }); + it('skipBranchScopedWork=true applies merge+labels but enqueues reconcile-repo for the rest', async () => { + _setConfigForTesting({ + settings: { merge: { allow_rebase_merge: true, allow_merge_commit: false, allow_squash_merge: false, delete_branch_on_merge: true } }, + issue_labels: [{ name: 'bug', color: 'd73a4a', description: 'Bug' }], + templates: { pull_request: '.github/PULL_REQUEST_TEMPLATE.md' } + }); + + const enqueueTask = jest.fn(); + const result = await configureRepository(octokit, 'owner', 'repo', { + enqueueTask, + skipBranchScopedWork: true + }); + + expect(result.success).toBe(true); + expect(result.partial).toBe(true); + // Templates should NOT have been applied (require commits / branch) + const calls = octokit.request.mock.calls.map((c) => c[0]); + expect(calls).not.toContain('PUT /repos/{owner}/{repo}/contents/{path}'); + // Reconcile task must be enqueued for the deferred work + expect(enqueueTask).toHaveBeenCalledWith( + 'reconcile-repo', + 'reconcile-repo:owner/repo', + { owner: 'owner', repo: 'repo' }, + { delayMs: 60000 } + ); + }); + it('returns error on failure', async 
() => { octokit.request.mockRejectedValue(new Error('API error')); const result = await configureRepository(octokit, 'owner', 'repo'); diff --git a/__tests__/integration/rulesets.test.js b/__tests__/integration/rulesets.test.js new file mode 100644 index 0000000..af6150b --- /dev/null +++ b/__tests__/integration/rulesets.test.js @@ -0,0 +1,174 @@ +import { + ensureRepositoryRuleset, + translateBranchProtectionToRuleset, + rulesetsMatch, + applyRulesetFromBranchProtection, + TEMPER_RULESET_NAME +} from '../../src/rulesets.js'; + +function createMockOctokit({ existing = [], full } = {}) { + return { + paginate: jest.fn().mockResolvedValue(existing), + request: jest.fn().mockImplementation((route) => { + if (route.startsWith('GET /repos/{owner}/{repo}/rulesets/{ruleset_id}')) { + return Promise.resolve({ data: full }); + } + if (route.startsWith('POST /repos/{owner}/{repo}/rulesets')) { + return Promise.resolve({ status: 201, data: { id: 999 } }); + } + if (route.startsWith('PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}')) { + return Promise.resolve({ status: 200, data: { id: full?.id } }); + } + return Promise.resolve({ status: 200, data: {} }); + }) + }; +} + +describe('translateBranchProtectionToRuleset', () => { + it('emits required_signatures rule when require_signed_commits is true', () => { + const r = translateBranchProtectionToRuleset({ require_signed_commits: true }); + expect(r.rules.find((x) => x.type === 'required_signatures')).toBeDefined(); + }); + + it('emits non_fast_forward rule by default (allow_force_pushes off)', () => { + const r = translateBranchProtectionToRuleset({}); + expect(r.rules.find((x) => x.type === 'non_fast_forward')).toBeDefined(); + }); + + it('omits deletion rule when allow_deletions is true', () => { + const r = translateBranchProtectionToRuleset({ allow_deletions: true }); + expect(r.rules.find((x) => x.type === 'deletion')).toBeUndefined(); + }); + + it('emits pull_request rule with mapped review parameters', () => { + const r = translateBranchProtectionToRuleset({ + required_pull_request_reviews: { + required_approving_review_count: 2, + dismiss_stale_reviews: true, + require_code_owner_reviews: true, + require_last_push_approval: false + }, + required_conversation_resolution: true + }); + const pr = r.rules.find((x) => x.type === 'pull_request'); + expect(pr.parameters.required_approving_review_count).toBe(2); + expect(pr.parameters.dismiss_stale_reviews_on_push).toBe(true); + expect(pr.parameters.require_code_owner_review).toBe(true); + expect(pr.parameters.required_review_thread_resolution).toBe(true); + }); + + it('emits required_status_checks with strict policy and contexts', () => { + const r = translateBranchProtectionToRuleset({ + required_status_checks: { strict: true, contexts: ['ci/build', 'ci/test'] } + }); + const checks = r.rules.find((x) => x.type === 'required_status_checks'); + expect(checks.parameters.strict_required_status_checks_policy).toBe(true); + expect(checks.parameters.required_status_checks).toEqual([ + { context: 'ci/build' }, + { context: 'ci/test' } + ]); + }); + + it('targets ~DEFAULT_BRANCH so it applies to empty repos', () => { + const r = translateBranchProtectionToRuleset({}); + expect(r.conditions.ref_name.include).toEqual(['~DEFAULT_BRANCH']); + expect(r.target).toBe('branch'); + expect(r.enforcement).toBe('active'); + }); + + it('uses the canonical name when none provided', () => { + const r = translateBranchProtectionToRuleset({}); + expect(r.name).toBe(TEMPER_RULESET_NAME); + }); +}); + 
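+// Order-insensitive equality check between a desired ruleset and the one GitHub returns.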
+describe('rulesetsMatch', () => { + it('returns true for identical rulesets', () => { + const a = translateBranchProtectionToRuleset({ require_signed_commits: true }); + const b = translateBranchProtectionToRuleset({ require_signed_commits: true }); + expect(rulesetsMatch(a, b)).toBe(true); + }); + + it('returns false when enforcement differs', () => { + const a = translateBranchProtectionToRuleset({}); + const b = { ...translateBranchProtectionToRuleset({}), enforcement: 'disabled' }; + expect(rulesetsMatch(a, b)).toBe(false); + }); + + it('returns false when rules differ', () => { + const a = translateBranchProtectionToRuleset({ require_signed_commits: true }); + const b = translateBranchProtectionToRuleset({ require_signed_commits: false }); + expect(rulesetsMatch(a, b)).toBe(false); + }); + + it('returns true even if rules are in different order', () => { + const a = translateBranchProtectionToRuleset({ require_signed_commits: true }); + const b = { + ...a, + rules: [...a.rules].reverse() + }; + expect(rulesetsMatch(a, b)).toBe(true); + }); + + it('returns false when actual is undefined', () => { + expect(rulesetsMatch(undefined, translateBranchProtectionToRuleset({}))).toBe(false); + }); +}); + +describe('ensureRepositoryRuleset', () => { + it('creates a new ruleset when none exists', async () => { + const octokit = createMockOctokit({ existing: [] }); + const desired = translateBranchProtectionToRuleset({ require_signed_commits: true }); + const result = await ensureRepositoryRuleset(octokit, 'o', 'r', desired); + expect(result.action).toBe('created'); + expect(octokit.request).toHaveBeenCalledWith( + 'POST /repos/{owner}/{repo}/rulesets', + expect.objectContaining({ owner: 'o', repo: 'r', name: desired.name }) + ); + }); + + it('leaves an unchanged ruleset alone', async () => { + const desired = translateBranchProtectionToRuleset({}); + const octokit = createMockOctokit({ + existing: [{ id: 7, name: desired.name }], + full: { id: 7, ...desired } + }); + const result = await ensureRepositoryRuleset(octokit, 'o', 'r', desired); + expect(result.action).toBe('unchanged'); + expect(octokit.request).not.toHaveBeenCalledWith( + expect.stringMatching(/^(POST|PUT)/), + expect.anything() + ); + }); + + it('updates a drifted ruleset', async () => { + const desired = translateBranchProtectionToRuleset({ require_signed_commits: true }); + const driftedFull = { + id: 7, + ...translateBranchProtectionToRuleset({ require_signed_commits: false }) + }; + const octokit = createMockOctokit({ + existing: [{ id: 7, name: desired.name }], + full: driftedFull + }); + const result = await ensureRepositoryRuleset(octokit, 'o', 'r', desired); + expect(result.action).toBe('updated'); + expect(octokit.request).toHaveBeenCalledWith( + 'PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}', + expect.objectContaining({ ruleset_id: 7 }) + ); + }); +}); + +describe('applyRulesetFromBranchProtection', () => { + it('translates and ensures', async () => { + const octokit = createMockOctokit({ existing: [] }); + const result = await applyRulesetFromBranchProtection( + octokit, + 'o', + 'r', + { require_signed_commits: true } + ); + expect(result.action).toBe('created'); + }); +}); diff --git a/__tests__/unit/persistent-kv.test.js b/__tests__/unit/persistent-kv.test.js new file mode 100644 index 0000000..c69ec00 --- /dev/null +++ b/__tests__/unit/persistent-kv.test.js @@ -0,0 +1,71 @@ +import { initKVStore } from '../../src/persistent-kv.js'; + +describe('persistent-kv', () => { + let store; + + beforeEach(() => { + store = 
initKVStore(':memory:', 'test', { ttlMs: 1000 }); + }); + + afterEach(() => { + store.close(); + }); + + it('returns false for unknown key', () => { + expect(store.has('nope')).toBe(false); + expect(store.get('nope')).toBeUndefined(); + }); + + it('stores and retrieves values', () => { + store.set('a', 'hello'); + expect(store.has('a')).toBe(true); + expect(store.get('a')).toBe('hello'); + }); + + it('treats expired entries as absent', () => { + store.set('b', 'x', { ttl: 1 }); + // Wait microscopically — expires_at is now+1ms; pause to ensure we crossed. + return new Promise((resolve) => { + setTimeout(() => { + expect(store.has('b')).toBe(false); + expect(store.get('b')).toBeUndefined(); + resolve(); + }, 5); + }); + }); + + it('upserts existing keys', () => { + store.set('c', 'first'); + store.set('c', 'second'); + expect(store.get('c')).toBe('second'); + }); + + it('sweep removes expired rows', () => { + store.set('d', '1', { ttl: 1 }); + return new Promise((resolve) => { + setTimeout(() => { + const removed = store.sweep(); + expect(removed).toBe(1); + resolve(); + }, 5); + }); + }); + + it('rejects invalid table names', () => { + expect(() => initKVStore(':memory:', '1bad-name')).toThrow(/Invalid KV store name/); + expect(() => initKVStore(':memory:', 'spaces inside')).toThrow(/Invalid KV store name/); + }); + + it('persists across separate stores on the same shared db (in-memory check)', () => { + // Two stores with the same name share a table within their own connection; + // separate names get separate tables on the same db. + const a = initKVStore(':memory:', 'feature_a'); + const b = initKVStore(':memory:', 'feature_b'); + a.set('k', 'va'); + b.set('k', 'vb'); + expect(a.get('k')).toBe('va'); + expect(b.get('k')).toBe('vb'); + a.close(); + b.close(); + }); +}); diff --git a/config.yml b/config.yml index 5cf844b..f843192 100644 --- a/config.yml +++ b/config.yml @@ -41,6 +41,28 @@ branch_protection: allow_deletions: false require_signed_commits: true +# Repository Rulesets (modern replacement for legacy branch protection). +# When enabled, Temper applies a Repository Ruleset derived from +# branch_protection.default. Rulesets target ~DEFAULT_BRANCH so they apply on +# empty repos — no more 5x retry race on repository.created. +rulesets: + enabled: true + # Set to false to keep using the legacy branch-protection API only. + fall_back_to_legacy: true + # Optional: name of the managed ruleset (allows multiple managed rulesets later). + name: temper-default-branch-protection + +# Issue-driven repo provisioning. Users open an issue in the controller repo, +# Temper validates the issue form and creates the new repo with full config. +controller_repo: + enabled: false + # owner/repo of the controller. Issues here drive new-repo creation. + repo: pulseengine/repo-requests + # Label that marks an issue as a repo-provision request. + label: repo-request + # Reaction approvers must add to authorise provisioning. + approval_reaction: "+1" + auto_merge: enabled: true on_dependabot: true diff --git a/docs/controller-repo-template/.github/ISSUE_TEMPLATE/new-repo.yml b/docs/controller-repo-template/.github/ISSUE_TEMPLATE/new-repo.yml new file mode 100644 index 0000000..8342361 --- /dev/null +++ b/docs/controller-repo-template/.github/ISSUE_TEMPLATE/new-repo.yml @@ -0,0 +1,91 @@ +name: New repository request +description: Ask Temper to create a new repository in the PulseEngine org with full standard configuration applied. 
+title: "Provision: " +labels: ["repo-request"] +body: + - type: markdown + attributes: + value: | + Fill in the form below. After you submit, Temper will validate the request and post an acknowledgement comment. + An authorised approver must react with 👍 before the repo is actually created. + + - type: input + id: repository_name + attributes: + label: Repository name + description: Lowercase letters, numbers, dots, hyphens, underscores. No spaces. + placeholder: my-new-repo + validations: + required: true + + - type: dropdown + id: visibility + attributes: + label: Visibility + description: GitHub visibility for the new repository. + options: + - private + - internal + - public + default: 0 + validations: + required: true + + - type: textarea + id: description + attributes: + label: Description + description: One-line description shown on the repo page. + placeholder: Short description of what the repo is for. + validations: + required: true + + - type: input + id: template_repository + attributes: + label: Template repository + description: Optional. Format `owner/repo`. If set, the new repo is generated from this template. + placeholder: pulseengine/template-rust-crate + validations: + required: false + + - type: input + id: topics + attributes: + label: Topics + description: Optional. Comma-separated GitHub topics applied after creation. + placeholder: webassembly, safety-critical, formal-verification + validations: + required: false + + # --------------------------------------------------------------------------- + # ⬇️ The fields below are placeholders. They define the contract between + # PulseEngine maintainers and Temper. Replace / extend them to match your + # actual standards. (The Temper bot reads field labels case-insensitively.) + # + # YOUR INPUT GOES HERE — consider: + # - which licenses are pre-approved (MIT, Apache-2.0, dual?) + # - which custom properties the org uses (compliance tier, owning team) + # - whether a CODEOWNERS team is required up front + # - any pulseengine-specific topic that MUST be on every new repo + # --------------------------------------------------------------------------- + + - type: dropdown + id: license + attributes: + label: License + description: TODO — replace this list with licenses your org actually allows. + options: + - MIT + - Apache-2.0 + - "MIT OR Apache-2.0" + - Other (please specify in description) + validations: + required: true + + - type: input + id: codeowners_team + attributes: + label: CODEOWNERS team + description: TODO — set to a sensible default (e.g. `@pulseengine/maintainers`). + placeholder: "@pulseengine/maintainers" diff --git a/docs/controller-repo-template/README.md b/docs/controller-repo-template/README.md new file mode 100644 index 0000000..83f8916 --- /dev/null +++ b/docs/controller-repo-template/README.md @@ -0,0 +1,46 @@ +# Controller repo template + +This directory is a starter for the PulseEngine **controller repo** — a separate +repository whose only job is to host issue forms that drive new-repo creation +through Temper. + +Recommended path: create a repo `pulseengine/repo-requests`, copy the contents +of this directory into its root, and configure Temper: + +```yaml +# config.yml on the Temper deployment +controller_repo: + enabled: true + repo: pulseengine/repo-requests + label: repo-request + approval_reaction: "+1" +``` + +## How it works + +1. A user files an issue in the controller repo using the **New repository + request** form. +2. 
Temper's `issues.opened` handler picks up the issue (filtered by + `config.controller_repo.repo` and the `repo-request` label), parses the form + body, validates it, and enqueues a `provision-repo` task in the persistent + task store. +3. The scheduler claims the task, creates the new repo (template-based when + requested), applies the full org configuration (rulesets, merge settings, + labels, templates, codeowners, dependabot), and comments the new repo URL on + the source issue. +4. The source issue is closed with `state_reason: completed`. The new repo gets + a `provisioned-from` custom property pointing back to the source issue — + that's your audit trail. + +## What you (the operator) need to fill in + +Open `.github/ISSUE_TEMPLATE/new-repo.yml`. The bottom section has fields +marked `TODO` — they are the **5–10 lines that shape the feature for your org**: + +- Which licenses are pre-approved (replace the dropdown list). +- Which custom properties matter (compliance tier, owning team, classification). +- Whether a CODEOWNERS team default makes sense (e.g. `@pulseengine/maintainers`). +- Any topics that MUST appear on every new repo (e.g. `pulseengine`). + +These fields are the contract between humans and the bot — they're the part +where your domain knowledge improves the bot more than any code change could. diff --git a/src/ai-review.js b/src/ai-review.js index 037bf06..aac4f00 100644 --- a/src/ai-review.js +++ b/src/ai-review.js @@ -6,6 +6,35 @@ import { getLogger } from './logger.js'; const _reviewTimestamps = new Map(); +/** @type {{has, get, set, sweep}|null} optional SQLite-backed KV store for rate limits */ +let _rateLimitStore = null; + +/** + * Swap in a persistent KV store (SQLite). Calling with `null` reverts to + * in-memory behaviour. The caller owns the lifecycle of the store. + */ +export function setRateLimitStore(store) { + _rateLimitStore = store; +} + +const RATE_LIMIT_WINDOW_MS = 5 * 60 * 1000; + +function isRateLimited(key) { + if (_rateLimitStore) { + return _rateLimitStore.has(key); + } + const last = _reviewTimestamps.get(key); + return !!(last && Date.now() - last < RATE_LIMIT_WINDOW_MS); +} + +function recordReview(key) { + if (_rateLimitStore) { + _rateLimitStore.set(key, '1', { ttl: RATE_LIMIT_WINDOW_MS }); + return; + } + _reviewTimestamps.set(key, Date.now()); +} + function isLocalEndpoint(endpoint) { try { const url = new URL(endpoint); @@ -297,8 +326,7 @@ async function reviewPullRequest(octokit, owner, repo, prNumber) { // Rate limiting: reject if <5min since last review on same PR const rateKey = `${owner}/${repo}#${prNumber}`; - const lastReview = _reviewTimestamps.get(rateKey); - if (lastReview && Date.now() - lastReview < 300000) { + if (isRateLimited(rateKey)) { return { success: false, error: 'Rate limited: please wait at least 5 minutes between reviews on the same PR.' 
@@ -355,7 +383,7 @@ async function reviewPullRequest(octokit, owner, repo, prNumber) { } ); - _reviewTimestamps.set(rateKey, Date.now()); + recordReview(rateKey); const headSha = prData.data.head?.sha || ''; const comment = formatReviewComment(aiResponse, prNumber, headSha, { diff --git a/src/app.js b/src/app.js index 5080c0a..0024f76 100644 --- a/src/app.js +++ b/src/app.js @@ -1,7 +1,8 @@ import path from 'path'; +import fs from 'node:fs'; import { fileURLToPath } from 'url'; import { createDashboardHandler, DEPLOY_SHA } from './dashboard.js'; -import { getConfig } from './config.js'; +import { getConfig, getControllerRepoConfig } from './config.js'; import { getLogger, setLogger } from './logger.js'; import { configureRepository } from './repository.js'; import { @@ -20,13 +21,15 @@ import { } from './dependabot.js'; import { ensureLabelsExist } from './labels.js'; import { handleSignedCommitMerge, checkPRMergeStrategy } from './merge-strategy.js'; -import { reviewPullRequest, updateReviewStatus } from './ai-review.js'; -import { isProcessed, markProcessed } from './idempotency.js'; +import { reviewPullRequest, updateReviewStatus, setRateLimitStore } from './ai-review.js'; +import { isProcessed, markProcessed, setIdempotencyStore } from './idempotency.js'; import { triggerSelfUpdate } from './self-update.js'; import { defaultQueue } from './queue.js'; import { applySecurityMiddleware } from './middleware.js'; import { initTaskStore } from './task-store.js'; import { createScheduler } from './scheduler.js'; +import { initKVStore } from './persistent-kv.js'; +import { provisionRepo, parseIssueFormBody, validateProvisionRequest } from './provisioning.js'; const __dirname = path.dirname(fileURLToPath(import.meta.url)); @@ -88,7 +91,8 @@ function createCustomRoutesHandler() { }; } -function registerApp(app, { getRouter, addHandler } = {}) { +function registerApp(app, options = {}) { + const { getRouter, addHandler } = options; app.on('repository.created', async (context) => { if (context.log) setLogger(context.log); const deliveryId = context.id; @@ -115,15 +119,15 @@ function registerApp(app, { getRouter, addHandler } = {}) { if (repoOrg === targetOrg) { getLogger().info({ repo: repository.full_name }, 'New repository created'); - // Wait for the default branch to exist before configuring. - // When GitHub fires repository.created, the repo exists but the default - // branch is only created after the first commit. + // Check whether the default branch exists yet. Rulesets work on empty + // repos (they target ref patterns, not branches), so we no longer bail + // when the branch is missing — we just defer the branch-scoped work. 
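+ // A short probe (up to 3 checks, 2s apart) still runs so repos that already
+ // have a first commit get the full configuration in one pass rather than
+ // waiting for the deferred reconcile-repo task.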
const defaultBranch = repository.default_branch || 'main'; const owner = repository.owner.login; const repoName = repository.name; let branchReady = false; - for (let attempt = 1; attempt <= 5; attempt++) { + for (let attempt = 1; attempt <= 3; attempt++) { try { await context.octokit.repos.getBranch({ owner, @@ -134,11 +138,9 @@ function registerApp(app, { getRouter, addHandler } = {}) { break; } catch (err) { if (err.status === 404) { - getLogger().info( - { repo: repository.full_name, branch: defaultBranch, attempt }, - `Default branch not ready yet, retrying in 2s (attempt ${attempt}/5)` - ); - await new Promise(resolve => setTimeout(resolve, 2000)); + if (attempt < 3) { + await new Promise((resolve) => setTimeout(resolve, 2000)); + } } else { getLogger().warn( { repo: repository.full_name, err: err.message }, @@ -150,16 +152,15 @@ function registerApp(app, { getRouter, addHandler } = {}) { } if (!branchReady) { - getLogger().warn( + getLogger().info( { repo: repository.full_name, branch: defaultBranch }, - 'Default branch never appeared — repo may still be empty. Skipping configuration.' + 'Default branch missing — applying partial configuration; remainder deferred until first push' ); - if (deliveryId) markProcessed(deliveryId); - return; } const result = await configureRepository(context.octokit, repository, undefined, { - enqueueTask: getEnqueueTask() + enqueueTask: getEnqueueTask(), + skipBranchScopedWork: !branchReady }); if (repository.has_issues) { @@ -185,6 +186,84 @@ function registerApp(app, { getRouter, addHandler } = {}) { if (deliveryId) markProcessed(deliveryId); }); + // Issue-driven repo provisioning (Cut A): user files an issue in the + // controller repo with the issue form; we validate and enqueue. + app.on('issues.opened', async (context) => { + if (context.log) setLogger(context.log); + const deliveryId = context.id; + if (deliveryId && isProcessed(deliveryId)) return; + + const ctrl = getControllerRepoConfig(); + if (!ctrl?.enabled) { + if (deliveryId) markProcessed(deliveryId); + return; + } + + const { issue, repository } = context.payload; + const fullName = repository.full_name; + if (fullName !== ctrl.repo) { + if (deliveryId) markProcessed(deliveryId); + return; + } + + const expectedLabel = ctrl.label || 'repo-request'; + const hasLabel = (issue.labels || []).some((l) => l.name === expectedLabel); + if (!hasLabel) { + if (deliveryId) markProcessed(deliveryId); + return; + } + + const fields = parseIssueFormBody(issue.body || ''); + const validation = validateProvisionRequest(fields); + + const owner = repository.owner.login; + const repo = repository.name; + + if (!validation.valid) { + await context.octokit.issues.createComment({ + owner, + repo, + issue_number: issue.number, + body: `❌ Provisioning request rejected: ${validation.error}` + }); + if (deliveryId) markProcessed(deliveryId); + return; + } + + const enqueueTask = getEnqueueTask(); + if (!enqueueTask) { + getLogger().warn('Task store not initialized — cannot enqueue provision-repo task'); + await context.octokit.issues.createComment({ + owner, + repo, + issue_number: issue.number, + body: '⚠️ Provisioning is not currently available (task store offline). Please retry later.' 
+ }); + if (deliveryId) markProcessed(deliveryId); + return; + } + + enqueueTask( + 'provision-repo', + `provision-repo:${owner}/${repo}#${issue.number}`, + { + sourceOwner: owner, + sourceRepo: repo, + sourceIssue: issue.number, + params: validation.params + } + ); + + await context.octokit.issues.createComment({ + owner, + repo, + issue_number: issue.number, + body: `✅ Request accepted. Provisioning \`${validation.params.name}\` (${validation.params.visibility}) — you'll get a confirmation comment when the repo is ready.` + }); + + if (deliveryId) markProcessed(deliveryId); + }); + app.on('issue_comment.created', async (context) => { if (!context.octokit) { return; } if (context.log) setLogger(context.log); @@ -501,7 +580,13 @@ function registerApp(app, { getRouter, addHandler } = {}) { return; } - const result = await handleSignedCommitMerge(context.octokit, owner, repo, issueNumber); + const result = await handleSignedCommitMerge( + context.octokit, + owner, + repo, + issueNumber, + { enqueueTask: getEnqueueTask() } + ); if (result.success) { if (result.action === 'temporarily_allowed_merge_commits') { await context.octokit.issues.createComment({ @@ -754,26 +839,84 @@ function registerApp(app, { getRouter, addHandler } = {}) { app.onError((error) => { getLogger().error({ err: error }, 'Probot error'); }); + + // Initialize persistence + scheduler unless caller opted out (e.g. tests). + // Auto-skip in jest to avoid opening real SQLite files in unit tests. + const inTest = process.env.NODE_ENV === 'test' || process.env.JEST_WORKER_ID !== undefined; + if (!options.skipPersistence && !inTest) { + initPersistence(); + initScheduler(app); + } +} + +// Resolve the data directory. Created on first use. +function getDataDir() { + const dir = path.join(__dirname, '..', 'data'); + return dir; +} + +/** + * Initialize SQLite-backed persistence for idempotency and AI rate limits, + * replacing the in-memory Maps. Called once at app startup. + */ +function initPersistence() { + try { + const dataDir = getDataDir(); + if (!fs.existsSync(dataDir)) fs.mkdirSync(dataDir, { recursive: true }); + + const dedupStore = initKVStore(path.join(dataDir, 'dedup.db'), 'webhook_dedup', { + ttlMs: 60 * 60 * 1000 + }); + setIdempotencyStore(dedupStore); + + const rateStore = initKVStore(path.join(dataDir, 'dedup.db'), 'ai_rate_limits', { + ttlMs: 5 * 60 * 1000 + }); + setRateLimitStore(rateStore); + + getLogger().info('Persistent KV stores initialized (idempotency, ai_rate_limits)'); + } catch (err) { + getLogger().warn({ err: err.message }, 'Failed to initialise persistent KV — falling back to in-memory'); + } } /** - * Initialize the task store and scheduler. - * Call this once at startup with an authenticated octokit instance. + * Initialize the task store and scheduler. Probot installation tokens expire + * after 1 hour, so the scheduler is given a factory that mints a fresh + * installation-scoped Octokit per tick. 
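+ * The factory also re-resolves the org installation on each tick, so a
+ * reinstall of the GitHub App is picked up without restarting the process.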
+ * + * @param {object} app - Probot app instance */ -function initScheduler(octokit) { +function initScheduler(app) { const config = getConfig(); const schedulerConfig = config.scheduler || {}; + const targetOrg = config?.organization || process.env.ORGANIZATION; + + if (!targetOrg) { + getLogger().warn('No target organization — scheduler not started'); + return null; + } - const dbPath = path.join(__dirname, '..', 'data', 'tasks.db'); + const dbPath = path.join(getDataDir(), 'tasks.db'); _taskStore = initTaskStore(dbPath); - _scheduler = createScheduler(_taskStore, octokit, { + // Per-tick factory: get app-level octokit, look up installation for the org, + // mint installation-scoped octokit. Keeps installation tokens fresh. + async function octokitFactory() { + const appOctokit = await app.auth(); + const { data: installation } = await appOctokit.request( + 'GET /orgs/{org}/installation', + { org: targetOrg } + ); + return await app.auth(installation.id); + } + + _scheduler = createScheduler(_taskStore, octokitFactory, { intervalMs: (schedulerConfig.interval_minutes || 5) * 60 * 1000, maxTasksPerTick: schedulerConfig.max_tasks_per_tick || 5, rateLimitThreshold: schedulerConfig.rate_limit_threshold || 100 }); - // Register the generate-dependabot handler _scheduler.registerHandler('generate-dependabot', async (payload, { octokit: kit, logger }) => { const { owner, repo, defaultBranch } = payload; logger.info(`Scheduler: generating dependabot config for ${owner}/${repo}`); @@ -781,9 +924,7 @@ function initScheduler(octokit) { const result = await generateDependabotConfig(kit, owner, repo, defaultBranch); if (result.config) { const labels = extractLabelsFromConfig(result.config); - if (labels.length > 0) { - await ensureLabelsExist(kit, owner, repo, labels); - } + if (labels.length > 0) await ensureLabelsExist(kit, owner, repo, labels); await applyDependabotConfig(kit, owner, repo, result.config); logger.info(`Scheduler: applied dependabot config to ${owner}/${repo}`); } else { @@ -791,6 +932,29 @@ function initScheduler(octokit) { } }); + _scheduler.registerHandler('revert-merge-settings', async (payload, { octokit: kit, logger }) => { + const { owner, repo, allow_merge_commit } = payload; + await kit.request('PATCH /repos/{owner}/{repo}', { owner, repo, allow_merge_commit }); + logger.info(`Scheduler: reverted merge settings on ${owner}/${repo} (allow_merge_commit=${allow_merge_commit})`); + }); + + _scheduler.registerHandler('reconcile-repo', async (payload, { octokit: kit, logger }) => { + const { owner, repo } = payload; + logger.info(`Scheduler: reconciling ${owner}/${repo}`); + // Re-run full configuration. configureRepository is itself idempotent. 
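+ // The enqueueTask shim below lets configureRepository schedule any follow-up
+ // tasks it needs from inside a scheduler tick, writing straight to the task store.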
+ const repoData = await kit.request('GET /repos/{owner}/{repo}', { owner, repo }); + const result = await configureRepository(kit, repoData.data, undefined, { + enqueueTask: (type, key, p, opts) => _taskStore.enqueue(type, key, p, opts) + }); + if (!result.success) { + throw new Error(result.error || 'reconcile-repo failed'); + } + }); + + _scheduler.registerHandler('provision-repo', async (payload, ctx) => { + await provisionRepo(payload, ctx); + }); + _scheduler.start(); getLogger().info('Task store and scheduler initialized'); diff --git a/src/config.js b/src/config.js index db0cff6..df2dd39 100644 --- a/src/config.js +++ b/src/config.js @@ -152,6 +152,14 @@ export function mergePullRequestRules(protectionConfig = {}) { return merged; } +export function getRulesetConfig() { + return config?.rulesets || { enabled: false }; +} + +export function getControllerRepoConfig() { + return config?.controller_repo || { enabled: false }; +} + export function getRequiredSignaturesFlag(protectionConfig = {}) { if (typeof protectionConfig.require_signed_commits === 'boolean') { return protectionConfig.require_signed_commits; diff --git a/src/idempotency.js b/src/idempotency.js index 5464571..3c38a56 100644 --- a/src/idempotency.js +++ b/src/idempotency.js @@ -1,92 +1,79 @@ /** * Webhook idempotency guard. * - * Keeps an in-memory record of GitHub webhook delivery IDs that have already - * been processed so that re-delivered webhooks are silently skipped. + * Backed by SQLite when `setIdempotencyStore` has been called; falls back to an + * in-memory `Map` otherwise (tests, dev). Either backend dedupes GitHub webhook + * delivery IDs so re-delivered webhooks are silently skipped. * * Entries older than 1 hour are cleaned up automatically every 10 minutes to - * prevent unbounded memory growth. + * prevent unbounded growth in either backend. */ -const ENTRY_TTL_MS = 60 * 60 * 1000; // 1 hour -const CLEANUP_INTERVAL_MS = 10 * 60 * 1000; // 10 minutes +const ENTRY_TTL_MS = 60 * 60 * 1000; // 1 hour +const CLEANUP_INTERVAL_MS = 10 * 60 * 1000; // 10 minutes -/** @type {Map} deliveryId -> timestamp */ -const store = new Map(); +/** @type {Map} deliveryId -> timestamp (in-memory fallback) */ +const memStore = new Map(); + +/** @type {{has, set, sweep}|null} optional SQLite-backed KV store */ +let persistStore = null; let cleanupTimer = null; /** - * Returns true if the given delivery ID has already been processed. - * @param {string} deliveryId - * @returns {boolean} + * Swap in a persistent KV store (SQLite). Calling with `null` reverts to + * in-memory behaviour. The caller owns the lifecycle of the store. */ -function isProcessed(deliveryId) { - return store.has(deliveryId); +export function setIdempotencyStore(store) { + persistStore = store; } -/** - * Records a delivery ID as processed, with the current timestamp. - * @param {string} deliveryId - */ -function markProcessed(deliveryId) { - store.set(deliveryId, Date.now()); +export function isProcessed(deliveryId) { + if (persistStore) return persistStore.has(deliveryId); + const ts = memStore.get(deliveryId); + if (ts === undefined) return false; + if (ts < Date.now() - ENTRY_TTL_MS) { + memStore.delete(deliveryId); + return false; + } + return true; } -/** - * Removes entries older than ENTRY_TTL_MS. 
- */ -function cleanup() { - const cutoff = Date.now() - ENTRY_TTL_MS; - for (const [id, ts] of store) { - if (ts < cutoff) { - store.delete(id); - } +export function markProcessed(deliveryId) { + if (persistStore) { + persistStore.set(deliveryId, '1', { ttl: ENTRY_TTL_MS }); + return; } + memStore.set(deliveryId, Date.now()); } -/** - * Start the automatic cleanup interval. Safe to call more than once (will - * not create duplicate timers). - */ -function startAutoCleanup() { - if (cleanupTimer) { +export function cleanup() { + if (persistStore) { + persistStore.sweep(); return; } - cleanupTimer = setInterval(cleanup, CLEANUP_INTERVAL_MS); - // Allow the process to exit even if the timer is still active. - if (cleanupTimer.unref) { - cleanupTimer.unref(); + const cutoff = Date.now() - ENTRY_TTL_MS; + for (const [id, ts] of memStore) { + if (ts < cutoff) memStore.delete(id); } } -/** - * Stop the automatic cleanup interval (useful in tests). - */ -function stopAutoCleanup() { +export function startAutoCleanup() { + if (cleanupTimer) return; + cleanupTimer = setInterval(cleanup, CLEANUP_INTERVAL_MS); + if (cleanupTimer.unref) cleanupTimer.unref(); +} + +export function stopAutoCleanup() { if (cleanupTimer) { clearInterval(cleanupTimer); cleanupTimer = null; } } -// Start auto-cleanup on module load. startAutoCleanup(); -/** - * Exposed for testing only -- returns the internal Map so tests can inspect - * state directly. - * @returns {Map} - */ -function _getStore() { - return store; +/** Exposed for tests only — returns the in-memory Map. */ +export function _getStore() { + return memStore; } - -export { - isProcessed, - markProcessed, - cleanup, - startAutoCleanup, - stopAutoCleanup, - _getStore -}; diff --git a/src/merge-strategy.js b/src/merge-strategy.js index 12a4b88..1d2b5a7 100644 --- a/src/merge-strategy.js +++ b/src/merge-strategy.js @@ -1,7 +1,7 @@ import { getConfig } from './config.js'; import { getLogger } from './logger.js'; -async function handleSignedCommitMerge(octokit, owner, repo, prNumber) { +async function handleSignedCommitMerge(octokit, owner, repo, prNumber, { enqueueTask } = {}) { const config = getConfig(); try { @@ -59,18 +59,31 @@ async function handleSignedCommitMerge(octokit, owner, repo, prNumber) { getLogger().info('✅ Temporarily allowed merge commits for signed commit preservation'); - setTimeout(async () => { - try { - await octokit.request('PATCH /repos/{owner}/{repo}', { - owner, - repo, - allow_merge_commit: originalAllowMergeCommits - }); - getLogger().info('⏳ Re-enabled rebase-only merge after timeout'); - } catch (error) { - getLogger().error('❌ Failed to restore merge settings:', error.message); - } - }, config.signed_commit_strategy?.temporary_rule_timeout || 3600000); + const timeoutMs = config.signed_commit_strategy?.temporary_rule_timeout || 3600000; + + if (enqueueTask) { + // Persistent revert: survives process restart, unlike setTimeout. + enqueueTask( + 'revert-merge-settings', + `revert-merge-settings:${owner}/${repo}:${prNumber}`, + { owner, repo, allow_merge_commit: originalAllowMergeCommits }, + { delayMs: timeoutMs } + ); + } else { + // Best-effort fallback when no task store is wired (tests, dev). 
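+ // Note: this timer is lost on a process restart (the audit gap the persistent
+ // task path above closes), so it is acceptable only for tests and local dev.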
+ setTimeout(async () => { + try { + await octokit.request('PATCH /repos/{owner}/{repo}', { + owner, + repo, + allow_merge_commit: originalAllowMergeCommits + }); + getLogger().info('⏳ Re-enabled rebase-only merge after timeout'); + } catch (error) { + getLogger().error('❌ Failed to restore merge settings:', error.message); + } + }, timeoutMs); + } return { success: true, diff --git a/src/persistent-kv.js b/src/persistent-kv.js new file mode 100644 index 0000000..86b505d --- /dev/null +++ b/src/persistent-kv.js @@ -0,0 +1,81 @@ +/** + * SQLite-backed key/value store with TTL. + * + * Used to persist idempotency keys (webhook delivery IDs) and AI review rate + * limits across process restarts. The previous in-memory Map-based stores + * lost both on every PM2 reload — webhooks were re-processed and PRs were + * re-reviewed, both of which violate at-most-once semantics. + */ + +import Database from 'better-sqlite3'; + +/** + * Initialise (or attach to) a SQLite-backed KV table. Tables are namespaced + * by `name` so multiple stores can share one database file. + * + * @param {string} dbPath - SQLite file path + * @param {string} name - logical table name (alphanumeric / underscores) + * @param {object} [opts] + * @param {number} [opts.ttlMs] - default TTL applied when not provided per-write. + * Entries older than ttl are deleted on read. + * @returns {{has, get, set, sweep, close, _db}} + */ +export function initKVStore(dbPath, name, opts = {}) { + if (!/^[a-z][a-z0-9_]*$/i.test(name)) { + throw new Error(`Invalid KV store name: ${name}`); + } + const ttlMs = opts.ttlMs; + + const db = new Database(dbPath); + db.pragma('journal_mode = WAL'); + db.exec(` + CREATE TABLE IF NOT EXISTS kv_${name} ( + key TEXT PRIMARY KEY, + value TEXT, + expires_at INTEGER + ) + `); + db.exec(`CREATE INDEX IF NOT EXISTS idx_kv_${name}_expires ON kv_${name}(expires_at)`); + + const stmts = { + get: db.prepare(`SELECT value, expires_at FROM kv_${name} WHERE key = ?`), + upsert: db.prepare(` + INSERT INTO kv_${name} (key, value, expires_at) + VALUES (@key, @value, @expires_at) + ON CONFLICT(key) DO UPDATE SET + value = excluded.value, + expires_at = excluded.expires_at + `), + sweep: db.prepare(`DELETE FROM kv_${name} WHERE expires_at IS NOT NULL AND expires_at < ?`) + }; + + function has(key) { + const row = stmts.get.get(key); + if (!row) return false; + if (row.expires_at && row.expires_at < Date.now()) return false; + return true; + } + + function get(key) { + const row = stmts.get.get(key); + if (!row) return undefined; + if (row.expires_at && row.expires_at < Date.now()) return undefined; + return row.value; + } + + function set(key, value = '', { ttl } = {}) { + const effectiveTtl = ttl ?? ttlMs; + const expires_at = effectiveTtl ? Date.now() + effectiveTtl : null; + stmts.upsert.run({ key, value: String(value), expires_at }); + } + + function sweep() { + return stmts.sweep.run(Date.now()).changes; + } + + function close() { + db.close(); + } + + return { has, get, set, sweep, close, _db: db }; +} diff --git a/src/provisioning.js b/src/provisioning.js new file mode 100644 index 0000000..8d0a87e --- /dev/null +++ b/src/provisioning.js @@ -0,0 +1,244 @@ +/** + * Issue-driven repository provisioning. + * + * Pattern: a separate "controller" repo (e.g. pulseengine/repo-requests) holds + * an issue form; a maintainer files an issue, an authorized approver reacts, + * and Temper provisions the new repo with full configuration. + * + * Cut A architectural decision: Temper is the brain. 
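+ * Note: approval enforcement is not wired up yet. The issues.opened handler
+ * enqueues provisioning as soon as the form validates; it does not check
+ * `controller_repo.approval_reaction`.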
The controller repo only + * holds the issue-form schema. All validation, creation, and configuration + * happens here. + */ + +import { getLogger } from './logger.js'; +import { getConfig } from './config.js'; +import { configureRepository } from './repository.js'; + +/** + * Parse a GitHub issue-form body into a flat object. + * + * GitHub renders an issue-form submission as alternating headings and content: + * + * ### Repository name + * + * my-new-repo + * + * ### Visibility + * + * private + * + * This function walks the body and yields { 'repository name': 'my-new-repo', + * 'visibility': 'private', ... }. Keys are lowercased and trimmed; values + * preserve internal whitespace but trim leading/trailing. + * + * "_No response_" (GitHub's rendering for an unanswered optional field) is + * normalised to undefined. + * + * @param {string} body + * @returns {Record} + */ +export function parseIssueFormBody(body) { + if (typeof body !== 'string') return {}; + + const result = {}; + const lines = body.split(/\r?\n/); + let currentKey = null; + let buffer = []; + + function flush() { + if (currentKey === null) return; + const value = buffer.join('\n').trim(); + if (value && value !== '_No response_') { + result[currentKey] = value; + } + buffer = []; + } + + for (const line of lines) { + const headerMatch = line.match(/^###\s+(.+?)\s*$/); + if (headerMatch) { + flush(); + currentKey = headerMatch[1].trim().toLowerCase(); + } else if (currentKey !== null) { + buffer.push(line); + } + } + flush(); + + return result; +} + +const VALID_VISIBILITIES = new Set(['public', 'private', 'internal']); +const REPO_NAME_RE = /^[a-zA-Z0-9._-]+$/; + +/** + * Validate the parsed issue-form fields. Returns either {valid: true, params} + * with normalised provisioning params, or {valid: false, error} with a human + * message that will be posted back on the issue. + */ +export function validateProvisionRequest(fields) { + const name = fields['repository name'] || fields.name; + if (!name) return { valid: false, error: 'Missing field "Repository name".' }; + if (!REPO_NAME_RE.test(name)) { + return { + valid: false, + error: `Invalid repository name "${name}". Use only letters, numbers, dots, hyphens, underscores.` + }; + } + + const visibilityRaw = (fields.visibility || 'private').toLowerCase(); + if (!VALID_VISIBILITIES.has(visibilityRaw)) { + return { + valid: false, + error: `Invalid visibility "${visibilityRaw}". Allowed: public, private, internal.` + }; + } + + const description = fields.description; + const templateRepo = fields['template repository'] || fields.template; + const topicsRaw = fields.topics || ''; + const topics = topicsRaw + .split(/[,\s]+/) + .map((t) => t.trim().toLowerCase()) + .filter(Boolean); + + return { + valid: true, + params: { + name, + visibility: visibilityRaw, + description, + templateRepo, + topics + } + }; +} + +/** + * Create the repo on GitHub. Uses template generation when a template repo is + * supplied (POST /repos/{template_owner}/{template_repo}/generate); otherwise + * a plain create (POST /orgs/{org}/repos). Returns the created repo payload. 
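+ * Note: template generation sends only a boolean `private` flag, so a request
+ * for `internal` visibility is created as private when a template is used.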
+
+/**
+ * Create the repo on GitHub. Uses template generation when a template repo is
+ * supplied (POST /repos/{template_owner}/{template_repo}/generate); otherwise
+ * a plain create (POST /orgs/{org}/repos). Returns the created repo payload.
+ */
+async function createGitHubRepo(octokit, org, params) {
+  if (params.templateRepo) {
+    const [tplOwner, tplRepo] = params.templateRepo.split('/');
+    if (!tplOwner || !tplRepo) {
+      throw new Error(`Template "${params.templateRepo}" must be "owner/repo".`);
+    }
+    const response = await octokit.request(
+      'POST /repos/{template_owner}/{template_repo}/generate',
+      {
+        template_owner: tplOwner,
+        template_repo: tplRepo,
+        owner: org,
+        name: params.name,
+        description: params.description,
+        // The generate endpoint only accepts a boolean `private`, so an
+        // "internal" request is downgraded to private here.
+        private: params.visibility !== 'public',
+        include_all_branches: false
+      }
+    );
+    return response.data;
+  }
+
+  const response = await octokit.request('POST /orgs/{org}/repos', {
+    org,
+    name: params.name,
+    description: params.description,
+    visibility: params.visibility,
+    auto_init: true,
+    has_issues: true,
+    has_projects: false,
+    has_wiki: false
+  });
+  return response.data;
+}
+
+/**
+ * Provision a repo from an approved issue-form request.
+ * Called by the scheduler when a `provision-repo` task is claimed.
+ *
+ * @param {object} payload - {sourceOwner, sourceRepo, sourceIssue, params}
+ * @param {object} ctx - {octokit, logger}
+ */
+export async function provisionRepo(payload, { octokit, logger = getLogger() }) {
+  const { sourceOwner, sourceRepo, sourceIssue, params } = payload;
+  const config = getConfig();
+  const targetOrg = config?.organization || sourceOwner;
+
+  logger.info({ params, targetOrg }, 'Provisioning new repo from issue request');
+
+  let created;
+  try {
+    created = await createGitHubRepo(octokit, targetOrg, params);
+  } catch (err) {
+    await octokit.request('POST /repos/{owner}/{repo}/issues/{issue_number}/comments', {
+      owner: sourceOwner,
+      repo: sourceRepo,
+      issue_number: sourceIssue,
+      body: `❌ Failed to create repository: ${err.message}`
+    });
+    throw err;
+  }
+
+  if (params.topics?.length) {
+    try {
+      await octokit.request('PUT /repos/{owner}/{repo}/topics', {
+        owner: targetOrg,
+        repo: params.name,
+        names: params.topics
+      });
+    } catch (err) {
+      logger.warn({ err: err.message }, 'Failed to set topics on new repo');
+    }
+  }
+
+  // Tag the new repo with a custom property linking back to the source issue,
+  // creating a durable audit trail. Failure is non-fatal — custom properties
+  // require the property to be defined at the org level first.
+  try {
+    await octokit.request(
+      'PATCH /orgs/{org}/properties/values',
+      {
+        org: targetOrg,
+        repository_names: [params.name],
+        properties: [
+          {
+            property_name: 'provisioned-from',
+            value: `${sourceOwner}/${sourceRepo}#${sourceIssue}`
+          }
+        ]
+      }
+    );
+  } catch (err) {
+    logger.info({ err: err.message }, 'Could not set provisioned-from custom property (property likely not defined at the org level)');
+  }
+
+  // Apply standard configuration. The default branch may or may not exist
+  // depending on whether the repo was template-generated or auto_init was used;
+  // configureRepository handles both cases.
+  const skipBranchScopedWork = !created.default_branch;
+  await configureRepository(octokit, created, undefined, { skipBranchScopedWork });
+
+  await octokit.request('POST /repos/{owner}/{repo}/issues/{issue_number}/comments', {
+    owner: sourceOwner,
+    repo: sourceRepo,
+    issue_number: sourceIssue,
+    body:
+      `✅ Repository created: ${created.html_url}\n\n` +
+      `Configuration applied. ` +
+      (skipBranchScopedWork
+        ? 'Branch-scoped configuration (templates, codeowners, dependabot) will be applied after the first push.'
+        : 'All standard files committed to default branch.')
+  });
+
+  // Close the source issue.
+  await octokit.request('PATCH /repos/{owner}/{repo}/issues/{issue_number}', {
+    owner: sourceOwner,
+    repo: sourceRepo,
+    issue_number: sourceIssue,
+    state: 'closed',
+    state_reason: 'completed'
+  });
+
+  return { repoUrl: created.html_url, name: created.name };
+}
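
Reviewer note — the payload shape `provisionRepo` expects, and how a `provision-repo` task handler might invoke it. The handler signature and the exact field values below are assumptions for illustration; they are not code from this PR.

// Sketch only, not part of the diff.
import { provisionRepo } from './src/provisioning.js';

// What the issues.opened handler would enqueue once validateProvisionRequest passes:
const payload = {
  sourceOwner: 'pulseengine',
  sourceRepo: 'repo-requests',
  sourceIssue: 42,
  params: { name: 'new-svc', visibility: 'private', description: 'A new service', topics: [] }
};

// A 'provision-repo' task handler then just forwards the stored payload together
// with an installation-scoped octokit supplied by the scheduler:
async function handleProvisionRepo(task, octokit) {
  return provisionRepo(task.payload ?? payload, { octokit });
}
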
diff --git a/src/repository.js b/src/repository.js
index 6b8c24b..1f40d3e 100644
--- a/src/repository.js
+++ b/src/repository.js
@@ -5,9 +5,11 @@ import {
   getMergeSettings,
   getBranchProtectionConfig,
   mergePullRequestRules,
-  getTargetIssueLabels
+  getTargetIssueLabels,
+  getRulesetConfig
 } from './config.js';
 import { applyBranchProtection } from './branch-protection.js';
+import { applyRulesetFromBranchProtection } from './rulesets.js';
 import { applyTemplates, applyCodeowners } from './templates.js';
 import { synchronizeIssueLabels } from './labels.js';
 import {
@@ -16,7 +18,63 @@ import {
   fixDependabotPRLabels
 } from './dependabot.js';
 
-async function configureRepository(octokit, repoOrOwner, maybeRepo, { enqueueTask } = {}) {
+/**
+ * Apply branch-level enforcement (rulesets when enabled, legacy branch protection
+ * otherwise). Rulesets work on empty repos; legacy does not — that's the whole
+ * point of preferring rulesets when available.
+ */
+async function applyBranchEnforcement(octokit, owner, repo, defaultBranch, protectionConfig) {
+  const rulesetCfg = getRulesetConfig();
+
+  if (rulesetCfg?.enabled) {
+    try {
+      const result = await applyRulesetFromBranchProtection(
+        octokit,
+        owner,
+        repo,
+        protectionConfig,
+        { name: rulesetCfg.name }
+      );
+      getLogger().info(
+        { action: result.action, id: result.id },
+        `Ruleset applied to ${owner}/${repo}`
+      );
+      return { mode: 'ruleset', ...result };
+    } catch (err) {
+      const status = err.status || err.statusCode;
+      if (rulesetCfg.fall_back_to_legacy && (status === 404 || status === 422 || status === 403)) {
+        getLogger().warn(
+          { status, err: err.message },
+          `Rulesets not available for ${owner}/${repo}, falling back to legacy branch protection`
+        );
+      } else {
+        throw err;
+      }
+    }
+  }
+
+  await applyBranchProtection(octokit, owner, repo, defaultBranch, protectionConfig);
+  return { mode: 'legacy' };
+}
+
+/**
+ * Configure a repo to org standards.
+ *
+ * @param {object} octokit
+ * @param {string|object} repoOrOwner
+ * @param {string} [maybeRepo]
+ * @param {object} [opts]
+ * @param {Function} [opts.enqueueTask] - persistent task enqueue
+ * @param {boolean} [opts.skipBranchScopedWork=false] - skip operations that require
+ *   the default branch to exist (templates, codeowners, dependabot, labels via
+ *   commits). Set true when configuring an empty repo.
+ */
+async function configureRepository(
+  octokit,
+  repoOrOwner,
+  maybeRepo,
+  { enqueueTask, skipBranchScopedWork = false } = {}
+) {
   const config = getConfig();
   const repoInfo = normalizeRepoInput(repoOrOwner, maybeRepo);
   const owner = repoInfo.owner.login;
@@ -24,7 +82,7 @@ async function configureRepository(octokit, repoOrOwner, maybeRepo, { enqueueTas
   const defaultBranch = getDefaultBranch(repoInfo);
 
   try {
-    getLogger().info(`Configuring repository: ${owner}/${repo}`);
+    getLogger().info(`Configuring repository: ${owner}/${repo} (skipBranchScopedWork=${skipBranchScopedWork})`);
 
     const mergeSettings = getMergeSettings(repoInfo);
     await octokit.request('PATCH /repos/{owner}/{repo}', {
@@ -37,14 +95,32 @@ async function configureRepository(octokit, repoOrOwner, maybeRepo, { enqueueTas
     const protectionConfig = mergePullRequestRules(
       getBranchProtectionConfig(repoInfo) || {}
     );
-    await applyBranchProtection(octokit, owner, repo, defaultBranch, protectionConfig);
+    await applyBranchEnforcement(octokit, owner, repo, defaultBranch, protectionConfig);
   }
 
   const targetLabels = getTargetIssueLabels();
   if (targetLabels.length > 0) {
+    // Labels don't need a default branch — safe even on empty repos.
     await synchronizeIssueLabels(octokit, owner, repo, targetLabels);
   }
 
+  if (skipBranchScopedWork) {
+    // The repo has no default branch yet. Rulesets, merge settings and labels
+    // are already applied above. Defer the rest to a follow-up task once the
+    // first push lands.
+    if (enqueueTask) {
+      enqueueTask(
+        'reconcile-repo',
+        `reconcile-repo:${owner}/${repo}`,
+        { owner, repo },
+        { delayMs: 60000 }
+      );
+      getLogger().info(`Enqueued reconcile-repo for ${owner}/${repo} (empty repo deferred work)`);
+    }
+    getLogger().info(`✅ Partial config applied to empty repo ${owner}/${repo}`);
+    return { success: true, partial: true };
+  }
+
   if (config?.templates) {
     await applyTemplates(octokit, owner, repo, config.templates);
   }
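
Reviewer note — expected behaviour of the new empty-repo path, as a sketch. `octokit` and `repoPayload` (a repository webhook payload without `default_branch`) are assumed to be in scope, and the `enqueueTask` signature is inferred from the call site above rather than a documented contract.

// Sketch only, not part of the diff.
import { configureRepository } from './src/repository.js';

const result = await configureRepository(octokit, repoPayload, undefined, {
  skipBranchScopedWork: true,
  enqueueTask: (type, key, payload, opts) => console.log('queued', type, key, payload, opts)
});
// => { success: true, partial: true }
// Rulesets, merge settings and labels are applied immediately; templates, CODEOWNERS
// and dependabot config are deferred to the queued reconcile-repo task (delayMs: 60000).
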
diff --git a/src/rulesets.js b/src/rulesets.js
new file mode 100644
index 0000000..e0fe355
--- /dev/null
+++ b/src/rulesets.js
@@ -0,0 +1,172 @@
+/**
+ * Repository Rulesets — modern replacement for the legacy branch-protection API.
+ *
+ * Why this module exists:
+ * GitHub fires `repository.created` before the default branch has any commits,
+ * so the legacy `PUT /repos/{owner}/{repo}/branches/{branch}/protection`
+ * endpoint returns 404 for empty repos. Rulesets target ref *patterns*
+ * (`~DEFAULT_BRANCH`) and apply regardless of whether the branch already
+ * exists — which kills the new-repo race entirely.
+ *
+ * The module is intentionally idempotent: it lists rulesets by name, then
+ * creates / updates / leaves alone as needed. Safe to call on every webhook.
+ */
+
+import { getLogger } from './logger.js';
+
+export const TEMPER_RULESET_NAME = 'temper-default-branch-protection';
+
+/**
+ * Translate a legacy branch_protection.default config block into a Repository
+ * Ruleset payload. Keeps the existing config.yml shape working for users.
+ *
+ * @param {object} bpConfig - branch_protection.default value from config.yml
+ * @param {string} [name=TEMPER_RULESET_NAME]
+ * @returns {object} a ruleset payload suitable for POST/PUT /repos/.../rulesets
+ */
+export function translateBranchProtectionToRuleset(bpConfig = {}, name = TEMPER_RULESET_NAME) {
+  const rules = [];
+
+  if (!bpConfig.allow_deletions) rules.push({ type: 'deletion' });
+  if (!bpConfig.allow_force_pushes) rules.push({ type: 'non_fast_forward' });
+
+  if (bpConfig.require_signed_commits || bpConfig.required_signatures) {
+    rules.push({ type: 'required_signatures' });
+  }
+
+  if (bpConfig.required_linear_history) {
+    rules.push({ type: 'required_linear_history' });
+  }
+
+  const r = bpConfig.required_pull_request_reviews;
+  if (r && r !== null) {
+    rules.push({
+      type: 'pull_request',
+      parameters: {
+        required_approving_review_count: r.required_approving_review_count ?? 0,
+        dismiss_stale_reviews_on_push: r.dismiss_stale_reviews ?? false,
+        require_code_owner_review: r.require_code_owner_reviews ?? false,
+        require_last_push_approval: r.require_last_push_approval ?? false,
+        required_review_thread_resolution: bpConfig.required_conversation_resolution ?? false
+      }
+    });
+  }
+
+  const checks = bpConfig.required_status_checks;
+  if (checks && checks !== null) {
+    rules.push({
+      type: 'required_status_checks',
+      parameters: {
+        strict_required_status_checks_policy: checks.strict ?? false,
+        required_status_checks: (checks.contexts || []).map((c) =>
+          typeof c === 'string' ? { context: c } : c
+        )
+      }
+    });
+  }
+
+  return {
+    name,
+    target: 'branch',
+    enforcement: 'active',
+    conditions: {
+      ref_name: {
+        include: ['~DEFAULT_BRANCH'],
+        exclude: []
+      }
+    },
+    rules,
+    bypass_actors: []
+  };
+}
+
+function normalizeRules(rules) {
+  return [...(rules || [])]
+    .map((r) => ({
+      type: r.type,
+      parameters: r.parameters ? canonicalize(r.parameters) : undefined
+    }))
+    .sort((a, b) => a.type.localeCompare(b.type));
+}
+
+function canonicalize(value) {
+  if (Array.isArray(value)) return value.map(canonicalize);
+  if (value && typeof value === 'object') {
+    const sorted = {};
+    for (const key of Object.keys(value).sort()) sorted[key] = canonicalize(value[key]);
+    return sorted;
+  }
+  return value;
+}
+
+export function rulesetsMatch(actual, desired) {
+  if (!actual) return false;
+  if (actual.enforcement !== desired.enforcement) return false;
+  if (JSON.stringify(canonicalize(actual.conditions)) !== JSON.stringify(canonicalize(desired.conditions))) {
+    return false;
+  }
+  if (JSON.stringify(normalizeRules(actual.rules)) !== JSON.stringify(normalizeRules(desired.rules))) {
+    return false;
+  }
+  return true;
+}
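
Reviewer note — what the translation produces for a typical config block. The input values are illustrative, not taken from the shipped config.yml.

// Sketch only, not part of the diff.
import { translateBranchProtectionToRuleset } from './src/rulesets.js';

const ruleset = translateBranchProtectionToRuleset({
  allow_force_pushes: false,
  allow_deletions: false,
  required_linear_history: true,
  required_pull_request_reviews: { required_approving_review_count: 1, dismiss_stale_reviews: true },
  required_status_checks: { strict: true, contexts: ['ci/test'] }
});

// ruleset.conditions.ref_name.include === ['~DEFAULT_BRANCH'] — no literal branch name,
// which is why this works on an empty repo.
// ruleset.rules contains: deletion, non_fast_forward, required_linear_history,
// pull_request (1 approval, stale reviews dismissed), required_status_checks (strict, ci/test).
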
+
+/**
+ * Idempotently ensure a repo ruleset matching `desired` exists.
+ *
+ * @returns {Promise<{action: 'created'|'updated'|'unchanged', id?: number}>}
+ */
+export async function ensureRepositoryRuleset(octokit, owner, repo, desired) {
+  getLogger().info(`Ensuring repo ruleset "${desired.name}" on ${owner}/${repo}`);
+
+  const existing = await octokit.paginate('GET /repos/{owner}/{repo}/rulesets', {
+    owner,
+    repo,
+    per_page: 100
+  });
+
+  const found = existing.find((r) => r.name === desired.name);
+
+  if (!found) {
+    const created = await octokit.request('POST /repos/{owner}/{repo}/rulesets', {
+      owner,
+      repo,
+      ...desired
+    });
+    getLogger().info(`✅ Created ruleset "${desired.name}" on ${owner}/${repo}`);
+    return { action: 'created', id: created.data?.id };
+  }
+
+  // List endpoint omits rules; fetch full ruleset to compare.
+  const full = await octokit.request('GET /repos/{owner}/{repo}/rulesets/{ruleset_id}', {
+    owner,
+    repo,
+    ruleset_id: found.id
+  });
+
+  if (rulesetsMatch(full.data, desired)) {
+    return { action: 'unchanged', id: found.id };
+  }
+
+  await octokit.request('PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}', {
+    owner,
+    repo,
+    ruleset_id: found.id,
+    ...desired
+  });
+  getLogger().info(`✅ Updated ruleset "${desired.name}" on ${owner}/${repo}`);
+  return { action: 'updated', id: found.id };
+}
+
+/**
+ * Apply a ruleset derived from the legacy branch_protection config.
+ * Throws when rulesets are unavailable (some plans / repos still don't expose
+ * the rulesets API); the caller — applyBranchEnforcement in repository.js —
+ * decides whether to fall back to legacy branch protection on 404/422.
+ *
+ * Returns the action taken so callers can log meaningfully.
+ */
+export async function applyRulesetFromBranchProtection(octokit, owner, repo, bpConfig, options = {}) {
+  const { name = TEMPER_RULESET_NAME } = options;
+  const desired = translateBranchProtectionToRuleset(bpConfig, name);
+  return ensureRepositoryRuleset(octokit, owner, repo, desired);
+}
diff --git a/src/scheduler.js b/src/scheduler.js
index d845d6b..7977bcb 100644
--- a/src/scheduler.js
+++ b/src/scheduler.js
@@ -5,7 +5,15 @@ import { getLogger } from './logger.js';
 
-function createScheduler(store, octokit, options = {}) {
+/**
+ * @param {object} store - task store with claimNextTask / completeTask / failTask
+ * @param {object|Function} octokitOrFactory - either an octokit instance, or
+ *   an async () => octokit factory called once per tick. Probot installation
+ *   tokens expire after 1 hour; use the factory form in production so each
+ *   tick gets a fresh, installation-scoped client.
+ * @param {object} [options]
+ */
+function createScheduler(store, octokitOrFactory, options = {}) {
   const {
     intervalMs = 5 * 60 * 1000,
     maxTasksPerTick = 5,
@@ -16,7 +24,14 @@ function createScheduler(store, octokit, options = {}) {
   let timer = null;
   let running = false;
 
-  async function checkRateLimit() {
+  async function getOctokit() {
+    if (typeof octokitOrFactory === 'function') {
+      return await octokitOrFactory();
+    }
+    return octokitOrFactory;
+  }
+
+  async function checkRateLimit(octokit) {
     try {
       const response = await octokit.request('GET /rate_limit');
       const { remaining } = response.data.resources.core;
@@ -39,7 +54,15 @@ function createScheduler(store, octokit, options = {}) {
     running = true;
 
     try {
-      const rateLimitOk = await checkRateLimit();
+      let octokit;
+      try {
+        octokit = await getOctokit();
+      } catch (err) {
+        getLogger().warn({ err: err.message }, 'Scheduler: could not obtain octokit, skipping tick');
+        return;
+      }
+
+      const rateLimitOk = await checkRateLimit(octokit);
       if (!rateLimitOk) return;
 
       let processed = 0;
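
Reviewer note — how the factory form might be wired from registerApp. This is a sketch: the `startScheduler` helper, the installation-ID lookup, and the assumption that the scheduler exposes a `start()` method are mine, not code from this PR; `app.auth(installationId)` is Probot's standard way to get an installation-scoped Octokit.

// Sketch only, not part of the diff.
import { createScheduler } from './src/scheduler.js';

function startScheduler(app, taskStore, installationId) {
  // The factory is called once per tick, so each run gets a fresh installation
  // token instead of reusing one that expires after an hour.
  const scheduler = createScheduler(taskStore, () => app.auth(installationId), {
    intervalMs: 60 * 1000,
    maxTasksPerTick: 5
  });
  scheduler.start();
  return scheduler;
}
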
diff --git a/src/schema.js b/src/schema.js
index 59a4c56..1df4b08 100644
--- a/src/schema.js
+++ b/src/schema.js
@@ -123,6 +123,40 @@ export function validateConfig(config) {
     }
   }
 
+  if (config.rulesets !== undefined) {
+    const rs = config.rulesets;
+    if (typeof rs !== 'object' || rs === null) {
+      errors.push('rulesets must be an object');
+    } else {
+      if (rs.enabled !== undefined && typeof rs.enabled !== 'boolean') {
+        errors.push('rulesets.enabled must be a boolean');
+      }
+      if (rs.fall_back_to_legacy !== undefined && typeof rs.fall_back_to_legacy !== 'boolean') {
+        errors.push('rulesets.fall_back_to_legacy must be a boolean');
+      }
+      if (rs.name !== undefined && (typeof rs.name !== 'string' || rs.name.length === 0)) {
+        errors.push('rulesets.name must be a non-empty string');
+      }
+    }
+  }
+
+  if (config.controller_repo !== undefined) {
+    const cr = config.controller_repo;
+    if (typeof cr !== 'object' || cr === null) {
+      errors.push('controller_repo must be an object');
+    } else {
+      if (cr.enabled !== undefined && typeof cr.enabled !== 'boolean') {
+        errors.push('controller_repo.enabled must be a boolean');
+      }
+      if (cr.enabled && (typeof cr.repo !== 'string' || !cr.repo.includes('/'))) {
+        errors.push('controller_repo.repo must be "owner/repo" when enabled');
+      }
+      if (cr.label !== undefined && typeof cr.label !== 'string') {
+        errors.push('controller_repo.label must be a string');
+      }
+    }
+  }
+
   if (config.ai_review?.enabled) {
     if (config.ai_review.endpoint !== undefined && typeof config.ai_review.endpoint !== 'string') {
       errors.push('ai_review.endpoint must be a string');