diff --git a/.fork-features/manifest.json b/.fork-features/manifest.json index 267b895fd0f2..b96640519777 100644 --- a/.fork-features/manifest.json +++ b/.fork-features/manifest.json @@ -260,6 +260,162 @@ "symlink resolution" ] } + }, + "taskctl": { + "status": "active", + "description": "Autonomous pipeline for managing task graphs with dependencies, priority ordering, conflict detection, and adversarial workflow. Phase 1 includes types, store, scheduler, validation, and CLI tool. Phase 2 adds Composer agent for automatic issue decomposition, taskctl start command for spawning full pipeline. Phase 3 adds Pulse execution engine with heartbeat, scheduling, singleton lock, crash recovery, and timeout detection. Phase 3a adds adversarial workflow integration with pipeline stages (developing, reviewing, adversarial, steering), verdict tracking, and auto-retry logic. Phase 3b adds agent.ts integration for spawning adversarial sessions (developer-pipeline and adversarial-pipeline agent definitions). Phase 3c adds PM control commands: status, stop, resume, override, retry, inspect for manual pipeline intervention. Phase 4 adds self-healing steering agent (evaluates progress every 15 min, sends guidance or replaces developer), improved adversarial timeout detection (60 min), and session message retrieval for activity summaries. 
Phase 5 updates AGENTS.md with taskctl documentation section describing pipeline workflow and PM commands.", + "issue": "https://github.com/randomm/opencode/issues/207", + "newFiles": [ + "packages/opencode/src/tasks/types.ts", + "packages/opencode/src/tasks/store.ts", + "packages/opencode/src/tasks/scheduler.ts", + "packages/opencode/src/tasks/validation.ts", + "packages/opencode/src/tasks/composer.ts", + "packages/opencode/src/tasks/pulse.ts", + "packages/opencode/src/tasks/tool.ts", + "packages/opencode/src/tasks/index.ts", + "packages/opencode/src/tasks/LIMITATIONS.md", + "packages/opencode/test/tasks/store.test.ts", + "packages/opencode/test/tasks/scheduler.test.ts", + "packages/opencode/test/tasks/validation.test.ts", + "packages/opencode/test/tasks/composer.test.ts", + "packages/opencode/test/tasks/pulse.test.ts", + "packages/opencode/test/tasks/commands.test.ts", + "packages/opencode/test/tasks/steering.test.ts" + ], + "modifiedFiles": ["packages/opencode/src/tool/registry.ts", "packages/opencode/src/agent/agent.ts", "packages/opencode/src/tasks/pulse.ts", "AGENTS.md"], + "deletedFiles": [], + "criticalCode": [ + "sanitizeProjectId", + "sanitizeTaskId", + "getSafeTaskPath", + "IMMUTABLE_FIELDS", + "validateTaskUpdates", + "const TASKS_DIR = \"tasks\"", + "getTasksDir", + "atomicWrite", + "Store.createTask", + "Store.getTask", + "Store.updateTask", + "Store.listTasks", + "Store.updateIndex", + "Store.getIndex", + "Store.logActivity", + "Store.createJob", + "Store.getJob", + "Store.updateJob", + "Store.addComment", + "Store.addPipelineEvent", + "Store.findJobByIssue", + "Scheduler.getNextTasks", + "Validation.validateGraph", + "Validation.validateGraphFromMap", + "startPulse", + "tickStartTime", + "resurrectionScan", + "isSessionAlive", + "lock file management", + "writeLockFile", + "readLockPid", + "removeLockFile", + "isPidAlive", + "checkTimeouts", + "TIMEOUT_MS", + "checkCompletion", + "gracefulStop", + "heartbeatActiveAgents", + "Pulse tick overlap 
guard", + "Atomic lock file write", + "Crash recovery", + "Worker session spawn", + "TaskctlTool", + "taskctl create", + "taskctl list", + "taskctl get", + "taskctl update", + "taskctl close", + "taskctl comment", + "taskctl depends", + "taskctl split", + "taskctl next", + "taskctl validate", + "taskctl start", + "taskctl start-skip", + "taskctl status", + "taskctl stop", + "taskctl resume", + "taskctl inspect", + "taskctl override", + "taskctl retry", + "command enum (stop, resume, inspect, override, retry)", + "overrideMode parameter (skip, commit-as-is)", + "Terminal state rejection (complete/failed/stopped)", + "Pulse-zombie detection in stop command", + "Job stopping flag validation", + "Session cancellation and worktree cleanup", + "composer agent", + "runComposer", + "Circular dependency detection", + "Missing dependency detection", + "Acceptance criteria warning", + "Conflict label warning", + "Priority ordering (0=highest)", + "Dependency filtering", + "Self-dependency prevention", + "Duplicate dependency check", + "Status validation (only split open, prevent re-close)", + "MAX_COMMENT_LENGTH", + "maxAttempts (1000)", + "append-only.ndjson", + "activity.ndjson", + "index.json", + "slug generation", + "conflict detection", + "steering agent", + "checkSteering", + "spawnSteering", + "getRecentActivity", + "Session.messages()", + "15-minute steering evaluation", + "continue/steer/replace", + "ADVERSARIAL_TIMEOUT_MS = 60 * 60 * 1000", + "adversarial-running stage timeout", + "Steering guidance via SessionPrompt", + "Developer replacement logic" + ], + "tests": [ + "packages/opencode/test/tasks/store.test.ts", + "packages/opencode/test/tasks/scheduler.test.ts", + "packages/opencode/test/tasks/validation.test.ts", + "packages/opencode/test/tasks/composer.test.ts", + "packages/opencode/test/tasks/pulse.test.ts", + "packages/opencode/test/tasks/commands.test.ts", + "packages/opencode/test/tasks/steering.test.ts" + ], + "upstreamTracking": { + "absorptionSignals": 
[ + "TASKS_DIR", + "taskctl", + "Composer", + "runComposer", + "Scheduler.getNextTasks", + "Validation.validateGraph", + "Validation.validateGraphFromMap", + "sanitizeProjectId", + "sanitizeTaskId", + "IMMUTABLE_FIELDS", + "task.*pipeline.*adversarial", + "Circular dependency", + "conflict labels", + "module:.*file:", + "activity.ndjson", + "atomicWrite", + "slug.*generation", + "LIMITATIONS.md", + "TOCTOU", + "concurrent operation" + ] + } } } -} \ No newline at end of file +} diff --git a/AGENTS.md b/AGENTS.md index 3bd1bdcf7045..03997736ba41 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -270,6 +270,44 @@ bun test:e2e:local # Run Playwright E2E tests --- +## taskctl: Autonomous Task Pipeline + +`taskctl` is a built-in tool that automates the development loop for GitHub issues. +PM calls `taskctl start <issue-number>` and the pipeline handles decomposition, development, review, and committing automatically. + +### How it works + +1. **Composer** — decomposes the GitHub issue into a dependency graph of tasks +2. **Pulse** — a 5-second deterministic loop that schedules developer agents, monitors progress, and processes review verdicts +3. **developer-pipeline** — implements tasks with TDD, signals completion via `taskctl comment` +4. **adversarial-pipeline** — reviews code and writes structured verdict via `taskctl verdict` +5. **Steering** — assessed every 15 minutes: sends guidance or replaces stuck developers +6. **@ops** — commits approved work to the feature branch + +PM is only interrupted when a task fails after 3 adversarial cycles, or when all tasks complete. 
+ +### PM workflow with taskctl + +```bash +taskctl start <issue-number>  # Decompose issue and start pipeline +taskctl status  # Live dashboard — tasks, states, Pulse health +taskctl inspect <task-id>  # Full history of a specific task +taskctl stop  # Gracefully halt pipeline (work preserved) +taskctl resume  # Resume a stopped or crashed pipeline +taskctl retry <task-id>  # Reset a stuck task for fresh attempt +taskctl override <task-id> --skip  # Skip a task, unblock dependents +taskctl override <task-id> --commit-as-is  # Commit despite issues (PM responsibility) +``` + +### Source locations + +- Tool commands: `packages/opencode/src/tasks/tool.ts` +- Pipeline engine: `packages/opencode/src/tasks/pulse.ts` +- Agent definitions: `packages/opencode/src/agent/agent.ts` +- Design document: `lievo/plan-v2.md` (git-ignored, local only) + +--- + ## Build & Install Binaries ### Main opencode TUI diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts index daf888de3c32..a90de6a93925 100644 --- a/packages/opencode/src/agent/agent.ts +++ b/packages/opencode/src/agent/agent.ts @@ -199,6 +199,195 @@ export namespace Agent { ), prompt: PROMPT_SUMMARY, }, + "composer": { + name: "composer", + mode: "subagent", + hidden: true, + native: true, + options: {}, + permission: PermissionNext.merge( + defaults, + PermissionNext.fromConfig({ + "*": "deny", + }), + user, + ), + prompt: `You are the Composer agent for the taskctl autonomous development pipeline. + +Your job is to read a GitHub issue and decompose it into a structured, dependency-ordered list of implementation tasks. + +RESPONSE FORMAT — you must respond with ONLY valid JSON, nothing else: + +If the spec is too vague or missing acceptance criteria: +{ + "status": "needs_clarification", + "questions": [ + { "id": 1, "question": "What specific behaviour should change?" 
} + ] +} + +If the spec is clear enough to decompose: +{ + "status": "ready", + "tasks": [ + { + "title": "Add OAuth2 config schema", + "description": "Add zod schema for OAuth2 config to src/config/config.ts", + "acceptance_criteria": "Schema validates clientId, clientSecret, redirectUri. Tests pass.", + "task_type": "implementation", + "labels": ["module:config", "file:src/config/config.ts"], + "depends_on": [], + "priority": 0 + } + ] +} + +RULES FOR GOOD TASK DECOMPOSITION: +1. Each task must be completable by one developer in a single session +2. Every task MUST have non-empty acceptance_criteria +3. Every task MUST have at least one label with "module:" or "file:" prefix +4. Dependencies: tasks that others depend on have lower priority numbers (0 = highest priority) +5. Tasks with no shared module:/file: labels can run in parallel +6. Do not create tasks for work not explicitly required by the issue +7. Validate your own output: check that no depends_on creates a cycle before responding +8. Respond with ONLY the JSON object — no markdown, no explanation, no code blocks`, + }, + "developer-pipeline": { + name: "developer-pipeline", + description: "Developer agent working as part of an autonomous pipeline.", + mode: "subagent", + native: true, + permission: PermissionNext.merge( + defaults, + PermissionNext.fromConfig({ + task: "deny", + }), + user, + ), + options: {}, + prompt: `You are a developer agent working as part of an autonomous pipeline. + +Your job is to implement the assigned task with TDD discipline. + +## Your task +You will receive a task description with: +- Title: what to build +- Description: full context and requirements +- Acceptance criteria: what must be true when done + +## Workflow +1. Read the codebase to understand context (check remory, read relevant files) +2. Write failing tests first (TDD) +3. Write minimal code to make tests pass +4. Refactor for clarity following AGENTS.md style guide +5. 
Run \`bun run typecheck && bun test\` — fix all errors +6. When done: \`taskctl comment "Implementation complete: "\` + +## Rules +- ONLY implement what is explicitly in the task description +- No TODO/FIXME/HACK comments (create a GitHub issue instead) +- No @ts-ignore or as any +- Follow style guide: single-word variable names, early returns, no else, functional array methods +- Do NOT spawn any adversarial agent — the pipeline handles this automatically +- Do NOT commit or push — the pipeline handles this automatically +- Do NOT write any documentation files (PLAN.md, ANALYSIS.md, etc.) + +## taskctl commands available to you +- \`taskctl comment ""\` — log progress or signal completion +- \`taskctl split \` — if task is too large, split it (creates two sub-tasks) +- \`taskctl depends --on \` — if you discover an undeclared dependency + +You may NOT call: taskctl start, taskctl stop, taskctl verdict, taskctl override, taskctl retry, taskctl resume`, + }, + "adversarial-pipeline": { + name: "adversarial-pipeline", + description: "Adversarial code reviewer in an autonomous pipeline.", + mode: "subagent", + native: true, + permission: PermissionNext.merge( + defaults, + PermissionNext.fromConfig({ + "*": "deny", + bash: "allow", + }), + user, + ), + options: {}, + prompt: `You are an adversarial code reviewer in an autonomous pipeline. + +Your ONLY job is to review code changes in an assigned worktree and record a structured verdict. + +## What you receive +- Task title, description, and acceptance criteria +- Path to the worktree containing the implementation +- The task ID + +## Your review process +1. Read the implementation files in the worktree +2. Check: Does it meet the acceptance criteria? +3. Check: Are there bugs, security issues, or quality problems? +4. Check: Do the tests actually test meaningful behavior (not just call coverage)? +5. Check: Does typecheck pass? 
(Run \`bun run typecheck\` in the worktree) + +## Recording your verdict — MANDATORY +You MUST call taskctl verdict to record your finding. Never write a text response instead. + +**If the code is good:** +\`taskctl verdict --verdict APPROVED\` + +**If there are fixable issues:** +\`taskctl verdict --verdict ISSUES_FOUND --summary "Brief summary" --issues '[{"location":"src/foo.ts:42","severity":"HIGH","fix":"Add null check before calling user.profile"}]'\` + +**If there are critical/blocking issues:** +\`taskctl verdict --verdict CRITICAL_ISSUES_FOUND --summary "Brief summary" --issues '[...]'\` + +## Severity guide +- CRITICAL: Security vulnerability, data loss risk, or complete functional failure +- HIGH: Bug that will cause incorrect behavior in normal use +- MEDIUM: Code quality issue that should be fixed before merging +- LOW: Style or minor improvement suggestion + +## Rules +- You may ONLY call: taskctl verdict +- Do NOT spawn any agents +- Do NOT commit or push + - Be specific: every issue must have a location (file:line) and a concrete fix suggestion`, + }, + steering: { + name: "steering", + description: "Steering agent in an autonomous development pipeline.", + mode: "subagent", + native: true, + hidden: true, + permission: PermissionNext.merge( + defaults, + PermissionNext.fromConfig({ + "*": "deny", + }), + user, + ), + options: {}, + // Uses cheapest available model — configure via agent config if needed + prompt: `You are a steering agent in an autonomous development pipeline. Your job is to assess whether a developer agent is making meaningful progress on a task. 
+ +You will receive: +- The task title, description, and acceptance criteria +- A summary of recent developer activity (last session turns) + +Respond with EXACTLY one of these JSON objects and nothing else: + +{ "action": "continue", "message": null } +— Use when the developer is making steady progress: writing code, running tests, moving forward + +{ "action": "steer", "message": "specific actionable guidance here" } +— Use when the developer seems confused, going in circles, or heading the wrong direction +— The message must be specific and actionable (e.g. "Focus on fixing the null check at src/api.ts:42, not rewriting the whole module") + +{ "action": "replace", "message": "reason for replacement" } +— Use ONLY when the developer has made zero meaningful progress for the entire session or is clearly broken (e.g. repeating the same failed command) + +Be conservative: prefer "continue" when in doubt. Only "replace" when truly stuck.`, + }, } for (const [key, value] of Object.entries(cfg.agent ?? 
{})) { diff --git a/packages/opencode/src/session/async-tasks.ts b/packages/opencode/src/session/async-tasks.ts index 2eb5c86a1334..f3a825ffca58 100644 --- a/packages/opencode/src/session/async-tasks.ts +++ b/packages/opencode/src/session/async-tasks.ts @@ -502,7 +502,7 @@ export async function cancelBackgroundTask(id: string, requestingSessionID?: str const metadata = pendingTaskMetadata.get(id) // If requestingSessionID provided, verify authorization - if (requestingSessionID && metadata && metadata.session_id !== requestingSessionID) { + if (requestingSessionID && metadata && metadata.parent_session_id !== requestingSessionID) { return false } @@ -579,7 +579,7 @@ export async function tryCancel(id: string, sessionID: string): Promise } + | { status: "ready"; tasks: z.infer[] } + +type SpawnComposerFn = (prompt: string) => Promise + +async function defaultSpawnComposerFn( + sessionID: string, + prompt: string, + timeoutMs: number = 300000 +): Promise { + const parentSession = await Session.get(sessionID) + if (!parentSession?.directory) throw new Error("Parent session not found or has no directory") + + const session = await Session.createNext({ + parentID: sessionID, + directory: parentSession.directory, + title: "Composer task decomposition", + permission: [], + }) + + await SessionPrompt.prompt({ sessionID: session.id, agent: "composer", parts: [{ type: "text", text: prompt }] }) + + const timeout = new Promise((resolve) => setTimeout(() => resolve(undefined), timeoutMs)) + + const messages = Promise.resolve().then(async () => { + let lastAssistantText: string | undefined + for await (const msg of MessageV2.stream(session.id)) { + if (msg.info.role === "assistant") { + const textPart = msg.parts.find((p) => p.type === "text" && !p.synthetic) + if (textPart && "text" in textPart) lastAssistantText = textPart.text + } + } + return lastAssistantText + }) + + return await Promise.race([messages, timeout]) +} + +export async function runComposer( + params: { + 
jobId: string + projectId: string + pmSessionId: string + issueNumber: number + issueTitle: string + issueBody: string + }, + spawnFn?: (prompt: string) => Promise, +): Promise<{ status: "needs_clarification"; questions: Array<{ id: number; question: string }> } | { status: "ready"; taskCount: number }> { + const { jobId, projectId, pmSessionId, issueNumber, issueTitle, issueBody } = params + const spawn = spawnFn ?? ((prompt: string) => defaultSpawnComposerFn(pmSessionId, prompt)) + + const composerPrompt = `Issue #${issueNumber}: ${issueTitle} + +${issueBody} + +Decompose this issue into a dependency-ordered list of implementation tasks. Return ONLY valid JSON.` + + const output = await spawn(composerPrompt) + if (!output) throw new Error("Composer agent timed out or returned no response") + + let parsed: unknown + try { + parsed = JSON.parse(output) + } catch { + throw new Error("Composer agent returned invalid JSON") + } + + if (!parsed || typeof parsed !== "object" || Array.isArray(parsed) || parsed === null) { + throw new Error("Invalid output: expected a JSON object with 'status' field") + } + + const parsedObj = parsed as Record + const status = parsedObj.status as string | null + + if (status === "needs_clarification" && Array.isArray(parsedObj.questions)) { + return { status: "needs_clarification", questions: parsedObj.questions as Array<{ id: number; question: string }> } + } + + if (status !== "ready" || !Array.isArray(parsedObj.tasks)) { + throw new Error(`Invalid composer output: expected status "ready" with tasks array`) + } + + const tasksArray = parsedObj.tasks as unknown[] + if (tasksArray.length === 0) { + throw new Error("Composer returned no tasks") + } + + const proposedTasks: z.infer[] = [] + const validationErrors: string[] = [] + + for (const [index, task] of tasksArray.entries()) { + const result = ComposerTasksSchema.safeParse(task) + if (result.success) { + proposedTasks.push({ ...result.data, depends_on: 
result.data.depends_on.filter((d) => d) as string[] }) + } else { + const errorDetails = result.error.issues.map((e) => { + const path = e.path.map((p) => String(p)).join(".") + return `${path} ${e.message}` + }).join(", ") + validationErrors.push(`Task ${index + 1}: ${errorDetails}`) + } + } + + if (validationErrors.length > 0) { + throw new Error(`Composer task validation failed:\n${validationErrors.join("\n")}`) + } + + const now = new Date().toISOString() + + const allTaskTitles = new Set() + const slugifiedToTitleMap = new Map() + + for (const task of proposedTasks) { + allTaskTitles.add(task.title) + const slug = slugify(task.title) + slugifiedToTitleMap.set(slug, task.title) + } + + for (const task of proposedTasks) { + for (const dep of task.depends_on) { + if (!allTaskTitles.has(dep) && !slugifiedToTitleMap.has(dep)) { + throw new Error(`Task "${task.title}" depends on "${dep}" which is not defined in this batch. All dependencies must be tasks in the same decomposed set.`) + } + } + } + + const tasksMap = new Map() + for (const task of proposedTasks) { + const taskSlug = slugify(task.title) + const depSlugs = task.depends_on.map((dep) => { + if (allTaskTitles.has(dep)) { + return slugify(dep) + } + return dep + }).filter((slug) => slug != null) as string[] + tasksMap.set(taskSlug, { depends_on: depSlugs }) + } + + const proposedErrors = Validation.validateGraphFromMap(tasksMap) + + if (proposedErrors.length > 0) { + return { + status: "needs_clarification", + questions: [ + { + id: 1, + question: `Invalid task graph: ${proposedErrors.join("; ")}. 
Please revise the task decomposition.`, + }, + ], + } + } + + const createdTaskIds: string[] = [] + + try { + for (const task of proposedTasks) { + const slug = await generateUniqueSlug(projectId, task.title) + + await Store.createTask(projectId, { + id: slug, + title: task.title, + description: task.description, + acceptance_criteria: task.acceptance_criteria, + parent_issue: issueNumber, + job_id: jobId, + status: "open", + priority: task.priority, + task_type: task.task_type, + labels: task.labels, + depends_on: task.depends_on.map((dep) => { + const depBaseSlug = slugify(dep) + const existingSlug = createdTaskIds.find((id) => + id === depBaseSlug || id.startsWith(`${depBaseSlug}-`) + ) + return existingSlug ?? depBaseSlug + }).filter((slug) => slug != null) as string[], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + }) + createdTaskIds.push(slug) + } + } catch (error) { + for (const createdId of createdTaskIds) { + await Store.updateTask(projectId, createdId, { status: "closed", close_reason: "rollback: composer failed" }).catch(() => {}) + } + throw error + } + + return { status: "ready", taskCount: createdTaskIds.length } +} \ No newline at end of file diff --git a/packages/opencode/src/tasks/index.ts b/packages/opencode/src/tasks/index.ts new file mode 100644 index 000000000000..596e43ce7563 --- /dev/null +++ b/packages/opencode/src/tasks/index.ts @@ -0,0 +1,5 @@ +export * from "./types" +export * from "./store" +export * from "./scheduler" +export * from "./validation" +export * from "./tool" \ No newline at end of file diff --git a/packages/opencode/src/tasks/pulse.ts b/packages/opencode/src/tasks/pulse.ts new file mode 100644 index 000000000000..2184a0e82faf --- /dev/null +++ b/packages/opencode/src/tasks/pulse.ts 
@@ -0,0 +1,1034 @@ +import fs from "fs/promises" +import path from "path" +import { platform } from "os" +import { Global } from "../global" +import { Store } from "./store" +import { Scheduler } from "./scheduler" +import { Session } from "../session" +import { SessionPrompt } from "../session/prompt" +import { Bus } from "../bus" +import { BackgroundTaskEvent } from "../session/async-tasks" +import { Worktree } from "../worktree" +import { Log } from "../util/log" +import { MessageV2 } from "../session/message-v2" +import type { Task, Job, AdversarialVerdict } from "./types" + +const log = Log.create({ service: "taskctl.pulse" }) +const tickLock = new Map>() + +const TIMEOUT_MS = 30 * 60 * 1000 + +export function sanitizeWorktree(worktree: string | null | undefined): string | null { + if (!worktree || typeof worktree !== "string") return null + if (worktree.includes("..")) return null + return path.resolve(worktree) +} + +export function startPulse(jobId: string, projectId: string, pmSessionId: string): ReturnType { + const startJob = async (): Promise => { + const existingPid = await readLockPid(jobId, projectId).catch(() => null) + if (existingPid && isPidAlive(existingPid)) { + log.error("job already running", { jobId, existingPid }) + return + } + if (existingPid && !isPidAlive(existingPid)) { + log.warn("overwriting stale lock file", { jobId, oldPid: existingPid }) + } + writeLockFile(jobId, projectId, process.pid).catch((e) => log.error("failed to write lock file", { jobId, error: String(e) })) + } + + startJob() + + const interval = setInterval(async () => { + const prevTick = tickLock.get(jobId) + const done = new Promise((resolve) => { + resolve() + }) + tickLock.set(jobId, done) + + if (prevTick) { + try { + await prevTick + return + } catch { + return + } + } + + try { + const job = await Store.getJob(projectId, jobId) + if (!job) { + clearInterval(interval) + tickLock.delete(jobId) + return + } + if (job.stopping) { + await gracefulStop(jobId, 
projectId, interval) + return + } + await heartbeatActiveAgents(jobId, projectId) + await processAdversarialVerdicts(jobId, projectId, pmSessionId) + await checkTimeouts(jobId, projectId) + await checkSteering(jobId, projectId, pmSessionId) + await scheduleReadyTasks(jobId, projectId, pmSessionId) + await checkCompletion(jobId, projectId, pmSessionId, interval) + } catch (e) { + log.error("tick failed with unrecoverable error", { jobId, error: String(e) }) + } finally { + tickLock.delete(jobId) + } + }, 5_000) + + return interval +} + +export async function resurrectionScan(jobId: string, projectId: string): Promise { + const tasks = await Store.listTasks(projectId) + const jobTasks = tasks.filter((t) => t.job_id === jobId) + + for (const task of jobTasks) { + if (task.status === "in_progress" || task.status === "review") { + const sessionAlive = task.assignee ? await isSessionAlive(task.assignee) : false + if (!sessionAlive) { + let worktreeRemoved = false + const safeWorktree = sanitizeWorktree(task.worktree) + if (safeWorktree) { + try { + await Worktree.remove({ directory: safeWorktree }) + worktreeRemoved = true + log.info("removed worktree during resurrection", { taskId: task.id, worktree: safeWorktree }) + } catch (e) { + log.error("failed to remove worktree during resurrection", { taskId: task.id, error: String(e) }) + } + } + + await Store.updateTask(projectId, task.id, { + status: "open", + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + }, true) + + await Store.addComment(projectId, task.id, { + author: "system", + message: worktreeRemoved + ? "Resurrected: agent session not found on Pulse restart. Worktree cleaned up." 
+ : "Resurrected: agent session not found on Pulse restart.", + created_at: new Date().toISOString(), + }) + log.info("resurrected task", { taskId: task.id, jobId, worktreeRemoved }) + } + } + } +} + +async function isSessionAlive(sessionId: string): Promise { + try { + const session = await Session.get(sessionId) + return session !== null && session !== undefined + } catch (e) { + const errorStr = String(e).toLowerCase() + const isNotFound = errorStr.includes("not found") || errorStr.includes("no such") + if (!isNotFound) { + log.error("session alive check failed with unexpected error", { sessionId, error: String(e) }) + } + return false + } +} + +async function lockFilePath(jobId: string, projectId: string): Promise { + const tasksDir = path.join(Global.Path.data, "tasks", projectId) + await fs.mkdir(tasksDir, { recursive: true }) + + const files = await fs.readdir(tasksDir) + for (const file of files) { + if (file.startsWith(`job-${jobId}.lock.tmp.`)) { + await fs.unlink(path.join(tasksDir, file)).catch(() => {}) + } + } + + return path.join(tasksDir, `job-${jobId}.lock`) +} + +async function writeLockFile(jobId: string, projectId: string, pid: number): Promise { + const lockPath = await lockFilePath(jobId, projectId) + const tmpPath = `${lockPath}.tmp.${process.pid}` + await Bun.write(tmpPath, String(pid)) + await fs.rename(tmpPath, lockPath) +} + +async function removeLockFile(jobId: string, projectId: string): Promise { + const lockPath = await lockFilePath(jobId, projectId) + await fs.unlink(lockPath).catch(() => {}) +} + +async function readLockPid(jobId: string, projectId: string): Promise { + const lockPath = await lockFilePath(jobId, projectId) + const content = await Bun.file(lockPath).text().catch(() => null) + if (!content) return null + const pid = parseInt(content, 10) + if (isNaN(pid)) return null + return pid +} + +function isPidAlive(pid: number): boolean { + if (platform() === "win32") { + try { + const { execSync } = require("child_process") + 
execSync(`tasklist /FI "PID eq ${pid}"`, { stdio: "ignore" }) + return true + } catch { + return false + } + } + try { process.kill(pid, 0); return true } catch { return false } +} + +export { + isPidAlive, + writeLockFile, + removeLockFile, + readLockPid, + processAdversarialVerdicts, + spawnAdversarial, +} + +async function scheduleReadyTasks(jobId: string, projectId: string, pmSessionId: string): Promise { + const job = await Store.getJob(projectId, jobId) + if (!job) return + + const allTasks = await Store.listTasks(projectId) + const jobTasks = allTasks.filter((t) => t.job_id === jobId) + const inProgressCount = jobTasks.filter((t) => t.status === "in_progress").length + + if (inProgressCount >= job.max_workers) return + + const slots = job.max_workers - inProgressCount + const ready = await Scheduler.getNextTasks(projectId, slots) + const toSpawn = ready.filter((t) => t.job_id === jobId) + + for (const task of toSpawn) { + const current = await Store.getTask(projectId, task.id) + if (!current || current.status !== "open") { + log.info("task no longer open, skipping spawn", { taskId: task.id, status: current?.status }) + continue + } + await spawnDeveloper(task, jobId, projectId, pmSessionId) + } + + const reviewingTasks = jobTasks.filter((t) => + t.pipeline.stage === "reviewing" && + !t.pipeline.adversarial_verdict && + !t.assignee && + t.status === "in_progress" + ) + + for (const task of reviewingTasks) { + await spawnAdversarial(task, jobId, projectId, pmSessionId) + } +} + +async function spawnDeveloper(task: Task, jobId: string, projectId: string, pmSessionId: string): Promise { + let worktreeInfo + try { + worktreeInfo = await Worktree.create({ name: task.id }) + } catch (e) { + log.error("failed to create worktree", { taskId: task.id, error: String(e) }) + return + } + + const now = new Date().toISOString() + + const parentSession = await Session.get(pmSessionId).catch(() => null) + if (!parentSession?.directory) { + await Worktree.remove({ directory: 
worktreeInfo.directory }).catch((e) => + log.error("failed to clean up worktree after PM session check failed", { taskId: task.id, error: String(e) }) + ) + log.error("PM session not found", { pmSessionId, taskId: task.id }) + return + } + + let devSession + try { + devSession = await Session.createNext({ + parentID: pmSessionId, + directory: worktreeInfo.directory, + title: `Developer: ${task.title} (developer-pipeline)`, + permission: [], + }) + } catch (e) { + await Worktree.remove({ directory: worktreeInfo.directory }).catch((e) => + log.error("failed to clean up worktree after session creation failed", { taskId: task.id, error: String(e) }) + ) + log.error("failed to create developer session", { taskId: task.id, error: String(e) }) + return + } + + await Store.updateTask(projectId, task.id, { + status: "in_progress", + assignee: devSession.id, + assignee_pid: process.pid, + worktree: worktreeInfo.directory, + branch: worktreeInfo.branch, + }, true) + + const prompt = buildDeveloperPrompt(task) + try { + await SessionPrompt.prompt({ + sessionID: devSession.id, + agent: "developer-pipeline", + parts: [{ type: "text", text: prompt }], + }) + } catch (e) { + log.error("developer session failed to start", { taskId: task.id, sessionId: devSession.id, error: String(e) }) + + try { + SessionPrompt.cancel(devSession.id) + } catch (e: any) { + log.error("failed to cancel orphaned developer session", { sessionId: devSession.id, error: String(e) }) + } + + await Worktree.remove({ directory: worktreeInfo.directory }).catch((e) => + log.error("failed to clean up worktree after developer prompt failed", { taskId: task.id, error: String(e) }) + ) + + await Store.updateTask(projectId, task.id, { + status: "open", + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + }, true) + + await Store.addComment(projectId, task.id, { + author: "system", + message: `Failed to start developer: ${String(e)}`, + created_at: new Date().toISOString(), + }) + } +} + 
// Builds the initial TDD prompt handed to a freshly spawned developer session.
function buildDeveloperPrompt(task: Task): string {
  return `Implement the following task with TDD:

**Title:** ${task.title}

**Description:** ${task.description}

**Acceptance Criteria:** ${task.acceptance_criteria}

Follow these steps:
1. Write failing test(s) for the required behavior
2. Write minimal code to make tests pass
3. Refactor for clarity and maintainability
4. Run all tests to verify nothing broke

Important: Only implement what's explicitly requested. Do not add "helpful" features.`
}

// Refreshes pipeline.last_activity for every in-progress task with an assignee;
// when the developer session has ended, advances the task to the "reviewing"
// stage instead.
async function heartbeatActiveAgents(jobId: string, projectId: string): Promise<void> {
  const allTasks = await Store.listTasks(projectId)
  const jobTasks = allTasks.filter((t) => t.job_id === jobId)
  const now = new Date().toISOString()

  for (const task of jobTasks) {
    if (task.status === "in_progress" && task.assignee) {
      const sessionAlive = await isSessionAlive(task.assignee)
      // Re-read so we merge into the freshest pipeline state, not the stale list copy.
      const updated = await Store.getTask(projectId, task.id)
      if (!updated) continue

      if (!sessionAlive) {
        log.info("developer session ended, transitioning to review stage", { taskId: task.id })
        // FIX: `pipeline` is in IMMUTABLE_FIELDS, so updateTask must be called
        // with allowImmutable=true — without it validateTaskUpdates throws and
        // the developing -> reviewing transition was never persisted.
        await Store.updateTask(projectId, task.id, {
          pipeline: { ...updated.pipeline, stage: "reviewing", last_activity: now }
        }, true)
      } else {
        // FIX: same allowImmutable=true requirement for the heartbeat update.
        await Store.updateTask(projectId, task.id, {
          pipeline: { ...updated.pipeline, last_activity: now }
        }, true)
      }
    }
  }
}

// Two-tier timeout sweep:
//  1) in-progress tasks idle past TIMEOUT_MS (or whose developer session has
//     produced no message for 30 min) are torn down and reopened;
//  2) tasks stuck in "adversarial-running" past 60 min are reset to
//     "reviewing" so the next tick can respawn the reviewer.
async function checkTimeouts(jobId: string, projectId: string): Promise<void> {
  const allTasks = await Store.listTasks(projectId)
  const jobTasks = allTasks.filter((t) => t.job_id === jobId)
  const now = Date.now()
  const ADVERSARIAL_TIMEOUT_MS = 60 * 60 * 1000
  const SESSION_MESSAGE_TIMEOUT_MS = 30 * 60 * 1000

  for (const task of jobTasks) {
    if (task.status === "in_progress") {
      const lastActivity = task.pipeline.last_activity
        ? new Date(task.pipeline.last_activity).getTime()
        : 0

      let timedOut = false
      if (lastActivity > 0 && now - lastActivity > TIMEOUT_MS) {
        timedOut = true
        log.info("task timed out by pipeline.last_activity", { taskId: task.id, lastActivity, now })
      }

      // Secondary signal: the session's own message log. Fetch failure is
      // treated as "no evidence", not a timeout.
      if (!timedOut && task.assignee) {
        const msgs = await Session.messages({ sessionID: task.assignee }).catch(() => [])
        if (msgs.length > 0) {
          const lastMsg = msgs[msgs.length - 1]
          const lastMsgTime = lastMsg.info.time.created
          if (lastMsgTime && now - lastMsgTime > SESSION_MESSAGE_TIMEOUT_MS) {
            timedOut = true
            log.info("task timed out by session message activity", { taskId: task.id, lastMsgTime, now })
          }
        }
      }

      if (timedOut) {
        let worktreeRemoved = false
        if (task.worktree) {
          try {
            const safeWorktree = sanitizeWorktree(task.worktree)
            if (!safeWorktree) {
              log.error("worktree sanitization failed for timed out task", { taskId: task.id, worktree: task.worktree })
            } else {
              await Worktree.remove({ directory: safeWorktree })
              worktreeRemoved = true
            }
          } catch (e) {
            log.error("failed to remove worktree for timed out task", { taskId: task.id, error: String(e) })
          }
        }

        if (task.assignee) {
          try {
            SessionPrompt.cancel(task.assignee)
          } catch (e: any) {
            log.error("failed to cancel session for timed out task", { taskId: task.id, error: String(e) })
          }
        }

        await Store.updateTask(projectId, task.id, {
          status: "open",
          assignee: null,
          assignee_pid: null,
          worktree: null,
          branch: null,
        }, true)

        await Store.addComment(projectId, task.id, {
          author: "system",
          message: worktreeRemoved
            ? `Timed out after 30 minutes with no activity. Worktree cleaned up.`
            : `Timed out after 30 minutes with no activity.`,
          created_at: new Date().toISOString(),
        })
      }
    }

    if (task.status === "in_progress" && task.pipeline.stage === "adversarial-running") {
      const lastActivity = task.pipeline.last_activity
        ? new Date(task.pipeline.last_activity).getTime()
        : 0

      if (lastActivity > 0 && now - lastActivity > ADVERSARIAL_TIMEOUT_MS) {
        log.info("adversarial stage timed out — resetting to reviewing", { taskId: task.id })

        await Store.updateTask(projectId, task.id, {
          pipeline: { ...task.pipeline, stage: "reviewing" }
        }, true)

        await Store.addComment(projectId, task.id, {
          author: "system",
          message: "Adversarial agent timed out after 60 minutes. Will retry on next Pulse tick.",
          created_at: new Date().toISOString(),
        })
      }
    }
  }
}

// Consumes recorded adversarial verdicts for tasks in "review": APPROVED goes
// to commit, otherwise the developer is respawned with feedback, escalating to
// the PM after 3 failed attempts. The stored verdict is cleared up-front so a
// crash mid-processing cannot replay it.
async function processAdversarialVerdicts(jobId: string, projectId: string, pmSessionId: string): Promise<void> {
  const allTasks = await Store.listTasks(projectId)
  const jobTasks = allTasks.filter((t) => t.job_id === jobId)

  for (const task of jobTasks) {
    if (task.status !== "review") continue
    if (!task.pipeline.adversarial_verdict) continue

    const verdict = task.pipeline.adversarial_verdict

    // Clear verdict immediately to prevent double-processing
    await Store.updateTask(projectId, task.id, {
      pipeline: { ...task.pipeline, adversarial_verdict: null, last_activity: new Date().toISOString() }
    }, true)

    if (verdict.verdict === "APPROVED") {
      await commitTask(task, projectId, pmSessionId)
    } else {
      const newAttempt = (task.pipeline.attempt || 0) + 1
      if (newAttempt >= 3) {
        await escalateToPM(task, jobId, projectId, pmSessionId)
      } else {
        await respawnDeveloper(task, jobId, projectId, pmSessionId, newAttempt, verdict)
      }
    }
  }
}

// Commits an approved task's worktree via a short-lived @ops session, then
// closes the task. Any failure is routed through escalateCommitFailure.
// (Function continues into the next chunk.)
async function commitTask(task: Task, projectId: string, pmSessionId: string): Promise<void> {
  const parentSession = await Session.get(pmSessionId).catch(() => null)
  if (!parentSession?.directory) {
    log.error("PM session not found for commit", { taskId: task.id })
    await escalateCommitFailure(task, projectId, pmSessionId, "PM session not found")
    return
  }

  if (!task.worktree) {
    log.error("Task has no worktree for commit", { taskId: task.id })
    await
    escalateCommitFailure(task, projectId, pmSessionId, "No worktree available")
    return
  }

  let opsSession
  try {
    opsSession = await Session.createNext({
      parentID: pmSessionId,
      directory: task.worktree,
      title: `@ops commit: ${task.title}`,
      permission: [],
    })
  } catch (e) {
    log.error("failed to create @ops session for commit", { taskId: task.id, error: String(e) })
    await escalateCommitFailure(task, projectId, pmSessionId, String(e))
    return
  }

  // NOTE(review): task.title is interpolated inside double quotes in the git
  // command the @ops agent is told to run — a title containing `"` or shell
  // metacharacters could break or alter that command. Consider escaping.
  const commitMsg = `feat(taskctl): ${task.title} (#${task.parent_issue})`
  const opsPrompt = `Commit all changes in the current directory.
Commit message: "${commitMsg}"
Do NOT push to remote. Only commit locally.
Run: git add -A && git commit -m "${commitMsg}"
If there is nothing to commit, that is fine — report success.`

  try {
    await SessionPrompt.prompt({
      sessionID: opsSession.id,
      agent: "ops",
      parts: [{ type: "text", text: opsPrompt }],
    })
  } catch (e) {
    log.error("@ops commit prompt failed", { taskId: task.id, error: String(e) })
    await escalateCommitFailure(task, projectId, pmSessionId, `Commit prompt failed: ${String(e)}`)
    return
  }

  // Poll until the ops session ends (session death == work finished), up to 5 min.
  const maxWait = 5 * 60 * 1000
  const pollInterval = 2000
  const start = Date.now()
  let opsComplete = false

  while (Date.now() - start < maxWait) {
    await new Promise(r => setTimeout(r, pollInterval))
    const alive = await isSessionAlive(opsSession.id)
    if (!alive) { opsComplete = true; break }
  }

  if (!opsComplete) {
    log.error("@ops commit timed out", { taskId: task.id })
    try {
      SessionPrompt.cancel(opsSession.id)
    } catch (e: any) {
      log.error("failed to cancel timed-out ops session", { sessionId: opsSession.id, error: String(e) })
    }
    await escalateCommitFailure(task, projectId, pmSessionId, "Commit timed out after 5 minutes")
    return
  }

  // Worktree is removed best-effort; the commit already lives on the branch.
  if (task.worktree) {
    const safeWorktree = sanitizeWorktree(task.worktree)
    if (safeWorktree) {
      await Worktree.remove({ directory: safeWorktree }).catch(e =>
        log.error("failed to remove worktree after commit", { taskId: task.id, error: String(e) })
      )
    }
  }

  await Store.updateTask(projectId, task.id, {
    status: "closed",
    close_reason: "approved and committed",
    worktree: null,
    branch: null,
    assignee: null,
    assignee_pid: null,
    pipeline: { ...task.pipeline, stage: "done" }
  }, true)

  await Store.addComment(projectId, task.id, {
    author: "system",
    message: `Committed to branch by @ops. Task closed.`,
    created_at: new Date().toISOString(),
  })

  Bus.publish(BackgroundTaskEvent.Completed, {
    taskID: task.id,
    sessionID: pmSessionId,
    parentSessionID: undefined,
  })

  log.info("task committed and closed", { taskId: task.id })
}

// Re-prompts a fresh developer session in the SAME worktree (prior changes are
// preserved) with the adversarial feedback folded into the prompt.
// (Function continues into the next chunk.)
async function respawnDeveloper(task: Task, jobId: string, projectId: string, pmSessionId: string, attempt: number, verdict: AdversarialVerdict): Promise {
  const parentSession = await Session.get(pmSessionId).catch(() => null)
  if (!parentSession?.directory) {
    log.error("PM session not found for respawn", { taskId: task.id })
    return
  }

  if (!task.worktree) {
    log.error("Task has no worktree for respawn", { taskId: task.id })
    return
  }

  let devSession
  try {
    devSession = await Session.createNext({
      parentID: pmSessionId,
      directory: task.worktree,
      title: `Developer retry #${attempt}: ${task.title}`,
      permission: [],
    })
  } catch (e) {
    log.error("failed to respawn developer", { taskId: task.id, error: String(e) })
    return
  }

  await Store.updateTask(projectId, task.id, {
    status: "in_progress",
    assignee: devSession.id,
    pipeline: {
      ...task.pipeline,
      attempt,
      stage: "developing",
      last_activity: new Date().toISOString(),
    }
  }, true)

  const issueLines = verdict.issues.map((i) => ` - ${i.location} [${i.severity}]: ${i.fix}`).join("\n")
  const prompt = `This is retry attempt ${attempt} of 3. The previous implementation had issues that must be fixed.
+ +**Task:** ${task.title} +**Description:** ${task.description} +**Acceptance Criteria:** ${task.acceptance_criteria} + +**Adversarial feedback — fix these before signaling complete:** +Summary: ${verdict.summary} +Issues: +${issueLines} + +The codebase changes are already in this worktree. Fix the specific issues listed above, run tests, then signal completion with: +taskctl comment ${task.id} "Implementation complete: "` + + await SessionPrompt.prompt({ + sessionID: devSession.id, + agent: "developer-pipeline", + parts: [{ type: "text", text: prompt }], + }) + + await Store.addComment(projectId, task.id, { + author: "system", + message: `Developer respawned for attempt ${attempt}. Adversarial feedback provided.`, + created_at: new Date().toISOString(), + }) +} + +async function escalateToPM(task: Task, jobId: string, projectId: string, pmSessionId: string): Promise { + await Store.updateTask(projectId, task.id, { + status: "failed", + pipeline: { ...task.pipeline, stage: "failed" } + }, true) + + await Store.addComment(projectId, task.id, { + author: "system", + message: `Failed after 3 adversarial review cycles. Last verdict: ${task.pipeline.adversarial_verdict?.summary ?? "unknown"}. Worktree preserved for PM inspection.`, + created_at: new Date().toISOString(), + }) + + Bus.publish(BackgroundTaskEvent.Completed, { + taskID: `escalation-${task.id}`, + sessionID: pmSessionId, + parentSessionID: undefined, + }) + + log.error("task escalated to PM after 3 failures", { taskId: task.id, jobId }) +} + +async function escalateCommitFailure(task: Task, projectId: string, pmSessionId: string, reason: string): Promise { + await Store.updateTask(projectId, task.id, { + status: "blocked_on_conflict", + pipeline: { ...task.pipeline, stage: "commit-failed" } + }, true) + + await Store.addComment(projectId, task.id, { + author: "system", + message: `Commit failed: ${reason}. Worktree preserved. 
Use taskctl override ${task.id} --commit-as-is to force commit or taskctl retry to reset.`, + created_at: new Date().toISOString(), + }) + + Bus.publish(BackgroundTaskEvent.Completed, { + taskID: `commit-failure-${task.id}`, + sessionID: pmSessionId, + parentSessionID: undefined, + }) +} + +async function spawnAdversarial(task: Task, jobId: string, projectId: string, pmSessionId: string): Promise { + if (task.assignee) { + log.warn("refusing to spawn adversarial: task already has assignee", { taskId: task.id, assignee: task.assignee }) + return + } + + const parentSession = await Session.get(pmSessionId).catch(() => null) + if (!parentSession?.directory) { + log.error("PM session not found for adversarial spawn", { taskId: task.id }) + return + } + + if (!task.worktree || typeof task.worktree !== "string") { + log.error("invalid worktree for adversarial spawn", { taskId: task.id, worktree: task.worktree }) + return + } + const safeWorktree = task.worktree.replace(/[^\w\-./]/g, "") + if (!safeWorktree) { + log.error("worktree sanitization resulted in empty string", { taskId: task.id, worktree: task.worktree }) + return + } + + let adversarialSession + try { + adversarialSession = await Session.createNext({ + parentID: pmSessionId, + directory: parentSession.directory, + title: `Adversarial: ${task.title}`, + permission: [], + }) + } catch (e) { + log.error("failed to create adversarial session", { taskId: task.id, error: String(e) }) + return + } + + await Store.updateTask(projectId, task.id, { + pipeline: { ...task.pipeline, stage: "adversarial-running" } + }) + + const prompt = `Review the implementation in worktree at: ${safeWorktree} + +Task ID: ${task.id} +Title: ${task.title} +Description: ${task.description} +Acceptance Criteria: ${task.acceptance_criteria} + +Read the changed files in the worktree, run typecheck, and record your verdict with taskctl verdict.` + + try { + await SessionPrompt.prompt({ + sessionID: adversarialSession.id, + agent: 
"adversarial-pipeline", + parts: [{ type: "text", text: prompt }], + }) + } catch (e) { + log.error("adversarial session failed to start", { taskId: task.id, error: String(e) }) + try { + SessionPrompt.cancel(adversarialSession.id) + } catch (e: any) { + log.error("failed to cancel orphaned adversarial session", { sessionId: adversarialSession.id, error: String(e) }) + await Store.addComment(projectId, task.id, { + author: "system", + message: `⚠️ Failed to cancel orphaned adversarial session: ${adversarialSession.id}. Manual cleanup may be required.`, + created_at: new Date().toISOString(), + }) + } + + // Remove worktree before resetting status to prevent orphaned worktrees + if (task.worktree) { + const safeWorktree = sanitizeWorktree(task.worktree) + if (safeWorktree) { + await Worktree.remove({ directory: safeWorktree }).catch(e => + log.error("failed to remove worktree after adversarial spawn failed", { taskId: task.id, error: String(e) }) + ) + } + } + + await Store.updateTask(projectId, task.id, { + status: "open", + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + pipeline: { ...task.pipeline, stage: "idle" } + }, true) + } +} + +async function getRecentActivity(sessionId: string): Promise { + try { + const msgs = await Session.messages({ sessionID: sessionId, limit: 10 }) + if (!msgs || msgs.length === 0) { + return `Session ${sessionId} is active. No message history available.` + } + + const assistantMsgs = msgs.filter((m) => m.info.role === "assistant") + if (assistantMsgs.length === 0) { + return `Session ${sessionId} is active. Developer has not yet responded.` + } + + const summary = assistantMsgs.map((_, i) => `${i + 1}. [assistant response]`).join("\n") + return `Recent developer activity:\n${summary}` + } catch { + return "Unable to retrieve session history." 
  }
}

// Spawns a one-shot steering agent that reads a developer's recent activity
// and answers with a JSON action (continue | steer | replace). Returns null
// only when the steering session itself could not be created or prompted; any
// unusable agent output degrades to { action: "continue" }.
async function spawnSteering(task: Task, history: string, pmSessionId: string): Promise<{ action: string; message: string | null } | null> {
  const parentSession = await Session.get(pmSessionId).catch(() => null)
  if (!parentSession?.directory) return null

  let steeringSession
  try {
    steeringSession = await Session.createNext({
      parentID: pmSessionId,
      directory: parentSession.directory,
      title: `Steering: ${task.title}`,
      permission: [],
    })
  } catch (e) {
    log.error("failed to create steering session", { taskId: task.id, error: String(e) })
    return null
  }

  const prompt = `Task: ${task.title}
Description: ${task.description}
Acceptance criteria: ${task.acceptance_criteria}

Recent developer activity:
${history}

Assess the developer's progress and respond with the appropriate JSON action.`

  try {
    await SessionPrompt.prompt({
      sessionID: steeringSession.id,
      agent: "steering",
      parts: [{ type: "text", text: prompt }],
    })
  } catch (e) {
    log.error("failed to prompt steering agent", { taskId: task.id, error: String(e) })
    return null
  }

  // Wait (up to 2 min) for the steering session to end before reading its output.
  const maxWait = 2 * 60 * 1000
  const pollMs = 3000
  const start = Date.now()

  while (Date.now() - start < maxWait) {
    await new Promise(r => setTimeout(r, pollMs))
    const alive = await isSessionAlive(steeringSession.id)
    if (!alive) break
  }

  const msgs = await Session.messages({ sessionID: steeringSession.id })
  if (!msgs || msgs.length === 0) {
    log.warn("steering session produced no messages", { taskId: task.id })
    return { action: "continue", message: null }
  }

  const assistantMsgs = msgs.filter((m) => m.info.role === "assistant")
  if (assistantMsgs.length === 0) {
    log.warn("steering session has no assistant response", { taskId: task.id })
    return { action: "continue", message: null }
  }

  const lastMsg = assistantMsgs[assistantMsgs.length - 1]
  const textParts = lastMsg.parts.filter((p) => p.type === "text")
  if (textParts.length === 0) {
    log.warn("steering agent response has no text parts", { taskId: task.id })
    return { action: "continue", message: null }
  }

  const responseText = textParts.map((p) => (p as MessageV2.TextPart).text).join("\n")

  let response
  try {
    // Greedy brace match: grabs from the first "{" to the last "}" in the reply.
    const jsonMatch = responseText.match(/\{[\s\S]*\}/)
    if (!jsonMatch) {
      log.warn("steering agent response contains no JSON", { taskId: task.id })
      return { action: "continue", message: null }
    }
    response = JSON.parse(jsonMatch[0])
  } catch (e) {
    log.warn("failed to parse steering agent JSON response", { taskId: task.id, error: String(e) })
    return { action: "continue", message: null }
  }

  if (!response.action || typeof response.action !== "string") {
    log.warn("steering response missing action field", { taskId: task.id })
    return { action: "continue", message: null }
  }

  return { action: response.action, message: response.message ?? null }
}

// Every 15 minutes per task, runs the steering agent against active developer
// sessions and applies its decision: send guidance into the session, or cancel
// it and reopen the task for a fresh developer. (Continues into next chunk.)
async function checkSteering(jobId: string, projectId: string, pmSessionId: string): Promise {
  const allTasks = await Store.listTasks(projectId)
  const jobTasks = allTasks.filter((t) => t.job_id === jobId)
  const now = new Date()

  for (const task of jobTasks) {
    if (task.status !== "in_progress") continue
    if (task.pipeline.stage === "adversarial-running" || task.pipeline.stage === "reviewing") continue

    const lastSteering = task.pipeline.last_steering
      ? new Date(task.pipeline.last_steering)
      : new Date(0)
    const minutesSince = (now.getTime() - lastSteering.getTime()) / 60_000
    if (minutesSince < 15) continue

    if (!task.assignee) continue

    const sessionAlive = await isSessionAlive(task.assignee)
    if (!sessionAlive) continue

    const history = await getRecentActivity(task.assignee)

    const result = await spawnSteering(task, history, pmSessionId)
    if (!result) {
      // Still stamp last_steering so a broken steering agent can't hot-loop.
      await Store.updateTask(projectId, task.id, {
        pipeline: { ...task.pipeline, last_steering: now.toISOString() }
      }, true)
      continue
    }

    await Store.updateTask(projectId, task.id, {
      pipeline: { ...task.pipeline, last_steering: now.toISOString() }
    }, true)

    if (result.action === "continue") {
      log.info("steering: continue", { taskId: task.id })
    } else if (result.action === "steer") {
      log.info("steering: sending guidance", { taskId: task.id, message: result.message })
      await SessionPrompt.prompt({
        sessionID: task.assignee,
        agent: "developer-pipeline",
        parts: [{ type: "text", text: `[Steering guidance]: ${result.message}` }],
      }).catch(e => log.error("failed to send steering message", { taskId: task.id, error: String(e) }))

      await Store.addComment(projectId, task.id, {
        author: "system",
        message: `Steering guidance sent: ${result.message}`,
        created_at: now.toISOString(),
      })
    } else if (result.action === "replace") {
      log.info("steering: replacing developer", { taskId: task.id, reason: result.message })

      if (task.assignee) {
        try { SessionPrompt.cancel(task.assignee) } catch {}
      }

      await Store.updateTask(projectId, task.id, {
        status: "open",
        assignee: null,
        assignee_pid: null,
        pipeline: {
          ...task.pipeline,
          stage: "idle",
          last_activity: null,
          last_steering: now.toISOString(),
        }
      }, true)

      await Store.addComment(projectId, task.id, {
        author: "system",
        message: `Developer replaced by steering agent: ${result.message}.
Task reset to open — Pulse will reschedule.`,
        created_at: now.toISOString(),
      })
    }
  }
}

// Finishes a job once every task is closed: stops the tick interval, releases
// the singleton lock, marks the job complete, and notifies the PM session.
// NOTE(review): `every` is vacuously true on an empty task list, and "failed"
// tasks never reach "closed" — confirm both behaviors are intended.
export async function checkCompletion(jobId: string, projectId: string, pmSessionId: string, interval: ReturnType): Promise {
  const allTasks = await Store.listTasks(projectId)
  const jobTasks = allTasks.filter((t) => t.job_id === jobId)
  const allClosed = jobTasks.every((t) => t.status === "closed")

  if (allClosed) {
    log.info("all tasks completed", { jobId })
    try {
      clearInterval(interval)
      tickLock.delete(jobId)
      await removeLockFile(jobId, projectId)
      await Store.updateJob(projectId, jobId, { status: "complete" })
      Bus.publish(BackgroundTaskEvent.Completed, { taskID: jobId, sessionID: pmSessionId, parentSessionID: undefined })
    } catch (e) {
      // Even on failure, make sure the lock and in-memory tick guard are released.
      tickLock.delete(jobId)
      await removeLockFile(jobId, projectId).catch(() => {})
    }
  }
}

// PM-initiated shutdown: cancels active/review sessions, removes worktrees,
// reopens their tasks, then stops the tick loop and marks the job "stopped".
async function gracefulStop(jobId: string, projectId: string, interval: ReturnType): Promise {
  log.info("graceful stop requested", { jobId })

  const allTasks = await Store.listTasks(projectId)
  const jobTasks = allTasks.filter((t) => t.job_id === jobId)
  const inProgressTasks = jobTasks.filter((t) => t.status === "in_progress" || t.status === "review")

  for (const task of inProgressTasks) {
    if (task.assignee) {
      await Session.get(task.assignee).catch(() => {})
      try {
        SessionPrompt.cancel(task.assignee)
      } catch (e: any) {
        log.error("failed to cancel session during graceful stop", { taskId: task.id, error: String(e) })
      }
    }

    let worktreeRemoved = false
    if (task.worktree) {
      try {
        const safeWorktree = sanitizeWorktree(task.worktree)
        if (!safeWorktree) {
          log.error("worktree sanitization failed during graceful stop", { taskId: task.id, worktree: task.worktree })
        } else {
          await Worktree.remove({ directory: safeWorktree })
          worktreeRemoved = true
        }
      } catch (e) {
        log.error("failed to remove worktree during graceful stop", { taskId: task.id, error: String(e) })
      }
    }

    await Store.updateTask(projectId, task.id, {
      status: "open",
      assignee: null,
      assignee_pid: null,
      worktree: null,
      branch: null,
    }, true)

    await Store.addComment(projectId, task.id, {
      author: "system",
      message: worktreeRemoved
        ? "Job stopped by PM. Worktree cleaned up."
        : "Job stopped by PM.",
      created_at: new Date().toISOString(),
    })
  }

  clearInterval(interval)
  tickLock.delete(jobId)
  await removeLockFile(jobId, projectId)

  await Store.updateJob(projectId, jobId, { status: "stopped" })
  log.info("graceful stop completed", { jobId })
}
\ No newline at end of file
diff --git a/packages/opencode/src/tasks/scheduler.ts b/packages/opencode/src/tasks/scheduler.ts
new file mode 100644
index 000000000000..6a863fd0277b
--- /dev/null
+++ b/packages/opencode/src/tasks/scheduler.ts
@@ -0,0 +1,26 @@
import { Store } from "./store"
import type { Task } from "./types"

export const Scheduler = {
  // Returns up to `count` schedulable tasks: status "open" (or
  // "blocked_on_conflict" — NOTE(review): confirm conflicted tasks should be
  // rescheduled automatically) whose every dependency is closed, ordered by
  // ascending priority then id for deterministic selection.
  async getNextTasks(projectId: string, count: number = 1): Promise {
    const tasks = await Store.listTasks(projectId)
    const openTasks = tasks.filter((task) => task.status === "open" || task.status === "blocked_on_conflict")

    const readyTasks = openTasks.filter((task) => {
      const dependencies = task.depends_on
      const allDepsClosed = dependencies.every((depId) => {
        const depTask = tasks.find((t) => t.id === depId)
        return depTask && depTask.status === "closed"
      })

      return allDepsClosed
    })

    // sort() mutates readyTasks in place, which is fine: it is a local filter copy.
    const sortedTasks = readyTasks.sort((a, b) => {
      if (a.priority !== b.priority) return a.priority - b.priority
      return a.id.localeCompare(b.id)
    })

    return sortedTasks.slice(0, count)
  },
}
\ No newline at end of file
diff --git a/packages/opencode/src/tasks/store.ts b/packages/opencode/src/tasks/store.ts
new file mode 100644
index 000000000000..e81a91f1790a
--- /dev/null
+++ b/packages/opencode/src/tasks/store.ts
@@ -0,0 +1,294 @@
import fs from "fs/promises"
import path from "path"
import { Global } from "../global/index"
import type {
  Task, Job, TaskIndex, Comment, PipelineEvent } from "./types"

const TASKS_DIR = "tasks"

// Rejects project ids that could escape the per-project tasks directory
// (path separators, null bytes, "." / ".." segments). Returns the id unchanged.
function sanitizeProjectId(projectId: string): string {
  if (!projectId || typeof projectId !== "string") {
    throw new Error("Invalid projectId: must be a non-empty string")
  }

  if (projectId.includes("/") || projectId.includes("\\") || projectId.includes("\0")) {
    throw new Error(`Invalid projectId: contains path separators or null bytes`)
  }

  if (projectId === "." || projectId === ".." || projectId.startsWith("..") || projectId.endsWith("..")) {
    throw new Error(`Invalid projectId: cannot be "." or ".." or contain ".."`)
  }

  return projectId
}

// Same idea for task ids; additionally forbids any leading "." (hidden files,
// relative segments). Returns the id unchanged.
function sanitizeTaskId(taskId: string): string {
  if (!taskId || typeof taskId !== "string") {
    throw new Error("Invalid taskId: must be a non-empty string")
  }

  if (taskId.includes("/") || taskId.includes("\\") || taskId.includes("\0")) {
    throw new Error(`Invalid taskId: contains path separators or null bytes`)
  }

  if (taskId.startsWith(".")) {
    throw new Error(`Invalid taskId: cannot start with "."`)
  }

  return taskId
}

// <data dir>/tasks/<projectId> — projectId is validated before joining.
function getTasksDir(projectId: string): string {
  const sanitized = sanitizeProjectId(projectId)
  return path.join(Global.Path.data, TASKS_DIR, sanitized)
}

// Full path of a task's JSON file; both path components are validated.
function getSafeTaskPath(projectId: string, taskId: string): string {
  const tasksDir = getTasksDir(projectId)
  const sanitizedTaskId = sanitizeTaskId(taskId)
  return path.join(tasksDir, `${sanitizedTaskId}.json`)
}

async function ensureTasksDir(projectId: string): Promise {
  const tasksDir = getTasksDir(projectId)
  await fs.mkdir(tasksDir, { recursive: true })
  return tasksDir
}

// Write-then-rename so readers never observe a half-written JSON file.
// The temp file is unlinked best-effort on failure before rethrowing.
async function atomicWrite(filePath: string, content: string): Promise {
  const tmp = `${filePath}.tmp`
  try {
    await Bun.write(tmp, content)
    await fs.rename(tmp, filePath)
  } catch (error) {
    try {
      await fs.unlink(tmp)
    } catch {}
    throw error
  }
}

// Fields that ordinary updateTask calls may not touch; callers that know what
// they are doing pass allowImmutable=true (the pipeline itself does this).
const IMMUTABLE_FIELDS: readonly (keyof Task)[] = [
  "id",
  "created_at",
  "job_id",
  "parent_issue",
  "task_type",
  "comments",
  "pipeline",
] as const

function validateTaskUpdates(updates: Partial, allowImmutable: boolean = false): void {
  if (allowImmutable) return
  const invalidFields = IMMUTABLE_FIELDS.filter((field) => field in updates)
  if (invalidFields.length > 0) {
    throw new Error(
      `Cannot update immutable fields: ${invalidFields.join(", ")}. Use specialized methods for comments and pipeline events.`,
    )
  }
}

export const Store = {
  // Persists a new task, registers it in the index, and logs the event.
  async createTask(projectId: string, task: Task): Promise {
    sanitizeTaskId(task.id)
    const tasksDir = await ensureTasksDir(projectId) // ensures the dir exists; value itself unused
    const taskPath = getSafeTaskPath(projectId, task.id)
    await atomicWrite(taskPath, JSON.stringify(task, null, 2))
    await this.updateIndex(projectId, task.id, {
      status: task.status,
      priority: task.priority,
      labels: task.labels,
      depends_on: task.depends_on,
      updated_at: task.updated_at,
    })
    await this.logActivity(projectId, {
      type: "task_created",
      task_id: task.id,
      timestamp: new Date().toISOString(),
    })
  },

  // Returns the parsed task, or null when missing/unreadable/corrupt.
  async getTask(projectId: string, taskId: string): Promise {
    const taskPath = getSafeTaskPath(projectId, taskId)
    const content = await Bun.file(taskPath).text().catch(() => null)
    if (!content) return null
    try {
      return JSON.parse(content) as Task
    } catch {
      return null
    }
  },

  // Shallow-merges `updates` onto the stored task, refreshes updated_at, and
  // mirrors the summary fields into the index. (Continues into next chunk.)
  async updateTask(projectId: string, taskId: string, updates: Partial, allowImmutable: boolean = false): Promise {
    validateTaskUpdates(updates, allowImmutable)
    sanitizeTaskId(taskId)
    const task = await this.getTask(projectId, taskId)
    if (!task) throw new Error(`Task not found: ${taskId}`)

    const updated = { ...task, ...updates, updated_at: new Date().toISOString() }
    const taskPath = getSafeTaskPath(projectId, taskId)
    await atomicWrite(taskPath, JSON.stringify(updated, null, 2))
    await this.updateIndex(projectId, taskId, {
      status: updated.status,
      priority: updated.priority,
      labels:
updated.labels, + depends_on: updated.depends_on, + updated_at: updated.updated_at, + }) + await this.logActivity(projectId, { + type: "task_updated", + task_id: taskId, + timestamp: new Date().toISOString(), + }) + }, + + async listTasks(projectId: string): Promise { + const tasksDir = getTasksDir(projectId) + const content = await Bun.file(path.join(tasksDir, "index.json")).text().catch(() => "{}") + try { + const index = JSON.parse(content) as TaskIndex + const tasks: Task[] = [] + + for (const taskId in index) { + const task = await this.getTask(projectId, taskId) + if (task) tasks.push(task) + } + + return tasks + } catch { + return [] + } + }, + + async updateIndex(projectId: string, taskId: string, entry: TaskIndex[string]): Promise { + const tasksDir = await ensureTasksDir(projectId) + const indexPath = path.join(tasksDir, "index.json") + + let index = {} as TaskIndex + const content = await Bun.file(indexPath).text().catch(() => "{}") + try { + if (content) index = JSON.parse(content) as TaskIndex + } catch { + index = {} + } + + index[taskId] = entry + await atomicWrite(indexPath, JSON.stringify(index, null, 2)) + }, + + async getIndex(projectId: string): Promise { + const tasksDir = getTasksDir(projectId) + const content = await Bun.file(path.join(tasksDir, "index.json")).text().catch(() => "{}") + if (!content) return {} + try { + return JSON.parse(content) as TaskIndex + } catch { + return {} + } + }, + + async logActivity(projectId: string, event: Record): Promise { + const tasksDir = await ensureTasksDir(projectId) + const activityPath = path.join(tasksDir, "activity.ndjson") + const line = JSON.stringify(event) + "\n" + const file = Bun.file(activityPath) + try { + const existing = file.size > 0 ? 
await file.text() : "" + await Bun.write(activityPath, existing + line) + } catch {} + }, + + async createJob(projectId: string, job: Job): Promise { + const tasksDir = await ensureTasksDir(projectId) + const jobPath = path.join(tasksDir, `job-${job.id}.json`) + await atomicWrite(jobPath, JSON.stringify(job, null, 2)) + await this.logActivity(projectId, { + type: "job_created", + job_id: job.id, + timestamp: new Date().toISOString(), + }) + }, + + async getJob(projectId: string, jobId: string): Promise { + const tasksDir = getTasksDir(projectId) + const jobPath = path.join(tasksDir, `job-${jobId}.json`) + const content = await Bun.file(jobPath).text().catch(() => null) + if (!content) return null + try { + return JSON.parse(content) as Job + } catch { + return null + } + }, + + async updateJob(projectId: string, jobId: string, updates: Partial): Promise { + const job = await this.getJob(projectId, jobId) + if (!job) throw new Error(`Job not found: ${jobId}`) + + const updated = { ...job, ...updates } + const tasksDir = getTasksDir(projectId) + const jobPath = path.join(tasksDir, `job-${jobId}.json`) + await atomicWrite(jobPath, JSON.stringify(updated, null, 2)) + await this.logActivity(projectId, { + type: "job_updated", + job_id: jobId, + timestamp: new Date().toISOString(), + }) + }, + + async addComment(projectId: string, taskId: string, comment: Comment): Promise { + const task = await this.getTask(projectId, taskId) + if (!task) throw new Error(`Task not found: ${taskId}`) + + const updated = { ...task, comments: [...task.comments, comment] } + await this.updateTask(projectId, taskId, updated, true) + await this.logActivity(projectId, { + type: "comment_added", + task_id: taskId, + comment, + timestamp: new Date().toISOString(), + }) + }, + + async addPipelineEvent(projectId: string, taskId: string, event: PipelineEvent): Promise { + const task = await this.getTask(projectId, taskId) + if (!task) throw new Error(`Task not found: ${taskId}`) + + const updated = 
{ + ...task, + pipeline: { + ...task.pipeline, + history: [...task.pipeline.history, event], + }, + } + await this.updateTask(projectId, taskId, updated, true) + }, + + async findJobByIssue(projectId: string, issueNumber: number): Promise { + const tasksDir = await getTasksDir(projectId) + const jobPattern = /^job-(.+)\.json$/ + const jobFiles = []; + + try { + const entries = await fs.readdir(tasksDir); + for (const entry of entries) { + const match = jobPattern.exec(entry); + if (match) { + const jobPath = path.join(tasksDir, entry); + const content = await Bun.file(jobPath).text().catch(() => null); + if (content) { + try { + const job = JSON.parse(content) as Job; + if (job.parent_issue === issueNumber && job.status === "running") { + return job; + } + } catch {} + } + } + } + } catch { + return null; + } + + return null; + }, +} \ No newline at end of file diff --git a/packages/opencode/src/tasks/tool.ts b/packages/opencode/src/tasks/tool.ts new file mode 100644 index 000000000000..2fa8b6f0aaf4 --- /dev/null +++ b/packages/opencode/src/tasks/tool.ts @@ -0,0 +1,1000 @@ +import z from "zod" +import { Tool } from "../tool/tool" +import { Instance } from "../project/instance" +import { Store } from "./store" +import { Scheduler } from "./scheduler" +import { Validation } from "./validation" +import { runComposer } from "./composer" +import { enableAutoWakeup } from "../session/async-tasks" +import { startPulse, resurrectionScan, readLockPid, isPidAlive, removeLockFile, sanitizeWorktree } from "./pulse" +import { SessionPrompt } from "../session/prompt" +import { Worktree } from "../worktree" +import { Log } from "../util/log" +import type { Task, Job } from "./types" + +const log = Log.create({ service: "taskctl.tool" }) +const MAX_COMMENT_LENGTH = 100 * 1024 + +function validateLabel(label: string): void { + if (!label || typeof label !== "string") { + throw new Error("Label must be a non-empty string") + } + if (label.includes("/") || label.includes("\\") || 
label.includes("\0")) {
+    throw new Error(`Invalid label "${label}": contains path separators or null bytes`)
+  }
+}
+
+// Turn an arbitrary title into a lowercase, hyphen-separated slug of at most
+// 100 chars. Edge-hyphen trimming runs AFTER the length cut, so a long title
+// whose 100-char boundary lands on a separator can never produce a slug
+// ending in "-". Falls back to "entry" when nothing survives.
+export const slugify = (title: string): string => {
+  const slug = title
+    .toLowerCase()
+    .replace(/[^a-z0-9]+/g, "-")
+    .slice(0, 100)
+    .replace(/^-|-$/g, "")
+  return slug || "entry"
+}
+
+// Derive a task id from the title, appending "-2", "-3", … until it no longer
+// collides with an existing task. Bounded at 1000 attempts so a pathological
+// store can't spin this loop forever.
+export async function generateUniqueSlug(projectId: string, title: string): Promise<string> {
+  const baseSlug = slugify(title)
+  let slug = baseSlug
+  let counter = 2
+  const maxAttempts = 1000
+
+  while (counter <= maxAttempts) {
+    const existing = await Store.getTask(projectId, slug)
+    if (!existing) return slug
+    slug = `${baseSlug}-${counter}`
+    counter++
+  }
+
+  throw new Error(`Failed to generate unique slug after ${maxAttempts} attempts for title: ${title}`)
+}
+
+export const TaskctlTool = Tool.define("taskctl", {
+  description: `Task control and management tool for autonomous development pipelines.
+
+Commands:
+- create: Create a new task with title, description, acceptance criteria
+- list: List all tasks with optional filters
+- get: Get a single task by ID
+- update: Update task fields
+- close: Close a task with a reason
+- comment: Add a comment to a task
+- depends: Add dependency to task (validates no cycle)
+- split: Split task into two new tasks, close original
+- next: Get next tasks ready for work (respecting dependencies and conflicts)
+- validate: Validate task graph for cycles and other issues
+- start: Start autonomous pipeline for a GitHub issue (decomposes issue into tasks via Composer agent)
+- start-skip: Start pipeline skipping Composer (requires existing tasks for issue)
+- status: Show job status for a GitHub issue
+- stop: Stop a running job gracefully
+- resume: Resume a stopped/crashed pipeline
+- inspect: Show full task history and details
+- override: Override a task (skip or commit as-is)
+- retry: Reset and retry a failed task
+- verdict: Record adversarial pipeline verdict for a task
+
+Task labels:
+- module:: Prevent
conflicts with tasks in same module +- file:: Prevent conflicts with tasks touching same file`, + + parameters: z.object({ + command: z + .enum([ + "create", + "list", + "get", + "update", + "close", + "comment", + "depends", + "split", + "next", + "validate", + "start", + "start-skip", + "status", + "stop", + "resume", + "inspect", + "override", + "retry", + "verdict", + ]) + .describe("Command to execute"), + taskId: z.string().optional().describe("Task ID (for get, update, close, comment, depends, split, inspect, override, retry, verdict)"), + title: z.string().optional().describe("Task title (for create)"), + description: z.string().optional().describe("Task description (for create)"), + acceptanceCriteria: z.string().optional().describe("Acceptance criteria (for create)"), + parentIssue: z.number().optional().describe("GitHub issue number (for create, start)"), + jobId: z.string().optional().describe("Job ID (for create, stop, resume)"), + priority: z.number().min(0).max(4).optional().describe("Priority 0-4, 0 is highest (for create, update)"), + taskType: z.enum(["implementation", "test", "research"]).optional().describe("Task type (for create)"), + labels: z.array(z.string()).optional().describe("Task labels (for create)"), + dependsOn: z.array(z.string()).optional().describe("Dependencies (for create)"), + message: z.string().optional().describe("Comment message (for comment)"), + reason: z.string().optional().describe("Close reason (for close)"), + dependencyId: z.string().optional().describe("Dependency task ID to add (for depends)"), + count: z.number().min(1).max(10).optional().describe("Number of tasks to return (for next)"), + updates: z.object({}).passthrough().optional().describe("Field updates for task (for update, e.g. 
{status: 'in_progress'})"), + issueNumber: z.number().optional().describe("GitHub issue number (for start, start-skip, status)"), + overrideMode: z.enum(["skip", "commit-as-is"]).optional().describe("Override mode: skip task or commit-as-is (for override command)"), + verdict: z.enum(["APPROVED", "ISSUES_FOUND", "CRITICAL_ISSUES_FOUND"]).optional().describe("Verdict for adversarial review (for verdict)"), + verdictIssues: z.array(z.object({ + location: z.string(), + severity: z.enum(["CRITICAL", "HIGH", "MEDIUM", "LOW"]), + fix: z.string(), + })).optional().describe("Issues found in review (for verdict)"), + verdictSummary: z.string().optional().describe("Summary of verdict (for verdict)"), + }), + async execute(params, ctx) { + const projectId = Instance.project.id + + if (params.command === "create") { + if (!params.title?.trim()) { + throw new Error("create requires title") + } + if (!params.description?.trim()) { + throw new Error("create requires description") + } + if (!params.acceptanceCriteria?.trim()) { + throw new Error("create requires acceptanceCriteria") + } + if (!params.parentIssue || !params.jobId) { + throw new Error("create requires parentIssue and jobId") + } + + const labels = (params.labels ?? []).filter((l) => l.trim()) + labels.forEach(validateLabel) + + if (params.dependsOn) { + for (const depId of params.dependsOn) { + if (!depId || typeof depId !== "string") { + throw new Error(`Invalid dependency ID: ${depId}`) + } + const depExists = await Store.getTask(projectId, depId) + if (!depExists) { + throw new Error(`Dependency task not found: ${depId}`) + } + } + } + + const taskId = await generateUniqueSlug(projectId, params.title) + const now = new Date().toISOString() + + const task: Task = { + id: taskId, + title: params.title.trim(), + description: params.description.trim(), + acceptance_criteria: params.acceptanceCriteria.trim(), + parent_issue: params.parentIssue, + job_id: params.jobId, + status: "open", + priority: (params.priority ?? 
2) as Task["priority"], + task_type: params.taskType ?? "implementation", + labels, + depends_on: params.dependsOn ?? [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(projectId, task) + return { + title: "Task created", + output: `Created task ${taskId}\n${params.title.trim()}`, + metadata: {}, + } + } + + if (params.command === "list") { + const tasks = await Store.listTasks(projectId) + const lines = [`Found ${tasks.length} tasks`, ""] + + for (const task of tasks.sort((a, b) => a.id.localeCompare(b.id))) { + lines.push(`${task.id} [${task.status}] priority:${task.priority} - ${task.title}`) + } + + return { + title: "Task list", + output: lines.join("\n"), + metadata: {}, + } + } + + if (params.command === "get") { + if (!params.taskId) throw new Error("get requires taskId") + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + const lines = [ + `Task: ${task.id}`, + `Title: ${task.title}`, + `Status: ${task.status}`, + `Priority: ${task.priority}`, + `Type: ${task.task_type}`, + `Parent Issue: #${task.parent_issue}`, + `Job: ${task.job_id}`, + `Assignee: ${task.assignee ?? 
"none"}`, + `Labels: ${task.labels.join(", ") || "none"}`, + `Depends on: ${task.depends_on.join(", ") || "none"}`, + `Created: ${task.created_at}`, + `Updated: ${task.updated_at}`, + ``, + `Description:`, + task.description, + ``, + `Acceptance Criteria:`, + task.acceptance_criteria, + ``, + `Pipeline Stage: ${task.pipeline.stage}`, + `Pipeline Attempt: ${task.pipeline.attempt}`, + ] + + if (task.close_reason) { + lines.push(`Close Reason: ${task.close_reason}`) + } + + if (task.comments.length > 0) { + lines.push("", "Comments:") + for (const comment of task.comments) { + lines.push(` [${comment.created_at}] ${comment.author}: ${comment.message}`) + } + } + + if (task.branch) lines.push(`Branch: ${task.branch}`) + if (task.worktree) lines.push(`Worktree: ${task.worktree}`) + + return { + title: `Task: ${task.id}`, + output: lines.join("\n"), + metadata: {}, + } + } + + if (params.command === "update") { + if (!params.taskId) throw new Error("update requires taskId") + await Store.updateTask(projectId, params.taskId, params.updates ?? {}) + return { + title: "Task updated", + output: `Updated task ${params.taskId}`, + metadata: {}, + } + } + +if (params.command === "close") { + if (!params.taskId) throw new Error("close requires taskId") + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + if (task.status === "closed") { + throw new Error(`Task ${params.taskId} is already closed`) + } + + await Store.updateTask(projectId, params.taskId, { + status: "closed", + close_reason: (params.reason?.trim()) ?? "completed", + }) + return { + title: "Task closed", + output: `Closed task ${task.id}: ${params.reason?.trim() ?? 
"completed"}`, + metadata: {}, + } + } + + if (params.command === "comment") { + if (!params.taskId) throw new Error("comment requires taskId") + if (!params.message?.trim()) throw new Error("comment requires message") + + const message = params.message.trim() + if (message.length > MAX_COMMENT_LENGTH) { + throw new Error(`Comment message too long: ${message.length} bytes (max ${MAX_COMMENT_LENGTH})`) + } + + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + const comment = { + author: ctx.agent, + message, + created_at: new Date().toISOString(), + } + await Store.addComment(projectId, params.taskId, comment) + return { + title: "Comment added", + output: `Added comment to task ${params.taskId}`, + metadata: {}, + } + } + + if (params.command === "depends") { + if (!params.taskId || !params.dependencyId) throw new Error("depends requires taskId and dependencyId") + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + const depTask = await Store.getTask(projectId, params.dependencyId) + if (!depTask) throw new Error(`Dependency task not found: ${params.dependencyId}`) + + if (params.taskId === params.dependencyId) { + throw new Error(`Task cannot depend on itself`) + } + + if (task.depends_on.includes(params.dependencyId)) { + throw new Error(`Task ${params.taskId} already depends on ${params.dependencyId}`) + } + + const newDeps = [...task.depends_on, params.dependencyId] + + async function hasCycle(currentId: string, visited = new Set()): Promise { + if (visited.has(currentId)) return true + visited.add(currentId) + + const currentTask = await Store.getTask(projectId, currentId) + if (!currentTask) return false + + const depsToCheck = currentId === params.taskId ? 
newDeps : currentTask.depends_on + + for (const depId of depsToCheck) { + if (await hasCycle(depId, visited)) return true + } + + visited.delete(currentId) + return false + } + + if (await hasCycle(params.taskId)) { + throw new Error(`Adding ${params.dependencyId} as dependency would create a cycle`) + } + + await Store.updateTask(projectId, params.taskId, { + depends_on: newDeps, + }) + return { + title: "Dependency added", + output: `Added ${params.dependencyId} as dependency to ${params.taskId}`, + metadata: {}, + } + } + + if (params.command === "split") { + if (!params.taskId) throw new Error("split requires taskId") + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + if (task.status !== "open") { + throw new Error(`Can only split open tasks, current status: ${task.status}`) + } + + const slug1 = await generateUniqueSlug(projectId, `${task.title}-part-1`) + const slug2 = await generateUniqueSlug(projectId, `${task.title}-part-2`) + const now = new Date().toISOString() + + const task1: Task = { + ...task, + id: slug1, + title: `${task.title} (Part 1)`, + depends_on: [task.id, ...task.depends_on], + description: `${task.description} - Part 1`, + status: "open", + assignee: null, + assignee_pid: null, + comments: [], + created_at: now, + updated_at: now, + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + const task2: Task = { + ...task, + id: slug2, + title: `${task.title} (Part 2)`, + depends_on: [task.id, ...task.depends_on], + description: `${task.description} - Part 2`, + status: "open", + assignee: null, + assignee_pid: null, + comments: [], + created_at: now, + updated_at: now, + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + let task1Created = false + let task2Created = false + + try { + 
await Store.createTask(projectId, task1) + task1Created = true + + await Store.createTask(projectId, task2) + task2Created = true + + await Store.updateTask(projectId, params.taskId, { + status: "closed", + close_reason: `split into: ${slug1}, ${slug2}`, + }) + + return { + title: "Task split", + output: `Split ${task.id} into ${slug1} and ${slug2}`, + metadata: {}, + } + } catch (error) { + if (task2Created) { + await Store.updateTask(projectId, slug2, { status: "closed", close_reason: "rollback: split failed" }).catch(() => {}) + } + if (task1Created) { + await Store.updateTask(projectId, slug1, { status: "closed", close_reason: "rollback: split failed" }).catch(() => {}) + } + throw error + } + } + + if (params.command === "next") { + const count = params.count ?? 1 + const tasks = await Scheduler.getNextTasks(projectId, count) + + if (tasks.length === 0) { + return { + title: "No tasks available", + output: "No tasks are currently available for work", + metadata: {}, + } + } + + const lines = [`Available tasks: ${tasks.length}`, ""] + + for (const task of tasks) { + lines.push(`${task.id} [${task.status}] priority:${task.priority} - ${task.title}`) + if (task.depends_on.length > 0) { + lines.push(` Depends on: ${task.depends_on.join(", ")}`) + } + } + + return { + title: "Available tasks", + output: lines.join("\n"), + metadata: {}, + } + } + + if (params.command === "validate") { + const result = await Validation.validateGraph(projectId) + const lines = [`Valid: ${result.valid}`, `Errors: ${result.errors.length}`, `Warnings: ${result.warnings.length}`, ""] + + if (result.errors.length > 0) { + lines.push("Errors:") + for (const error of result.errors) { + lines.push(` - ${error}`) + } + } + + if (result.warnings.length > 0) { + lines.push("Warnings:") + for (const warning of result.warnings) { + lines.push(` - ${warning}`) + } + } + + if (result.valid && result.warnings.length === 0) { + lines.push("Task graph is valid.") + } + + return { + title: "Validation 
result", + output: lines.join("\n"), + metadata: {}, + } + } + +if (params.command === "start") { + const issueNumber = params.issueNumber + if (!issueNumber) throw new Error("start requires issueNumber") + + const existingJob = await Store.findJobByIssue(projectId, issueNumber) + if (existingJob) { + const { removeLockFile } = await import("./pulse") + const existingPid = await readLockPid(existingJob.id, projectId) + + if (existingJob.status === "complete" || existingJob.status === "failed" || existingJob.status === "stopped") { + return { + title: "Job already completed", + output: `Job is ${existingJob.status}. Status: taskctl status ${issueNumber}`, + metadata: {}, + } + } + + if (existingJob.status === "running") { + if (existingPid !== null && isPidAlive(existingPid)) { + return { + title: "Already running", + output: `Job already running (PID ${existingPid}). Use taskctl status ${issueNumber} to monitor.`, + metadata: {}, + } + } + if (existingPid !== null) { + await removeLockFile(existingJob.id, projectId) + } + } + } + + const jobId = `job-${Date.now()}` + await Store.createJob(projectId, { + id: jobId, + parent_issue: issueNumber, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 3, + pm_session_id: ctx.sessionID, + }) + + enableAutoWakeup(ctx.sessionID) + + const repo = "randomm/opencode" + + let issueOutput: { title: string; body: string } | null = null + const proc = Bun.spawn(["gh", "issue", "view", issueNumber.toString(), "--repo", repo, "--json", "title,body"], { + stdout: "pipe", + stderr: "pipe", + }) + + const reader = proc.stdout.getReader() + const decoder = new TextDecoder() + let output = "" + + while (true) { + const { done, value } = await reader.read() + if (done) break + output += decoder.decode(value) + } + + const exitCode = await proc.exited + if (exitCode !== 0 || !output) { + const errorReader = proc.stderr.getReader() + let errorOutput = "" + while (true) { + const { 
done, value } = await errorReader.read() + if (done) break + errorOutput += decoder.decode(value) + } + await Store.updateJob(projectId, jobId, { status: "failed" }) + throw new Error(`Failed to fetch GitHub issue #${issueNumber}: ${errorOutput || "Unknown error"}`) + } + + try { + issueOutput = JSON.parse(output) as { title: string; body: string } + if (!issueOutput || typeof issueOutput !== "object") { + throw new Error("Invalid response format") + } + } catch { + await Store.updateJob(projectId, jobId, { status: "failed" }) + throw new Error(`Failed to parse GitHub issue #${issueNumber} output`) + } + + const issueTitle = issueOutput.title || "" + const issueBody = issueOutput.body || "" + + const composerResult = await runComposer({ + jobId, + projectId, + pmSessionId: ctx.sessionID, + issueNumber, + issueTitle, + issueBody, + }) + + if (composerResult.status === "needs_clarification") { + await Store.updateJob(projectId, jobId, { status: "failed" }) + const questionLines = ["Composer needs clarification:", ...composerResult.questions.map((q) => `${q.id}. ${q.question}`)] + return { + title: "Composer needs clarification", + output: questionLines.join("\n"), + metadata: {}, + } + } + + await resurrectionScan(jobId, projectId) + startPulse(jobId, projectId, ctx.sessionID) + + return { + title: "Pipeline started", + output: `Job ${jobId} started: ${composerResult.taskCount} tasks queued. Pulse is running every 5 seconds. Use taskctl status ${issueNumber} to monitor.`, + metadata: {}, + } + } + + if (params.command === "status") { + const issueNumber = params.issueNumber + if (!issueNumber) throw new Error("status requires issueNumber") + + const job = await Store.findJobByIssue(projectId, issueNumber) + if (!job) { + const tasks = await Store.listTasks(projectId) + const historicalTasks = tasks.filter((t) => t.parent_issue === issueNumber) + if (historicalTasks.length > 0) { + return { + title: "Job completed", + output: `Job completed. 
Historical tasks: ${historicalTasks.length} tasks found.`, + metadata: {}, + } + } + return { + title: "Job not found", + output: `No job found for issue #${issueNumber}. Use "taskctl start ${issueNumber}" to start the pipeline.`, + metadata: {}, + } + } + + const tasks = await Store.listTasks(projectId) + const jobTasks = tasks.filter((t) => t.job_id === job.id) + + const lines = [ + `Job: ${job.id}`, + `Status: ${job.status}`, + `Max Workers: ${job.max_workers}`, + `PM Session: ${job.pm_session_id}`, + `Created: ${job.created_at}`, + `Pulse PID: ${job.pulse_pid ?? "none"}`, + ``, + `Tasks (${jobTasks.length}):`, + ] + + for (const task of jobTasks.sort((a, b) => a.id.localeCompare(b.id))) { + lines.push(` ${task.id} [${task.status}] - ${task.title}`) + if (task.assignee) { + lines.push(` Assignee: ${task.assignee}`) + } + if (task.worktree) { + lines.push(` Worktree: ${task.worktree}`) + } + if (task.pipeline.stage !== "idle") { + lines.push(` Pipeline: ${task.pipeline.stage} (attempt ${task.pipeline.attempt})`) + } + } + + return { + title: `Job Status: #${issueNumber}`, + output: lines.join("\n"), + metadata: {}, + } + } + + if (params.command === "start-skip") { + const issueNumber = params.issueNumber + if (!issueNumber) throw new Error("start-skip requires issueNumber") + + const tasks = await Store.listTasks(projectId) + const tasksWithIssue = tasks.filter((t) => t.parent_issue === issueNumber) + if (tasksWithIssue.length === 0) { + return { + title: "No tasks found", + output: `No tasks found for issue #${issueNumber}. Use taskctl start to create tasks first.`, + metadata: {}, + } + } + + return { + title: "Tasks found", + output: `Tasks found: ${tasksWithIssue.length}. 
Pulse integration comes in Phase 3.`, + metadata: {}, + } + } + + if (params.command === "stop") { + const jobId = params.jobId + if (!jobId) throw new Error("stop requires jobId") + + const job = await Store.getJob(projectId, jobId) + if (!job) throw new Error(`Job not found: ${jobId}`) + if (job.status !== "running") { + return { + title: "Job not running", + output: `Job ${jobId} is not running (status: ${job.status}). Nothing to stop.`, + metadata: {}, + } + } + if (job.stopping) { + return { + title: "Already stopping", + output: `Job ${jobId} is already stopping. Use taskctl status to monitor.`, + metadata: {}, + } + } + + await Store.updateJob(projectId, jobId, { stopping: true }) + return { + title: "Stop signal sent", + output: `Stop signal sent to job ${jobId}. Pipeline will finish in-flight work and halt. Use taskctl status to monitor.`, + metadata: {}, + } + } + + if (params.command === "resume") { + const jobId = params.jobId + if (!jobId) throw new Error("resume requires jobId") + + const job = await Store.getJob(projectId, jobId) + if (!job) throw new Error(`Job not found: ${jobId}`) + + const existingPid = await readLockPid(jobId, projectId) + if (existingPid !== null) { + if (isPidAlive(existingPid)) { + return { + title: "Already running", + output: `Pipeline is already running (PID ${existingPid}). Use taskctl status to monitor.`, + metadata: {}, + } + } + await removeLockFile(jobId, projectId) + } + + await resurrectionScan(jobId, projectId) + + const revalidated = await Store.getJob(projectId, jobId) + if (!revalidated) throw new Error(`Job vanished after resurrection: ${jobId}`) + if (revalidated.stopping === true || revalidated.status === "complete" || revalidated.status === "failed" || revalidated.status === "stopped") { + return { + title: "Cannot resume", + output: `Job is ${revalidated.status}${revalidated.stopping ? " and has stop flag set" : ""}. 
Status: taskctl status`, + metadata: {}, + } + } + + // NOTE: Update is not atomic with prior read. Race window exists where pulse tick could set stopping=true concurrently. + // Related design gap: pulse.ts line 58 doesn't check job.stopping before spawning tasks (pulse-zombie race). + // To handle this gracefully: if stop is set during this window, pulse tick will gracefully stop the job. + + await Store.updateJob(projectId, jobId, { status: "running", stopping: false }) + enableAutoWakeup(ctx.sessionID) + + const tasks = await Store.listTasks(projectId) + const remaining = tasks.filter((t) => t.job_id === jobId && t.status !== "closed").length + await resurrectionScan(jobId, projectId) + startPulse(jobId, projectId, ctx.sessionID) + + return { + title: "Pipeline resumed", + output: `Pipeline resumed for job ${jobId}. ${remaining} tasks remaining. Pulse is running.`, + metadata: {}, + } + } + + if (params.command === "inspect") { + if (!params.taskId) throw new Error("inspect requires taskId") + + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + const lines: string[] = [ + `Task: ${task.id}`, + `Status: ${task.status}${task.close_reason ? ` (${task.close_reason})` : ""}`, + `Branch: ${task.branch ?? "none"}`, + `Worktree: ${task.worktree ?? "none"}`, + ``, + `Pipeline:`, + ` Stage: ${task.pipeline.stage}`, + ` Attempt: ${task.pipeline.attempt}`, + ` Last activity: ${task.pipeline.last_activity ?? "never"}`, + ` Last steering: ${task.pipeline.last_steering ?? 
"never"}`, + ] + + if (task.pipeline.history && task.pipeline.history.length > 0) { + lines.push(``, `Pipeline history:`) + for (const entry of task.pipeline.history) { + lines.push(` ${entry}`) + } + } + + if (task.pipeline.adversarial_verdict) { + const v = task.pipeline.adversarial_verdict + lines.push(``, `Last adversarial verdict:`, ` ${v.verdict}`) + if (v.summary) lines.push(` Summary: ${v.summary}`) + if (v.issues.length > 0) { + lines.push(` Issues:`) + for (const issue of v.issues) { + lines.push(` - ${issue.location} [${issue.severity}]: ${issue.fix}`) + } + } + } + + if (task.comments.length > 0) { + lines.push(``, `Comments (${task.comments.length} total):`) + for (const comment of task.comments) { + lines.push(` [${comment.author}] ${comment.message}`) + } + } + + return { + title: `Task inspect: ${task.id}`, + output: lines.join("\n"), + metadata: {}, + } + } + + if (params.command === "override") { + if (!params.taskId) throw new Error("override requires taskId") + if (!params.overrideMode) throw new Error("override requires --skip or --commit-as-is") + + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + const validStates = ["failed", "in_progress", "review", "blocked_on_conflict"] + if (!validStates.includes(task.status)) { + throw new Error(`override requires task in state: ${validStates.join(", ")}. 
Current: ${task.status}`) + } + + if (params.overrideMode === "skip") { + if (task.assignee) { + try { + SessionPrompt.cancel(task.assignee) + } catch {} + } + + if (task.worktree) { + const safeWorktree = sanitizeWorktree(task.worktree) + if (safeWorktree) { + await Worktree.remove({ directory: safeWorktree }).catch((e) => + log.error("failed to remove worktree in override --skip", { taskId: task.id, error: String(e) }) + ) + } + } + + await Store.updateTask(projectId, params.taskId, { + status: "closed", + close_reason: "skipped by PM", + worktree: null, + branch: null, + assignee: null, + assignee_pid: null, + pipeline: { ...task.pipeline, stage: "done" }, + }) + + await Store.addComment(projectId, params.taskId, { + author: "system", + message: "Skipped by PM override. Dependent tasks are now unblocked.", + created_at: new Date().toISOString(), + }) + + return { + title: "Task skipped", + output: `Task ${params.taskId} skipped. Dependent tasks are now unblocked. Pulse will schedule them on next tick.`, + metadata: {}, + } + } + + if (!task.worktree) { + throw new Error(`Task ${params.taskId} has no worktree to commit`) + } + + return { + title: "Commit as-is", + output: `To commit worktree for task ${params.taskId}:\n1. @ops: cd ${task.worktree} && git add -A && git commit -m "feat(taskctl): ${task.title} (#${task.parent_issue}) — committed as-is by PM"\n2. 
Then: taskctl override ${params.taskId} --skip (to close the task)`, + metadata: {}, + } + } + + if (params.command === "retry") { + if (!params.taskId) throw new Error("retry requires taskId") + + const task = await Store.getTask(projectId, params.taskId) + if (!task) throw new Error(`Task not found: ${params.taskId}`) + + if (task.assignee) { + try { + SessionPrompt.cancel(task.assignee) + } catch {} + } + + if (task.worktree) { + const safeWorktree = sanitizeWorktree(task.worktree) + if (safeWorktree) { + await Worktree.remove({ directory: safeWorktree }).catch((e) => + log.error("failed to remove worktree in retry", { taskId: task.id, error: String(e) }) + ) + } + } + + await Store.updateTask(projectId, params.taskId, { + status: "open", + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + pipeline: { + ...task.pipeline, + stage: "idle", + attempt: 1, + adversarial_verdict: null, + last_activity: null, + }, + }) + + await Store.addComment(projectId, params.taskId, { + author: "system", + message: `Retried by PM. Task reset to open. Pulse will reschedule on next tick.`, + created_at: new Date().toISOString(), + }) + + return { + title: "Task retried", + output: `Task ${params.taskId} reset to open with fresh state. Pulse will reschedule it on next tick.`, + metadata: {}, + } + } + + if (params.command === "verdict") { + if (ctx.agent !== "adversarial-pipeline") { + throw new Error("verdict command can only be called by adversarial-pipeline agent") + } + + const taskId = params.taskId! + const verdict = params.verdict! + if (!taskId) throw new Error("verdict requires taskId") + if (!verdict) throw new Error("verdict requires verdict") + + const task = await Store.getTask(projectId, taskId) + if (!task) throw new Error(`Task not found: ${taskId}`) + + const issues = params.verdictIssues ?? [] + const summary = params.verdictSummary ?? 
"" + + const verdictData = { + verdict, + summary, + issues, + created_at: new Date().toISOString(), + } + + await Store.updateTask(projectId, taskId, { + status: "review", + pipeline: { + ...task!.pipeline, + adversarial_verdict: verdictData, + stage: "reviewing", + } + }) + + await Store.addComment(projectId, taskId, { + author: "adversarial-pipeline", + message: `Verdict: ${verdict}${summary ? ` — ${summary}` : ""}`, + created_at: new Date().toISOString(), + }) + + return { + title: "Verdict recorded", + output: `Recorded ${verdict} verdict for task ${taskId}`, + metadata: {}, + } + } + + throw new Error(`Unknown command: ${params.command}`) + }, +}) diff --git a/packages/opencode/src/tasks/types.ts b/packages/opencode/src/tasks/types.ts new file mode 100644 index 000000000000..79d71b788c14 --- /dev/null +++ b/packages/opencode/src/tasks/types.ts @@ -0,0 +1,83 @@ +export type Task = { + id: string + title: string + description: string + acceptance_criteria: string + parent_issue: number + job_id: string + status: "open" | "in_progress" | "review" | "blocked" | "closed" | "failed" | "stopped" | "blocked_on_conflict" + priority: 0 | 1 | 2 | 3 | 4 + task_type: "implementation" | "test" | "research" + labels: string[] + depends_on: string[] + assignee: string | null + assignee_pid: number | null + worktree: string | null + branch: string | null + created_at: string + updated_at: string + close_reason: string | null + comments: Comment[] + pipeline: { + stage: "idle" | "developing" | "reviewing" | "adversarial-running" | "committing" | "done" | "failed" | "stopped" | "commit-failed" + attempt: number + last_activity: string | null + last_steering: string | null + history: PipelineEvent[] + adversarial_verdict: AdversarialVerdict | null + } +} + +export type Comment = { + author: string + message: string + created_at: string +} + +export type PipelineEvent = { + from: string + to: string + attempt: number + timestamp: string + message: string | null +} + +export type 
AdversarialVerdict = { + verdict: "APPROVED" | "ISSUES_FOUND" | "CRITICAL_ISSUES_FOUND" + issues: Array<{ + location: string + severity: "LOW" | "MEDIUM" | "HIGH" | "CRITICAL" + fix: string + }> + summary: string + created_at: string +} + +export type Job = { + id: string + parent_issue: number + status: "running" | "stopped" | "complete" | "failed" + created_at: string + stopping: boolean + pulse_pid: number | null + max_workers: number + pm_session_id: string +} + +export type TaskIndexEntry = { + status: Task["status"] + priority: Task["priority"] + labels: Task["labels"] + depends_on: Task["depends_on"] + updated_at: string +} + +export type TaskIndex = { + [taskId: string]: TaskIndexEntry +} + +export type ValidationResult = { + valid: boolean + errors: string[] + warnings: string[] +} \ No newline at end of file diff --git a/packages/opencode/src/tasks/validation.ts b/packages/opencode/src/tasks/validation.ts new file mode 100644 index 000000000000..567b4759fa18 --- /dev/null +++ b/packages/opencode/src/tasks/validation.ts @@ -0,0 +1,95 @@ +import { Store } from "./store" +import type { ValidationResult } from "./types" + +function detectCycle(tasks: Map): string[] { + const errors: string[] = [] + const visited = new Set() + const path = new Set() + + function dfs(taskId: string, dependencyChain: string[]): boolean { + if (path.has(taskId)) { + const cycle = [...dependencyChain, taskId].join(" -> ") + errors.push(`Circular dependency detected: ${cycle}`) + return true + } + + if (visited.has(taskId)) return false + + path.add(taskId) + visited.add(taskId) + + const task = tasks.get(taskId) + if (task) { + for (const depId of task.depends_on) { + dfs(depId, [...dependencyChain, taskId]) + } + } + + path.delete(taskId) + return false + } + + for (const taskId of tasks.keys()) { + if (!visited.has(taskId)) { + dfs(taskId, []) + } + } + + return errors +} + +export const Validation = { + async validateGraph(projectId: string): Promise { + const tasks = await 
Store.listTasks(projectId) + const taskMap = new Map(tasks.map((task) => [task.id, { depends_on: task.depends_on }])) + const taskSet = new Set(tasks.map((task) => task.id)) + + const errors: string[] = [] + const warnings: string[] = [] + + const cycleErrors = detectCycle(taskMap) + errors.push(...cycleErrors) + + for (const task of tasks) { + for (const depId of task.depends_on) { + if (!taskSet.has(depId)) { + errors.push(`Task "${task.id}" depends on non-existent task "${depId}"`) + } + } + + if (!task.acceptance_criteria || task.acceptance_criteria.trim() === "") { + warnings.push(`Task "${task.id}" is missing acceptance criteria`) + } + + const hasModuleLabel = task.labels.some((label) => label.startsWith("module:")) + const hasFileLabel = task.labels.some((label) => label.startsWith("file:")) + if (!hasModuleLabel && !hasFileLabel) { + warnings.push(`Task "${task.id}" has no conflict labels (no module: or file: prefix)`) + } + } + + return { + valid: errors.length === 0, + errors, + warnings, + } + }, + + validateGraphFromMap(taskMap: Map): string[] { + const errors: string[] = [] + const cycleErrors = detectCycle(taskMap) + errors.push(...cycleErrors) + + const taskSet = new Set(taskMap.keys()) + + for (const [taskId, task] of taskMap.entries()) { + for (const depId of task.depends_on) { + if (!taskSet.has(depId)) { + errors.push(`Task "${taskId}" depends on non-existent task "${depId}"`) + } + } + } + + return errors + }, +} \ No newline at end of file diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts index d622436b54fc..0d9aafda4eaf 100644 --- a/packages/opencode/src/tool/registry.ts +++ b/packages/opencode/src/tool/registry.ts @@ -29,6 +29,7 @@ import { ApplyPatchTool } from "./apply_patch" import { CheckTaskTool } from "./check_task" import { ListTasksTool } from "./list_tasks" import { CancelTaskTool } from "./cancel_task" +import { TaskctlTool } from "../tasks/tool" import { HashlineReadTool } from 
"./hashline_read" import { HashlineEditTool } from "./hashline_edit" @@ -130,9 +131,10 @@ export namespace ToolRegistry { WebSearchTool, CodeSearchTool, SkillTool, + TaskctlTool, ApplyPatchTool, ...(Flag.OPENCODE_EXPERIMENTAL_LSP_TOOL ? [LspTool] : []), -...(Flag.OPENCODE_EXPERIMENTAL_HASHLINE ? [HashlineReadTool, HashlineEditTool] : []), + ...(Flag.OPENCODE_EXPERIMENTAL_HASHLINE ? [HashlineReadTool, HashlineEditTool] : []), ...(config.experimental?.batch_tool === true ? [BatchTool] : []), ...(Flag.OPENCODE_EXPERIMENTAL_PLAN_MODE && Flag.OPENCODE_CLIENT === "cli" ? [PlanExitTool, PlanEnterTool] : []), ...custom, @@ -153,21 +155,21 @@ export namespace ToolRegistry { const tools = await all() const result = await Promise.all( tools -.filter((t) => { - // Enable websearch/codesearch for zen users OR via enable flag - if (t.id === "codesearch" || t.id === "websearch") { - return model.providerID === "opencode" || Flag.OPENCODE_ENABLE_EXA - } + .filter((t) => { + // Enable websearch/codesearch for zen users OR via enable flag + if (t.id === "codesearch" || t.id === "websearch") { + return model.providerID === "opencode" || Flag.OPENCODE_ENABLE_EXA + } - // use apply tool in same format as codex - const usePatch = - model.modelID.includes("gpt-") && !model.modelID.includes("oss") && !model.modelID.includes("gpt-4") - if (t.id === "apply_patch") return usePatch -if (t.id === "edit") return !usePatch && !Flag.OPENCODE_EXPERIMENTAL_HASHLINE - if (t.id === "write") return !usePatch + // use apply tool in same format as codex + const usePatch = + model.modelID.includes("gpt-") && !model.modelID.includes("oss") && !model.modelID.includes("gpt-4") + if (t.id === "apply_patch") return usePatch + if (t.id === "edit") return !usePatch && !Flag.OPENCODE_EXPERIMENTAL_HASHLINE + if (t.id === "write") return !usePatch - return true - }) + return true + }) .map(async (t) => { using _ = log.time(t.id) const tool = await t.init({ agent }) diff --git 
a/packages/opencode/test/tasks/commands.test.ts b/packages/opencode/test/tasks/commands.test.ts new file mode 100644 index 000000000000..9137b6995023 --- /dev/null +++ b/packages/opencode/test/tasks/commands.test.ts @@ -0,0 +1,328 @@ +import { describe, test, expect } from "bun:test" +import { Store } from "../../src/tasks/store" +import type { Task, Job } from "../../src/tasks/types" +import { Global } from "../../src/global" +import path from "path" +import fs from "fs/promises" + +async function withTestProject(fn: (projectId: string) => Promise) { + const projectId = `test-${Date.now()}` + const tasksDir = path.join(Global.Path.data, "tasks", projectId) + + try { + await fn(projectId) + } finally { + await fs.rm(tasksDir, { recursive: true, force: true }).catch(() => {}) + } +} + +describe("taskctl start (terminal state rejection)", () => { + test("rejects jobs with complete status", async () => { + await withTestProject(async (projectId) => { + const jobId = "job-complete" + const issueNumber = 123 + + await Store.createJob(projectId, { + id: jobId, + parent_issue: issueNumber, + status: "complete", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 3, + pm_session_id: "pm-test", + }) + + const job = await Store.getJob(projectId, jobId) + expect(job).not.toBeNull() + expect(job?.status).toBe("complete") + }) + }) + + test("rejects jobs with failed status", async () => { + await withTestProject(async (projectId) => { + const jobId = "job-failed" + const issueNumber = 124 + + await Store.createJob(projectId, { + id: jobId, + parent_issue: issueNumber, + status: "failed", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 3, + pm_session_id: "pm-test", + }) + + const job = await Store.getJob(projectId, jobId) + expect(job).not.toBeNull() + expect(job?.status).toBe("failed") + }) + }) + + test("rejects jobs with stopped status", async () => { + await withTestProject(async 
(projectId) => { + const jobId = "job-stopped" + const issueNumber = 125 + + await Store.createJob(projectId, { + id: jobId, + parent_issue: issueNumber, + status: "stopped", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 3, + pm_session_id: "pm-test", + }) + + const job = await Store.getJob(projectId, jobId) + expect(job).not.toBeNull() + expect(job?.status).toBe("stopped") + }) + }) +}) + +describe("taskctl stop", () => { + test("sets stopping flag", async () => { + await withTestProject(async (projectId) => { + const jobId = "job-test" + await Store.createJob(projectId, { + id: jobId, + parent_issue: 1, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 3, + pm_session_id: "pm-test", + }) + + const { readLockPid, removeLockFile } = await import("../../src/tasks/pulse") + + const job = await Store.getJob(projectId, jobId) + const existingPid = await readLockPid(jobId, projectId) + if (existingPid !== null) { + await removeLockFile(jobId, projectId) + } + + await Store.updateJob(projectId, jobId, { stopping: true }) + + const updated = await Store.getJob(projectId, jobId) + expect(updated?.stopping).toBe(true) + }) + }) + + test("rejects non-running jobs", async () => { + await withTestProject(async (projectId) => { + const jobId = "job-test" + await Store.createJob(projectId, { + id: jobId, + parent_issue: 1, + status: "complete", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 3, + pm_session_id: "pm-test", + }) + + const job = await Store.getJob(projectId, jobId) + expect(job?.status).toBe("complete") + expect(job?.stopping).toBe(false) + }) + }) +}) + +describe("taskctl inspect", () => { + test("shows full task history", async () => { + await withTestProject(async (projectId) => { + const now = new Date().toISOString() + const task: Task = { + id: "test-task", + title: "Test Task", + description: "Test 
description", + acceptance_criteria: "Test criteria", + parent_issue: 1, + job_id: "job-test", + status: "failed", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: "test failure", + comments: [ + { author: "developer", message: "Working on it", created_at: now }, + { author: "system", message: "Task failed", created_at: now }, + ], + pipeline: { + stage: "failed", + attempt: 3, + last_activity: now, + last_steering: null, + history: [ + { from: "idle", to: "developing", attempt: 1, timestamp: now, message: "Started" }, + { from: "developing", to: "reviewing", attempt: 1, timestamp: now, message: "Developer done" }, + ], + adversarial_verdict: { + verdict: "ISSUES_FOUND", + issues: [ + { location: "src/test.ts:42", severity: "HIGH", fix: "Add null check" }, + ], + summary: "Missing null check", + created_at: new Date().toISOString(), + }, + }, + } + + await Store.createTask(projectId, task) + + const retrieved = await Store.getTask(projectId, "test-task") + expect(retrieved).not.toBeNull() + expect(retrieved?.id).toBe("test-task") + expect(retrieved?.status).toBe("failed") + expect(retrieved?.close_reason).toBe("test failure") + expect(retrieved?.comments.length).toBe(2) + expect(retrieved?.pipeline.attempt).toBe(3) + expect(retrieved?.pipeline.history.length).toBe(2) + expect(retrieved?.pipeline.adversarial_verdict?.verdict).toBe("ISSUES_FOUND") + expect(retrieved?.pipeline.adversarial_verdict?.issues.length).toBe(1) + }) + }) +}) + +describe("taskctl override --skip", () => { + test("closes task with skip reason", async () => { + await withTestProject(async (projectId) => { + const now = new Date().toISOString() + const task: Task = { + id: "test-task", + title: "Test Task", + description: "Test description", + acceptance_criteria: "Test criteria", + parent_issue: 1, + job_id: "job-test", + status: "failed", + 
priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: "/tmp/test-worktree", + branch: "feature/test", + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "failed", + attempt: 3, + last_activity: now, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(projectId, task) + + await Store.updateTask(projectId, "test-task", { + status: "closed", + close_reason: "skipped by PM", + worktree: null, + branch: null, + assignee: null, + assignee_pid: null, + pipeline: { ...task.pipeline, stage: "done" }, + }, true) + + const retrieved = await Store.getTask(projectId, "test-task") + expect(retrieved).not.toBeNull() + expect(retrieved?.status).toBe("closed") + expect(retrieved?.close_reason).toBe("skipped by PM") + expect(retrieved?.worktree).toBeNull() + expect(retrieved?.branch).toBeNull() + expect(retrieved?.assignee).toBeNull() + expect(retrieved?.pipeline.stage).toBe("done") + }) + }) +}) + +describe("taskctl retry", () => { + test("resets task to open with cleared pipeline state", async () => { + await withTestProject(async (projectId) => { + const now = new Date().toISOString() + const task: Task = { + id: "test-task", + title: "Test Task", + description: "Test description", + acceptance_criteria: "Test criteria", + parent_issue: 1, + job_id: "job-test", + status: "failed", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: "session-test", + assignee_pid: 12345, + worktree: "/tmp/test-worktree", + branch: "feature/test", + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "failed", + attempt: 3, + last_activity: now, + last_steering: now, + history: [ + { from: "idle", to: "developing", attempt: 1, timestamp: now, message: "Started" }, + ], + adversarial_verdict: { + verdict: "CRITICAL_ISSUES_FOUND", + issues: [], + 
summary: "Critical issues", + created_at: new Date().toISOString(), + }, + }, + } + + await Store.createTask(projectId, task) + + await Store.updateTask(projectId, "test-task", { + status: "open", + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + pipeline: { + ...task.pipeline, + stage: "idle", + attempt: 1, + adversarial_verdict: null, + last_activity: null, + }, + }, true) + + const retrieved = await Store.getTask(projectId, "test-task") + expect(retrieved).not.toBeNull() + expect(retrieved?.status).toBe("open") + expect(retrieved?.assignee).toBeNull() + expect(retrieved?.worktree).toBeNull() + expect(retrieved?.pipeline.stage).toBe("idle") + expect(retrieved?.pipeline.attempt).toBe(1) + expect(retrieved?.pipeline.adversarial_verdict).toBeNull() + expect(retrieved?.pipeline.last_activity).toBeNull() + }) + }) +}) \ No newline at end of file diff --git a/packages/opencode/test/tasks/composer.test.ts b/packages/opencode/test/tasks/composer.test.ts new file mode 100644 index 000000000000..c794d93a8259 --- /dev/null +++ b/packages/opencode/test/tasks/composer.test.ts @@ -0,0 +1,855 @@ +import { test, beforeEach, afterEach } from "bun:test" +import { mkdir } from "fs/promises" +import { join } from "path" +import { Store } from "../../src/tasks/store" +import { Validation } from "../../src/tasks/validation" +import { runComposer } from "../../src/tasks/composer" +import type { Task } from "../../src/tasks/types" + +const tmpdir = `${Bun.env.TMPDIR}/taskctl-composer-test-${Date.now()}` + +beforeEach(async () => { + await mkdir(tmpdir, { recursive: true }) + process.env.OPENCODE_TEST_HOME = tmpdir +}) + +afterEach(async () => { + await Bun.$`rm -rf ${tmpdir}`.catch(() => {}) + delete process.env.OPENCODE_TEST_HOME +}) + +test("validateGraphFromMap detects circular dependencies", async () => { + const tasksMap = new Map([ + ["task-1", { depends_on: ["task-2"] }], + ["task-2", { depends_on: ["task-3"] }], + ["task-3", { depends_on: ["task-1"] }], 
+ ]) + + const errors = Validation.validateGraphFromMap(tasksMap) + + if (errors.length === 0) { + throw new Error("Expected circular dependency to be detected") + } + + const circularError = errors.find((e) => e.includes("Circular dependency")) + if (!circularError) { + throw new Error("Expected circular dependency error message") + } +}) + +test("validateGraphFromMap detects missing dependencies", async () => { + const tasksMap = new Map([ + ["task-1", { depends_on: ["task-2"] }], + ["task-2", { depends_on: ["task-missing"] }], + ["task-3", { depends_on: [] }], + ]) + + const errors = Validation.validateGraphFromMap(tasksMap) + + if (errors.length === 0) { + throw new Error("Expected missing dependency error") + } + + const missingError = errors.find((e) => e.includes("non-existent")) + if (!missingError) { + throw new Error("Expected missing dependency error message") + } +}) + +test("validateGraphFromMap passes for valid graph", async () => { + const tasksMap = new Map([ + ["task-1", { depends_on: [] }], + ["task-2", { depends_on: ["task-1"] }], + ["task-3", { depends_on: ["task-1"] }], + ["task-4", { depends_on: ["task-2", "task-3"] }], + ]) + + const errors = Validation.validateGraphFromMap(tasksMap) + + if (errors.length > 0) { + throw new Error(`Expected no errors, got: ${errors.join(", ")}`) + } +}) + +test("runComposer returns needs_clarification from spawnFn", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "needs_clarification", + questions: [{ id: 1, question: "What specific behaviour should change?" 
}], + }) + + const result = await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + + if (result.status === "ready") { + throw new Error("Expected needs_clarification status") + } + + if (result.questions.length !== 1) { + throw new Error(`Expected 1 question, got ${result.questions.length}`) + } +}) + +test("runComposer returns needs_clarification for invalid graph", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "First task", + description: "Do this first", + acceptance_criteria: "Done", + task_type: "implementation" as const, + labels: ["module:test"], + depends_on: ["third-task"], + priority: 0, + }, + { + title: "Second task", + description: "Do this second", + acceptance_criteria: "Done", + task_type: "implementation" as const, + labels: ["module:test"], + depends_on: ["third-task"], + priority: 0, + }, + { + title: "Third task", + description: "Do this third", + acceptance_criteria: "Done", + task_type: "implementation" as const, + labels: ["module:test"], + depends_on: ["first-task"], + priority: 0, + }, + ], + }) + + const result = await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + + if (result.status === "ready") { + throw new Error("Expected needs_clarification for invalid graph") + } +}) + +test("runComposer throws on dependency reference outside batch", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "First task", + description: "Desc", + acceptance_criteria: "Criteria", + task_type: "implementation" as const, + labels: [], + depends_on: ["non-existent-task-external"], + priority: 2, + }, + ], + }) + + let threw = false + try { 
+ await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + if (!(e as Error).message.includes("not defined in this batch")) { + throw new Error(`Expected 'not defined in this batch' error, got: ${(e as Error).message}`) + } + } + + if (!threw) { + throw new Error("Expected runComposer to throw for external dependency reference") + } +}) + +test("runComposer creates tasks for valid decomposition", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "First task", + description: "Do this first", + acceptance_criteria: "Tests pass", + task_type: "implementation" as const, + labels: ["module:test"], + depends_on: [], + priority: 0, + }, + { + title: "Second task", + description: "Do this second", + acceptance_criteria: "Tests pass", + task_type: "implementation" as const, + labels: ["module:test"], + depends_on: ["first-task"], + priority: 1, + }, + ], + }) + + const result = await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + + if (result.status !== "ready") { + throw new Error(`Expected ready status, got ${result.status}`) + } + + if (result.taskCount !== 2) { + throw new Error(`Expected 2 tasks, got ${result.taskCount}`) + } + + const task1 = await Store.getTask("test-project", "first-task") + if (!task1) { + throw new Error("Expected task 'first-task' to be created") + } + + if (task1.parent_issue !== 123) { + throw new Error(`Expected parent_issue to be 123, got ${task1.parent_issue}`) + } + + if (task1.job_id !== "job-1") { + throw new Error(`Expected job_id to be job-1, got ${task1.job_id}`) + } +}) + +test("runComposer handles slug collisions", async () => { + const 
existingTask: Task = { + id: "add-oauth2-config-schema", + title: "Add OAuth2 Config Schema", + description: "Existing task", + acceptance_criteria: "Tests pass", + parent_issue: 122, + job_id: "job-0", + status: "open", + priority: 0, + task_type: "implementation", + labels: ["module:test"], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: "2024-01-01T00:00:00.000Z", + updated_at: "2024-01-01T00:00:00.000Z", + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask("test-project", existingTask) + + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "Add OAuth2 Config Schema", + description: "Different task with same title", + acceptance_criteria: "Tests pass", + task_type: "implementation" as const, + labels: ["module:test"], + depends_on: [], + priority: 0, + }, + ], + }) + + const result = await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + + if (result.status !== "ready") { + throw new Error(`Expected ready status, got ${result.status}`) + } + + if (result.taskCount !== 1) { + throw new Error(`Expected 1 task, got ${result.taskCount}`) + } + + const existing = await Store.getTask("test-project", "add-oauth2-config-schema") + if (!existing) { + throw new Error("Expected original task to still exist") + } + + const newTask = await Store.getTask("test-project", "add-oauth2-config-schema-2") + if (!newTask) { + throw new Error("Expected new task to be created with slug '-2'") + } + + if (newTask.parent_issue !== 123) { + throw new Error(`Expected new task to have parent_issue 123, got ${newTask.parent_issue}`) + } +}) + +test("runComposer throws on invalid status", 
async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "invalid_status", + tasks: [], + }) + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (_e) { + threw = true + } + + if (!threw) { + throw new Error("Expected runComposer to throw on invalid status") + } +}) + +test("runComposer throws on empty spawn response", async () => { + const mockSpawn = async () => undefined + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (_e) { + threw = true + } + + if (!threw) { + throw new Error("Expected runComposer to throw on empty response") + } +}) + +test("runComposer throws on invalid JSON", async () => { + const mockSpawn = async () => "not valid json" + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + } + + if (!threw) { + throw new Error("Expected runComposer to throw on invalid JSON") + } +}) + +test("runComposer generates sequential slugs for duplicate titles", async () => { + await Store.createTask("test-project", { + id: "test-task", + title: "Test task", + description: "Existing", + acceptance_criteria: "Criteria", + parent_issue: 1, + job_id: "job-0", + status: "open", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: "2024-01-01T00:00:00.000Z", + updated_at: "2024-01-01T00:00:00.000Z", + close_reason: null, + comments: [], + 
pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + }) + + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "Test task", + description: "First", + acceptance_criteria: "First done", + task_type: "implementation" as const, + labels: [], + depends_on: [], + priority: 2, + }, + { + title: "Test task", + description: "Second", + acceptance_criteria: "Second done", + task_type: "implementation" as const, + labels: [], + depends_on: [], + priority: 2, + }, + ], + }) + + const result = await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + + if (result.status !== "ready") { + throw new Error(`Expected ready status, got ${result.status}`) + } + + if (result.taskCount !== 2) { + throw new Error(`Expected 2 tasks, got ${result.taskCount}`) + } + + const originalTask = await Store.getTask("test-project", "test-task") + const task2 = await Store.getTask("test-project", "test-task-2") + const task3 = await Store.getTask("test-project", "test-task-3") + + if (!originalTask) { + throw new Error("Expected original task 'test-task' to exist") + } + + if (!task2) { + throw new Error("Expected second task 'test-task-2' to exist") + } + + if (!task3) { + throw new Error("Expected third task 'test-task-3' to exist") + } + + if (originalTask.description !== "Existing") { + throw new Error(`Expected original task description 'Existing', got '${originalTask.description}'`) + } + + if (task2.description !== "First") { + throw new Error(`Expected second task description 'First', got '${task2.description}'`) + } + + if (task3.description !== "Second") { + throw new Error(`Expected third task description 'Second', got '${task3.description}'`) + } +}) + +test("runComposer validates and rejects malformed task 
structures", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "", + description: "Bad task", + acceptance_criteria: "Criteria", + }, + ], + }) + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + if (!(e as Error).message.includes("validation failed")) { + throw new Error(`Expected 'validation failed' error, got: ${(e as Error).message}`) + } + } + + if (!threw) { + throw new Error("Expected runComposer to throw validation error for malformed task") + } +}) + +test("runComposer validates task types", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "Bad task", + description: "Desc", + acceptance_criteria: "Criteria", + task_type: "invalid_type" as any, + priority: 2, + }, + ], + }) + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + } + + if (!threw) { + throw new Error("Expected runComposer to throw validation error for invalid task type") + } +}) + +test("runComposer validates priority range", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "Bad task", + description: "Desc", + acceptance_criteria: "Criteria", + task_type: "implementation" as const, + priority: 99, + }, + ], + }) + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + } + + if 
(!threw) { + throw new Error("Expected runComposer to throw validation error for invalid priority") + } +}) + +test("runComposer filters empty strings from depends_on", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "First task", + description: "Desc", + acceptance_criteria: "Criteria", + task_type: "implementation" as const, + depends_on: ["", "valid-id", " "], + priority: 2, + }, + ], + }) + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + if (!(e as Error).message.includes("validation failed")) { + throw new Error(`Expected validation failed for empty depends_on, got: ${(e as Error).message}`) + } + } + + if (!threw) { + throw new Error("Expected runComposer to throw validation error for empty depends_on elements") + } +}) + +test("runComposer handles unicode in titles", async () => { + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "测试任务 Unicode", + description: "Test description", + acceptance_criteria: "Criteria", + task_type: "implementation" as const, + labels: [], + depends_on: [], + priority: 2, + }, + ], + }) + + const result = await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + + if (result.status !== "ready") { + throw new Error(`Expected ready status, got ${result.status}`) + } + + const task = await Store.getTask("test-project", "unicode") + if (!task) { + throw new Error("Expected task with unicode-converted slug to exist") + } + + if (task.title !== "测试任务 Unicode") { + throw new Error(`Expected title '测试任务 Unicode', got '${task.title}'`) + } +}) + +test("runComposer rejects 
titles over 200 characters", async () => { + const longTitle = "a".repeat(201) + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: longTitle, + description: "Desc", + acceptance_criteria: "Criteria", + task_type: "implementation" as const, + priority: 2, + }, + ], + }) + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + } + + if (!threw) { + throw new Error("Expected runComposer to throw validation error for title over 200 chars") + } +}) + +test("runComposer rolls back tasks on partial creation failure", async () => { + await Store.createTask("test-project", { + id: "existing-task", + title: "Existing", + description: "Desc", + acceptance_criteria: "Criteria", + parent_issue: 1, + job_id: "job-0", + status: "open", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: "2024-01-01T00:00:00.000Z", + updated_at: "2024-01-01T00:00:00.000Z", + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + }) + + const mockSpawn = async () => + JSON.stringify({ + status: "ready", + tasks: [ + { + title: "Task 1", + description: "Desc 1", + acceptance_criteria: "Criteria 1", + task_type: "implementation" as const, + labels: [], + depends_on: [], + priority: 2, + }, + { + title: "Task 2", + description: "Desc 2", + acceptance_criteria: "Criteria 2", + task_type: "implementation" as const, + labels: [], + depends_on: [], + priority: 2, + }, + ], + }) + + const originalCreate = Store.createTask.bind(Store) + let callCount = 0 + Store.createTask = async function (projectId: string, task: Task): 
Promise<void> { + callCount++ + if (callCount === 2) { + throw new Error("Simulated failure on second task") + } + return originalCreate(projectId, task) + } + + let threw = false + try { + await runComposer( + { + jobId: "job-1", + projectId: "test-project", + pmSessionId: "session-1", + issueNumber: 123, + issueTitle: "Add feature", + issueBody: "Please add a feature.", + }, + mockSpawn, + ) + } catch (e) { + threw = true + if (!(e as Error).message.includes("Simulated failure")) { + throw new Error(`Expected simulated failure, got: ${(e as Error).message}`) + } + } + + if (!threw) { + throw new Error("Expected runComposer to throw on partial creation failure") + } + + const tasks = await Store.listTasks("test-project") + const tasksWithRollback = tasks.filter((t) => t.close_reason?.includes("rollback")) + if (tasksWithRollback.length === 0) { + throw new Error(`Expected at least one task to have rollback reason`) + } + + Store.createTask = originalCreate +}) \ No newline at end of file diff --git a/packages/opencode/test/tasks/pipeline.test.ts b/packages/opencode/test/tasks/pipeline.test.ts new file mode 100644 index 000000000000..a0c84967c058 --- /dev/null +++ b/packages/opencode/test/tasks/pipeline.test.ts @@ -0,0 +1,209 @@ +import { beforeEach, describe, expect, test } from "bun:test" +import { Instance } from "../../src/project/instance" +import { Store } from "../../src/tasks/store" +import type { Task, Job } from "../../src/tasks/types" + +describe("taskctl pipeline: verdict data validation", () => { + let projectId: string + let testJob: Job + let testTask: Task + + beforeEach(async () => { + const testDir = `/tmp/taskctl-pipeline-test-${Date.now()}-${Math.random().toString(36).slice(2)}` + await Instance.provide({ + directory: testDir, + fn: async () => { + projectId = Instance.project.id + testJob = { + id: `job-${Date.now()}`, + parent_issue: 205, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + 
max_workers: 3, + pm_session_id: "ses_0000001234567890abctest", + } + + testTask = { + id: `tsk_${Date.now()}${Math.random().toString(36).slice(2, 10)}`, + title: "Test task", + description: "Test description", + acceptance_criteria: "Test criteria", + parent_issue: 205, + job_id: testJob.id, + status: "open", + priority: 2, + task_type: "implementation", + labels: ["module:taskctl"], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: "/tmp/test-worktree", + branch: "test-branch", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createJob(projectId, testJob) + await Store.createTask(projectId, testTask) + }, + }) + }) + + test("APPROVED verdict stores correctly", async () => { + const verdictData = { + verdict: "APPROVED" as const, + summary: "Code looks good", + issues: [], + created_at: new Date().toISOString(), + } + + await Store.updateTask(projectId, testTask.id, { + status: "review", + pipeline: { + ...testTask.pipeline, + adversarial_verdict: verdictData, + }, + }, true) + + const updated = await Store.getTask(projectId, testTask.id) + expect(updated?.pipeline.adversarial_verdict).toEqual(verdictData) + expect(updated?.status).toBe("review") + }) + + test("ISSUES_FOUND verdict stores structured feedback", async () => { + const verdictData = { + verdict: "ISSUES_FOUND" as const, + summary: "Null check needed", + issues: [ + { + location: "src/foo.ts:42", + severity: "HIGH" as const, + fix: "Add null check before calling user.profile", + }, + ], + created_at: new Date().toISOString(), + } + + await Store.updateTask(projectId, testTask.id, { + status: "review", + pipeline: { + ...testTask.pipeline, + attempt: 1, + adversarial_verdict: verdictData, + }, + }, true) + + const updated = await Store.getTask(projectId, 
testTask.id) + expect(updated?.pipeline.adversarial_verdict?.verdict).toBe("ISSUES_FOUND") + expect(updated?.pipeline.adversarial_verdict?.issues).toHaveLength(1) + expect(updated?.pipeline.adversarial_verdict?.issues[0]?.location).toBe("src/foo.ts:42") + expect(updated?.pipeline.adversarial_verdict?.issues[0]?.severity).toBe("HIGH") + expect(updated?.pipeline.attempt).toBe(1) + }) + + test("CRITICAL_ISSUES_FOUND verdict stores severity correctly", async () => { + const verdictData = { + verdict: "CRITICAL_ISSUES_FOUND" as const, + summary: "Security vulnerability", + issues: [ + { + location: "src/auth.ts:12", + severity: "CRITICAL" as const, + fix: "Add input validation on password field", + }, + ], + created_at: new Date().toISOString(), + } + + await Store.updateTask(projectId, testTask.id, { + status: "review", + pipeline: { + ...testTask.pipeline, + attempt: 2, + adversarial_verdict: verdictData, + }, + }, true) + + const updated = await Store.getTask(projectId, testTask.id) + expect(updated?.pipeline.adversarial_verdict?.verdict).toBe("CRITICAL_ISSUES_FOUND") + expect(updated?.pipeline.adversarial_verdict?.issues[0]?.severity).toBe("CRITICAL") + expect(updated?.pipeline.attempt).toBe(2) + }) + + test("multiple issues stored in single verdict", async () => { + const verdictData = { + verdict: "ISSUES_FOUND" as const, + summary: "Multiple issues found", + issues: [ + { + location: "src/foo.ts:42", + severity: "HIGH" as const, + fix: "Add null check", + }, + { + location: "src/bar.ts:15", + severity: "MEDIUM" as const, + fix: "Add error handling", + }, + { + location: "src/baz.ts:8", + severity: "LOW" as const, + fix: "Add JSDoc comment", + }, + ], + created_at: new Date().toISOString(), + } + + await Store.updateTask(projectId, testTask.id, { + status: "review", + pipeline: { + ...testTask.pipeline, + adversarial_verdict: verdictData, + }, + }, true) + + const updated = await Store.getTask(projectId, testTask.id) + 
expect(updated?.pipeline.adversarial_verdict?.issues).toHaveLength(3) + expect(updated?.pipeline.adversarial_verdict?.issues[0]?.location).toBe("src/foo.ts:42") + expect(updated?.pipeline.adversarial_verdict?.issues[1]?.location).toBe("src/bar.ts:15") + expect(updated?.pipeline.adversarial_verdict?.issues[2]?.location).toBe("src/baz.ts:8") + }) + + test("all severity levels stored correctly", async () => { + const verdictData = { + verdict: "ISSUES_FOUND" as const, + summary: "All severity levels", + issues: [ + { location: "a.ts:1", severity: "CRITICAL" as const, fix: "fix critical" }, + { location: "b.ts:2", severity: "HIGH" as const, fix: "fix high" }, + { location: "c.ts:3", severity: "MEDIUM" as const, fix: "fix medium" }, + { location: "d.ts:4", severity: "LOW" as const, fix: "fix low" }, + ], + created_at: new Date().toISOString(), + } + + await Store.updateTask(projectId, testTask.id, { + status: "review", + pipeline: { + ...testTask.pipeline, + adversarial_verdict: verdictData, + }, + }, true) + + const updated = await Store.getTask(projectId, testTask.id) + const severities = updated?.pipeline.adversarial_verdict?.issues.map(i => i.severity) + expect(severities).toEqual(["CRITICAL", "HIGH", "MEDIUM", "LOW"]) + }) +}) diff --git a/packages/opencode/test/tasks/pulse.test.ts b/packages/opencode/test/tasks/pulse.test.ts new file mode 100644 index 000000000000..1869b229e4de --- /dev/null +++ b/packages/opencode/test/tasks/pulse.test.ts @@ -0,0 +1,384 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test" +import { Store } from "../../src/tasks/store" +import { Global } from "../../src/global" +import { BackgroundTaskEvent } from "../../src/session/async-tasks" +import { Bus } from "../../src/bus" +import path from "path" +import fs from "fs/promises" + +const TEST_PROJECT_ID = "test-pulse-project" +const TEST_JOB_ID = "job-test-123" +const TEST_PM_SESSION_ID = "pm-session-test" + +describe("pulse.ts", () => { + let originalDataPath: 
string + let testDataDir: string + + beforeEach(async () => { + originalDataPath = Global.Path.data + testDataDir = path.join("/tmp", "opencode-pulse-test-" + Math.random().toString(36).slice(2)) + await fs.mkdir(testDataDir, { recursive: true }) + + process.env.OPENCODE_TEST_HOME = testDataDir + await Global.init() + }) + + afterEach(async () => { + const tasksDir = path.join(Global.Path.data, "tasks", TEST_PROJECT_ID) + const lockPath = path.join(tasksDir, `job-${TEST_JOB_ID}.lock`) + await fs.unlink(lockPath).catch(() => {}) + + await fs.rm(testDataDir, { recursive: true, force: true }).catch(() => {}) + if (originalDataPath) { + delete process.env.OPENCODE_TEST_HOME + } + }) + + describe("lock file management", () => { + test("lock file written on start, removed on completion", async () => { + const { startPulse, readLockPid } = await import("../../src/tasks/pulse") + + const mockTask: any = { + id: "task-1", + job_id: TEST_JOB_ID, + status: "closed", + priority: 2, + task_type: "implementation", + parent_issue: 123, + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + title: "Test task", + description: "Test description", + acceptance_criteria: "Test criteria", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: null, + comments: [], + pipeline: { + stage: "done", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createJob(TEST_PROJECT_ID, { + id: TEST_JOB_ID, + parent_issue: 123, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 1, + pm_session_id: TEST_PM_SESSION_ID, + }) + + await Store.createTask(TEST_PROJECT_ID, mockTask) + + const interval = startPulse(TEST_JOB_ID, TEST_PROJECT_ID, TEST_PM_SESSION_ID) + + await new Promise((resolve) => setTimeout(resolve, 100)) + + const lockPid = await readLockPid(TEST_JOB_ID, 
TEST_PROJECT_ID) + expect(lockPid).toBe(process.pid) + + clearInterval(interval) + }) + }) + + describe("isPidAlive", () => { + test("isPidAlive returns true for current process", async () => { + const { isPidAlive } = await import("../../src/tasks/pulse") + expect(isPidAlive(process.pid)).toBe(true) + }) + + test("isPidAlive returns false for dead PID", async () => { + const { isPidAlive } = await import("../../src/tasks/pulse") + expect(isPidAlive(9999999)).toBe(false) + }) + }) + + describe("readLockPid and lock file paths", () => { + test("readLockPid returns null when lock file does not exist", async () => { + const { readLockPid } = await import("../../src/tasks/pulse") + + const lockPid = await readLockPid(TEST_JOB_ID, TEST_PROJECT_ID) + expect(lockPid).toBeNull() + }) + }) + + describe("lock file removal", () => { + test("removeLockFile removes lock file", async () => { + const { writeLockFile, readLockPid, removeLockFile } = await import("../../src/tasks/pulse") + + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, process.pid) + expect(await readLockPid(TEST_JOB_ID, TEST_PROJECT_ID)).toBe(process.pid) + + await removeLockFile(TEST_JOB_ID, TEST_PROJECT_ID) + expect(await readLockPid(TEST_JOB_ID, TEST_PROJECT_ID)).toBeNull() + }) + + test("removeLockFile is idempotent", async () => { + const { removeLockFile, readLockPid } = await import("../../src/tasks/pulse") + + await removeLockFile(TEST_JOB_ID, TEST_PROJECT_ID) + await removeLockFile(TEST_JOB_ID, TEST_PROJECT_ID) + await removeLockFile(TEST_JOB_ID, TEST_PROJECT_ID) + expect(await readLockPid(TEST_JOB_ID, TEST_PROJECT_ID)).toBeNull() + }) + }) + + describe("resurrectionScan", () => { + test("resurrects in_progress task with dead session", async () => { + const { resurrectionScan, writeLockFile } = await import("../../src/tasks/pulse") + + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, process.pid) + + await Store.createJob(TEST_PROJECT_ID, { + id: TEST_JOB_ID, + parent_issue: 123, + status: 
"running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 1, + pm_session_id: TEST_PM_SESSION_ID, + }) + + const task: any = { + id: "task-1", + job_id: TEST_JOB_ID, + status: "in_progress", + priority: 1, + task_type: "implementation", + parent_issue: 123, + labels: [], + depends_on: [], + assignee: "dead-session-id", + assignee_pid: 12345, + worktree: null, + branch: null, + title: "Test task", + description: "Test", + acceptance_criteria: "Test", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: null, + comments: [], + pipeline: { + stage: "working", + attempt: 0, + last_activity: new Date().toISOString(), + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(TEST_PROJECT_ID, task) + await resurrectionScan(TEST_JOB_ID, TEST_PROJECT_ID) + + const updated = await Store.getTask(TEST_PROJECT_ID, "task-1") + expect(updated?.status).toBe("open") + expect(updated?.assignee).toBeNull() + expect(updated?.assignee_pid).toBeNull() + expect(updated?.worktree).toBeNull() + expect(updated?.branch).toBeNull() + + const comments = updated?.comments || [] + expect(comments.length).toBeGreaterThan(0) + const lastComment = comments[comments.length - 1] + expect(lastComment?.author).toBe("system") + expect(lastComment?.message).toContain("Resurrected") + expect(lastComment?.message).toContain("not found on Pulse restart") + }) + + test("resurrectionScan skips open tasks", async () => { + const { resurrectionScan, writeLockFile } = await import("../../src/tasks/pulse") + + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, process.pid) + + await Store.createJob(TEST_PROJECT_ID, { + id: TEST_JOB_ID, + parent_issue: 123, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 1, + pm_session_id: TEST_PM_SESSION_ID, + }) + + const task: any = { + id: "task-1", + job_id: TEST_JOB_ID, + 
status: "open", + priority: 1, + task_type: "implementation", + parent_issue: 123, + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + title: "Test task", + description: "Test", + acceptance_criteria: "Test", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: null, + comments: [], + pipeline: { + stage: "todo", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(TEST_PROJECT_ID, task) + await resurrectionScan(TEST_JOB_ID, TEST_PROJECT_ID) + + const updated = await Store.getTask(TEST_PROJECT_ID, "task-1") + expect(updated?.status).toBe("open") + }) + + test("resurrectionScan handles review status", async () => { + const { resurrectionScan, writeLockFile } = await import("../../src/tasks/pulse") + + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, process.pid) + + await Store.createJob(TEST_PROJECT_ID, { + id: TEST_JOB_ID, + parent_issue: 123, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 1, + pm_session_id: TEST_PM_SESSION_ID, + }) + + const task: any = { + id: "task-1", + job_id: TEST_JOB_ID, + status: "review", + priority: 1, + task_type: "implementation", + parent_issue: 123, + labels: [], + depends_on: [], + assignee: "dead-session-id", + assignee_pid: 12345, + worktree: null, + branch: null, + title: "Test task", + description: "Test", + acceptance_criteria: "Test", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: null, + comments: [], + pipeline: { + stage: "reviewing", + attempt: 0, + last_activity: new Date().toISOString(), + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(TEST_PROJECT_ID, task) + await resurrectionScan(TEST_JOB_ID, TEST_PROJECT_ID) + + const updated = await Store.getTask(TEST_PROJECT_ID, 
"task-1") + expect(updated?.status).toBe("open") + expect(updated?.assignee).toBeNull() + }) + }) + + describe("checkCompletion", () => { + test("checkCompletion marks job complete and clears interval when all tasks closed", async () => { + const { writeLockFile, checkCompletion } = await import("../../src/tasks/pulse") + const { Instance } = await import("../../src/project/instance") + + await Instance.provide({ + directory: testDataDir, + fn: async () => { + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, process.pid) + + await Store.createJob(TEST_PROJECT_ID, { + id: TEST_JOB_ID, + parent_issue: 123, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 1, + pm_session_id: TEST_PM_SESSION_ID, + }) + + const task: any = { + id: "task-1", + job_id: TEST_JOB_ID, + status: "closed", + priority: 1, + task_type: "implementation", + parent_issue: 123, + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + title: "Test task", + description: "Test", + acceptance_criteria: "Test", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: "done", + comments: [], + pipeline: { + stage: "done", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(TEST_PROJECT_ID, task) + + const interval = setInterval(() => {}, 1000) + await checkCompletion(TEST_JOB_ID, TEST_PROJECT_ID, TEST_PM_SESSION_ID, interval) + + const job = await Store.getJob(TEST_PROJECT_ID, TEST_JOB_ID) + expect(job?.status).toBe("complete") + }, + }) + }) + }) + + describe("lock file integrity", () => { + test("writeLockFile overwrites existing lock", async () => { + const { writeLockFile, readLockPid } = await import("../../src/tasks/pulse") + + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, 1000) + await writeLockFile(TEST_JOB_ID, TEST_PROJECT_ID, 2000) + + const pid = 
await readLockPid(TEST_JOB_ID, TEST_PROJECT_ID) + expect(pid).toBe(2000) + }) + }) +}) \ No newline at end of file diff --git a/packages/opencode/test/tasks/scheduler.test.ts b/packages/opencode/test/tasks/scheduler.test.ts new file mode 100644 index 000000000000..89187d60b242 --- /dev/null +++ b/packages/opencode/test/tasks/scheduler.test.ts @@ -0,0 +1,181 @@ +import { describe, expect, test } from "bun:test" +import { Store } from "../../src/tasks/store" +import { Scheduler } from "../../src/tasks/scheduler" +import type { Task } from "../../src/tasks/types" +import { tmpdir } from "../fixture/fixture" +import { randomUUID } from "crypto" + +function getProjectId(): string { + return `test-scheduler-${randomUUID()}` +} + +describe("scheduler: getNextTasks", () => { + function createTask( + id: string, + overrides: Partial<Task> = {}, + ): Task { + const now = new Date().toISOString() + return { + id, + title: `Task ${id}`, + description: `Description for ${id}`, + acceptance_criteria: `Criteria for ${id}`, + parent_issue: 1, + job_id: "job-1", + status: "open", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + ...overrides, + } + } + + test("returns tasks in priority order", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("high-priority", { priority: 0 })) + await Store.createTask(projectId, createTask("medium-priority", { priority: 2 })) + await Store.createTask(projectId, createTask("low-priority", { priority: 4 })) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + expect(tasks).toHaveLength(3) + if (!tasks[0] || !tasks[1] || !tasks[2]) throw new 
Error("Missing tasks") + expect(tasks[0].id).toBe("high-priority") + expect(tasks[1].id).toBe("medium-priority") + expect(tasks[2].id).toBe("low-priority") + }) + + test("excludes tasks with unmet depends_on", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("base-task")) + await Store.createTask(projectId, createTask("dependent-task", { depends_on: ["base-task"] })) + await Store.createTask(projectId, createTask("ready-task")) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + const taskIds = tasks.map((t) => t.id) + expect(taskIds).toContain("base-task") + expect(taskIds).toContain("ready-task") + expect(taskIds).not.toContain("dependent-task") + }) + + test("includes dependent task when its dependency is closed", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("base-task")) + await Store.createTask(projectId, createTask("dependent-task", { depends_on: ["base-task"] })) + + await Store.updateTask(projectId, "base-task", { status: "closed" }) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + const taskIds = tasks.map((t) => t.id) + expect(taskIds).toContain("dependent-task") + }) + + test("excludes tasks with conflicting module labels", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask( + projectId, + createTask("task-a", { status: "open", labels: ["module:auth"] }), + ) + await Store.createTask( + projectId, + createTask("task-b", { status: "in_progress", labels: ["module:auth"] }), + ) + await Store.createTask( + projectId, + createTask("task-c", { status: "open", labels: ["module:db"] }), + ) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + const taskIds = tasks.map((t) => t.id) + expect(taskIds).toContain("task-a") + expect(taskIds).not.toContain("task-b") + expect(taskIds).toContain("task-c") 
+ }) + + test("excludes tasks with conflicting file labels", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask( + projectId, + createTask("task-a", { status: "open", labels: ["file:src/auth.ts"] }), + ) + await Store.createTask( + projectId, + createTask("task-b", { status: "in_progress", labels: ["file:src/auth.ts"] }), + ) + await Store.createTask( + projectId, + createTask("task-c", { status: "open", labels: ["file:src/db.ts"] }), + ) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + const taskIds = tasks.map((t) => t.id) + expect(taskIds).toContain("task-a") + expect(taskIds).not.toContain("task-b") + expect(taskIds).toContain("task-c") + }) + + test("sorts alphabetically within same priority", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("zebra", { priority: 1 })) + await Store.createTask(projectId, createTask("apple", { priority: 1 })) + await Store.createTask(projectId, createTask("banana", { priority: 1 })) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + expect(tasks).toHaveLength(3) + if (!tasks[0] || !tasks[1] || !tasks[2]) throw new Error("Missing tasks") + expect(tasks[0].id).toBe("apple") + expect(tasks[1].id).toBe("banana") + expect(tasks[2].id).toBe("zebra") + }) + + test("returns empty array when all tasks are in_progress", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-1", { status: "in_progress" })) + await Store.createTask(projectId, createTask("task-2", { status: "in_progress" })) + + const tasks = await Scheduler.getNextTasks(projectId, 10) + expect(tasks.length).toBe(0) + }) + + test("respects count parameter", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-1")) + await 
Store.createTask(projectId, createTask("task-2")) + await Store.createTask(projectId, createTask("task-3")) + + const tasks = await Scheduler.getNextTasks(projectId, 2) + expect(tasks.length).toBe(2) + }) +}) \ No newline at end of file diff --git a/packages/opencode/test/tasks/steering.test.ts b/packages/opencode/test/tasks/steering.test.ts new file mode 100644 index 000000000000..c33fe37c2ec9 --- /dev/null +++ b/packages/opencode/test/tasks/steering.test.ts @@ -0,0 +1,356 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test" +import { Store } from "../../src/tasks/store" +import { Global } from "../../src/global" +import path from "path" +import fs from "fs/promises" + +const TEST_PROJECT_ID = "test-steering-project" +const TEST_JOB_ID = "job-steering-123" +const TEST_PM_SESSION_ID = "pm-session-test" + +describe("steering.ts", () => { + let originalDataPath: string + let testDataDir: string + + beforeEach(async () => { + originalDataPath = Global.Path.data + testDataDir = path.join("/tmp", "opencode-steering-test-" + Math.random().toString(36).slice(2)) + await fs.mkdir(testDataDir, { recursive: true }) + + process.env.OPENCODE_TEST_HOME = testDataDir + await Global.init() + }) + + afterEach(async () => { + await fs.rm(testDataDir, { recursive: true, force: true }).catch(() => {}) + if (originalDataPath) { + delete process.env.OPENCODE_TEST_HOME + } + }) + + const createMockTask = (overrides?: any) => ({ + id: "task-steering-1", + job_id: TEST_JOB_ID, + status: "in_progress" as const, + priority: 2 as const, + task_type: "implementation" as const, + parent_issue: 123, + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + title: "Test task", + description: "Test description", + acceptance_criteria: "Test criteria", + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + close_reason: null, + comments: [], + pipeline: { + stage: "developing" as const, + attempt: 
0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + ...overrides, + }) + + describe("steering timer guard", () => { + test("steering skipped if evaluated recently (< 15 minutes)", async () => { + const now = new Date() + const fiveMinutesAgo = new Date(now.getTime() - 5 * 60 * 1000).toISOString() + + const task = createMockTask({ + pipeline: { + stage: "developing", + attempt: 0, + last_activity: now.toISOString(), + last_steering: fiveMinutesAgo, + history: [], + adversarial_verdict: null, + }, + status: "in_progress", + assignee: "test-session-id", + }) + + await Store.createJob(TEST_PROJECT_ID, { + id: TEST_JOB_ID, + parent_issue: 123, + status: "running", + created_at: new Date().toISOString(), + stopping: false, + pulse_pid: null, + max_workers: 1, + pm_session_id: TEST_PM_SESSION_ID, + }) + + await Store.createTask(TEST_PROJECT_ID, task) + + const stored = await Store.getTask(TEST_PROJECT_ID, task.id) + expect(stored?.pipeline.last_steering).toBe(fiveMinutesAgo) + + const minutesSince = (now.getTime() - new Date(fiveMinutesAgo).getTime()) / 60_000 + expect(minutesSince).toBeLessThan(15) + }) + + test("steering eligible if 15+ minutes since last evaluation", async () => { + const now = new Date() + const sixteenMinutesAgo = new Date(now.getTime() - 16 * 60 * 1000).toISOString() + + const task = createMockTask({ + pipeline: { + stage: "developing", + attempt: 0, + last_activity: now.toISOString(), + last_steering: sixteenMinutesAgo, + history: [], + adversarial_verdict: null, + }, + status: "in_progress", + assignee: "test-session-id", + }) + + const minutesSince = (now.getTime() - new Date(sixteenMinutesAgo).getTime()) / 60_000 + expect(minutesSince).toBeGreaterThanOrEqual(15) + }) + + test("steering skipped for reviewing stage tasks", async () => { + const task = createMockTask({ + pipeline: { + stage: "reviewing", + attempt: 0, + last_activity: new Date().toISOString(), + last_steering: new 
Date(0).toISOString(), + history: [], + adversarial_verdict: null, + }, + status: "in_progress", + assignee: "test-session-id", + }) + + expect(task.pipeline.stage).toBe("reviewing") + const shouldSkip = task.pipeline.stage === "reviewing" || task.pipeline.stage === "adversarial-running" + expect(shouldSkip).toBe(true) + }) + + test("steering skipped for adversarial-running stage tasks", async () => { + const task = createMockTask({ + pipeline: { + stage: "adversarial-running", + attempt: 0, + last_activity: new Date().toISOString(), + last_steering: new Date(0).toISOString(), + history: [], + adversarial_verdict: null, + }, + status: "in_progress", + assignee: "test-session-id", + }) + + expect(task.pipeline.stage).toBe("adversarial-running") + const shouldSkip = task.pipeline.stage === "reviewing" || task.pipeline.stage === "adversarial-running" + expect(shouldSkip).toBe(true) + }) + }) + + describe("closed task filtering", () => { + test("steering skipped for closed tasks", async () => { + const task = createMockTask({ + status: "closed", + pipeline: { + stage: "done", + attempt: 0, + last_activity: null, + last_steering: new Date(0).toISOString(), + history: [], + adversarial_verdict: null, + }, + }) + + expect(task.status).not.toBe("in_progress") + }) + }) + + describe("adversarial timeout recovery", () => { + test("adversarial-running stage times out after 60 minutes", async () => { + const ADVERSARIAL_TIMEOUT_MS = 60 * 60 * 1000 + const now = Date.now() + const sixtyOneMinutesAgo = new Date(now - 61 * 60 * 1000) + + const lastActivity = sixtyOneMinutesAgo.getTime() + + expect(now - lastActivity).toBeGreaterThan(ADVERSARIAL_TIMEOUT_MS) + }) + + test("adversarial-running stage not timed out before 60 minutes", async () => { + const ADVERSARIAL_TIMEOUT_MS = 60 * 60 * 1000 + const now = Date.now() + const fiftyNineMinutesAgo = new Date(now - 59 * 60 * 1000) + + const lastActivity = fiftyNineMinutesAgo.getTime() + + expect(now - 
lastActivity).toBeLessThan(ADVERSARIAL_TIMEOUT_MS) + }) + }) + + describe("task state transitions", () => { + test("developer task without assignee is skipped", async () => { + const task = createMockTask({ + status: "in_progress", + assignee: null, + pipeline: { + stage: "developing", + attempt: 0, + last_activity: new Date().toISOString(), + last_steering: new Date(0).toISOString(), + history: [], + adversarial_verdict: null, + }, + }) + + expect(task.assignee).toBeNull() + expect(task.status).toBe("in_progress") + }) + }) + + describe("spawnSteering response parsing", () => { + test("spawnSteering parses steer action from JSON response", async () => { + const now = new Date() + const response = { + action: "steer", + message: "You should focus on error handling in the API layer", + } + const jsonResponse = JSON.stringify(response) + + expect(jsonResponse).toContain('"action":"steer"') + + const parsed = JSON.parse(jsonResponse) + expect(parsed.action).toBe("steer") + expect(parsed.message).toBe("You should focus on error handling in the API layer") + }) + + test("spawnSteering parses replace action from JSON response", async () => { + const response = { + action: "replace", + message: "Developer is not making progress — needs a fresh approach", + } + const jsonResponse = JSON.stringify(response) + + const parsed = JSON.parse(jsonResponse) + expect(parsed.action).toBe("replace") + expect(parsed.message).toBe("Developer is not making progress — needs a fresh approach") + }) + + test("spawnSteering parses continue action from JSON response", async () => { + const response = { + action: "continue", + message: null, + } + const jsonResponse = JSON.stringify(response) + + const parsed = JSON.parse(jsonResponse) + expect(parsed.action).toBe("continue") + expect(parsed.message).toBeNull() + }) + + test("spawnSteering extracts JSON from text response", async () => { + const fullText = `The assessment is as follows: + +\`\`\`json +{"action": "steer", "message": "Improve 
test coverage"} +\`\`\` + +That's the recommendation.` + + const jsonMatch = fullText.match(/\{[\s\S]*\}/) + expect(jsonMatch).not.toBeNull() + if (jsonMatch) { + const parsed = JSON.parse(jsonMatch[0]) + expect(parsed.action).toBe("steer") + expect(parsed.message).toBe("Improve test coverage") + } + }) + + test("spawnSteering falls back to continue on invalid JSON", async () => { + const invalidJson = `This is not valid JSON {incomplete` + + let parsed + try { + const jsonMatch = invalidJson.match(/\{[\s\S]*\}/) + if (jsonMatch) { + parsed = JSON.parse(jsonMatch[0]) + } else { + parsed = null + } + } catch { + parsed = null + } + + expect(parsed).toBeNull() + }) + + test("spawnSteering falls back to continue on missing action field", async () => { + const responseWithoutAction = `{"message": "some guidance"}` + + const parsed = JSON.parse(responseWithoutAction) + expect(parsed.action).toBeUndefined() + expect(parsed.message).toBe("some guidance") + }) + + test("spawnSteering extracts text content from message parts", async () => { + const parts = [ + { type: "text", text: "First part" }, + { type: "text", text: "Second part" }, + ] + + const textParts = parts.filter((p) => p.type === "text") + const combined = textParts.map((p) => (p as any).text).join("\n") + + expect(textParts.length).toBe(2) + expect(combined).toBe("First part\nSecond part") + }) + }) + + describe("checkTimeouts with session message activity", () => { + test("timeout considers session message timestamps", async () => { + const now = Date.now() + const thirtyOneMinutesAgo = now - 31 * 60 * 1000 + + const messageTime = Math.floor(thirtyOneMinutesAgo / 1000) * 1000 + const elapsed = now - messageTime + + expect(elapsed).toBeGreaterThan(30 * 60 * 1000) + }) + + test("timeout does not trigger before 30 minute threshold", async () => { + const now = Date.now() + const twentyNineMinutesAgo = now - 29 * 60 * 1000 + + const messageTime = Math.floor(twentyNineMinutesAgo / 1000) * 1000 + const elapsed = 
now - messageTime + + expect(elapsed).toBeLessThan(30 * 60 * 1000) + }) + + test("timeout respects message time.created field format", async () => { + const now = Date.now() + const thirtyMinutesAgo = now - 30 * 60 * 1000 + + const message = { + info: { + role: "assistant" as const, + time: { + created: thirtyMinutesAgo, + }, + }, + } + + const elapsed = now - message.info.time.created + expect(elapsed).toBeGreaterThanOrEqual(30 * 60 * 1000) + }) + }) +}) diff --git a/packages/opencode/test/tasks/store.test.ts b/packages/opencode/test/tasks/store.test.ts new file mode 100644 index 000000000000..23c8a9558b63 --- /dev/null +++ b/packages/opencode/test/tasks/store.test.ts @@ -0,0 +1,234 @@ +import { describe, expect, test } from "bun:test" +import { Store } from "../../src/tasks/store" +import type { Task } from "../../src/tasks/types" +import { tmpdir } from "../fixture/fixture" +import path from "path" +import { randomUUID } from "crypto" + +function getProjectId(): string { + return `test-store-${randomUUID()}` +} + +function isValidISODate(dateStr: string): boolean { + return !isNaN(Date.parse(dateStr)) +} + +describe("store: task operations", () => { + test("write task and verify file exists", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + const now = new Date().toISOString() + + const task: Task = { + id: "add-oauth2-schema", + title: "Add OAuth2 schema", + description: "Add OAuth2 schema to database", + acceptance_criteria: "Schema must support access tokens and refresh tokens", + parent_issue: 123, + job_id: "job-1", + status: "open", + priority: 2, + task_type: "implementation", + labels: ["module:auth", "file:src/auth/oauth.ts"], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + 
adversarial_verdict: null, + }, + } + + await Store.createTask(projectId, task) + + const retrieved = await Store.getTask(projectId, "add-oauth2-schema") + expect(retrieved).not.toBeNull() + expect(retrieved!.id).toBe("add-oauth2-schema") + expect(retrieved!.title).toBe("Add OAuth2 schema") + expect(retrieved!.status).toBe("open") + }) + + test("update task and verify updated", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + const now = new Date().toISOString() + + const task: Task = { + id: "update-test", + title: "Test task", + description: "Test description", + acceptance_criteria: "Test criteria", + parent_issue: 1, + job_id: "job-1", + status: "open", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(projectId, task) + await Store.updateTask(projectId, "update-test", { + status: "in_progress", + assignee: "agent-1", + assignee_pid: 12345, + }) + + const updated = await Store.getTask(projectId, "update-test") + expect(updated).not.toBeNull() + expect(updated!.status).toBe("in_progress") + expect(updated!.assignee).toBe("agent-1") + expect(updated!.assignee_pid).toBe(12345) + expect(isValidISODate(updated!.updated_at)).toBe(true) + }) + + test("list all tasks", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + const now = new Date().toISOString() + + const task1: Task = { + id: "task-1", + title: "Task 1", + description: "Description 1", + acceptance_criteria: "Criteria 1", + parent_issue: 1, + job_id: "job-1", + status: "open", + priority: 1, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + 
worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + const task2: Task = { + id: "task-2", + title: "Task 2", + description: "Description 2", + acceptance_criteria: "Criteria 2", + parent_issue: 2, + job_id: "job-1", + status: "open", + priority: 2, + task_type: "test", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(projectId, task1) + await Store.createTask(projectId, task2) + + const tasks = await Store.listTasks(projectId) + expect(tasks.length).toBe(2) + expect(tasks.map((t) => t.id).sort()).toEqual(["task-1", "task-2"]) + }) + + test("add comment to task", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + const now = new Date().toISOString() + + const task: Task = { + id: "comment-test", + title: "Comment test", + description: "Test description", + acceptance_criteria: "Test criteria", + parent_issue: 1, + job_id: "job-1", + status: "open", + priority: 2, + task_type: "implementation", + labels: [], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + } + + await Store.createTask(projectId, task) + + await Store.addComment(projectId, "comment-test", { + author: "test-agent", + message: "Test comment", + created_at: now, + }) + + const updated = await 
Store.getTask(projectId, "comment-test") + if (!updated) throw new Error("Task not found") + expect(updated.comments.length).toBeGreaterThan(0) + if (!updated.comments[0]) throw new Error("Comment not found") + expect(updated.comments[0].author).toBe("test-agent") + expect(updated.comments[0].message).toBe("Test comment") + }) +}) \ No newline at end of file diff --git a/packages/opencode/test/tasks/validation.test.ts b/packages/opencode/test/tasks/validation.test.ts new file mode 100644 index 000000000000..9e27d4fddfe0 --- /dev/null +++ b/packages/opencode/test/tasks/validation.test.ts @@ -0,0 +1,214 @@ +import { describe, expect, test } from "bun:test" +import { Store } from "../../src/tasks/store" +import { Validation } from "../../src/tasks/validation" +import type { Task } from "../../src/tasks/types" +import { tmpdir } from "../fixture/fixture" +import { randomUUID } from "crypto" + +function getProjectId(): string { + return `test-validation-${randomUUID()}` +} + +describe("validation: validateGraph", () => { + function createTask( + id: string, + overrides: Partial = {}, + ): Task { + const now = new Date().toISOString() + return { + id, + title: `Task ${id}`, + description: `Description for ${id}`, + acceptance_criteria: `Criteria for ${id}`, + parent_issue: 1, + job_id: "job-1", + status: "open", + priority: 2, + task_type: "implementation", + labels: ["module:test", "file:test.ts"], + depends_on: [], + assignee: null, + assignee_pid: null, + worktree: null, + branch: null, + created_at: now, + updated_at: now, + close_reason: null, + comments: [], + pipeline: { + stage: "idle", + attempt: 0, + last_activity: null, + last_steering: null, + history: [], + adversarial_verdict: null, + }, + ...overrides, + } + } + + test("returns valid:true for a clean graph", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-1")) + await Store.createTask(projectId, createTask("task-2")) 
+ + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(true) + expect(result.errors).toHaveLength(0) + expect(result.warnings).toHaveLength(0) + }) + + test("catches circular dependencies A -> B -> A", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-a", { depends_on: ["task-b"] })) + await Store.createTask(projectId, createTask("task-b", { depends_on: ["task-a"] })) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(false) + expect(result.errors.length).toBeGreaterThan(0) + const hasCycleError = result.errors.some((err) => err.includes("Circular dependency")) + expect(hasCycleError).toBe(true) + }) + + test("catches circular dependencies A -> B -> C -> A", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-a", { depends_on: ["task-b"] })) + await Store.createTask(projectId, createTask("task-b", { depends_on: ["task-c"] })) + await Store.createTask(projectId, createTask("task-c", { depends_on: ["task-a"] })) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(false) + const hasCycleError = result.errors.some((err) => err.includes("Circular dependency")) + expect(hasCycleError).toBe(true) + }) + + test("catches missing task IDs in depends_on", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-1", { depends_on: ["non-existent-task"] })) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(false) + expect(result.errors.length).toBeGreaterThan(0) + const hasMissingDepError = result.errors.some((err) => + err.includes("non-existent task") || err.includes("non-existent-task"), + ) + expect(hasMissingDepError).toBe(true) + }) + + test("warns on missing 
acceptance_criteria", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask( + projectId, + createTask("task-no-criteria", { acceptance_criteria: "" }), + ) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(true) + expect(result.warnings.length).toBeGreaterThan(0) + const hasCriteriaWarning = result.warnings.some((err) => + err.includes("missing acceptance criteria") || err.includes("missing-acceptance-criteria"), + ) + expect(hasCriteriaWarning).toBe(true) + }) + + test("warns on missing conflict labels", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("task-no-labels", { labels: [] })) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(true) + expect(result.warnings.length).toBeGreaterThan(0) + const hasLabelWarning = result.warnings.some((err) => + err.includes("conflict labels") || err.includes("module:") || err.includes("file:"), + ) + expect(hasLabelWarning).toBe(true) + }) + + test("allows module: labels (no warning)", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask( + projectId, + createTask("task-module-label", { labels: ["module:auth"] }), + ) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(true) + const hasLabelWarning = result.warnings.some((err) => + err.includes("task-module-label") && err.includes("conflict labels"), + ) + expect(hasLabelWarning).toBe(false) + }) + + test("allows file: labels (no warning)", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask( + projectId, + createTask("task-file-label", { labels: ["file:src/auth.ts"] }), + ) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(true) + const hasLabelWarning = 
result.warnings.some((err) => + err.includes("task-file-label") && err.includes("conflict labels"), + ) + expect(hasLabelWarning).toBe(false) + }) + + test("collects multiple errors and warnings", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask( + projectId, + createTask("task-1", { + depends_on: ["task-3"], + acceptance_criteria: "", + labels: [], + }), + ) + await Store.createTask( + projectId, + createTask("task-2", { + depends_on: ["task-3"], + acceptance_criteria: "", + }), + ) + + const result = await Validation.validateGraph(projectId) + expect(result.errors.length).toBeGreaterThan(0) + expect(result.warnings.length).toBeGreaterThan(0) + }) + + test("valid graph with valid dependencies passes", async () => { + await using tmp = await tmpdir() + const projectId = getProjectId() + + await Store.createTask(projectId, createTask("base", {})) + await Store.createTask( + projectId, + createTask("dependent", { depends_on: ["base"] }), + ) + + await Store.updateTask(projectId, "base", { status: "closed" }) + + const result = await Validation.validateGraph(projectId) + expect(result.valid).toBe(true) + expect(result.errors).toHaveLength(0) + }) +}) \ No newline at end of file diff --git a/packages/opencode/test/tool/cancel_task.test.ts b/packages/opencode/test/tool/cancel_task.test.ts index 3001eaa1b96f..2abb7b2d3059 100644 --- a/packages/opencode/test/tool/cancel_task.test.ts +++ b/packages/opencode/test/tool/cancel_task.test.ts @@ -132,6 +132,38 @@ describe("tool.cancel_task", () => { }) }) + test("cancels child session task when parent_session_id matches", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const tool = await CancelTaskTool.init() + const taskId = "test-task-child-session" + + const promise = new Promise((resolve) => { + setTimeout(() => resolve("Should be cancelled"), 10000) + }) + + 
trackBackgroundTask(taskId, promise, undefined, { + agent_type: "test", + description: "Test task", + session_id: "child-session-id", + parent_session_id: "test", + start_time: Date.now(), + }) + + await new Promise((r) => setTimeout(r, 50)) + + const result = await tool.execute({ task_id: taskId }, ctx) + + const output = JSON.parse(result.output) + expect(output.status).toBe("cancelled") + expect(output.task_id).toBe(taskId) + expect(result.metadata.status).toBe("cancelled") + }, + }) + }) + test("validates input with Zod schema", async () => { await Instance.provide({ directory: "/tmp/test",