diff --git a/.cursor/rules/architecture.mdc b/.cursor/rules/architecture.mdc new file mode 100644 index 00000000..28daeac6 --- /dev/null +++ b/.cursor/rules/architecture.mdc @@ -0,0 +1,50 @@ +--- +description: Git Board Flow – entry points, flow, and key paths +alwaysApply: true +--- + +# Architecture & Key Paths + +## Entry and main flow + +1. **GitHub Action**: `src/actions/github_action.ts` reads inputs, builds `Execution`, calls `mainRun(execution)` from `common_action.ts`. +2. **CLI**: `src/actions/local_action.ts` same flow with CLI/config inputs. +3. **common_action.ts**: Sets up; calls `waitForPreviousRuns(execution)` (sequential workflow); then: + - **Single action** → `SingleActionUseCase` + - **Issue** → `IssueCommentUseCase` or `IssueUseCase` + - **Pull request** → `PullRequestReviewCommentUseCase` or `PullRequestUseCase` + - **Push** → `CommitUseCase` + +## Key paths + +| Area | Path | Purpose | +|------|------|--------| +| Action entry | `src/actions/github_action.ts` | Reads inputs, builds Execution | +| CLI entry | `src/cli.ts` → `local_action.ts` | Same flow, local inputs | +| Shared flow | `src/actions/common_action.ts` | mainRun, waitForPreviousRuns, dispatch to use cases | +| Use cases | `src/usecase/` | issue_use_case, pull_request_use_case, commit_use_case, single_action_use_case | +| Single actions | `src/usecase/actions/` | check_progress, detect_errors, recommend_steps, think, initial_setup, create_release, create_tag, publish_github_action, deployed_action | +| Steps (issue) | `src/usecase/steps/issue/` | check_permissions, close_not_allowed_issue, assign_members, update_title, update_issue_type, link_issue_project, check_priority_issue_size, prepare_branches, remove_issue_branches, remove_not_needed_branches, label_deploy_added, label_deployed_added, move_issue_to_in_progress | +| Steps (PR) | `src/usecase/steps/pull_request/` | update_title, assign_members (issue), assign_reviewers_to_issue, link_pr_project, link_pr_issue, 
sync_size_and_progress_from_issue, check_priority_pull_request_size, update_description (AI), close_issue_after_merging | +| Steps (commit) | `src/usecase/steps/commit/` | notify commit, check size | +| Steps (issue comment) | `src/usecase/steps/issue_comment/` | check_issue_comment_language (translation) | +| Steps (PR review comment) | `src/usecase/steps/pull_request_review_comment/` | check_pull_request_comment_language (translation) | +| Manager (content) | `src/manager/` | description handlers, configuration_handler, markdown_content_hotfix_handler (PR description, hotfix changelog content) | +| Models | `src/data/model/` | Execution, Issue, PullRequest, SingleAction, etc. | +| Repos | `src/data/repository/` | branch_repository, issue_repository, workflow_repository, ai_repository (OpenCode), file_repository, project_repository | +| Config | `src/utils/constants.ts` | INPUT_KEYS, ACTIONS, defaults | +| Metadata | `action.yml` | Action inputs and defaults | + +## Single actions (by name) + +- `check_progress_action`, `detect_errors_action`, `recommend_steps_action` (need `single-action-issue`) +- `think_action`, `initial_setup` (no issue) +- `create_release` (version, title, changelog), `create_tag` (version), `publish_github_action`, `deployed_action` (issue) + +## CLI-only (not single actions) + +- **Copilot**: `giik copilot -p "..."` uses OpenCode build agent via `AiRepository.copilotMessage` in `src/cli.ts`. No workflow single-action equivalent. + +## Concurrency (sequential runs) + +`common_action.ts` calls `waitForPreviousRuns(execution)` (from `src/utils/queue_utils.ts`): lists workflow runs, waits until no previous run of the **same workflow name** is in progress/queued, then continues. Implemented in `WorkflowRepository.getActivePreviousRuns`. 
diff --git a/.cursor/rules/code-conventions.mdc b/.cursor/rules/code-conventions.mdc new file mode 100644 index 00000000..dffface3 --- /dev/null +++ b/.cursor/rules/code-conventions.mdc @@ -0,0 +1,34 @@ +--- +description: Git Board Flow – coding conventions and where to change things +globs: src/**/*.ts +alwaysApply: false +--- + +# Code Conventions + +## Logging and constants + +- Use **logger**: `logInfo`, `logError`, `logDebugInfo` from `src/utils/logger`. No ad-hoc `console.log`. +- Use **constants**: `INPUT_KEYS` and `ACTIONS` from `src/utils/constants.ts` for input names and action names. No hardcoded strings for these. + +## Adding a new action input + +1. **`action.yml`**: Add the input with `description` and `default` (if any). +2. **`src/utils/constants.ts`**: Add the key to `INPUT_KEYS` (e.g. `NEW_INPUT: 'new-input'`). +3. **`src/actions/github_action.ts`**: Read the input (e.g. `core.getInput(INPUT_KEYS.NEW_INPUT)`) and pass it into the object used to build `Execution`. +4. **Optional**: If the CLI must support it, add to `local_action.ts` and the corresponding CLI option. + +## Where to change content/descriptions + +- **PR description** (template filling, AI content): `src/manager/description/` (configuration_handler, content interfaces). +- **Hotfix/release changelog** (markdown extraction, formatting): `src/manager/description/markdown_content_hotfix_handler.ts`. + +## Build and bundles + +- The project uses **`@vercel/ncc`** to bundle the action and CLI. Keep imports and dependencies compatible with ncc (no dynamic requires that ncc cannot see). +- **Do not** edit or rely on `build/`; it is generated. Run tests and lint only on `src/`. + +## Style and lint + +- Prefer TypeScript; avoid `any` (lint rule: no-explicit-any). +- Run `npm run lint` before committing; use `npm run lint:fix` when possible. 
diff --git a/.cursor/rules/project-context.mdc b/.cursor/rules/project-context.mdc index a63c7f2a..2f24fc75 100644 --- a/.cursor/rules/project-context.mdc +++ b/.cursor/rules/project-context.mdc @@ -1,20 +1,18 @@ --- -description: Git Board Flow project context, commands, and conventions +description: Git Board Flow – quick read, commands, and where to find more alwaysApply: true --- # Git Board Flow – Project Context -**Git Board Flow** is a GitHub Action (and local CLI) that automates branch management, GitHub project linking, and issue/PR tracking using Git-Flow. It uses **OpenCode** for all AI features (progress detection, error detection, PR descriptions, copilot). +## Quick read (for fast understanding) -## Tech stack +- **What it is**: GitHub Action + CLI that automates Git-Flow: creates branches from issue labels, links issues/PRs to projects, tracks commits; AI via OpenCode (progress, errors, PR descriptions). +- **Entry points**: GitHub Action → `src/actions/github_action.ts`; CLI → `src/cli.ts`. Shared logic in `src/actions/common_action.ts` (single actions vs issue/PR/push). +- **Do**: Use Node 20, run from repo root; edit only `src/`; use `INPUT_KEYS`/`ACTIONS` and `logInfo`/`logError`/`logDebugInfo`. When adding inputs: update `action.yml`, `constants.ts` (INPUT_KEYS), and `github_action.ts` (and optionally `local_action.ts`). +- **Don’t**: Edit or depend on `build/` (generated by `ncc`); run tests/lint on `build/`. -- **Runtime**: Node.js 20 (use `nvm use 20` before running commands). -- **Language**: TypeScript. -- **Build**: `@vercel/ncc` bundles the action and CLI into `build/`. -- **Tests**: Jest. - -## Commands (run from repo root) +## Commands (repo root) ```bash nvm use 20 @@ -27,27 +25,16 @@ npm run lint npm run lint:fix ``` -- **Build**: `npm run build` – compiles `src/actions/github_action.ts` → `build/github_action/`, `src/cli.ts` → `build/cli/`, and sets the CLI as executable. 
-- **Tests**: `npm test` – runs Jest (exclude e2e when relevant). Use `npm run test:watch` for watch mode, `npm run test:coverage` for coverage. -- **Lint**: `npm run lint` – ESLint + typescript-eslint on `src/` (recommended + unused-vars, no-explicit-any). Use `npm run lint:fix` to auto-fix. +- **Build**: `npm run build` → bundles `github_action.ts` and `cli.ts` into `build/`. +- **Tests**: Jest; `npm run test:watch` / `npm run test:coverage` as needed. +- **Lint**: ESLint + typescript-eslint on `src/`; `npm run lint:fix` to auto-fix. ## What to ignore -- **`build/`** – Compiled output (bundled JS, generated .d.ts). Do not edit or rely on it; treat it as generated. Do not run tests or lint against `build/`. -- **`.agent-sessions/`** – Session data; ignore in context unless debugging sessions. - -## Key paths - -- `src/actions/github_action.ts` – GitHub Action entry; reads inputs and runs the main flow. -- `src/actions/local_action.ts` – CLI entry; same logic with local/config inputs. -- `src/actions/common_action.ts` – Shared flow: single actions vs issue/PR/push pipelines. -- `src/usecase/` – Use cases (issue, pull request, commit, single actions). -- `src/data/model/` – Domain models (Execution, Ai, Issue, etc.). -- `src/data/repository/ai_repository.ts` – OpenCode API (ask, askAgent, copilotMessage); no separate agent layer. -- `action.yml` – Action metadata and input definitions. +- **`build/`** – Generated output; do not edit or run tests/lint against it. +- **`.agent-sessions/`** – Session data; ignore unless debugging. -## Conventions +## Other rules -- Prefer TypeScript; keep action and CLI buildable with `ncc`. -- Use existing logger (`logInfo`, `logError`, `logDebugInfo`) and constants (`INPUT_KEYS`, `ACTIONS`) instead of ad-hoc strings. -- When adding inputs, update `action.yml`, `INPUT_KEYS` in `src/utils/constants.ts`, and the corresponding read in `github_action.ts` (and optionally `local_action.ts`). 
+- **Architecture & paths**: see `architecture.mdc` (entry points, use cases, single actions, key files). +- **Code conventions**: see `code-conventions.mdc` (logger, constants, adding inputs, ncc). diff --git a/.github/workflows/gbf_issue.yml b/.github/workflows/gbf_issue.yml index 7d016b1a..25bc8088 100644 --- a/.github/workflows/gbf_issue.yml +++ b/.github/workflows/gbf_issue.yml @@ -16,7 +16,6 @@ jobs: with: ai-ignore-files: build/* debug: ${{ vars.DEBUG }} - hotfix-workflow: release_workflow.yml opencode-model: ${{ vars.OPENCODE_MODEL }} project-ids: 2 token: ${{ secrets.PAT }} diff --git a/.github/workflows/gbf_issue_comment.yml b/.github/workflows/gbf_issue_comment.yml index 51360ac8..88f22f7e 100644 --- a/.github/workflows/gbf_issue_comment.yml +++ b/.github/workflows/gbf_issue_comment.yml @@ -7,7 +7,7 @@ on: jobs: git-board-issues: name: Git Board - Issue Comment - runs-on: [self-hosted, macOS, X64] + runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@v4 @@ -16,7 +16,6 @@ jobs: with: ai-ignore-files: build/* debug: ${{ vars.DEBUG }} - hotfix-workflow: release_workflow.yml opencode-model: ${{ vars.OPENCODE_MODEL }} project-ids: 2 token: ${{ secrets.PAT }} diff --git a/.github/workflows/gbf_pull_request.yml b/.github/workflows/gbf_pull_request.yml index 2672bd5e..35968cac 100644 --- a/.github/workflows/gbf_pull_request.yml +++ b/.github/workflows/gbf_pull_request.yml @@ -14,7 +14,6 @@ jobs: - uses: ./ with: - ai-pull-request-description: true ai-ignore-files: build/* debug: ${{ vars.DEBUG }} opencode-model: ${{ vars.OPENCODE_MODEL }} diff --git a/.github/workflows/gbf_pull_request_review_comment.yml b/.github/workflows/gbf_pull_request_review_comment.yml index 48f0a45e..08fa9e4f 100644 --- a/.github/workflows/gbf_pull_request_review_comment.yml +++ b/.github/workflows/gbf_pull_request_review_comment.yml @@ -7,7 +7,7 @@ on: jobs: git-board-pull-requests: name: Git Board - Pull Request Review Comment - runs-on: [self-hosted, 
macOS, X64] + runs-on: ubuntu-latest steps: - name: Checkout Repository uses: actions/checkout@v4 @@ -15,7 +15,6 @@ jobs: - uses: ./ with: ai-ignore-files: build/* - ai-pull-request-description: true debug: ${{ vars.DEBUG }} opencode-model: ${{ vars.OPENCODE_MODEL }} project-ids: 2 diff --git a/.github/workflows/hotfix_workflow.yml b/.github/workflows/hotfix_workflow.yml new file mode 100644 index 00000000..5a414176 --- /dev/null +++ b/.github/workflows/hotfix_workflow.yml @@ -0,0 +1,161 @@ +name: Task - Hotfix + +on: + workflow_dispatch: + inputs: + version: + description: 'Hotfix version' + required: true + default: '1.0.0' + title: + description: 'Title' + required: true + default: 'New Version' + changelog: + description: 'Changelog' + required: true + default: '- Several improvements' + issue: + description: 'Launcher issue' + required: true + default: '-1' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + prepare-version-files: + name: Prepare files for hotfix + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Validate inputs + env: + VERSION: ${{ github.event.inputs.version }} + ISSUE: ${{ github.event.inputs.issue }} + TITLE: ${{ github.event.inputs.title }} + CHANGELOG: ${{ github.event.inputs.changelog }} + run: | + err=0 + if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "::error::Version must be in semver format (e.g. 1.0.0)." + err=1 + fi + if ! [[ "$ISSUE" =~ ^-?[0-9]+$ ]]; then + echo "::error::Issue must be a number (e.g. 123 or -1)." + err=1 + fi + if [[ ${#TITLE} -gt 1000 ]]; then + echo "::error::Title must be at most 1000 characters." + err=1 + fi + if [[ ${#CHANGELOG} -gt 50000 ]]; then + echo "::error::Changelog must be at most 50000 characters." 
+ err=1 + fi + [[ $err -eq 0 ]] || exit 1 + + - name: Update version + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const packageJson = JSON.parse(fs.readFileSync('./package.json', 'utf8')); + packageJson.version = '${{ github.event.inputs.version }}'; + fs.writeFileSync('./package.json', JSON.stringify(packageJson, null, 2)); + + - name: Commit updated package.json and dist directory + uses: EndBug/add-and-commit@v9 + with: + add: './build/ ./package.json' + committer_name: GitHub Actions + committer_email: actions@github.com + default_author: user_info + message: 'gh-action: updated compiled files and bumped version to ${{ github.event.inputs.version }} (hotfix)' + + prepare-compiled-files: + name: Update compiled files + runs-on: ubuntu-latest + needs: prepare-version-files + steps: + - uses: actions/checkout@v4 + + - name: Pull latest changes + run: | + git config --global user.email "efraespada@gmail.com" + git config --global user.name "Efra Espada" + git pull --no-ff --no-edit + + - name: Set up Node.js 20 + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Install Dependencies + run: npm install + + - name: Build Files + run: npm run build + + - name: Force add build directory + run: git add -f ./build/ + + - name: Commit updated dist directory + uses: EndBug/add-and-commit@v9 + with: + committer_name: GitHub Actions + committer_email: actions@github.com + default_author: user_info + message: 'gh-action: updated compiled files' + + tag: + name: Publish version + runs-on: ubuntu-latest + needs: [ prepare-compiled-files ] + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Git Board Flow - Create Tag + uses: ./ + if: ${{ success() }} + with: + debug: ${{ vars.DEBUG }} + single-action: 'create_tag' + single-action-issue: '${{ github.event.inputs.issue }}' + single-action-version: '${{ github.event.inputs.version }}' + token: ${{ secrets.PAT }} + + - name: Git Board Flow - 
Create Release + uses: ./ + if: ${{ success() }} + with: + debug: ${{ vars.DEBUG }} + single-action: 'create_release' + single-action-issue: '${{ github.event.inputs.issue }}' + single-action-version: '${{ github.event.inputs.version }}' + single-action-title: '${{ github.event.inputs.title }}' + single-action-changelog: '${{ github.event.inputs.changelog }}' + token: ${{ secrets.PAT }} + + - name: Git Board Flow - Publish Github Action Version + uses: ./ + if: ${{ success() }} + with: + debug: ${{ vars.DEBUG }} + single-action: 'publish_github_action' + single-action-issue: '${{ github.event.inputs.issue }}' + single-action-version: '${{ github.event.inputs.version }}' + token: ${{ secrets.PAT }} + + - name: Git Board Flow - Deploy success notification + uses: ./ + if: ${{ success() }} + with: + debug: ${{ vars.DEBUG }} + single-action: 'deployed_action' + single-action-issue: '${{ github.event.inputs.issue }}' + opencode-model: ${{ vars.OPENCODE_MODEL }} + token: ${{ secrets.PAT }} diff --git a/.github/workflows/prepare_files.yml b/.github/workflows/prepare_files.yml index 8f00bde1..505f118d 100644 --- a/.github/workflows/prepare_files.yml +++ b/.github/workflows/prepare_files.yml @@ -2,11 +2,10 @@ name: Prepare Files on: push: - branches: - - '**' - - '!release/*' - - '!hotfix/*' - - '!master' + branches-ignore: + - master + - 'release/*' + - 'hotfix/*' concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -15,31 +14,35 @@ concurrency: jobs: prepare-files: name: Update compiled files - runs-on: [self-hosted, macOS, X64] + runs-on: ubuntu-latest + permissions: + contents: write steps: - uses: actions/checkout@v4 - - name: Pull latest changes - run: git pull --no-ff --no-edit - - name: Set up Node.js 20 uses: actions/setup-node@v4 with: node-version: '20.x' - - name: Install Dependencies - run: npm install + - name: Install dependencies + run: npm ci - - name: Build Files + - name: Build run: npm run build - - name: Force add build directory - run: git 
add -f ./build/ + - name: Run tests + run: npm test + + - name: Lint + run: npm run lint - - name: Commit updated dist directory + - name: Commit updated build directory uses: EndBug/add-and-commit@v9 with: + add: './build/ --force' committer_name: GitHub Actions committer_email: actions@github.com default_author: user_info message: 'gh-action: updated compiled files' + push: true diff --git a/.github/workflows/release_workflow.yml b/.github/workflows/release_workflow.yml index 94e23618..f6a02239 100644 --- a/.github/workflows/release_workflow.yml +++ b/.github/workflows/release_workflow.yml @@ -31,6 +31,32 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Validate inputs + env: + VERSION: ${{ github.event.inputs.version }} + ISSUE: ${{ github.event.inputs.issue }} + TITLE: ${{ github.event.inputs.title }} + CHANGELOG: ${{ github.event.inputs.changelog }} + run: | + err=0 + if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "::error::Version must be in semver format (e.g. 1.0.0)." + err=1 + fi + if ! [[ "$ISSUE" =~ ^-?[0-9]+$ ]]; then + echo "::error::Issue must be a number (e.g. 123 or -1)." + err=1 + fi + if [[ ${#TITLE} -gt 1000 ]]; then + echo "::error::Title must be at most 1000 characters." + err=1 + fi + if [[ ${#CHANGELOG} -gt 50000 ]]; then + echo "::error::Changelog must be at most 50000 characters." + err=1 + fi + [[ $err -eq 0 ]] || exit 1 + - name: Update version uses: actions/github-script@v7 with: @@ -132,5 +158,4 @@ jobs: single-action: 'deployed_action' single-action-issue: '${{ github.event.inputs.issue }}' opencode-model: ${{ vars.OPENCODE_MODEL }} - opencode-start-server: true token: ${{ secrets.PAT }} diff --git a/.github/workflows/validate_agent_workflows.yml b/.github/workflows/validate_agent_workflows.yml deleted file mode 100644 index cbf4c802..00000000 --- a/.github/workflows/validate_agent_workflows.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Optional: smoke validation for agent workflows in GitHub Actions context. 
-# - On push to test/agent-smoke: runs the action with OpenCode (start-server or URL). -# The run may skip with "Issue number not found" if the branch is not linked to an issue; that is OK. -# - On workflow_dispatch: runs build + tests only (action is not run with a real event). -# See docs/integration-testing-agent-workflows.md for full manual validation checklist. - -name: Validate agent workflows - -on: - push: - branches: - - 'test/agent-smoke' - - 'test/agent-validation' - workflow_dispatch: - -jobs: - smoke-action: - name: Smoke – run action (push) - runs-on: ubuntu-latest - if: github.event_name == 'push' - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Run Git Board Flow (smoke) - uses: ./ - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} - with: - debug: true - opencode-start-server: true - opencode-model: ${{ vars.OPENCODE_MODEL }} - project-ids: ${{ vars.PROJECT_IDS }} - token: ${{ secrets.PAT }} - # Expectation: job succeeds. The action may log "Issue number not found. Skipping." - # if the branch name does not match an issue (e.g. test/agent-smoke). That is acceptable. - - build-and-test: - name: Build and test - runs-on: ubuntu-latest - if: github.event_name == 'workflow_dispatch' || github.event_name == 'push' - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: '20' - cache: 'npm' - - - name: Install dependencies - run: npm ci - - - name: Build - run: npm run build - - - name: Run tests - run: npm test - - - name: Lint - run: npm run lint diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e224129c..2ebd5a2f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -35,6 +35,7 @@ npm run build - `local_action.ts` – CLI entry; same logic with local/config inputs. - `common_action.ts` – Shared flow: single actions vs issue/PR/push pipelines. 
- **`src/usecase/`** – Use cases (issue, pull request, commit, single actions). +- **`src/manager/`** – Content handlers for PR descriptions, hotfix changelog, and markdown (e.g. `configuration_handler`, `markdown_content_hotfix_handler`). - **`src/data/model/`** – Domain models (Execution, Ai, Issue, etc.). - **`src/data/repository/`** – Repositories (GitHub API, OpenCode API). - **`src/utils/`** – Constants, logger, content utils, etc. @@ -61,7 +62,8 @@ npm run build - Update the relevant docs in `docs/` when changing behavior or adding features. - For user-facing changes, update `README.md` and the docs at [docs.page/landamessenger/git-board-flow](https://docs.page/landamessenger/git-board-flow). -- The project uses [Mintlify](https://mintlify.com/) for docs; see `docs.json` for sidebar structure. +- The project uses [Mintlify](https://mintlify.com/) (docs.page) for docs; see `docs.json` for sidebar structure. +- Use Mintlify components for a consistent, readable UI: **Card**, **Card** inside **Columns**, **Tabs**, **Accordion** / **AccordionGroup**, **Steps**, **Info** / **Warning** / **Tip** / **Note**. See [Mintlify Components](https://mintlify.com/docs/components). ## Submitting Changes diff --git a/README.md b/README.md index ee0dc4e8..b832b6f2 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ All AI features use **OpenCode** (75+ LLM providers: OpenAI, Anthropic, Gemini, local models, etc.): - **Progress detection** — On every push, analyzes branch vs issue and updates the progress label on the issue and on any open PRs for that branch. You can also run it on demand via single action or CLI (`check-progress`). +- **Bugbot (potential problems)** — On every push (or on demand via single action / CLI `detect-potential-problems`), OpenCode analyzes the branch vs base and reports findings as **comments on the issue** and **review comments on open PRs**; it updates or marks them as resolved when findings are fixed. 
- **Think / reasoning** — Deep code analysis and change proposals (`think_action`). - **AI PR description** — Generates or updates pull request descriptions by filling your `.github/pull_request_template.md` from the issue and branch diff. @@ -27,6 +28,7 @@ See the [OpenCode (AI)](https://docs.page/landamessenger/git-board-flow/opencode - **Pull request linking** — Links PRs to issues, adds them to projects, assigns reviewers, and can generate PR descriptions with AI. - **GitHub Project integration** — Links issues and PRs to the configured projects (`project-ids`) and moves them to the right columns. - **Single actions** — Run on-demand actions: check progress, think, create release/tag, deployed marking, and more. +- **Workflow concurrency** — The action waits for previous runs of the **same workflow name** to finish, so you can run workflows sequentially (no cancel). See [Features → Workflow concurrency](https://docs.page/landamessenger/git-board-flow/features#workflow-concurrency-and-sequential-execution). --- @@ -40,11 +42,7 @@ name: Git Board Flow - Issue on: issues: types: [opened, edited, labeled, unlabeled] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.issue.number || github.ref }} - cancel-in-progress: true - + jobs: git-board-flow-issues: name: Git Board Flow - Issue @@ -86,7 +84,7 @@ jobs: ### Commit (push) workflow -This workflow runs on every push. It notifies the issue of new commits, updates **size** and **progress** labels on the issue and on any open PRs for that branch (progress requires OpenCode). No separate "check progress" workflow is needed. +This workflow runs on every push. It notifies the issue of new commits, updates **size** and **progress** labels on the issue and on any open PRs for that branch (progress requires OpenCode), and can run **Bugbot** to report potential problems as issue and PR comments (OpenCode). No separate "check progress" workflow is needed. 
```yaml name: Git Board Flow - Commit @@ -149,14 +147,14 @@ giik [options] |--------|-------------|--------| | `setup` | Initial setup: labels, issue types, verify access | `node build/cli/index.js setup -t <token>` | | `check-progress` | Run progress check on demand (progress is also updated automatically on every push) | `node build/cli/index.js check-progress -i 123 -t <token>` | -| `detect-errors` | Detect potential errors in the branch vs base (OpenCode Plan) | `node build/cli/index.js detect-errors -i 123 -t <token>` | +| `detect-potential-problems` | Bugbot: detect potential problems, report on issue and PR (OpenCode) | `node build/cli/index.js detect-potential-problems -i 123 -t <token>` | | `recommend-steps` | Recommend implementation steps for an issue (OpenCode Plan) | `node build/cli/index.js recommend-steps -i 123 -t <token>` | | `think` | Deep code analysis / reasoning (needs a question) | `node build/cli/index.js think -q "Where is auth validated?" -t <token>` | | `copilot` | AI development assistant (analyze/modify code) | `node build/cli/index.js copilot -p "Explain src/cli.ts" -t <token>` | Add `-d` or `--debug` for verbose logs. For OpenCode, use `--opencode-server-url` and `--opencode-model` if you don’t set env vars. -For a step-by-step guide to testing the OpenCode Plan flows (check-progress, detect-errors, recommend-steps) locally, see [Testing OpenCode Plan Locally](https://docs.page/landamessenger/git-board-flow/testing-opencode-plan-locally). +For a step-by-step guide to testing the OpenCode Plan flows (check-progress, detect-potential-problems, recommend-steps) locally, see [Testing OpenCode Plan Locally](https://docs.page/landamessenger/git-board-flow/testing-opencode-plan-locally). ### 4. Optional: `.env` in repo root diff --git a/action.yml b/action.yml index b5ae6944..84461b76 100644 --- a/action.yml +++ b/action.yml @@ -83,6 +83,9 @@ inputs: priority-low-label: description: "Label to indicate a priority low." 
default: "priority: low" + priority-none-label: + description: "Label to indicate no priority." + default: "priority: none" size-xxl-label: description: "Label to indicate a task of size XXL." default: "size: XXL" @@ -394,17 +397,26 @@ inputs: description: "OpenCode model in provider/model format (default opencode/kimi-k2.5-free; e.g. openai/gpt-4o-mini)." default: "opencode/kimi-k2.5-free" opencode-start-server: - description: "If true, the action starts an OpenCode server at the beginning and stops it when the job ends. No need to install or run OpenCode yourself. Requires OPENAI_API_KEY or ANTHROPIC_API_KEY (or other provider keys) as secrets." + description: "If true, the action starts an OpenCode server at the beginning and stops it when the job ends. No need to install or run OpenCode yourself. Requires provider API keys as secrets (e.g. OPENAI_API_KEY, ANTHROPIC_API_KEY, OPENROUTER_API_KEY) passed via env." default: "true" ai-pull-request-description: description: "Enable AI-powered automatic updates for pull request descriptions." - default: "false" + default: "true" ai-ignore-files: description: "Comma-separated list of files to ignore for AI operations." default: "" ai-members-only: description: "Restrict AI features to only organization/project members." default: "false" + ai-include-reasoning: + description: "Include reasoning or chain-of-thought in AI responses when supported by the model." + default: "true" + bugbot-severity: + description: "Minimum severity for potential-problem detections to be published (info, low, medium, high). Findings below this are not posted as comments." + default: "low" + bugbot-comment-limit: + description: "Maximum number of potential-problem findings to publish as individual comments on the issue and PR. Extra findings are summarized in a single overflow comment." 
+ default: "20" runs: using: "node20" main: "build/github_action/index.js" diff --git a/build/cli/index.js b/build/cli/index.js index 97e5756e..be3473d4 100755 --- a/build/cli/index.js +++ b/build/cli/index.js @@ -46572,6 +46572,12 @@ additionalParams) { .split(',') .map(path => path.trim()) .filter(path => path.length > 0); + const bugbotSeverity = (additionalParams[constants_1.INPUT_KEYS.BUGBOT_SEVERITY] ?? actionInputs[constants_1.INPUT_KEYS.BUGBOT_SEVERITY]) || constants_1.BUGBOT_MIN_SEVERITY; + const bugbotCommentLimitRaw = additionalParams[constants_1.INPUT_KEYS.BUGBOT_COMMENT_LIMIT] ?? actionInputs[constants_1.INPUT_KEYS.BUGBOT_COMMENT_LIMIT]; + const bugbotCommentLimitNum = typeof bugbotCommentLimitRaw === 'number' ? bugbotCommentLimitRaw : parseInt(String(bugbotCommentLimitRaw ?? ''), 10); + const bugbotCommentLimit = Number.isNaN(bugbotCommentLimitNum) || bugbotCommentLimitNum < 1 + ? constants_1.BUGBOT_MAX_COMMENTS + : Math.min(bugbotCommentLimitNum, 200); /** * Projects Details */ @@ -46887,7 +46893,7 @@ additionalParams) { const pullRequestDesiredAssigneesCount = parseInt(additionalParams[constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_ASSIGNEES_COUNT] ?? actionInputs[constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_ASSIGNEES_COUNT]) ?? 0; const pullRequestDesiredReviewersCount = parseInt(additionalParams[constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_REVIEWERS_COUNT] ?? actionInputs[constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_REVIEWERS_COUNT]) ?? 0; const pullRequestMergeTimeout = parseInt(additionalParams[constants_1.INPUT_KEYS.PULL_REQUEST_MERGE_TIMEOUT] ?? actionInputs[constants_1.INPUT_KEYS.PULL_REQUEST_MERGE_TIMEOUT]) ?? 
0; - const execution = new execution_1.Execution(debug, new single_action_1.SingleAction(singleAction, singleActionIssue, singleActionVersion, singleActionTitle, singleActionChangelog), commitPrefixBuilder, new issue_1.Issue(branchManagementAlways, reopenIssueOnPush, issueDesiredAssigneesCount, additionalParams), new pull_request_1.PullRequest(pullRequestDesiredAssigneesCount, pullRequestDesiredReviewersCount, pullRequestMergeTimeout, additionalParams), new emoji_1.Emoji(titleEmoji, branchManagementEmoji), new images_1.Images(imagesOnIssue, imagesOnPullRequest, imagesOnCommit, imagesIssueAutomatic, imagesIssueFeature, imagesIssueBugfix, imagesIssueDocs, imagesIssueChore, imagesIssueRelease, imagesIssueHotfix, imagesPullRequestAutomatic, imagesPullRequestFeature, imagesPullRequestBugfix, imagesPullRequestRelease, imagesPullRequestHotfix, imagesPullRequestDocs, imagesPullRequestChore, imagesCommitAutomatic, imagesCommitFeature, imagesCommitBugfix, imagesCommitRelease, imagesCommitHotfix, imagesCommitDocs, imagesCommitChore), new tokens_1.Tokens(token), new ai_1.Ai(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning), new labels_1.Labels(branchManagementLauncherLabel, bugLabel, bugfixLabel, hotfixLabel, enhancementLabel, featureLabel, releaseLabel, questionLabel, helpLabel, deployLabel, deployedLabel, docsLabel, documentationLabel, choreLabel, maintenanceLabel, priorityHighLabel, priorityMediumLabel, priorityLowLabel, priorityNoneLabel, sizeXxlLabel, sizeXlLabel, sizeLLabel, sizeMLabel, sizeSLabel, sizeXsLabel), new issue_types_1.IssueTypes(issueTypeTask, issueTypeTaskDescription, issueTypeTaskColor, issueTypeBug, issueTypeBugDescription, issueTypeBugColor, issueTypeFeature, issueTypeFeatureDescription, issueTypeFeatureColor, issueTypeDocumentation, issueTypeDocumentationDescription, issueTypeDocumentationColor, issueTypeMaintenance, issueTypeMaintenanceDescription, issueTypeMaintenanceColor, issueTypeHotfix, 
issueTypeHotfixDescription, issueTypeHotfixColor, issueTypeRelease, issueTypeReleaseDescription, issueTypeReleaseColor, issueTypeQuestion, issueTypeQuestionDescription, issueTypeQuestionColor, issueTypeHelp, issueTypeHelpDescription, issueTypeHelpColor), new locale_1.Locale(issueLocale, pullRequestLocale), new size_thresholds_1.SizeThresholds(new size_threshold_1.SizeThreshold(sizeXxlThresholdLines, sizeXxlThresholdFiles, sizeXxlThresholdCommits), new size_threshold_1.SizeThreshold(sizeXlThresholdLines, sizeXlThresholdFiles, sizeXlThresholdCommits), new size_threshold_1.SizeThreshold(sizeLThresholdLines, sizeLThresholdFiles, sizeLThresholdCommits), new size_threshold_1.SizeThreshold(sizeMThresholdLines, sizeMThresholdFiles, sizeMThresholdCommits), new size_threshold_1.SizeThreshold(sizeSThresholdLines, sizeSThresholdFiles, sizeSThresholdCommits), new size_threshold_1.SizeThreshold(sizeXsThresholdLines, sizeXsThresholdFiles, sizeXsThresholdCommits)), new branches_1.Branches(mainBranch, developmentBranch, featureTree, bugfixTree, hotfixTree, releaseTree, docsTree, choreTree), new release_1.Release(), new hotfix_1.Hotfix(), new workflows_1.Workflows(releaseWorkflow, hotfixWorkflow), new projects_1.Projects(projects, projectColumnIssueCreated, projectColumnPullRequestCreated, projectColumnIssueInProgress, projectColumnPullRequestInProgress), new welcome_1.Welcome(welcomeTitle, welcomeMessages), additionalParams); + const execution = new execution_1.Execution(debug, new single_action_1.SingleAction(singleAction, singleActionIssue, singleActionVersion, singleActionTitle, singleActionChangelog), commitPrefixBuilder, new issue_1.Issue(branchManagementAlways, reopenIssueOnPush, issueDesiredAssigneesCount, additionalParams), new pull_request_1.PullRequest(pullRequestDesiredAssigneesCount, pullRequestDesiredReviewersCount, pullRequestMergeTimeout, additionalParams), new emoji_1.Emoji(titleEmoji, branchManagementEmoji), new images_1.Images(imagesOnIssue, imagesOnPullRequest, 
imagesOnCommit, imagesIssueAutomatic, imagesIssueFeature, imagesIssueBugfix, imagesIssueDocs, imagesIssueChore, imagesIssueRelease, imagesIssueHotfix, imagesPullRequestAutomatic, imagesPullRequestFeature, imagesPullRequestBugfix, imagesPullRequestRelease, imagesPullRequestHotfix, imagesPullRequestDocs, imagesPullRequestChore, imagesCommitAutomatic, imagesCommitFeature, imagesCommitBugfix, imagesCommitRelease, imagesCommitHotfix, imagesCommitDocs, imagesCommitChore), new tokens_1.Tokens(token), new ai_1.Ai(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, bugbotSeverity, bugbotCommentLimit), new labels_1.Labels(branchManagementLauncherLabel, bugLabel, bugfixLabel, hotfixLabel, enhancementLabel, featureLabel, releaseLabel, questionLabel, helpLabel, deployLabel, deployedLabel, docsLabel, documentationLabel, choreLabel, maintenanceLabel, priorityHighLabel, priorityMediumLabel, priorityLowLabel, priorityNoneLabel, sizeXxlLabel, sizeXlLabel, sizeLLabel, sizeMLabel, sizeSLabel, sizeXsLabel), new issue_types_1.IssueTypes(issueTypeTask, issueTypeTaskDescription, issueTypeTaskColor, issueTypeBug, issueTypeBugDescription, issueTypeBugColor, issueTypeFeature, issueTypeFeatureDescription, issueTypeFeatureColor, issueTypeDocumentation, issueTypeDocumentationDescription, issueTypeDocumentationColor, issueTypeMaintenance, issueTypeMaintenanceDescription, issueTypeMaintenanceColor, issueTypeHotfix, issueTypeHotfixDescription, issueTypeHotfixColor, issueTypeRelease, issueTypeReleaseDescription, issueTypeReleaseColor, issueTypeQuestion, issueTypeQuestionDescription, issueTypeQuestionColor, issueTypeHelp, issueTypeHelpDescription, issueTypeHelpColor), new locale_1.Locale(issueLocale, pullRequestLocale), new size_thresholds_1.SizeThresholds(new size_threshold_1.SizeThreshold(sizeXxlThresholdLines, sizeXxlThresholdFiles, sizeXxlThresholdCommits), new size_threshold_1.SizeThreshold(sizeXlThresholdLines, sizeXlThresholdFiles, 
sizeXlThresholdCommits), new size_threshold_1.SizeThreshold(sizeLThresholdLines, sizeLThresholdFiles, sizeLThresholdCommits), new size_threshold_1.SizeThreshold(sizeMThresholdLines, sizeMThresholdFiles, sizeMThresholdCommits), new size_threshold_1.SizeThreshold(sizeSThresholdLines, sizeSThresholdFiles, sizeSThresholdCommits), new size_threshold_1.SizeThreshold(sizeXsThresholdLines, sizeXsThresholdFiles, sizeXsThresholdCommits)), new branches_1.Branches(mainBranch, developmentBranch, featureTree, bugfixTree, hotfixTree, releaseTree, docsTree, choreTree), new release_1.Release(), new hotfix_1.Hotfix(), new workflows_1.Workflows(releaseWorkflow, hotfixWorkflow), new projects_1.Projects(projects, projectColumnIssueCreated, projectColumnPullRequestCreated, projectColumnIssueInProgress, projectColumnPullRequestInProgress), new welcome_1.Welcome(welcomeTitle, welcomeMessages), additionalParams); const results = await (0, common_action_1.mainRun)(execution); let content = ''; const stepsContent = results @@ -46989,6 +46995,15 @@ function getGitInfo() { return { error: constants_1.ERRORS.GIT_REPOSITORY_NOT_FOUND }; } } +/** Get current git branch (for CLI commands that need a branch when -b is omitted). */ +function getCurrentBranch() { + try { + return (0, child_process_1.execSync)('git rev-parse --abbrev-ref HEAD').toString().trim() || 'main'; + } + catch { + return 'main'; + } +} /** * Run the thinking AI scenario for deep code analysis and proposals. */ @@ -47120,7 +47135,7 @@ program return; } try { - const ai = new ai_1.Ai(serverUrl, model, false, false, [], false); + const ai = new ai_1.Ai(serverUrl, model, false, false, [], false, 'low', 20); const aiRepository = new ai_repository_1.AiRepository(); const result = await aiRepository.copilotMessage(ai, prompt); if (!result) { @@ -47235,11 +47250,11 @@ program } }); /** - * Detect potential errors in the branch for an issue (vs base branch). + * Recommend implementation steps for an issue based on its description. 
*/ program - .command('detect-errors') - .description(`${constants_1.TITLE} - Detect potential errors in the branch (vs base) using OpenCode Plan agent`) + .command('recommend-steps') + .description(`${constants_1.TITLE} - Recommend steps to implement an issue (OpenCode Plan agent)`) .option('-i, --issue ', 'Issue number (required)', '') .option('-d, --debug', 'Debug mode', false) .option('-t, --token ', 'Personal access token', process.env.PERSONAL_ACCESS_TOKEN) @@ -47260,7 +47275,7 @@ program // eslint-disable-next-line @typescript-eslint/no-explicit-any -- CLI options map to action inputs const params = { [constants_1.INPUT_KEYS.DEBUG]: options.debug?.toString() ?? 'false', - [constants_1.INPUT_KEYS.SINGLE_ACTION]: constants_1.ACTIONS.DETECT_ERRORS, + [constants_1.INPUT_KEYS.SINGLE_ACTION]: constants_1.ACTIONS.RECOMMEND_STEPS, [constants_1.INPUT_KEYS.SINGLE_ACTION_ISSUE]: parseInt(issueNumber), [constants_1.INPUT_KEYS.TOKEN]: options.token || process.env.PERSONAL_ACCESS_TOKEN, [constants_1.INPUT_KEYS.OPENCODE_SERVER_URL]: options.opencodeServerUrl || process.env.OPENCODE_SERVER_URL || 'http://127.0.0.1:4096', @@ -47268,17 +47283,19 @@ program repo: { owner: gitInfo.owner, repo: gitInfo.repo }, issue: { number: parseInt(issueNumber) }, }; - params[constants_1.INPUT_KEYS.WELCOME_TITLE] = '🔍 Error detection'; - params[constants_1.INPUT_KEYS.WELCOME_MESSAGES] = [`Detecting errors for issue #${issueNumber} in ${gitInfo.owner}/${gitInfo.repo}...`]; + params[constants_1.INPUT_KEYS.WELCOME_TITLE] = '📋 Recommend steps'; + params[constants_1.INPUT_KEYS.WELCOME_MESSAGES] = [`Recommending steps for issue #${issueNumber} in ${gitInfo.owner}/${gitInfo.repo}...`]; await (0, local_action_1.runLocalAction)(params); }); /** - * Recommend implementation steps for an issue based on its description. + * Detect potential problems (bugbot): OpenCode analyzes branch vs base, reports findings + * as comments on the issue and open PR. Previously reported findings can be marked resolved. 
*/ program - .command('recommend-steps') - .description(`${constants_1.TITLE} - Recommend steps to implement an issue (OpenCode Plan agent)`) + .command('detect-potential-problems') + .description(`${constants_1.TITLE} - Detect potential problems in the branch (bugbot): report as comments on issue and PR`) .option('-i, --issue ', 'Issue number (required)', '') + .option('-b, --branch ', 'Branch name (optional, defaults to current git branch)', '') .option('-d, --debug', 'Debug mode', false) .option('-t, --token ', 'Personal access token', process.env.PERSONAL_ACCESS_TOKEN) .option('--opencode-server-url ', 'OpenCode server URL', process.env.OPENCODE_SERVER_URL || 'http://127.0.0.1:4096') @@ -47295,20 +47312,35 @@ program console.log('❌ Provide a valid issue number with -i or --issue'); return; } + const branch = (cleanArg(options.branch) || getCurrentBranch()).trim() || 'main'; // eslint-disable-next-line @typescript-eslint/no-explicit-any -- CLI options map to action inputs const params = { [constants_1.INPUT_KEYS.DEBUG]: options.debug?.toString() ?? 
'false', - [constants_1.INPUT_KEYS.SINGLE_ACTION]: constants_1.ACTIONS.RECOMMEND_STEPS, + [constants_1.INPUT_KEYS.SINGLE_ACTION]: constants_1.ACTIONS.DETECT_POTENTIAL_PROBLEMS, [constants_1.INPUT_KEYS.SINGLE_ACTION_ISSUE]: parseInt(issueNumber), [constants_1.INPUT_KEYS.TOKEN]: options.token || process.env.PERSONAL_ACCESS_TOKEN, [constants_1.INPUT_KEYS.OPENCODE_SERVER_URL]: options.opencodeServerUrl || process.env.OPENCODE_SERVER_URL || 'http://127.0.0.1:4096', [constants_1.INPUT_KEYS.OPENCODE_MODEL]: options.opencodeModel || process.env.OPENCODE_MODEL || constants_1.OPENCODE_DEFAULT_MODEL, repo: { owner: gitInfo.owner, repo: gitInfo.repo }, issue: { number: parseInt(issueNumber) }, + commits: { ref: `refs/heads/${branch}` }, }; - params[constants_1.INPUT_KEYS.WELCOME_TITLE] = '📋 Recommend steps'; - params[constants_1.INPUT_KEYS.WELCOME_MESSAGES] = [`Recommending steps for issue #${issueNumber} in ${gitInfo.owner}/${gitInfo.repo}...`]; - await (0, local_action_1.runLocalAction)(params); + params[constants_1.INPUT_KEYS.WELCOME_TITLE] = '🐛 Detect potential problems (bugbot)'; + params[constants_1.INPUT_KEYS.WELCOME_MESSAGES] = [ + `Detecting potential problems for issue #${issueNumber} on branch ${branch} in ${gitInfo.owner}/${gitInfo.repo}...`, + ]; + try { + await (0, local_action_1.runLocalAction)(params); + process.exit(0); + } + catch (err) { + const error = err instanceof Error ? err : new Error(String(err)); + console.error('❌ Error running detect-potential-problems:', error.message); + if (options.debug) { + console.error(err); + } + process.exit(1); + } }); /** * Run the initial setup to configure labels, issue types, and verify access. @@ -47363,13 +47395,15 @@ const constants_1 = __nccwpck_require__(8593); * API keys are configured on the OpenCode server, not here. 
*/ class Ai { - constructor(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning) { + constructor(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, bugbotMinSeverity, bugbotCommentLimit) { this.opencodeServerUrl = opencodeServerUrl; this.opencodeModel = opencodeModel; this.aiPullRequestDescription = aiPullRequestDescription; this.aiMembersOnly = aiMembersOnly; this.aiIgnoreFiles = aiIgnoreFiles; this.aiIncludeReasoning = aiIncludeReasoning; + this.bugbotMinSeverity = bugbotMinSeverity; + this.bugbotCommentLimit = bugbotCommentLimit; } getOpencodeServerUrl() { return this.opencodeServerUrl; @@ -47389,6 +47423,12 @@ class Ai { getAiIncludeReasoning() { return this.aiIncludeReasoning; } + getBugbotMinSeverity() { + return this.bugbotMinSeverity; + } + getBugbotCommentLimit() { + return this.bugbotCommentLimit; + } /** * Parse "provider/model-id" into { providerID, modelID } for OpenCode session.prompt. * Uses OPENCODE_DEFAULT_MODEL when no model is set (e.g. opencode/kimi-k2.5-free). 
@@ -47838,6 +47878,9 @@ class Execution { else { this.currentConfiguration.parentBranch = this.previousConfiguration?.parentBranch; } + if (this.currentConfiguration.parentBranch === undefined && this.previousConfiguration?.parentBranch != null) { + this.currentConfiguration.parentBranch = this.previousConfiguration.parentBranch; + } if (this.isSingleAction) { /** * Nothing to do here (for now) @@ -48600,8 +48643,8 @@ class SingleAction { get isCheckProgressAction() { return this.currentSingleAction === constants_1.ACTIONS.CHECK_PROGRESS; } - get isDetectErrorsAction() { - return this.currentSingleAction === constants_1.ACTIONS.DETECT_ERRORS; + get isDetectPotentialProblemsAction() { + return this.currentSingleAction === constants_1.ACTIONS.DETECT_POTENTIAL_PROBLEMS; } get isRecommendStepsAction() { return this.currentSingleAction === constants_1.ACTIONS.RECOMMEND_STEPS; @@ -48629,7 +48672,7 @@ class SingleAction { constants_1.ACTIONS.THINK, constants_1.ACTIONS.INITIAL_SETUP, constants_1.ACTIONS.CHECK_PROGRESS, - constants_1.ACTIONS.DETECT_ERRORS, + constants_1.ACTIONS.DETECT_POTENTIAL_PROBLEMS, constants_1.ACTIONS.RECOMMEND_STEPS, ]; /** @@ -48814,10 +48857,52 @@ exports.Workflows = Workflows; "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.AiRepository = exports.TRANSLATION_RESPONSE_SCHEMA = exports.OPENCODE_AGENT_BUILD = exports.OPENCODE_AGENT_PLAN = void 0; +exports.AiRepository = exports.LANGUAGE_CHECK_RESPONSE_SCHEMA = exports.THINK_RESPONSE_SCHEMA = exports.TRANSLATION_RESPONSE_SCHEMA = exports.OPENCODE_AGENT_BUILD = exports.OPENCODE_AGENT_PLAN = void 0; exports.getSessionDiff = getSessionDiff; const constants_1 = __nccwpck_require__(8593); const logger_1 = __nccwpck_require__(8836); +function delay(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)); +} +/** + * Runs an async OpenCode operation with retries. On failure, logs and retries up to OPENCODE_MAX_RETRIES. 
+ * Single retry system for all OpenCode interactions: no parallel retry logic. + * + * Retries when the operation throws, including: + * - Network errors (fetch fails, connection refused, etc.) + * - HTTP errors (4xx/5xx from session create or message) + * - Timeout (OPENCODE_REQUEST_TIMEOUT_MS) + * - Empty or invalid JSON response body (parseJsonResponse throws) + * - Missing session id in create response + * - Parse failure of expected format (e.g. expectJson but text is not valid JSON) when parse is done inside the callback + */ +async function withOpenCodeRetry(fn, context) { + let lastError; + for (let attempt = 1; attempt <= constants_1.OPENCODE_MAX_RETRIES; attempt++) { + try { + return await fn(); + } + catch (error) { + lastError = error; + const message = error instanceof Error ? error.message : String(error); + const cause = error instanceof Error && error.cause instanceof Error + ? error.cause.message + : ''; + const detail = cause ? ` (cause: ${cause})` : ''; + const noResponseHint = message === 'fetch failed' + ? ' No HTTP response; connection lost or timeout. If this was before the client timeout (see log above), the OpenCode server or a proxy may have a shorter timeout.' 
+ : ''; + if (attempt < constants_1.OPENCODE_MAX_RETRIES) { + (0, logger_1.logInfo)(`OpenCode [${context}] attempt ${attempt}/${constants_1.OPENCODE_MAX_RETRIES} failed: ${message}${detail}.${noResponseHint} Retrying in ${constants_1.OPENCODE_RETRY_DELAY_MS}ms...`); + await delay(constants_1.OPENCODE_RETRY_DELAY_MS); + } + else { + (0, logger_1.logError)(`OpenCode [${context}] failed after ${constants_1.OPENCODE_MAX_RETRIES} attempts: ${message}${detail}`); + } + } + } + throw lastError; +} function createTimeoutSignal(ms) { const controller = new AbortController(); setTimeout(() => controller.abort(new Error(`OpenCode request timeout after ${ms}ms`)), ms); @@ -48826,7 +48911,121 @@ function createTimeoutSignal(ms) { function ensureNoTrailingSlash(url) { return url.replace(/\/+$/, '') || url; } -const OPENCODE_RESPONSE_LOG_MAX_LEN = 2000; +function truncate(s, maxLen) { + return s.length <= maxLen ? s : s.slice(0, maxLen) + '...'; +} +const OPENCODE_PROMPT_LOG_PREVIEW_LEN = 500; +const OPENCODE_PROMPT_LOG_FULL_LEN = 3000; +function getValidatedOpenCodeConfig(ai) { + const serverUrl = ai.getOpencodeServerUrl(); + const model = ai.getOpencodeModel(); + if (!serverUrl?.trim() || !model?.trim()) { + (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); + return null; + } + const { providerID, modelID } = ai.getOpencodeModelParts(); + return { serverUrl, providerID, modelID, model }; +} +/** + * Try to extract the first complete JSON object from text (from first `{` with balanced braces). + * Handles being inside a double-quoted string so we don't count braces there. 
+ */ +function extractFirstJsonObject(text) { + const start = text.indexOf('{'); + if (start === -1) + return null; + let depth = 1; + let inString = false; + let escape = false; + let quoteChar = '"'; + for (let i = start + 1; i < text.length; i++) { + const c = text[i]; + if (escape) { + escape = false; + continue; + } + if (c === '\\' && inString) { + escape = true; + continue; + } + if (inString) { + if (c === quoteChar) + inString = false; + continue; + } + if (c === '"' || c === "'") { + inString = true; + quoteChar = c; + continue; + } + if (c === '{') + depth++; + else if (c === '}') { + depth--; + if (depth === 0) + return text.slice(start, i + 1); + } + } + return null; +} +/** + * Parse JSON from agent response text safely. + * Tries: (1) direct parse, (2) strip markdown code fence, (3) extract first JSON object from text (model often adds prose before JSON). + * @throws Error with clear message if parsing fails + */ +function parseJsonFromAgentText(text) { + const trimmed = text.trim(); + if (!trimmed) { + throw new Error('Agent response text is empty'); + } + // 1) Direct parse + try { + return JSON.parse(trimmed); + } + catch { + // 2) Model may wrap JSON in ```json ... ``` or ``` ... ``` + const withoutFence = trimmed + .replace(/^```(?:json)?\s*\n?/i, '') + .replace(/\n?```\s*$/i, '') + .trim(); + try { + return JSON.parse(withoutFence); + } + catch { + // 3) Model may add prose before the JSON (e.g. "Based on my analysis... { ... }") + const extracted = extractFirstJsonObject(trimmed); + if (extracted) { + try { + return JSON.parse(extracted); + } + catch (e) { + const msg = e instanceof Error ? e.message : String(e); + (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length} firstChars=${JSON.stringify(trimmed.slice(0, 200))}`); + throw new Error(`Agent response is not valid JSON: ${msg}`); + } + } + const previewLen = 500; + const msg = trimmed.length > previewLen ? 
`${trimmed.slice(0, previewLen)}...` : trimmed; + const fullTruncated = trimmed.length > 3000 ? `${trimmed.slice(0, 3000)}... [total ${trimmed.length} chars]` : trimmed; + (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length} preview=${JSON.stringify(msg)}`); + (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson) full text for debugging:\n${fullTruncated}`); + throw new Error(`Agent response is not valid JSON: no JSON object found. Response starts with: ${msg.slice(0, 150)}`); + } + } +} +/** + * Extract text from OpenCode message parts by type (e.g. 'text', 'reasoning'), joined with separator. + */ +function extractPartsByType(parts, type, joinWith) { + if (!Array.isArray(parts)) + return ''; + return parts + .filter((p) => p?.type === type && typeof p.text === 'string') + .map((p) => p.text) + .join(joinWith) + .trim(); +} +const OPENCODE_RESPONSE_LOG_MAX_LEN = 80000; /** Parse response as JSON; on empty or invalid body throw a clear error with context. */ async function parseJsonResponse(res, context) { const raw = await res.text(); @@ -48848,29 +49047,33 @@ async function parseJsonResponse(res, context) { throw err; } } -/** - * Extract plain text from OpenCode message response parts (type === 'text'). - */ +/** Extract plain text from OpenCode message response parts (type === 'text'). */ function extractTextFromParts(parts) { - if (!Array.isArray(parts)) - return ''; - return parts - .filter((p) => p?.type === 'text' && typeof p.text === 'string') - .map((p) => p.text) - .join(''); + return extractPartsByType(parts, 'text', ''); +} +/** Extract reasoning from OpenCode message parts (type === 'reasoning'). */ +function extractReasoningFromParts(parts) { + return extractPartsByType(parts, 'reasoning', '\n\n'); } +/** Max length of per-part text preview in debug log (to avoid huge log lines). 
*/ +const OPENCODE_PART_PREVIEW_LEN = 80; /** - * Extract reasoning text from OpenCode message response parts (type === 'reasoning'). - * Used to include the agent's full reasoning in comments (e.g. progress detection). + * Build a short summary of OpenCode message parts for debug logs (types, text lengths, and short preview). */ -function extractReasoningFromParts(parts) { - if (!Array.isArray(parts)) - return ''; - return parts - .filter((p) => p?.type === 'reasoning' && typeof p.text === 'string') - .map((p) => p.text) - .join('\n\n') - .trim(); +function summarizePartsForLog(parts, context) { + if (!Array.isArray(parts) || parts.length === 0) { + return `${context}: 0 parts`; + } + const items = parts.map((p, i) => { + const type = p?.type ?? '(missing type)'; + const text = typeof p?.text === 'string' ? p.text : ''; + const len = text.length; + const preview = len > OPENCODE_PART_PREVIEW_LEN + ? `${text.slice(0, OPENCODE_PART_PREVIEW_LEN).replace(/\n/g, ' ')}...` + : text.replace(/\n/g, ' '); + return `[${i}] type=${type} length=${len}${preview ? ` preview=${JSON.stringify(preview)}` : ''}`; + }); + return `${context}: ${parts.length} part(s) — ${items.join(' | ')}`; } /** Default OpenCode agent for analysis/planning (read-only, no file edits). */ exports.OPENCODE_AGENT_PLAN = 'plan'; @@ -48892,57 +49095,48 @@ exports.TRANSLATION_RESPONSE_SCHEMA = { required: ['translatedText'], additionalProperties: false, }; -/** - * OpenCode HTTP API: create session and send message, return assistant parts. - * Uses fetch to avoid ESM-only SDK with ncc. 
- */ -async function opencodePrompt(baseUrl, providerID, modelID, promptText) { - const base = ensureNoTrailingSlash(baseUrl); - const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); - const createRes = await fetch(`${base}/session`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ title: 'gbf' }), - signal, - }); - if (!createRes.ok) { - const err = await createRes.text(); - throw new Error(`OpenCode session create failed: ${createRes.status} ${err}`); - } - const session = await parseJsonResponse(createRes, 'OpenCode session.create'); - const sessionId = session?.id ?? session?.data?.id; - if (!sessionId) { - throw new Error('OpenCode session.create did not return session id'); - } - const messageRes = await fetch(`${base}/session/${sessionId}/message`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - model: { providerID, modelID }, - parts: [{ type: 'text', text: promptText }], - }), - signal, - }); - if (!messageRes.ok) { - const err = await messageRes.text(); - throw new Error(`OpenCode message failed: ${messageRes.status} ${err}`); - } - const messageData = await parseJsonResponse(messageRes, 'OpenCode message'); - const parts = messageData?.parts ?? messageData?.data?.parts ?? []; - return extractTextFromParts(parts); -} +/** JSON schema for Think (Q&A) responses: single answer field. */ +exports.THINK_RESPONSE_SCHEMA = { + type: 'object', + properties: { + answer: { + type: 'string', + description: 'The concise answer to the user question. Required.', + }, + }, + required: ['answer'], + additionalProperties: false, +}; +/** JSON schema for language check: done (already in locale) or must_translate. 
*/ +exports.LANGUAGE_CHECK_RESPONSE_SCHEMA = { + type: 'object', + properties: { + status: { + type: 'string', + enum: ['done', 'must_translate'], + description: 'done if text is in the requested locale, must_translate otherwise.', + }, + }, + required: ['status'], + additionalProperties: false, +}; /** * Send a message to an OpenCode agent (e.g. "plan", "build") and wait for the full response. - * The server runs the agent loop (tools, etc.) and returns when done. - * Use this to delegate PR description, progress, error detection, recommendations, or copilot (build) to OpenCode. + * Raw call: no retries. Callers (askAgent, copilotMessage) wrap in withOpenCodeRetry. */ -async function opencodeMessageWithAgent(baseUrl, options) { +async function opencodeMessageWithAgentRaw(baseUrl, options) { + (0, logger_1.logInfo)(`OpenCode request [agent ${options.agent}] model=${options.providerID}/${options.modelID} promptLength=${options.promptText.length}`); + (0, logger_1.logInfo)(`OpenCode sending prompt (preview): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_PREVIEW_LEN)}`); + (0, logger_1.logDebugInfo)(`OpenCode prompt (full): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_FULL_LEN)}`); + (0, logger_1.logDebugInfo)(`OpenCode message body: agent=${options.agent}, model=${options.providerID}/${options.modelID}, parts[0].text length=${options.promptText.length}`); const base = ensureNoTrailingSlash(baseUrl); const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); + const sessionBody = { title: 'gbf' }; + (0, logger_1.logDebugInfo)(`OpenCode session create body: ${JSON.stringify(sessionBody)}`); const createRes = await fetch(`${base}/session`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ title: 'gbf' }), + body: JSON.stringify(sessionBody), signal, }); if (!createRes.ok) { @@ -48959,6 +49153,9 @@ async function opencodeMessageWithAgent(baseUrl, options) { model: { providerID: options.providerID, 
modelID: options.modelID }, parts: [{ type: 'text', text: options.promptText }], }; + (0, logger_1.logDebugInfo)(`OpenCode POST /session/${sessionId}/message body (keys): agent, model, parts (${body.parts.length} part(s))`); + const timeoutMin = Math.round(constants_1.OPENCODE_REQUEST_TIMEOUT_MS / 60000); + (0, logger_1.logInfo)(`OpenCode: waiting for agent "${options.agent}" message response (client timeout: ${timeoutMin} min)...`); const messageRes = await fetch(`${base}/session/${sessionId}/message`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, @@ -48971,139 +49168,117 @@ async function opencodeMessageWithAgent(baseUrl, options) { } const messageData = await parseJsonResponse(messageRes, `OpenCode agent "${options.agent}" message`); const parts = messageData?.parts ?? messageData?.data?.parts ?? []; - const text = extractTextFromParts(parts); - return { text, parts, sessionId }; + const partsArray = Array.isArray(parts) ? parts : []; + (0, logger_1.logDebugInfo)(summarizePartsForLog(partsArray, `OpenCode agent "${options.agent}" message parts`)); + const text = extractTextFromParts(partsArray); + (0, logger_1.logInfo)(`OpenCode response [agent ${options.agent}] responseLength=${text.length} sessionId=${sessionId}`); + return { text, parts: partsArray, sessionId }; } /** * Get the diff for an OpenCode session (files changed by the agent). * Call after opencodeMessageWithAgent when using the "build" agent so the user can see what was edited. + * Wrapped with retries (OPENCODE_MAX_RETRIES). 
*/ async function getSessionDiff(baseUrl, sessionId) { - const base = ensureNoTrailingSlash(baseUrl); - const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); - const res = await fetch(`${base}/session/${sessionId}/diff`, { method: 'GET', signal }); - if (!res.ok) - return []; - const raw = await res.text(); - if (!raw?.trim()) - return []; - let data; - try { - data = JSON.parse(raw); - } - catch { - return []; - } - if (Array.isArray(data)) - return data; - if (Array.isArray(data.data)) - return data.data; - return []; + return withOpenCodeRetry(async () => { + (0, logger_1.logInfo)(`OpenCode request [session diff] sessionId=${sessionId}`); + const base = ensureNoTrailingSlash(baseUrl); + const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); + const res = await fetch(`${base}/session/${sessionId}/diff`, { method: 'GET', signal }); + if (!res.ok) { + (0, logger_1.logInfo)(`OpenCode response [session diff] fileCount=0 (status ${res.status})`); + return []; + } + const raw = await res.text(); + if (!raw?.trim()) { + (0, logger_1.logInfo)('OpenCode response [session diff] fileCount=0 (empty body)'); + return []; + } + let data; + try { + data = JSON.parse(raw); + } + catch { + (0, logger_1.logInfo)('OpenCode response [session diff] fileCount=0 (invalid JSON)'); + return []; + } + const list = Array.isArray(data) + ? data + : Array.isArray(data.data) + ? 
data.data + : []; + (0, logger_1.logInfo)(`OpenCode response [session diff] fileCount=${list.length}`); + return list; + }, 'session diff'); } class AiRepository { constructor() { - this.ask = async (ai, prompt) => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); - return undefined; - } - try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - const text = await opencodePrompt(serverUrl, providerID, modelID, prompt); - return text || undefined; - } - catch (error) { - (0, logger_1.logError)(`Error querying OpenCode (${model}): ${error}`); - return undefined; - } - }; /** - * Ask an OpenCode agent (e.g. Plan) to perform a task. The server runs the full agent loop. - * Returns the final message (including reasoning in parts when includeReasoning is true). - * @param ai - AI config (server URL, model) - * @param agentId - OpenCode agent id (e.g. OPENCODE_AGENT_PLAN) - * @param prompt - User prompt - * @param options - expectJson, schema, includeReasoning - * @returns Response text, or parsed JSON when expectJson is true + * Ask an OpenCode agent (e.g. Plan) to perform a task. All calls use strict response (expectJson + schema). + * Single retry system: HTTP failures and parse failures both retry up to OPENCODE_MAX_RETRIES. */ this.askAgent = async (ai, agentId, prompt, options = {}) => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); + const config = getValidatedOpenCodeConfig(ai); + if (!config) return undefined; - } + const { serverUrl, providerID, modelID, model } = config; + const schemaName = options.schemaName ?? 'response'; + const promptText = options.expectJson && options.schema + ? 
`Respond with a single JSON object that strictly conforms to this schema (name: ${schemaName}). No other text or markdown.\n\nSchema: ${JSON.stringify(options.schema)}\n\nUser request:\n${prompt}` + : prompt; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - let promptText = prompt; - if (options.expectJson && options.schema) { - const schemaName = options.schemaName ?? 'response'; - promptText = `Respond with a single JSON object that strictly conforms to this schema (name: ${schemaName}). No other text or markdown.\n\nSchema: ${JSON.stringify(options.schema)}\n\nUser request:\n${prompt}`; - } - const { text, parts } = await opencodeMessageWithAgent(serverUrl, { - providerID, - modelID, - agent: agentId, - promptText, - }); - if (!text) - return undefined; - const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : ''; - if (options.expectJson) { - const cleaned = text.replace(/^```json\n?/, '').replace(/\n?```$/, '').trim(); - const parsed = JSON.parse(cleaned); - if (options.includeReasoning && reasoning) { - return { ...parsed, reasoning }; + return await withOpenCodeRetry(async () => { + const { text, parts } = await opencodeMessageWithAgentRaw(serverUrl, { + providerID, + modelID, + agent: agentId, + promptText, + }); + if (!text) + throw new Error('Empty response text'); + const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : ''; + if (options.expectJson && options.schema) { + const maxLogLen = 5000000; + const toLog = text.length > maxLogLen ? `${text.slice(0, maxLogLen)}\n... 
[truncated, total ${text.length} chars]` : text; + (0, logger_1.logInfo)(`OpenCode agent response (full text, expectJson=true) length=${text.length}:\n${toLog}`); + const parsed = parseJsonFromAgentText(text); + if (options.includeReasoning && reasoning) { + return { ...parsed, reasoning }; + } + return parsed; } - return parsed; - } - return text; + return text; + }, `agent ${agentId}`); } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); - const errWithCause = err; - const cause = errWithCause.cause instanceof Error - ? errWithCause.cause.message - : errWithCause.cause != null - ? String(errWithCause.cause) - : ''; - const detail = cause ? ` (${cause})` : ''; + const cause = err instanceof Error && err.cause; + const detail = cause != null ? ` (${cause instanceof Error ? cause.message : String(cause)})` : ''; (0, logger_1.logError)(`Error querying OpenCode agent ${agentId} (${model}): ${err.message}${detail}`); return undefined; } }; /** * Run the OpenCode "build" agent for the copilot command. Returns the final message and sessionId. + * Uses the same retry system (OPENCODE_MAX_RETRIES). 
*/ this.copilotMessage = async (ai, prompt) => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); + const config = getValidatedOpenCodeConfig(ai); + if (!config) return undefined; - } + const { serverUrl, providerID, modelID, model } = config; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - const result = await opencodeMessageWithAgent(serverUrl, { + const result = await withOpenCodeRetry(() => opencodeMessageWithAgentRaw(serverUrl, { providerID, modelID, agent: exports.OPENCODE_AGENT_BUILD, promptText: prompt, - }); + }), `agent ${exports.OPENCODE_AGENT_BUILD}`); return { text: result.text, sessionId: result.sessionId }; } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); - const errWithCause = err; - const cause = errWithCause.cause instanceof Error - ? errWithCause.cause.message - : errWithCause.cause != null - ? String(errWithCause.cause) - : ''; - const detail = cause ? ` (${cause})` : ''; - (0, logger_1.logError)(`Error querying OpenCode build agent (${model}): ${err.message}${detail}`); + (0, logger_1.logError)(`Error querying OpenCode build agent (${model}): ${err.message}`); return undefined; } }; @@ -49330,7 +49505,7 @@ class BranchRepository { else { baseBranchName = hotfixBranch ?? developmentBranch; } - if (!isRenamingBranch) { + if (!isRenamingBranch || param.currentConfiguration.parentBranch === undefined) { param.currentConfiguration.parentBranch = baseBranchName; } (0, logger_1.logDebugInfo)(`============================================================================================`); @@ -50221,6 +50396,30 @@ class IssueRepository { }); (0, logger_1.logDebugInfo)(`Comment ${commentId} updated in Issue ${issueNumber}.`); }; + /** + * Lists all comments on an issue (for bugbot: find existing findings by marker). 
+ * Uses pagination to fetch every comment (default API returns only 30 per page). + */ + this.listIssueComments = async (owner, repository, issueNumber, token) => { + const octokit = github.getOctokit(token); + const all = []; + for await (const response of octokit.paginate.iterator(octokit.rest.issues.listComments, { + owner, + repo: repository, + issue_number: issueNumber, + per_page: 100, + })) { + const data = response.data || []; + for (const c of data) { + all.push({ + id: c.id, + body: c.body ?? null, + user: c.user, + }); + } + } + return all; + }; this.closeIssue = async (owner, repository, issueNumber, token) => { const octokit = github.getOctokit(token); const { data: issue } = await octokit.rest.issues.get({ @@ -51400,15 +51599,35 @@ class PullRequestRepository { }); (0, logger_1.logDebugInfo)(`Updated PR #${pullRequestNumber} description with: ${description}`); }; + /** + * Returns all users involved in review: requested (pending) + those who already submitted a review. + * Used to avoid re-requesting someone who already reviewed when ensuring desired reviewer count. 
+ */ this.getCurrentReviewers = async (owner, repository, pullNumber, token) => { const octokit = github.getOctokit(token); try { - const { data } = await octokit.rest.pulls.listRequestedReviewers({ - owner, - repo: repository, - pull_number: pullNumber, - }); - return data.users.map((user) => user.login); + const [requestedRes, reviewsRes] = await Promise.all([ + octokit.rest.pulls.listRequestedReviewers({ + owner, + repo: repository, + pull_number: pullNumber, + }), + octokit.rest.pulls.listReviews({ + owner, + repo: repository, + pull_number: pullNumber, + }), + ]); + const logins = new Set(); + for (const user of requestedRes.data.users) { + logins.add(user.login); + } + for (const review of reviewsRes.data) { + if (review.user?.login) { + logins.add(review.user.login); + } + } + return Array.from(logins); } catch (error) { (0, logger_1.logError)(`Error getting reviewers of PR: ${error}.`); @@ -51454,6 +51673,30 @@ class PullRequestRepository { return []; } }; + /** + * Returns for each changed file the first line number that appears in the diff (right side). + * Used so review comments use a line that GitHub can resolve (avoids "line could not be resolved"). + */ + this.getFilesWithFirstDiffLine = async (owner, repository, pullNumber, token) => { + const octokit = github.getOctokit(token); + try { + const { data } = await octokit.rest.pulls.listFiles({ + owner, + repo: repository, + pull_number: pullNumber, + }); + return (data || []) + .filter((f) => f.status !== 'removed' && (f.patch ?? '').length > 0) + .map((f) => { + const firstLine = PullRequestRepository.firstLineFromPatch(f.patch ?? ''); + return { path: f.filename, firstLine: firstLine ?? 
1 }; + }); + } + catch (error) { + (0, logger_1.logError)(`Error getting files with diff lines (owner=${owner}, repo=${repository}, pullNumber=${pullNumber}): ${error}.`); + return []; + } + }; this.getPullRequestChanges = async (owner, repository, pullNumber, token) => { const octokit = github.getOctokit(token); const allFiles = []; @@ -51480,43 +51723,215 @@ class PullRequestRepository { return []; } }; - } -} -exports.PullRequestRepository = PullRequestRepository; - - -/***/ }), - -/***/ 779: -/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { - -"use strict"; - -var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - var desc = Object.getOwnPropertyDescriptor(m, k); - if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { - desc = { enumerable: true, get: function() { return m[k]; } }; - } - Object.defineProperty(o, k2, desc); -}) : (function(o, m, k, k2) { - if (k2 === undefined) k2 = k; - o[k2] = m[k]; -})); -var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { - Object.defineProperty(o, "default", { enumerable: true, value: v }); -}) : function(o, v) { - o["default"] = v; -}); -var __importStar = (this && this.__importStar) || (function () { - var ownKeys = function(o) { - ownKeys = Object.getOwnPropertyNames || function (o) { - var ar = []; - for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; - return ar; - }; - return ownKeys(o); - }; + /** Head commit SHA of the PR (for creating review). 
*/ + this.getPullRequestHeadSha = async (owner, repository, pullNumber, token) => { + const octokit = github.getOctokit(token); + try { + const { data } = await octokit.rest.pulls.get({ + owner, + repo: repository, + pull_number: pullNumber, + }); + return data.head?.sha; + } + catch (error) { + (0, logger_1.logError)(`Error getting PR head SHA: ${error}.`); + return undefined; + } + }; + /** + * List all review comments on a PR (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). + * Includes node_id for GraphQL (e.g. resolve review thread). + */ + this.listPullRequestReviewComments = async (owner, repository, pullNumber, token) => { + const octokit = github.getOctokit(token); + const all = []; + try { + for await (const response of octokit.paginate.iterator(octokit.rest.pulls.listReviewComments, { + owner, + repo: repository, + pull_number: pullNumber, + per_page: 100, + })) { + const data = response.data || []; + all.push(...data.map((c) => ({ + id: c.id, + body: c.body ?? null, + path: c.path, + line: c.line ?? undefined, + node_id: c.node_id ?? undefined, + }))); + } + return all; + } + catch (error) { + (0, logger_1.logError)(`Error listing PR review comments (owner=${owner}, repo=${repository}, pullNumber=${pullNumber}): ${error}.`); + return []; + } + }; + /** + * Resolve a PR review thread (GraphQL only). Finds the thread that contains the given comment and marks it resolved. + * Uses repository.pullRequest.reviewThreads because the field pullRequestReviewThread on PullRequestReviewComment was removed from the API. + * Paginates through all threads and all comments in each thread so the comment is found regardless of PR size. + * No-op if thread is already resolved. Logs and does not throw on error. 
+ */ + this.resolvePullRequestReviewThread = async (owner, repository, pullNumber, commentNodeId, token) => { + const octokit = github.getOctokit(token); + try { + let threadId = null; + let threadsCursor = null; + outer: do { + const threadsData = await octokit.graphql(`query ($owner: String!, $repo: String!, $prNumber: Int!, $threadsAfter: String) { + repository(owner: $owner, name: $repo) { + pullRequest(number: $prNumber) { + reviewThreads(first: 100, after: $threadsAfter) { + nodes { + id + comments(first: 100) { + nodes { id } + pageInfo { hasNextPage endCursor } + } + } + pageInfo { hasNextPage endCursor } + } + } + } + }`, { owner, repo: repository, prNumber: pullNumber, threadsAfter: threadsCursor }); + const threads = threadsData?.repository?.pullRequest?.reviewThreads; + if (!threads?.nodes?.length) + break; + for (const thread of threads.nodes) { + let commentsCursor = null; + let commentNodes = thread.comments?.nodes ?? []; + let commentsPageInfo = thread.comments?.pageInfo; + do { + if (commentNodes.some((c) => c.id === commentNodeId)) { + threadId = thread.id; + break outer; + } + if (!commentsPageInfo?.hasNextPage || commentsPageInfo.endCursor == null) + break; + commentsCursor = commentsPageInfo.endCursor; + const nextComments = await octokit.graphql(`query ($threadId: ID!, $commentsAfter: String) { + node(id: $threadId) { + ... on PullRequestReviewThread { + comments(first: 100, after: $commentsAfter) { + nodes { id } + pageInfo { hasNextPage endCursor } + } + } + } + }`, { threadId: thread.id, commentsAfter: commentsCursor }); + commentNodes = nextComments?.node?.comments?.nodes ?? []; + commentsPageInfo = nextComments?.node?.comments?.pageInfo ?? { hasNextPage: false, endCursor: null }; + } while (commentsPageInfo?.hasNextPage === true && commentsPageInfo?.endCursor != null); + } + const pageInfo = threads.pageInfo; + if (threadId != null || !pageInfo?.hasNextPage) + break; + threadsCursor = pageInfo.endCursor ?? 
null; + } while (threadsCursor != null); + if (!threadId) { + (0, logger_1.logError)(`[Bugbot] No review thread found for comment node_id=${commentNodeId}.`); + return; + } + await octokit.graphql(`mutation ($threadId: ID!) { + resolveReviewThread(input: { threadId: $threadId }) { + thread { id } + } + }`, { threadId }); + (0, logger_1.logDebugInfo)(`Resolved PR review thread ${threadId}.`); + } + catch (err) { + (0, logger_1.logError)(`[Bugbot] Error resolving PR review thread (commentNodeId=${commentNodeId}, owner=${owner}, repo=${repository}): ${err}`); + } + }; + /** + * Create a review on the PR with one or more inline comments (bugbot findings). + * Each comment requires path and line (use first file and line 1 if not specified). + */ + this.createReviewWithComments = async (owner, repository, pullNumber, commitId, comments, token) => { + if (comments.length === 0) + return; + const octokit = github.getOctokit(token); + const results = await Promise.allSettled(comments.map((c) => octokit.rest.pulls.createReviewComment({ + owner, + repo: repository, + pull_number: pullNumber, + commit_id: commitId, + path: c.path, + line: c.line, + side: 'RIGHT', + body: c.body, + }))); + let created = 0; + results.forEach((result, i) => { + if (result.status === 'fulfilled') { + created += 1; + } + else { + const c = comments[i]; + (0, logger_1.logError)(`[Bugbot] Error creating PR review comment. path="${c.path}", line=${c.line}, prNumber=${pullNumber}, owner=${owner}, repo=${repository}: ${result.reason}`); + } + }); + if (created > 0) { + (0, logger_1.logDebugInfo)(`Created ${created} review comment(s) on PR #${pullNumber}.`); + } + }; + /** Update an existing PR review comment (e.g. to mark finding as resolved in body). 
*/ + this.updatePullRequestReviewComment = async (owner, repository, commentId, body, token) => { + const octokit = github.getOctokit(token); + await octokit.rest.pulls.updateReviewComment({ + owner, + repo: repository, + comment_id: commentId, + body, + }); + (0, logger_1.logDebugInfo)(`Updated review comment ${commentId}.`); + }; + } + /** First line (right side) of the first hunk per file, for valid review comment placement. */ + static firstLineFromPatch(patch) { + const match = patch.match(/^@@ -\d+,\d+ \+(\d+),\d+ @@/m); + return match ? parseInt(match[1], 10) : undefined; + } +} +exports.PullRequestRepository = PullRequestRepository; + + +/***/ }), + +/***/ 779: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { + desc = { enumerable: true, get: function() { return m[k]; } }; + } + Object.defineProperty(o, k2, desc); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || (function () { + var ownKeys = function(o) { + ownKeys = Object.getOwnPropertyNames || function (o) { + var ar = []; + for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; return function (mod) { if (mod && mod.__esModule) return mod; var result = {}; @@ -51752,12 +52167,46 @@ exports.ConfigurationHandler = void 0; const config_1 = __nccwpck_require__(1106); const logger_1 = __nccwpck_require__(8836); const issue_content_interface_1 = __nccwpck_require__(9913); +/** Keys that must be preserved from stored config when current has undefined (e.g. when branch already existed). */ +const CONFIG_KEYS_TO_PRESERVE = [ + 'parentBranch', + 'workingBranch', + 'releaseBranch', + 'hotfixBranch', + 'hotfixOriginBranch', + 'branchType', +]; class ConfigurationHandler extends issue_content_interface_1.IssueContentInterface { constructor() { super(...arguments); this.update = async (execution) => { try { - return await this.internalUpdate(execution, JSON.stringify(execution.currentConfiguration, null, 4)); + const current = execution.currentConfiguration; + const payload = { + branchType: current.branchType, + releaseBranch: current.releaseBranch, + workingBranch: current.workingBranch, + parentBranch: current.parentBranch, + hotfixOriginBranch: current.hotfixOriginBranch, + hotfixBranch: current.hotfixBranch, + results: current.results, + branchConfiguration: current.branchConfiguration, + }; + const storedRaw = await this.internalGetter(execution); + if (storedRaw != null && storedRaw.trim().length > 0) { + try { + const stored = JSON.parse(storedRaw); + for (const key of CONFIG_KEYS_TO_PRESERVE) { + if (payload[key] === undefined && stored[key] !== undefined) { + payload[key] = stored[key]; + } + } + } + catch { + /* 
ignore parse errors, save current as-is */ + } + } + return await this.internalUpdate(execution, JSON.stringify(payload, null, 4)); } catch (error) { (0, logger_1.logError)(`Error updating issue description: ${error}`); @@ -51814,7 +52263,6 @@ const PROGRESS_RESPONSE_SCHEMA = { required: ['progress', 'summary'], additionalProperties: false, }; -const MAX_PROGRESS_ATTEMPTS = 3; class CheckProgressUseCase { constructor() { this.taskId = 'CheckProgressUseCase'; @@ -51910,28 +52358,18 @@ class CheckProgressUseCase { const developmentBranch = param.branches.development || 'develop'; (0, logger_1.logInfo)(`📦 Progress will be assessed from workspace diff: base branch "${developmentBranch}", current branch "${branch}" (OpenCode agent will run git diff).`); const prompt = this.buildProgressPrompt(issueNumber, issueDescription, branch, developmentBranch); - let progress = 0; - let summary = 'Unable to determine progress.'; - let reasoning = ''; - let remaining = ''; - for (let attempt = 1; attempt <= MAX_PROGRESS_ATTEMPTS; attempt++) { - (0, logger_1.logInfo)(`🤖 Analyzing progress using OpenCode Plan agent... 
(attempt ${attempt}/${MAX_PROGRESS_ATTEMPTS})`); - const attemptResult = await this.fetchProgressAttempt(param.ai, prompt); - progress = attemptResult.progress; - summary = attemptResult.summary; - reasoning = attemptResult.reasoning; - remaining = attemptResult.remaining; - if (progress > 0) { - (0, logger_1.logInfo)(`✅ Progress detection completed: ${progress}%`); - break; - } - if (attempt < MAX_PROGRESS_ATTEMPTS) { - (0, logger_1.logInfo)(`⚠️ Progress returned 0% (attempt ${attempt}/${MAX_PROGRESS_ATTEMPTS}), retrying...`); - } + (0, logger_1.logInfo)('🤖 Analyzing progress using OpenCode Plan agent...'); + const attemptResult = await this.fetchProgressAttempt(param.ai, prompt); + const progress = attemptResult.progress; + const summary = attemptResult.summary; + const reasoning = attemptResult.reasoning; + const remaining = attemptResult.remaining; + if (progress > 0) { + (0, logger_1.logInfo)(`✅ Progress detection completed: ${progress}%`); } - const progressFailedAfterRetries = progress === 0; - if (progressFailedAfterRetries) { - (0, logger_1.logError)(`Progress detection failed: received 0% after ${MAX_PROGRESS_ATTEMPTS} attempts. This may be due to a model error.`); + const progressFailed = progress === 0; + if (progressFailed) { + (0, logger_1.logError)('Progress detection returned 0%. This may be due to a model error or no changes detected.'); results.push(new result_1.Result({ id: this.taskId, success: false, @@ -51941,7 +52379,7 @@ class CheckProgressUseCase { summary, ], errors: [ - `Progress detection failed: received 0% after ${MAX_PROGRESS_ATTEMPTS} attempts. This may be due to a model error. There are changes on the branch; consider re-running the check.`, + 'Progress detection returned 0%. This may be due to a model error or no changes detected. Consider re-running the check.', ], payload: { progress: 0, @@ -52012,7 +52450,7 @@ class CheckProgressUseCase { } /** * Calls the OpenCode agent once and returns parsed progress, summary, and reasoning. 
- * Used inside the retry loop when progress is 0%. + * HTTP-level retries are handled by AiRepository (OPENCODE_MAX_RETRIES). */ async fetchProgressAttempt(ai, prompt) { const agentResponse = await this.aiRepository.askAgent(ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { @@ -52311,17 +52749,63 @@ class DeployedActionUseCase { `Label \`${param.labels.deployed}\` added after a success deploy.`, ], })); + const mergeResults = []; if (param.currentConfiguration.releaseBranch) { const mergeToDefaultResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.currentConfiguration.releaseBranch, param.branches.defaultBranch, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDefaultResult); + mergeResults.push(...mergeToDefaultResult); const mergeToDevelopResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.currentConfiguration.releaseBranch, param.branches.development, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDevelopResult); + mergeResults.push(...mergeToDevelopResult); } else if (param.currentConfiguration.hotfixBranch) { const mergeToDefaultResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.currentConfiguration.hotfixBranch, param.branches.defaultBranch, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDefaultResult); + mergeResults.push(...mergeToDefaultResult); const mergeToDevelopResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.branches.defaultBranch, param.branches.development, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDevelopResult); + mergeResults.push(...mergeToDevelopResult); + } + const mergesAttempted = mergeResults.length > 0; + const allMergesSucceeded = mergesAttempted && mergeResults.every((r) => r.success); + if (allMergesSucceeded) { + const issueNumber = Number(param.singleAction.issue); + const closed = await 
this.issueRepository.closeIssue(param.owner, param.repo, issueNumber, param.tokens.token); + if (closed) { + (0, logger_1.logDebugInfo)(`Issue #${issueNumber} closed after merges to default and develop.`); + result.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: true, + steps: [ + `Issue #${issueNumber} closed after merge to \`${param.branches.defaultBranch}\` and \`${param.branches.development}\`.`, + ], + })); + } + } + else { + if (mergesAttempted) { + (0, logger_1.logDebugInfo)(`Skipping issue close: one or more merges failed. Issue #${param.singleAction.issue} remains open.`); + result.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Issue #${param.singleAction.issue} was not closed because one or more merge operations failed.`, + ], + })); + } + else { + (0, logger_1.logDebugInfo)(`Skipping issue close: no release or hotfix branch configured. Issue #${param.singleAction.issue} remains open.`); + result.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Issue #${param.singleAction.issue} was not closed because no release or hotfix branch was configured (no merge operations were performed).`, + ], + })); + } } return result; } @@ -52341,116 +52825,6 @@ class DeployedActionUseCase { exports.DeployedActionUseCase = DeployedActionUseCase; -/***/ }), - -/***/ 938: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.DetectErrorsUseCase = void 0; -const result_1 = __nccwpck_require__(7305); -const logger_1 = __nccwpck_require__(8836); -const issue_repository_1 = __nccwpck_require__(57); -const branch_repository_1 = __nccwpck_require__(7701); -const ai_repository_1 = __nccwpck_require__(8307); -class DetectErrorsUseCase { - constructor() { - this.taskId = 'DetectErrorsUseCase'; - this.issueRepository = new 
issue_repository_1.IssueRepository(); - this.branchRepository = new branch_repository_1.BranchRepository(); - this.aiRepository = new ai_repository_1.AiRepository(); - } - async invoke(param) { - (0, logger_1.logInfo)(`Executing ${this.taskId}.`); - const results = []; - try { - if (!param.ai?.getOpencodeModel() || !param.ai?.getOpencodeServerUrl()) { - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: ['Missing OPENCODE_SERVER_URL and OPENCODE_MODEL.'], - })); - return results; - } - const issueNumber = param.issueNumber; - if (issueNumber === -1) { - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: ['Issue number not found.'], - })); - return results; - } - let branch = param.commit.branch; - if (!branch) { - const branchTypes = [ - param.branches.featureTree, - param.branches.bugfixTree, - param.branches.docsTree, - param.branches.choreTree, - ]; - const branches = await this.branchRepository.getListOfBranches(param.owner, param.repo, param.tokens.token); - for (const type of branchTypes) { - const prefix = `${type}/${issueNumber}-`; - const found = branches.find((b) => b.indexOf(prefix) > -1); - if (found) { - branch = found; - break; - } - } - } - const developmentBranch = param.branches.development || 'develop'; - if (!branch) { - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: [`No branch found for issue #${issueNumber}.`], - })); - return results; - } - const changes = await this.branchRepository.getChanges(param.owner, param.repo, branch, developmentBranch, param.tokens.token); - const prompt = `Review the code changes in branch "${branch}" compared to "${developmentBranch}" and identify potential errors, bugs, or issues. - -**Changed files and patches:** -${changes.files - .slice(0, 30) - .map((f) => `### ${f.filename} (${f.status})\n\`\`\`diff\n${(f.patch ?? 
'').slice(0, 1500)}\n\`\`\``) - .join('\n\n')} - -List potential errors, bugs, or code quality issues. For each: file (if relevant), brief description, and severity if obvious. Use clear bullet points or numbered list.`; - (0, logger_1.logInfo)(`🤖 Detecting errors using OpenCode Plan agent...`); - const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt); - const report = typeof response === 'string' - ? response - : (response && String(response.report)) || 'No response.'; - results.push(new result_1.Result({ - id: this.taskId, - success: true, - executed: true, - steps: ['Error detection completed (OpenCode Plan agent).', report], - payload: { issueNumber, branch, developmentBranch, report }, - })); - } - catch (error) { - (0, logger_1.logError)(`Error in ${this.taskId}: ${error}`); - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: [`Error in ${this.taskId}: ${error}`], - })); - } - return results; - } -} -exports.DetectErrorsUseCase = DetectErrorsUseCase; - - /***/ }), /***/ 3943: @@ -52774,6 +53148,7 @@ const logger_1 = __nccwpck_require__(8836); const check_progress_use_case_1 = __nccwpck_require__(7744); const notify_new_commit_on_issue_use_case_1 = __nccwpck_require__(8020); const check_changes_issue_size_use_case_1 = __nccwpck_require__(5863); +const detect_potential_problems_use_case_1 = __nccwpck_require__(7395); class CommitUseCase { constructor() { this.taskId = 'CommitUseCase'; @@ -52792,6 +53167,7 @@ class CommitUseCase { results.push(...(await new notify_new_commit_on_issue_use_case_1.NotifyNewCommitOnIssueUseCase().invoke(param))); results.push(...(await new check_changes_issue_size_use_case_1.CheckChangesIssueSizeUseCase().invoke(param))); results.push(...(await new check_progress_use_case_1.CheckProgressUseCase().invoke(param))); + results.push(...(await new detect_potential_problems_use_case_1.DetectPotentialProblemsUseCase().invoke(param))); } catch 
(error) { (0, logger_1.logError)(error); @@ -53050,85 +53426,704 @@ class PullRequestUseCase { return results; } } -exports.PullRequestUseCase = PullRequestUseCase; +exports.PullRequestUseCase = PullRequestUseCase; + + +/***/ }), + +/***/ 6479: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.SingleActionUseCase = void 0; +const result_1 = __nccwpck_require__(7305); +const logger_1 = __nccwpck_require__(8836); +const deployed_action_use_case_1 = __nccwpck_require__(8293); +const publish_github_action_use_case_1 = __nccwpck_require__(9029); +const create_release_use_case_1 = __nccwpck_require__(2430); +const create_tag_use_case_1 = __nccwpck_require__(5279); +const think_use_case_1 = __nccwpck_require__(3841); +const initial_setup_use_case_1 = __nccwpck_require__(3943); +const check_progress_use_case_1 = __nccwpck_require__(7744); +const recommend_steps_use_case_1 = __nccwpck_require__(3538); +const detect_potential_problems_use_case_1 = __nccwpck_require__(7395); +class SingleActionUseCase { + constructor() { + this.taskId = 'SingleActionUseCase'; + } + async invoke(param) { + (0, logger_1.logInfo)(`Executing ${this.taskId}.`); + const results = []; + try { + if (!param.singleAction.validSingleAction) { + (0, logger_1.logDebugInfo)(`Not a valid single action: ${param.singleAction.currentSingleAction}`); + return results; + } + if (param.singleAction.isDeployedAction) { + results.push(...await new deployed_action_use_case_1.DeployedActionUseCase().invoke(param)); + } + else if (param.singleAction.isPublishGithubAction) { + results.push(...await new publish_github_action_use_case_1.PublishGithubActionUseCase().invoke(param)); + } + else if (param.singleAction.isCreateReleaseAction) { + results.push(...await new create_release_use_case_1.CreateReleaseUseCase().invoke(param)); + } + else if (param.singleAction.isCreateTagAction) { + results.push(...await 
new create_tag_use_case_1.CreateTagUseCase().invoke(param)); + } + else if (param.singleAction.isThinkAction) { + results.push(...await new think_use_case_1.ThinkUseCase().invoke(param)); + } + else if (param.singleAction.isInitialSetupAction) { + results.push(...await new initial_setup_use_case_1.InitialSetupUseCase().invoke(param)); + } + else if (param.singleAction.isCheckProgressAction) { + results.push(...await new check_progress_use_case_1.CheckProgressUseCase().invoke(param)); + } + else if (param.singleAction.isDetectPotentialProblemsAction) { + results.push(...await new detect_potential_problems_use_case_1.DetectPotentialProblemsUseCase().invoke(param)); + } + else if (param.singleAction.isRecommendStepsAction) { + results.push(...await new recommend_steps_use_case_1.RecommendStepsUseCase().invoke(param)); + } + } + catch (error) { + (0, logger_1.logError)(error); + results.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Error executing single action: ${param.singleAction.currentSingleAction}.`, + ], + error: error, + })); + } + return results; + } +} +exports.SingleActionUseCase = SingleActionUseCase; + + +/***/ }), + +/***/ 6339: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.buildBugbotPrompt = buildBugbotPrompt; +function buildBugbotPrompt(param, context) { + const headBranch = param.commit.branch; + const baseBranch = param.currentConfiguration.parentBranch ?? param.branches.development ?? 'develop'; + const previousBlock = context.previousFindingsBlock; + const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? []; + const ignoreBlock = ignorePatterns.length > 0 + ? `\n**Files to ignore:** Do not report findings in files or paths matching these patterns: ${ignorePatterns.join(', ')}.` + : ''; + return `You are analyzing the latest code changes for potential bugs and issues. 
+ +**Repository context:** +- Owner: ${param.owner} +- Repository: ${param.repo} +- Branch (head): ${headBranch} +- Base branch: ${baseBranch} +- Issue number: ${param.issueNumber} +${ignoreBlock} + +**Your task 1 (new/current problems):** Determine what has changed in the branch "${headBranch}" compared to "${baseBranch}" (you must compute or obtain the diff yourself using the repository context above). Then identify potential bugs, logic errors, security issues, and code quality problems. Be strict and descriptive. One finding per distinct problem. Return them in the \`findings\` array (each with id, title, description; optionally file, line, severity, suggestion). Only include findings in files that are not in the ignore list above. +${previousBlock} + +**Output:** Return a JSON object with: "findings" (array of new/current problems from task 1), and if we gave you previously reported issues above, "resolved_finding_ids" (array of those ids that are now fixed or no longer apply, as per task 2).`; +} + + +/***/ }), + +/***/ 7384: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.deduplicateFindings = deduplicateFindings; +/** + * Deduplicates findings by (file, line). When two findings share the same file and line, + * keeps the first; when they have no file, groups by normalized title and keeps the first. + * This reduces noise when the agent returns near-duplicate issues. + */ +function deduplicateFindings(findings) { + const seen = new Set(); + const result = []; + for (const f of findings) { + const file = f.file?.trim() ?? ''; + const line = f.line ?? 0; + const key = file || line + ? `${file}:${line}` + : `title:${(f.title ?? 
'').toLowerCase().trim().slice(0, 80)}`; + if (seen.has(key)) + continue; + seen.add(key); + result.push(f); + } + return result; +} + + +/***/ }), + +/***/ 3770: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.fileMatchesIgnorePatterns = fileMatchesIgnorePatterns; +/** + * Returns true if the file path matches any of the ignore patterns (glob-style). + * Used to exclude findings in test files, build output, etc. + */ +function fileMatchesIgnorePatterns(filePath, ignorePatterns) { + if (!filePath || ignorePatterns.length === 0) + return false; + const normalized = filePath.trim(); + if (!normalized) + return false; + return ignorePatterns.some((pattern) => { + const p = pattern.trim(); + if (!p) + return false; + const regexPattern = p + .replace(/[.+?^${}()|[\]\\]/g, '\\$&') + .replace(/\*/g, '.*') + .replace(/\//g, '\\/'); + const regex = p.endsWith('/*') + ? new RegExp(`^${regexPattern.replace(/\\\/\.\*$/, '(\\/.*)?')}$`) + : new RegExp(`^${regexPattern}$`); + return regex.test(normalized); + }); +} + + +/***/ }), + +/***/ 9072: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.applyCommentLimit = applyCommentLimit; +const constants_1 = __nccwpck_require__(8593); +/** + * Applies the max-comments limit: returns the first N findings to publish individually, + * and overflow count + titles for a single "revisar en local" summary comment. 
+ */ +function applyCommentLimit(findings, maxComments = constants_1.BUGBOT_MAX_COMMENTS) { + if (findings.length <= maxComments) { + return { toPublish: findings, overflowCount: 0, overflowTitles: [] }; + } + const toPublish = findings.slice(0, maxComments); + const overflow = findings.slice(maxComments); + return { + toPublish, + overflowCount: overflow.length, + overflowTitles: overflow.map((f) => f.title?.trim() || f.id).filter(Boolean), + }; +} + + +/***/ }), + +/***/ 6319: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.loadBugbotContext = loadBugbotContext; +const issue_repository_1 = __nccwpck_require__(57); +const pull_request_repository_1 = __nccwpck_require__(634); +const marker_1 = __nccwpck_require__(2401); +function buildPreviousFindingsBlock(previousFindings) { + if (previousFindings.length === 0) + return ''; + const items = previousFindings + .map((p) => `---\n**Finding id (use this exact id in resolved_finding_ids if resolved/no longer applies):** \`${p.id.replace(/`/g, '\\`')}\`\n\n**Full comment as posted (including metadata at the end):**\n${p.fullBody}\n`) + .join('\n'); + return ` +**Previously reported issues (not yet marked resolved).** For each one we show the exact comment we posted (title, description, location, suggestion, and a hidden marker with the finding id at the end). + +${items} +**Your task 2:** For each finding above, analyze the current code and decide: +- If the problem **still exists** (same code or same issue present): do **not** include its id in \`resolved_finding_ids\`. +- If the problem **no longer applies** (e.g. that code was removed or refactored away): include its id in \`resolved_finding_ids\`. +- If the problem **has been fixed** (code was changed and the issue is resolved): include its id in \`resolved_finding_ids\`. 
+ +Return in \`resolved_finding_ids\` only the ids from the list above that are now fixed or no longer apply. Use the exact id shown in each "Finding id" line.`; +} +/** + * Loads all context needed for bugbot: existing findings from issue + PR comments, + * open PR numbers, and the prompt block for previously reported issues. + * Also loads PR context (head sha, files, diff lines) for the first open PR. + */ +async function loadBugbotContext(param) { + const issueNumber = param.issueNumber; + const headBranch = param.commit.branch; + const token = param.tokens.token; + const owner = param.owner; + const repo = param.repo; + const issueRepository = new issue_repository_1.IssueRepository(); + const pullRequestRepository = new pull_request_repository_1.PullRequestRepository(); + const issueComments = await issueRepository.listIssueComments(owner, repo, issueNumber, token); + const existingByFindingId = {}; + for (const c of issueComments) { + for (const { findingId, resolved } of (0, marker_1.parseMarker)(c.body)) { + if (!existingByFindingId[findingId]) { + existingByFindingId[findingId] = { issueCommentId: c.id, resolved }; + } + else { + existingByFindingId[findingId].issueCommentId = c.id; + existingByFindingId[findingId].resolved = resolved; + } + } + } + const openPrNumbers = await pullRequestRepository.getOpenPullRequestNumbersByHeadBranch(owner, repo, headBranch, token); + /** Full comment body per finding id (from PR when we don't have issue comment). */ + const prFindingIdToBody = {}; + for (const prNumber of openPrNumbers) { + const prComments = await pullRequestRepository.listPullRequestReviewComments(owner, repo, prNumber, token); + for (const c of prComments) { + const body = c.body ?? 
''; + for (const { findingId, resolved } of (0, marker_1.parseMarker)(body)) { + if (!existingByFindingId[findingId]) { + existingByFindingId[findingId] = { resolved }; + } + existingByFindingId[findingId].prCommentId = c.id; + existingByFindingId[findingId].prNumber = prNumber; + existingByFindingId[findingId].resolved = resolved; + prFindingIdToBody[findingId] = body; + } + } + } + /** Unresolved findings with full comment body (including hidden marker) for OpenCode to re-evaluate. */ + const previousFindingsForPrompt = []; + for (const [findingId, data] of Object.entries(existingByFindingId)) { + if (data.resolved) + continue; + const issueBody = issueComments.find((c) => c.id === data.issueCommentId)?.body ?? null; + const fullBody = (issueBody ?? prFindingIdToBody[findingId] ?? '').trim(); + if (fullBody) { + previousFindingsForPrompt.push({ id: findingId, fullBody }); + } + } + const previousFindingsBlock = buildPreviousFindingsBlock(previousFindingsForPrompt); + let prContext = null; + if (openPrNumbers.length > 0) { + const prHeadSha = await pullRequestRepository.getPullRequestHeadSha(owner, repo, openPrNumbers[0], token); + if (prHeadSha) { + const prFiles = await pullRequestRepository.getChangedFiles(owner, repo, openPrNumbers[0], token); + const filesWithLines = await pullRequestRepository.getFilesWithFirstDiffLine(owner, repo, openPrNumbers[0], token); + const pathToFirstDiffLine = {}; + for (const { path, firstLine } of filesWithLines) { + pathToFirstDiffLine[path] = firstLine; + } + prContext = { prHeadSha, prFiles, pathToFirstDiffLine }; + } + } + return { + existingByFindingId, + issueComments, + openPrNumbers, + previousFindingsBlock, + prContext, + }; +} + + +/***/ }), + +/***/ 61: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.markFindingsResolved = markFindingsResolved; +const issue_repository_1 = __nccwpck_require__(57); +const 
pull_request_repository_1 = __nccwpck_require__(634); +const logger_1 = __nccwpck_require__(8836); +const marker_1 = __nccwpck_require__(2401); +/** + * Marks as resolved the findings that OpenCode reported as fixed. + * Updates issue comments (with visible "Resolved" note) and PR review comments (marker only + resolve thread). + */ +async function markFindingsResolved(param) { + const { execution, context, resolvedFindingIds, normalizedResolvedIds } = param; + const { existingByFindingId, issueComments } = context; + const issueNumber = execution.issueNumber; + const token = execution.tokens.token; + const owner = execution.owner; + const repo = execution.repo; + const issueRepository = new issue_repository_1.IssueRepository(); + const pullRequestRepository = new pull_request_repository_1.PullRequestRepository(); + for (const [findingId, existing] of Object.entries(existingByFindingId)) { + const isResolvedByOpenCode = resolvedFindingIds.has(findingId) || + normalizedResolvedIds.has((0, marker_1.sanitizeFindingIdForMarker)(findingId)); + if (existing.resolved || !isResolvedByOpenCode) + continue; + const resolvedNote = '\n\n---\n**Resolved** (OpenCode confirmed fixed in latest analysis).\n'; + const markerTrue = (0, marker_1.buildMarker)(findingId, true); + const replacementWithNote = resolvedNote + markerTrue; + if (existing.issueCommentId != null) { + const comment = issueComments.find((c) => c.id === existing.issueCommentId); + if (comment == null) { + (0, logger_1.logError)(`[Bugbot] No se encontró el comentario de la issue para marcar como resuelto. findingId="${findingId}", issueCommentId=${existing.issueCommentId}, issueNumber=${issueNumber}, owner=${owner}, repo=${repo}.`); + } + else { + const resolvedBody = comment.body ?? 
''; + const { updated, replaced } = (0, marker_1.replaceMarkerInBody)(resolvedBody, findingId, true, replacementWithNote); + if (replaced) { + try { + await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, updated.trimEnd(), token); + (0, logger_1.logDebugInfo)(`Marked finding "${findingId}" as resolved on issue #${issueNumber} (comment ${existing.issueCommentId}).`); + } + catch (err) { + (0, logger_1.logError)(`[Bugbot] Error al actualizar comentario de la issue (marcar como resuelto). findingId="${findingId}", issueCommentId=${existing.issueCommentId}, issueNumber=${issueNumber}: ${err}`); + } + } + } + } + if (existing.prCommentId != null && existing.prNumber != null) { + const prCommentsList = await pullRequestRepository.listPullRequestReviewComments(owner, repo, existing.prNumber, token); + const prComment = prCommentsList.find((c) => c.id === existing.prCommentId); + if (prComment == null) { + (0, logger_1.logError)(`[Bugbot] No se encontró el comentario de la PR para marcar como resuelto. findingId="${findingId}", prCommentId=${existing.prCommentId}, prNumber=${existing.prNumber}, owner=${owner}, repo=${repo}.`); + } + else { + const prBody = prComment.body ?? ''; + const { updated, replaced } = (0, marker_1.replaceMarkerInBody)(prBody, findingId, true, markerTrue); + if (replaced) { + try { + await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, updated.trimEnd(), token); + (0, logger_1.logDebugInfo)(`Marked finding "${findingId}" as resolved on PR #${existing.prNumber} (review comment ${existing.prCommentId}).`); + if (prComment.node_id) { + await pullRequestRepository.resolvePullRequestReviewThread(owner, repo, existing.prNumber, prComment.node_id, token); + } + } + catch (err) { + (0, logger_1.logError)(`[Bugbot] Error al actualizar comentario de revisión de la PR (marcar como resuelto). 
findingId="${findingId}", prCommentId=${existing.prCommentId}, prNumber=${existing.prNumber}: ${err}`); + } + } + } + } + } +} + + +/***/ }), + +/***/ 2401: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.sanitizeFindingIdForMarker = sanitizeFindingIdForMarker; +exports.buildMarker = buildMarker; +exports.parseMarker = parseMarker; +exports.markerRegexForFinding = markerRegexForFinding; +exports.replaceMarkerInBody = replaceMarkerInBody; +exports.extractTitleFromBody = extractTitleFromBody; +exports.buildCommentBody = buildCommentBody; +const constants_1 = __nccwpck_require__(8593); +const logger_1 = __nccwpck_require__(8836); +/** Sanitize finding ID so it cannot break HTML comment syntax (e.g. -->, , newlines, quotes). */ +function sanitizeFindingIdForMarker(findingId) { + return findingId + .replace(/-->/g, '') + .replace(//g, '') + .replace(/"/g, '') + .replace(/\r\n|\r|\n/g, '') + .trim(); +} +function buildMarker(findingId, resolved) { + const safeId = sanitizeFindingIdForMarker(findingId); + return ``; +} +function parseMarker(body) { + if (!body) + return []; + const results = []; + const regex = new RegExp(``, 'g'); + let m; + while ((m = regex.exec(body)) !== null) { + results.push({ findingId: m[1], resolved: m[2] === 'true' }); + } + return results; +} +/** Regex to match the marker for a specific finding (same flexible format as parseMarker). */ +function markerRegexForFinding(findingId) { + const safeId = sanitizeFindingIdForMarker(findingId); + const escapedId = safeId.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + return new RegExp(``, 'g'); +} +/** + * Find the marker for this finding in body (using same pattern as parseMarker) and replace it. + * Returns the updated body and whether a replacement was made. Logs an error with details if no replacement occurred. 
+ */ +function replaceMarkerInBody(body, findingId, newResolved, replacement) { + const regex = markerRegexForFinding(findingId); + const newMarker = replacement ?? buildMarker(findingId, newResolved); + const updated = body.replace(regex, newMarker); + const replaced = updated !== body; + if (!replaced) { + (0, logger_1.logError)(`[Bugbot] No se pudo marcar como resuelto: no se encontró el marcador en el comentario. findingId="${findingId}", bodyLength=${body?.length ?? 0}, bodySnippet=${(body ?? '').slice(0, 200)}...`); + } + return { updated, replaced }; +} +/** Extract title from comment body (first ## line) for context when sending to OpenCode. */ +function extractTitleFromBody(body) { + if (!body) + return ''; + const match = body.match(/^##\s+(.+)$/m); + return (match?.[1] ?? '').trim(); +} +function buildCommentBody(finding, resolved) { + const severity = finding.severity ? `**Severity:** ${finding.severity}\n\n` : ''; + const fileLine = finding.file != null + ? `**Location:** \`${finding.file}${finding.line != null ? `:${finding.line}` : ''}\`\n\n` + : ''; + const suggestion = finding.suggestion + ? `**Suggested fix:**\n${finding.suggestion}\n\n` + : ''; + const resolvedNote = resolved ? '\n\n---\n**Resolved** (no longer reported in latest analysis).\n' : ''; + const marker = buildMarker(finding.id, resolved); + return `## ${finding.title} + +${severity}${fileLine}${finding.description} +${suggestion}${resolvedNote}${marker}`; +} /***/ }), -/***/ 6479: +/***/ 1999: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/** + * Path validation for AI-returned finding.file to prevent path traversal and misuse. + * Rejects paths containing '..', null bytes, or absolute paths. 
+ */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.isSafeFindingFilePath = isSafeFindingFilePath; +exports.isAllowedPathForPr = isAllowedPathForPr; +exports.resolveFindingPathForPr = resolveFindingPathForPr; +const NULL_BYTE = '\0'; +const PARENT_SEGMENT = '..'; +const SLASH = '/'; +const BACKSLASH = '\\'; +/** + * Returns true if the path is safe to use: no '..', no null bytes, not absolute. + * Does not check against a list of allowed files; use isAllowedPathForPr for that. + */ +function isSafeFindingFilePath(path) { + if (path == null || typeof path !== 'string') + return false; + const trimmed = path.trim(); + if (trimmed.length === 0) + return false; + if (trimmed.includes(NULL_BYTE)) + return false; + if (trimmed.includes(PARENT_SEGMENT)) + return false; + if (trimmed.startsWith(SLASH)) + return false; + if (/^[a-zA-Z]:[/\\]/.test(trimmed)) + return false; + if (trimmed.startsWith(BACKSLASH)) + return false; + return true; +} +/** + * Returns true if path is safe (isSafeFindingFilePath) and is in the list of PR changed files. + * Used to validate finding.file before using it for PR review comments. + */ +function isAllowedPathForPr(path, prFiles) { + if (!isSafeFindingFilePath(path)) + return false; + if (prFiles.length === 0) + return false; + const normalized = path.trim(); + return prFiles.some((f) => f.filename === normalized); +} +/** + * Resolves the file path to use for a PR review comment: finding.file if valid and in prFiles. + * Returns undefined when the finding's file is not in the PR so we do not attach the comment + * to the wrong file (e.g. the first file in the list). 
+ */ +function resolveFindingPathForPr(findingFile, prFiles) { + if (prFiles.length === 0) + return undefined; + if (isAllowedPathForPr(findingFile, prFiles)) + return findingFile.trim(); + return undefined; +} + + +/***/ }), + +/***/ 6697: /***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.SingleActionUseCase = void 0; -const result_1 = __nccwpck_require__(7305); +exports.publishFindings = publishFindings; +const issue_repository_1 = __nccwpck_require__(57); +const pull_request_repository_1 = __nccwpck_require__(634); const logger_1 = __nccwpck_require__(8836); -const deployed_action_use_case_1 = __nccwpck_require__(8293); -const publish_github_action_use_case_1 = __nccwpck_require__(9029); -const create_release_use_case_1 = __nccwpck_require__(2430); -const create_tag_use_case_1 = __nccwpck_require__(5279); -const think_use_case_1 = __nccwpck_require__(3841); -const initial_setup_use_case_1 = __nccwpck_require__(3943); -const check_progress_use_case_1 = __nccwpck_require__(7744); -const detect_errors_use_case_1 = __nccwpck_require__(938); -const recommend_steps_use_case_1 = __nccwpck_require__(3538); -class SingleActionUseCase { - constructor() { - this.taskId = 'SingleActionUseCase'; - } - async invoke(param) { - (0, logger_1.logInfo)(`Executing ${this.taskId}.`); - const results = []; - try { - if (!param.singleAction.validSingleAction) { - (0, logger_1.logDebugInfo)(`Not a valid single action: ${param.singleAction.currentSingleAction}`); - return results; - } - if (param.singleAction.isDeployedAction) { - results.push(...await new deployed_action_use_case_1.DeployedActionUseCase().invoke(param)); - } - else if (param.singleAction.isPublishGithubAction) { - results.push(...await new publish_github_action_use_case_1.PublishGithubActionUseCase().invoke(param)); - } - else if (param.singleAction.isCreateReleaseAction) { - results.push(...await new 
create_release_use_case_1.CreateReleaseUseCase().invoke(param)); - } - else if (param.singleAction.isCreateTagAction) { - results.push(...await new create_tag_use_case_1.CreateTagUseCase().invoke(param)); - } - else if (param.singleAction.isThinkAction) { - results.push(...await new think_use_case_1.ThinkUseCase().invoke(param)); - } - else if (param.singleAction.isInitialSetupAction) { - results.push(...await new initial_setup_use_case_1.InitialSetupUseCase().invoke(param)); - } - else if (param.singleAction.isCheckProgressAction) { - results.push(...await new check_progress_use_case_1.CheckProgressUseCase().invoke(param)); - } - else if (param.singleAction.isDetectErrorsAction) { - results.push(...await new detect_errors_use_case_1.DetectErrorsUseCase().invoke(param)); - } - else if (param.singleAction.isRecommendStepsAction) { - results.push(...await new recommend_steps_use_case_1.RecommendStepsUseCase().invoke(param)); - } +const marker_1 = __nccwpck_require__(2401); +const path_validation_1 = __nccwpck_require__(1999); +/** + * Publishes current findings to issue and PR: creates or updates issue comments, + * creates or updates PR review comments (or creates new ones). + */ +async function publishFindings(param) { + const { execution, context, findings, overflowCount = 0, overflowTitles = [] } = param; + const { existingByFindingId, openPrNumbers, prContext } = context; + const issueNumber = execution.issueNumber; + const token = execution.tokens.token; + const owner = execution.owner; + const repo = execution.repo; + const issueRepository = new issue_repository_1.IssueRepository(); + const pullRequestRepository = new pull_request_repository_1.PullRequestRepository(); + const prFiles = prContext?.prFiles ?? []; + const pathToFirstDiffLine = prContext?.pathToFirstDiffLine ?? 
{}; + const prCommentsToCreate = []; + for (const finding of findings) { + const existing = existingByFindingId[finding.id]; + const commentBody = (0, marker_1.buildCommentBody)(finding, false); + if (existing?.issueCommentId != null) { + await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, commentBody, token); + (0, logger_1.logDebugInfo)(`Updated bugbot comment for finding ${finding.id} on issue.`); } - catch (error) { - (0, logger_1.logError)(error); - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - steps: [ - `Error executing single action: ${param.singleAction.currentSingleAction}.`, - ], - error: error, - })); + else { + await issueRepository.addComment(owner, repo, issueNumber, commentBody, token); + (0, logger_1.logDebugInfo)(`Added bugbot comment for finding ${finding.id} on issue.`); + } + if (prContext && openPrNumbers.length > 0) { + const path = (0, path_validation_1.resolveFindingPathForPr)(finding.file, prFiles); + if (path) { + const line = finding.line ?? pathToFirstDiffLine[path] ?? 1; + if (existing?.prCommentId != null && existing.prNumber === openPrNumbers[0]) { + await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, commentBody, token); + } + else { + prCommentsToCreate.push({ path, line, body: commentBody }); + } + } } - return results; + } + if (prCommentsToCreate.length > 0 && prContext && openPrNumbers.length > 0) { + await pullRequestRepository.createReviewWithComments(owner, repo, openPrNumbers[0], prContext.prHeadSha, prCommentsToCreate, token); + } + if (overflowCount > 0) { + const titlesList = overflowTitles.length > 0 + ? '\n- ' + overflowTitles.slice(0, 15).join('\n- ') + (overflowTitles.length > 15 ? `\n- ... and ${overflowTitles.length - 15} more` : '') + : ''; + const overflowBody = `## More findings (comment limit) + +There are **${overflowCount}** more finding(s) that were not published as individual comments. 
Review locally or in the full diff to see the list.${titlesList}`; + await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token); + (0, logger_1.logDebugInfo)(`Added overflow comment: ${overflowCount} additional finding(s) not published individually.`); } } -exports.SingleActionUseCase = SingleActionUseCase; + + +/***/ }), + +/***/ 8267: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.BUGBOT_RESPONSE_SCHEMA = void 0; +/** OpenCode response schema: agent computes diff, returns new findings and which previous ones are resolved. */ +exports.BUGBOT_RESPONSE_SCHEMA = { + type: 'object', + properties: { + findings: { + type: 'array', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Stable unique id for this finding (e.g. file:line:summary)' }, + title: { type: 'string', description: 'Short title of the problem' }, + description: { type: 'string', description: 'Clear explanation of the issue' }, + file: { type: 'string', description: 'Repository-relative path when applicable' }, + line: { type: 'number', description: 'Line number when applicable' }, + severity: { type: 'string', description: 'Severity: high, medium, low, or info. Findings below the configured minimum are not published.' }, + suggestion: { type: 'string', description: 'Suggested fix when applicable' }, + }, + required: ['id', 'title', 'description'], + additionalProperties: true, + }, + }, + resolved_finding_ids: { + type: 'array', + items: { type: 'string' }, + description: 'Ids of previously reported issues (from the list we sent) that are now fixed in the current code. 
Only include ids we asked you to check.', + }, + }, + required: ['findings'], + additionalProperties: false, +}; + + +/***/ }), + +/***/ 3109: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.normalizeMinSeverity = normalizeMinSeverity; +exports.severityLevel = severityLevel; +exports.meetsMinSeverity = meetsMinSeverity; +const VALID_SEVERITIES = ['info', 'low', 'medium', 'high']; +/** Normalizes user input to a valid SeverityLevel; defaults to 'low' if invalid. */ +function normalizeMinSeverity(value) { + if (!value) + return 'low'; + const normalized = value.toLowerCase().trim(); + return VALID_SEVERITIES.includes(normalized) ? normalized : 'low'; +} +const SEVERITY_ORDER = { + info: 0, + low: 1, + medium: 2, + high: 3, +}; +function severityLevel(severity) { + if (!severity) + return SEVERITY_ORDER.low; + const normalized = severity.toLowerCase().trim(); + return SEVERITY_ORDER[normalized] ?? SEVERITY_ORDER.low; +} +/** Returns true if the finding's severity is at or above the minimum threshold. */ +function meetsMinSeverity(findingSeverity, minSeverity) { + return severityLevel(findingSeverity) >= SEVERITY_ORDER[minSeverity]; +} /***/ }), @@ -53158,12 +54153,14 @@ class CheckChangesIssueSizeUseCase { (0, logger_1.logInfo)(`Executing ${this.taskId}.`); const result = []; try { - if (param.currentConfiguration.parentBranch === undefined) { - (0, logger_1.logDebugInfo)(`Parent branch is undefined.`); + const baseBranch = param.currentConfiguration.parentBranch ?? + param.branches.development ?? 
+ 'develop'; + if (!baseBranch) { + (0, logger_1.logDebugInfo)(`Parent branch could not be determined.`); return result; } const headBranch = param.commit.branch; - const baseBranch = param.currentConfiguration.parentBranch; const { size, githubSize, reason } = await this.branchRepository.getSizeCategoryAndReason(param.owner, param.repo, headBranch, baseBranch, param.sizeThresholds, param.labels, param.tokens.token); (0, logger_1.logDebugInfo)(`Size: ${size}`); (0, logger_1.logDebugInfo)(`Github Size: ${githubSize}`); @@ -53226,6 +54223,124 @@ class CheckChangesIssueSizeUseCase { exports.CheckChangesIssueSizeUseCase = CheckChangesIssueSizeUseCase; +/***/ }), + +/***/ 7395: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DetectPotentialProblemsUseCase = void 0; +const result_1 = __nccwpck_require__(7305); +const ai_repository_1 = __nccwpck_require__(8307); +const constants_1 = __nccwpck_require__(8593); +const logger_1 = __nccwpck_require__(8836); +const build_bugbot_prompt_1 = __nccwpck_require__(6339); +const deduplicate_findings_1 = __nccwpck_require__(7384); +const file_ignore_1 = __nccwpck_require__(3770); +const path_validation_1 = __nccwpck_require__(1999); +const limit_comments_1 = __nccwpck_require__(9072); +const load_bugbot_context_use_case_1 = __nccwpck_require__(6319); +const mark_findings_resolved_use_case_1 = __nccwpck_require__(61); +const publish_findings_use_case_1 = __nccwpck_require__(6697); +const schema_1 = __nccwpck_require__(8267); +const severity_1 = __nccwpck_require__(3109); +const marker_1 = __nccwpck_require__(2401); +class DetectPotentialProblemsUseCase { + constructor() { + this.taskId = 'DetectPotentialProblemsUseCase'; + this.aiRepository = new ai_repository_1.AiRepository(); + } + async invoke(param) { + (0, logger_1.logInfo)(`Executing ${this.taskId}.`); + const results = []; + try { + if (!param.ai?.getOpencodeModel() 
|| !param.ai?.getOpencodeServerUrl()) { + (0, logger_1.logDebugInfo)('OpenCode not configured; skipping potential problems detection.'); + return results; + } + if (param.issueNumber === -1) { + (0, logger_1.logDebugInfo)('No issue number for this branch; skipping.'); + return results; + } + const context = await (0, load_bugbot_context_use_case_1.loadBugbotContext)(param); + const prompt = (0, build_bugbot_prompt_1.buildBugbotPrompt)(param, context); + (0, logger_1.logInfo)('Detecting potential problems via OpenCode (agent computes changes and checks resolved)...'); + const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: schema_1.BUGBOT_RESPONSE_SCHEMA, + schemaName: 'bugbot_findings', + }); + if (response == null || typeof response !== 'object') { + (0, logger_1.logDebugInfo)('No response from OpenCode.'); + return results; + } + const payload = response; + let findings = Array.isArray(payload.findings) ? payload.findings : []; + const resolvedFindingIdsRaw = Array.isArray(payload.resolved_finding_ids) ? payload.resolved_finding_ids : []; + const resolvedFindingIds = new Set(resolvedFindingIdsRaw); + const normalizedResolvedIds = new Set(resolvedFindingIdsRaw.map(marker_1.sanitizeFindingIdForMarker)); + const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? []; + const minSeverity = (0, severity_1.normalizeMinSeverity)(param.ai?.getBugbotMinSeverity?.()); + findings = findings.filter((f) => f.file == null || String(f.file).trim() === '' || (0, path_validation_1.isSafeFindingFilePath)(f.file)); + findings = findings.filter((f) => !(0, file_ignore_1.fileMatchesIgnorePatterns)(f.file, ignorePatterns)); + findings = findings.filter((f) => (0, severity_1.meetsMinSeverity)(f.severity, minSeverity)); + findings = (0, deduplicate_findings_1.deduplicateFindings)(findings); + const maxComments = param.ai?.getBugbotCommentLimit?.() ?? 
constants_1.BUGBOT_MAX_COMMENTS; + const { toPublish, overflowCount, overflowTitles } = (0, limit_comments_1.applyCommentLimit)(findings, maxComments); + if (toPublish.length === 0 && resolvedFindingIds.size === 0) { + (0, logger_1.logDebugInfo)('OpenCode returned no new findings (after filters) and no resolved ids.'); + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: true, + steps: ['Potential problems detection completed (no new findings, no resolved).'], + })); + return results; + } + await (0, mark_findings_resolved_use_case_1.markFindingsResolved)({ + execution: param, + context, + resolvedFindingIds, + normalizedResolvedIds, + }); + await (0, publish_findings_use_case_1.publishFindings)({ + execution: param, + context, + findings: toPublish, + overflowCount: overflowCount > 0 ? overflowCount : undefined, + overflowTitles: overflowCount > 0 ? overflowTitles : undefined, + }); + const stepParts = [`${toPublish.length} new/current finding(s) from OpenCode`]; + if (overflowCount > 0) { + stepParts.push(`${overflowCount} more not published (see summary comment)`); + } + if (resolvedFindingIds.size > 0) { + stepParts.push(`${resolvedFindingIds.size} marked as resolved by OpenCode`); + } + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: true, + steps: [`Potential problems detection completed. 
${stepParts.join('; ')}.`], + })); + } + catch (error) { + (0, logger_1.logError)(`Error in ${this.taskId}: ${error}`); + results.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + errors: [`Error in ${this.taskId}: ${error}`], + })); + } + return results; + } +} +exports.DetectPotentialProblemsUseCase = DetectPotentialProblemsUseCase; + + /***/ }), /***/ 8020: @@ -53869,23 +54984,26 @@ class ThinkUseCase { })); return results; } - if (!param.tokenUser?.trim()) { - (0, logger_1.logInfo)('Bot username (tokenUser) not set; skipping Think response.'); - results.push(new result_1.Result({ - id: this.taskId, - success: true, - executed: false, - })); - return results; - } - if (!commentBody.includes(`@${param.tokenUser}`)) { - (0, logger_1.logInfo)(`Comment does not mention @${param.tokenUser}; skipping.`); - results.push(new result_1.Result({ - id: this.taskId, - success: true, - executed: false, - })); - return results; + const isHelpOrQuestionIssue = param.labels.isQuestion || param.labels.isHelp; + if (!isHelpOrQuestionIssue) { + if (!param.tokenUser?.trim()) { + (0, logger_1.logInfo)('Bot username (tokenUser) not set; skipping Think response.'); + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: false, + })); + return results; + } + if (!commentBody.includes(`@${param.tokenUser}`)) { + (0, logger_1.logInfo)(`Comment does not mention @${param.tokenUser}; skipping.`); + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: false, + })); + return results; + } } if (!param.ai.getOpencodeModel()?.trim() || !param.ai.getOpencodeServerUrl()?.trim()) { results.push(new result_1.Result({ @@ -53896,7 +55014,9 @@ class ThinkUseCase { })); return results; } - const question = commentBody.replace(new RegExp(`@${param.tokenUser}`, 'gi'), '').trim(); + const question = isHelpOrQuestionIssue + ? 
commentBody.trim() + : commentBody.replace(new RegExp(`@${param.tokenUser}`, 'gi'), '').trim(); if (!question) { results.push(new result_1.Result({ id: this.taskId, @@ -53917,8 +55037,17 @@ class ThinkUseCase { ? `\n\nContext (issue #${issueNumberForContext} description):\n${issueDescription}\n\n` : '\n\n'; const prompt = `You are a helpful assistant. Answer the following question concisely, using the context below when relevant. Do not include the question in your response.${contextBlock}Question: ${question}`; - const answer = await this.aiRepository.ask(param.ai, prompt); - if (answer === undefined || !answer.trim()) { + const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: ai_repository_1.THINK_RESPONSE_SCHEMA, + schemaName: 'think_response', + }); + const answer = response != null && + typeof response === 'object' && + typeof response.answer === 'string' + ? response.answer.trim() + : ''; + if (!answer) { (0, logger_1.logError)('OpenCode returned no answer for Think.'); results.push(new result_1.Result({ id: this.taskId, @@ -54505,6 +55634,7 @@ const result_1 = __nccwpck_require__(7305); const branch_repository_1 = __nccwpck_require__(7701); const content_utils_1 = __nccwpck_require__(7873); const logger_1 = __nccwpck_require__(8836); +const move_issue_to_in_progress_1 = __nccwpck_require__(8203); class DeployAddedUseCase { constructor() { this.taskId = 'DeployAddedUseCase'; @@ -54517,6 +55647,7 @@ class DeployAddedUseCase { if (param.issue.labeled && param.issue.labelAdded === param.labels.deploy) { (0, logger_1.logDebugInfo)(`Deploying requested.`); if (param.release.active && param.release.branch !== undefined) { + result.push(...await new move_issue_to_in_progress_1.MoveIssueToInProgressUseCase().invoke(param)); const sanitizedTitle = param.issue.title .replace(/\b\d+(\.\d+){2,}\b/g, '') .replace(/[^\p{L}\p{N}\p{P}\p{Z}^$\n]/gu, '') @@ -54527,13 +55658,12 @@ class 
DeployAddedUseCase { .replace(/- -/g, '-').trim() .replace(/-+/g, '-') .trim(); - const description = param.issue.body?.match(/### Changelog\n\n([\s\S]*?)(?=\n\n|$)/)?.[1]?.trim() ?? 'No changelog provided'; - const escapedDescription = description.replace(/\n/g, '\\n'); + const changelogBody = (0, content_utils_1.extractChangelogUpToAdditionalContext)(param.issue.body, 'Changelog'); const releaseUrl = `https://github.com/${param.owner}/${param.repo}/tree/${param.release.branch}`; const parameters = { version: param.release.version, title: sanitizedTitle, - changelog: escapedDescription, + changelog: changelogBody, issue: `${param.issue.number}`, }; await this.branchRepository.executeWorkflow(param.owner, param.repo, param.release.branch, param.workflows.release, parameters, param.tokens.token); @@ -54549,6 +55679,7 @@ ${(0, content_utils_1.injectJsonAsMarkdownBlock)('Workflow Parameters', paramete })); } else if (param.hotfix.active && param.hotfix.branch !== undefined) { + result.push(...await new move_issue_to_in_progress_1.MoveIssueToInProgressUseCase().invoke(param)); const sanitizedTitle = param.issue.title .replace(/\b\d+(\.\d+){2,}\b/g, '') .replace(/[^\p{L}\p{N}\p{P}\p{Z}^$\n]/gu, '') @@ -54559,16 +55690,15 @@ ${(0, content_utils_1.injectJsonAsMarkdownBlock)('Workflow Parameters', paramete .replace(/- -/g, '-').trim() .replace(/-+/g, '-') .trim(); - const description = param.issue.body?.match(/### Hotfix Solution\n\n([\s\S]*?)(?=\n\n|$)/)?.[1]?.trim() ?? 
'No changelog provided'; - const escapedDescription = description.replace(/\n/g, '\\n'); + const changelogBody = (0, content_utils_1.extractChangelogUpToAdditionalContext)(param.issue.body, 'Hotfix Solution'); const hotfixUrl = `https://github.com/${param.owner}/${param.repo}/tree/${param.hotfix.branch}`; const parameters = { version: param.hotfix.version, title: sanitizedTitle, - changelog: escapedDescription, + changelog: changelogBody, issue: param.issue.number, }; - await this.branchRepository.executeWorkflow(param.owner, param.repo, param.hotfix.branch, param.workflows.release, parameters, param.tokens.token); + await this.branchRepository.executeWorkflow(param.owner, param.repo, param.hotfix.branch, param.workflows.hotfix, parameters, param.tokens.token); result.push(new result_1.Result({ id: this.taskId, success: true, @@ -55426,8 +56556,17 @@ If you'd like this comment to be translated again, please delete the entire comm The text is: ${commentBody} `; - let result = await this.aiRepository.ask(param.ai, prompt); - if (result === "done") { + const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA, + schemaName: 'language_check_response', + }); + const status = checkResponse != null && + typeof checkResponse === 'object' && + typeof checkResponse.status === 'string' + ? 
checkResponse.status + : ''; + if (status === 'done') { results.push(new result_1.Result({ id: this.taskId, success: true, @@ -55834,9 +56973,7 @@ class SyncSizeAndProgressLabelsFromIssueToPrUseCase { id: this.taskId, success: true, executed: true, - steps: [ - `Size and progress labels copied from issue #${param.issueNumber} to this PR.`, - ], + steps: [], })); } catch (error) { @@ -55942,7 +57079,7 @@ class UpdatePullRequestDescriptionUseCase { id: this.taskId, success: true, executed: true, - steps: [`The description has been updated with AI-generated content (OpenCode Plan agent).`], + steps: [], })); } catch (error) { @@ -56039,8 +57176,17 @@ If you'd like this comment to be translated again, please delete the entire comm The text is: ${commentBody} `; - let result = await this.aiRepository.ask(param.ai, prompt); - if (result === "done") { + const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA, + schemaName: 'language_check_response', + }); + const status = checkResponse != null && + typeof checkResponse === 'object' && + typeof checkResponse.status === 'string' + ? 
checkResponse.status + : ''; + if (status === 'done') { results.push(new result_1.Result({ id: this.taskId, success: true, @@ -56101,14 +57247,18 @@ exports.CheckPullRequestCommentLanguageUseCase = CheckPullRequestCommentLanguage "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.PROMPTS = exports.ACTIONS = exports.ERRORS = exports.INPUT_KEYS = exports.WORKFLOW_ACTIVE_STATUSES = exports.WORKFLOW_STATUS = exports.DEFAULT_IMAGE_CONFIG = exports.OPENCODE_REQUEST_TIMEOUT_MS = exports.OPENCODE_DEFAULT_MODEL = exports.REPO_URL = exports.TITLE = exports.COMMAND = void 0; +exports.PROMPTS = exports.BUGBOT_MIN_SEVERITY = exports.BUGBOT_MAX_COMMENTS = exports.BUGBOT_MARKER_PREFIX = exports.ACTIONS = exports.ERRORS = exports.INPUT_KEYS = exports.WORKFLOW_ACTIVE_STATUSES = exports.WORKFLOW_STATUS = exports.DEFAULT_IMAGE_CONFIG = exports.OPENCODE_RETRY_DELAY_MS = exports.OPENCODE_MAX_RETRIES = exports.OPENCODE_REQUEST_TIMEOUT_MS = exports.OPENCODE_DEFAULT_MODEL = exports.REPO_URL = exports.TITLE = exports.COMMAND = void 0; exports.COMMAND = 'giik'; exports.TITLE = 'Giik'; exports.REPO_URL = 'https://github.com/landamessenger/git-board-flow'; /** Default OpenCode model: provider/modelID (e.g. opencode/kimi-k2.5-free). Reuse for CLI, action and Ai fallbacks. */ exports.OPENCODE_DEFAULT_MODEL = 'opencode/kimi-k2.5-free'; -/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow with many files. */ -exports.OPENCODE_REQUEST_TIMEOUT_MS = 600000; +/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow (e.g. plan analyzing repo). */ +exports.OPENCODE_REQUEST_TIMEOUT_MS = 900000; +/** Max attempts for OpenCode requests (retries on failure). Applied transparently in AiRepository. */ +exports.OPENCODE_MAX_RETRIES = 5; +/** Delay in ms between OpenCode retry attempts. 
*/ +exports.OPENCODE_RETRY_DELAY_MS = 2000; exports.DEFAULT_IMAGE_CONFIG = { issue: { automatic: [ @@ -56315,6 +57465,8 @@ exports.INPUT_KEYS = { AI_MEMBERS_ONLY: 'ai-members-only', AI_IGNORE_FILES: 'ai-ignore-files', AI_INCLUDE_REASONING: 'ai-include-reasoning', + BUGBOT_SEVERITY: 'bugbot-severity', + BUGBOT_COMMENT_LIMIT: 'bugbot-comment-limit', // Projects PROJECT_IDS: 'project-ids', PROJECT_COLUMN_ISSUE_CREATED: 'project-column-issue-created', @@ -56459,9 +57611,15 @@ exports.ACTIONS = { THINK: 'think_action', INITIAL_SETUP: 'initial_setup', CHECK_PROGRESS: 'check_progress_action', - DETECT_ERRORS: 'detect_errors_action', + DETECT_POTENTIAL_PROBLEMS: 'detect_potential_problems_action', RECOMMEND_STEPS: 'recommend_steps_action', }; +/** Hidden HTML comment prefix for bugbot findings (issue/PR comments). Format: */ +exports.BUGBOT_MARKER_PREFIX = 'gbf-bugbot'; +/** Max number of individual bugbot comments to create per issue/PR. Excess findings get one summary comment suggesting to review locally. */ +exports.BUGBOT_MAX_COMMENTS = 20; +/** Minimum severity to publish (findings below this are dropped). Order: high > medium > low > info. */ +exports.BUGBOT_MIN_SEVERITY = 'low'; exports.PROMPTS = {}; @@ -56473,7 +57631,7 @@ exports.PROMPTS = {}; "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.injectJsonAsMarkdownBlock = exports.extractReleaseType = exports.extractVersion = void 0; +exports.injectJsonAsMarkdownBlock = exports.extractChangelogUpToAdditionalContext = exports.extractReleaseType = exports.extractVersion = void 0; const extractVersion = (pattern, text) => { const versionPattern = new RegExp(`###\\s*${pattern}\\s+(\\d+\\.\\d+\\.\\d+)`, 'i'); const match = text.match(versionPattern); @@ -56486,6 +57644,22 @@ const extractReleaseType = (pattern, text) => { return match ? 
match[1] : undefined; }; exports.extractReleaseType = extractReleaseType; +/** + * Extracts changelog content from an issue body: from the given section heading (e.g. "Changelog" or "Hotfix Solution") + * up to but not including the "Additional Context" section. Used for release/hotfix deployment bodies. + */ +const extractChangelogUpToAdditionalContext = (body, sectionTitle) => { + if (body == null || body === '') { + return 'No changelog provided'; + } + const escaped = sectionTitle.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const pattern = new RegExp(`(?:###|##)\\s*${escaped}\\s*\\n\\n([\\s\\S]*?)` + + `(?=\\n(?:###|##)\\s*Additional Context\\s*|$)`, 'i'); + const match = body.match(pattern); + const content = match?.[1]?.trim(); + return content ?? 'No changelog provided'; +}; +exports.extractChangelogUpToAdditionalContext = extractChangelogUpToAdditionalContext; const injectJsonAsMarkdownBlock = (title, json) => { const formattedJson = JSON.stringify(json, null, 4) // Pretty-print the JSON with 4 spaces. .split('\n') // Split into lines. 
diff --git a/build/cli/src/data/model/ai.d.ts b/build/cli/src/data/model/ai.d.ts index d6ea5394..d45b1069 100644 --- a/build/cli/src/data/model/ai.d.ts +++ b/build/cli/src/data/model/ai.d.ts @@ -10,13 +10,17 @@ export declare class Ai { private aiMembersOnly; private aiIgnoreFiles; private aiIncludeReasoning; - constructor(opencodeServerUrl: string, opencodeModel: string, aiPullRequestDescription: boolean, aiMembersOnly: boolean, aiIgnoreFiles: string[], aiIncludeReasoning: boolean); + private bugbotMinSeverity; + private bugbotCommentLimit; + constructor(opencodeServerUrl: string, opencodeModel: string, aiPullRequestDescription: boolean, aiMembersOnly: boolean, aiIgnoreFiles: string[], aiIncludeReasoning: boolean, bugbotMinSeverity: string, bugbotCommentLimit: number); getOpencodeServerUrl(): string; getOpencodeModel(): string; getAiPullRequestDescription(): boolean; getAiMembersOnly(): boolean; getAiIgnoreFiles(): string[]; getAiIncludeReasoning(): boolean; + getBugbotMinSeverity(): string; + getBugbotCommentLimit(): number; /** * Parse "provider/model-id" into { providerID, modelID } for OpenCode session.prompt. * Uses OPENCODE_DEFAULT_MODEL when no model is set (e.g. opencode/kimi-k2.5-free). 
diff --git a/build/cli/src/data/model/single_action.d.ts b/build/cli/src/data/model/single_action.d.ts index 6ecdd44b..5ec86825 100644 --- a/build/cli/src/data/model/single_action.d.ts +++ b/build/cli/src/data/model/single_action.d.ts @@ -26,7 +26,7 @@ export declare class SingleAction { get isThinkAction(): boolean; get isInitialSetupAction(): boolean; get isCheckProgressAction(): boolean; - get isDetectErrorsAction(): boolean; + get isDetectPotentialProblemsAction(): boolean; get isRecommendStepsAction(): boolean; get enabledSingleAction(): boolean; get validSingleAction(): boolean; diff --git a/build/cli/src/data/repository/__tests__/ai_repository.test.d.ts b/build/cli/src/data/repository/__tests__/ai_repository.test.d.ts index 5c108792..9b53426a 100644 --- a/build/cli/src/data/repository/__tests__/ai_repository.test.d.ts +++ b/build/cli/src/data/repository/__tests__/ai_repository.test.d.ts @@ -1,6 +1,7 @@ /** * Integration-style tests for AiRepository with mocked fetch. * Covers edge cases for the OpenCode-based architecture: missing config, - * session/message failures, empty/invalid responses, JSON parsing, reasoning, getSessionDiff. + * session/message failures, empty/invalid responses, JSON parsing, reasoning, getSessionDiff, + * and retry behavior (OPENCODE_MAX_RETRIES). */ export {}; diff --git a/build/cli/src/data/repository/ai_repository.d.ts b/build/cli/src/data/repository/ai_repository.d.ts index e2bb1067..e9c48bd4 100644 --- a/build/cli/src/data/repository/ai_repository.d.ts +++ b/build/cli/src/data/repository/ai_repository.d.ts @@ -19,6 +19,31 @@ export declare const TRANSLATION_RESPONSE_SCHEMA: { readonly required: readonly ["translatedText"]; readonly additionalProperties: false; }; +/** JSON schema for Think (Q&A) responses: single answer field. 
*/ +export declare const THINK_RESPONSE_SCHEMA: { + readonly type: "object"; + readonly properties: { + readonly answer: { + readonly type: "string"; + readonly description: "The concise answer to the user question. Required."; + }; + }; + readonly required: readonly ["answer"]; + readonly additionalProperties: false; +}; +/** JSON schema for language check: done (already in locale) or must_translate. */ +export declare const LANGUAGE_CHECK_RESPONSE_SCHEMA: { + readonly type: "object"; + readonly properties: { + readonly status: { + readonly type: "string"; + readonly enum: readonly ["done", "must_translate"]; + readonly description: "done if text is in the requested locale, must_translate otherwise."; + }; + }; + readonly required: readonly ["status"]; + readonly additionalProperties: false; +}; export interface AskAgentOptions { /** Request JSON response and parse it. If schema provided, include it in the prompt. */ expectJson?: boolean; @@ -37,22 +62,18 @@ export interface OpenCodeFileDiff { /** * Get the diff for an OpenCode session (files changed by the agent). * Call after opencodeMessageWithAgent when using the "build" agent so the user can see what was edited. + * Wrapped with retries (OPENCODE_MAX_RETRIES). */ export declare function getSessionDiff(baseUrl: string, sessionId: string): Promise; export declare class AiRepository { - ask: (ai: Ai, prompt: string) => Promise; /** - * Ask an OpenCode agent (e.g. Plan) to perform a task. The server runs the full agent loop. - * Returns the final message (including reasoning in parts when includeReasoning is true). - * @param ai - AI config (server URL, model) - * @param agentId - OpenCode agent id (e.g. OPENCODE_AGENT_PLAN) - * @param prompt - User prompt - * @param options - expectJson, schema, includeReasoning - * @returns Response text, or parsed JSON when expectJson is true + * Ask an OpenCode agent (e.g. Plan) to perform a task. All calls use strict response (expectJson + schema). 
+ * Single retry system: HTTP failures and parse failures both retry up to OPENCODE_MAX_RETRIES. */ askAgent: (ai: Ai, agentId: string, prompt: string, options?: AskAgentOptions) => Promise | undefined>; /** * Run the OpenCode "build" agent for the copilot command. Returns the final message and sessionId. + * Uses the same retry system (OPENCODE_MAX_RETRIES). */ copilotMessage: (ai: Ai, prompt: string) => Promise<{ text: string; diff --git a/build/cli/src/data/repository/branch_repository.d.ts b/build/cli/src/data/repository/branch_repository.d.ts index f65ea00a..e8965846 100644 --- a/build/cli/src/data/repository/branch_repository.d.ts +++ b/build/cli/src/data/repository/branch_repository.d.ts @@ -33,7 +33,7 @@ export declare class BranchRepository { totalCommits: number; files: { filename: string; - status: "added" | "removed" | "modified" | "renamed" | "copied" | "changed" | "unchanged"; + status: "modified" | "added" | "removed" | "renamed" | "copied" | "changed" | "unchanged"; additions: number; deletions: number; changes: number; diff --git a/build/cli/src/data/repository/issue_repository.d.ts b/build/cli/src/data/repository/issue_repository.d.ts index 85aa697f..dbd004e0 100644 --- a/build/cli/src/data/repository/issue_repository.d.ts +++ b/build/cli/src/data/repository/issue_repository.d.ts @@ -39,6 +39,17 @@ export declare class IssueRepository { getHeadBranch: (owner: string, repository: string, issueNumber: number, token: string) => Promise; addComment: (owner: string, repository: string, issueNumber: number, comment: string, token: string) => Promise; updateComment: (owner: string, repository: string, issueNumber: number, commentId: number, comment: string, token: string) => Promise; + /** + * Lists all comments on an issue (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). 
+ */ + listIssueComments: (owner: string, repository: string, issueNumber: number, token: string) => Promise>; closeIssue: (owner: string, repository: string, issueNumber: number, token: string) => Promise; openIssue: (owner: string, repository: string, issueNumber: number, token: string) => Promise; getCurrentAssignees: (owner: string, repository: string, issueNumber: number, token: string) => Promise; diff --git a/build/cli/src/data/repository/pull_request_repository.d.ts b/build/cli/src/data/repository/pull_request_repository.d.ts index 4118cf6c..228713db 100644 --- a/build/cli/src/data/repository/pull_request_repository.d.ts +++ b/build/cli/src/data/repository/pull_request_repository.d.ts @@ -7,12 +7,26 @@ export declare class PullRequestRepository { isLinked: (pullRequestUrl: string) => Promise; updateBaseBranch: (owner: string, repository: string, pullRequestNumber: number, branch: string, token: string) => Promise; updateDescription: (owner: string, repository: string, pullRequestNumber: number, description: string, token: string) => Promise; + /** + * Returns all users involved in review: requested (pending) + those who already submitted a review. + * Used to avoid re-requesting someone who already reviewed when ensuring desired reviewer count. + */ getCurrentReviewers: (owner: string, repository: string, pullNumber: number, token: string) => Promise; addReviewersToPullRequest: (owner: string, repository: string, pullNumber: number, reviewers: string[], token: string) => Promise; getChangedFiles: (owner: string, repository: string, pullNumber: number, token: string) => Promise<{ filename: string; status: string; }[]>; + /** First line (right side) of the first hunk per file, for valid review comment placement. */ + private static firstLineFromPatch; + /** + * Returns for each changed file the first line number that appears in the diff (right side). + * Used so review comments use a line that GitHub can resolve (avoids "line could not be resolved"). 
+ */ + getFilesWithFirstDiffLine: (owner: string, repository: string, pullNumber: number, token: string) => Promise>; getPullRequestChanges: (owner: string, repository: string, pullNumber: number, token: string) => Promise>; + /** Head commit SHA of the PR (for creating review). */ + getPullRequestHeadSha: (owner: string, repository: string, pullNumber: number, token: string) => Promise; + /** + * List all review comments on a PR (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). + * Includes node_id for GraphQL (e.g. resolve review thread). + */ + listPullRequestReviewComments: (owner: string, repository: string, pullNumber: number, token: string) => Promise>; + /** + * Resolve a PR review thread (GraphQL only). Finds the thread that contains the given comment and marks it resolved. + * Uses repository.pullRequest.reviewThreads because the field pullRequestReviewThread on PullRequestReviewComment was removed from the API. + * Paginates through all threads and all comments in each thread so the comment is found regardless of PR size. + * No-op if thread is already resolved. Logs and does not throw on error. + */ + resolvePullRequestReviewThread: (owner: string, repository: string, pullNumber: number, commentNodeId: string, token: string) => Promise; + /** + * Create a review on the PR with one or more inline comments (bugbot findings). + * Each comment requires path and line (use first file and line 1 if not specified). + */ + createReviewWithComments: (owner: string, repository: string, pullNumber: number, commitId: string, comments: Array<{ + path: string; + line: number; + body: string; + }>, token: string) => Promise; + /** Update an existing PR review comment (e.g. to mark finding as resolved in body). 
*/ + updatePullRequestReviewComment: (owner: string, repository: string, commentId: number, body: string, token: string) => Promise; } diff --git a/build/cli/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts b/build/cli/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts index 01edf0e5..812db253 100644 --- a/build/cli/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts +++ b/build/cli/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts @@ -1,6 +1,6 @@ /** * Integration-style tests for CheckProgressUseCase with the OpenCode-based flow. * Covers edge cases: missing AI config, no issue/branch/description, AI returns undefined/invalid - * progress, retries when progress 0%, success path with label updates. + * progress, progress 0% (single call; HTTP retries are in AiRepository), success path with label updates. */ export {}; diff --git a/build/cli/src/usecase/actions/__tests__/deployed_action_use_case.test.d.ts b/build/cli/src/usecase/actions/__tests__/deployed_action_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/actions/__tests__/deployed_action_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/actions/check_progress_use_case.d.ts b/build/cli/src/usecase/actions/check_progress_use_case.d.ts index f3d3f26a..2c536409 100644 --- a/build/cli/src/usecase/actions/check_progress_use_case.d.ts +++ b/build/cli/src/usecase/actions/check_progress_use_case.d.ts @@ -10,7 +10,7 @@ export declare class CheckProgressUseCase implements ParamUseCase; /** * Calls the OpenCode agent once and returns parsed progress, summary, and reasoning. - * Used inside the retry loop when progress is 0%. + * HTTP-level retries are handled by AiRepository (OPENCODE_MAX_RETRIES). 
*/ private fetchProgressAttempt; /** diff --git a/build/cli/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.d.ts b/build/cli/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.d.ts b/build/cli/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.d.ts new file mode 100644 index 00000000..a68dd59d --- /dev/null +++ b/build/cli/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.d.ts @@ -0,0 +1,6 @@ +/** + * Unit tests for DetectPotentialProblemsUseCase (bugbot on push). + * Covers: skip when OpenCode/issue missing, prompt with/without previous findings, + * new findings (add/update issue and PR comments), resolved_finding_ids, errors. + */ +export {}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.d.ts b/build/cli/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.d.ts new file mode 100644 index 00000000..fd8207cb --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for deduplicateFindings: dedupe by (file, line) or by title when no location. + */ +export {}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.d.ts b/build/cli/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.d.ts new file mode 100644 index 00000000..e8076137 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for file_ignore: fileMatchesIgnorePatterns (glob-style path matching). 
+ */ +export {}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.d.ts b/build/cli/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.d.ts new file mode 100644 index 00000000..8bead7b4 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for applyCommentLimit: max comments and overflow titles. + */ +export {}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.d.ts b/build/cli/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/__tests__/severity.test.d.ts b/build/cli/src/usecase/steps/commit/bugbot/__tests__/severity.test.d.ts new file mode 100644 index 00000000..12b0c054 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/__tests__/severity.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for bugbot severity helpers: normalizeMinSeverity, severityLevel, meetsMinSeverity. 
+ */ +export {}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/build_bugbot_prompt.d.ts b/build/cli/src/usecase/steps/commit/bugbot/build_bugbot_prompt.d.ts new file mode 100644 index 00000000..9c6bc28c --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/build_bugbot_prompt.d.ts @@ -0,0 +1,3 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +export declare function buildBugbotPrompt(param: Execution, context: BugbotContext): string; diff --git a/build/cli/src/usecase/steps/commit/bugbot/deduplicate_findings.d.ts b/build/cli/src/usecase/steps/commit/bugbot/deduplicate_findings.d.ts new file mode 100644 index 00000000..856de1f9 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/deduplicate_findings.d.ts @@ -0,0 +1,7 @@ +import type { BugbotFinding } from "./types"; +/** + * Deduplicates findings by (file, line). When two findings share the same file and line, + * keeps the first; when they have no file, groups by normalized title and keeps the first. + * This reduces noise when the agent returns near-duplicate issues. + */ +export declare function deduplicateFindings(findings: BugbotFinding[]): BugbotFinding[]; diff --git a/build/cli/src/usecase/steps/commit/bugbot/file_ignore.d.ts b/build/cli/src/usecase/steps/commit/bugbot/file_ignore.d.ts new file mode 100644 index 00000000..f32bd91d --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/file_ignore.d.ts @@ -0,0 +1,5 @@ +/** + * Returns true if the file path matches any of the ignore patterns (glob-style). + * Used to exclude findings in test files, build output, etc. 
+ */ +export declare function fileMatchesIgnorePatterns(filePath: string | undefined, ignorePatterns: string[]): boolean; diff --git a/build/cli/src/usecase/steps/commit/bugbot/limit_comments.d.ts b/build/cli/src/usecase/steps/commit/bugbot/limit_comments.d.ts new file mode 100644 index 00000000..006c7a05 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/limit_comments.d.ts @@ -0,0 +1,14 @@ +import type { BugbotFinding } from "./types"; +export interface ApplyLimitResult { + /** Findings to publish as individual comments (up to maxComments). */ + toPublish: BugbotFinding[]; + /** Number of findings not published as individual comments. */ + overflowCount: number; + /** Titles of overflow findings (for the summary comment). */ + overflowTitles: string[]; +} +/** + * Applies the max-comments limit: returns the first N findings to publish individually, + * and overflow count + titles for a single "revisar en local" summary comment. + */ +export declare function applyCommentLimit(findings: BugbotFinding[], maxComments?: number): ApplyLimitResult; diff --git a/build/cli/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.d.ts b/build/cli/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.d.ts new file mode 100644 index 00000000..361f5940 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.d.ts @@ -0,0 +1,8 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +/** + * Loads all context needed for bugbot: existing findings from issue + PR comments, + * open PR numbers, and the prompt block for previously reported issues. + * Also loads PR context (head sha, files, diff lines) for the first open PR. 
+ */ +export declare function loadBugbotContext(param: Execution): Promise; diff --git a/build/cli/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.d.ts b/build/cli/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.d.ts new file mode 100644 index 00000000..93448758 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.d.ts @@ -0,0 +1,13 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +export interface MarkFindingsResolvedParam { + execution: Execution; + context: BugbotContext; + resolvedFindingIds: Set; + normalizedResolvedIds: Set; +} +/** + * Marks as resolved the findings that OpenCode reported as fixed. + * Updates issue comments (with visible "Resolved" note) and PR review comments (marker only + resolve thread). + */ +export declare function markFindingsResolved(param: MarkFindingsResolvedParam): Promise; diff --git a/build/cli/src/usecase/steps/commit/bugbot/marker.d.ts b/build/cli/src/usecase/steps/commit/bugbot/marker.d.ts new file mode 100644 index 00000000..316074ba --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/marker.d.ts @@ -0,0 +1,21 @@ +import type { BugbotFinding } from "./types"; +/** Sanitize finding ID so it cannot break HTML comment syntax (e.g. -->, , newlines, quotes). */ +export declare function sanitizeFindingIdForMarker(findingId: string): string; +export declare function buildMarker(findingId: string, resolved: boolean): string; +export declare function parseMarker(body: string | null): Array<{ + findingId: string; + resolved: boolean; +}>; +/** Regex to match the marker for a specific finding (same flexible format as parseMarker). */ +export declare function markerRegexForFinding(findingId: string): RegExp; +/** + * Find the marker for this finding in body (using same pattern as parseMarker) and replace it. + * Returns the updated body and whether a replacement was made. 
Logs an error with details if no replacement occurred. + */ +export declare function replaceMarkerInBody(body: string, findingId: string, newResolved: boolean, replacement?: string): { + updated: string; + replaced: boolean; +}; +/** Extract title from comment body (first ## line) for context when sending to OpenCode. */ +export declare function extractTitleFromBody(body: string | null): string; +export declare function buildCommentBody(finding: BugbotFinding, resolved: boolean): string; diff --git a/build/cli/src/usecase/steps/commit/bugbot/path_validation.d.ts b/build/cli/src/usecase/steps/commit/bugbot/path_validation.d.ts new file mode 100644 index 00000000..ca19c32e --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/path_validation.d.ts @@ -0,0 +1,25 @@ +/** + * Path validation for AI-returned finding.file to prevent path traversal and misuse. + * Rejects paths containing '..', null bytes, or absolute paths. + */ +/** + * Returns true if the path is safe to use: no '..', no null bytes, not absolute. + * Does not check against a list of allowed files; use isAllowedPathForPr for that. + */ +export declare function isSafeFindingFilePath(path: string | undefined): boolean; +/** + * Returns true if path is safe (isSafeFindingFilePath) and is in the list of PR changed files. + * Used to validate finding.file before using it for PR review comments. + */ +export declare function isAllowedPathForPr(path: string | undefined, prFiles: Array<{ + filename: string; +}>): boolean; +/** + * Resolves the file path to use for a PR review comment: finding.file if valid and in prFiles. + * Returns undefined when the finding's file is not in the PR so we do not attach the comment + * to the wrong file (e.g. the first file in the list). 
+ */ +export declare function resolveFindingPathForPr(findingFile: string | undefined, prFiles: Array<{ + filename: string; + status: string; +}>): string | undefined; diff --git a/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts b/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts new file mode 100644 index 00000000..e9270fbb --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts @@ -0,0 +1,16 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +import type { BugbotFinding } from "./types"; +export interface PublishFindingsParam { + execution: Execution; + context: BugbotContext; + findings: BugbotFinding[]; + /** When findings were limited by max comments, add one summary comment with this overflow info. */ + overflowCount?: number; + overflowTitles?: string[]; +} +/** + * Publishes current findings to issue and PR: creates or updates issue comments, + * creates or updates PR review comments (or creates new ones). + */ +export declare function publishFindings(param: PublishFindingsParam): Promise<void>; diff --git a/build/cli/src/usecase/steps/commit/bugbot/schema.d.ts b/build/cli/src/usecase/steps/commit/bugbot/schema.d.ts new file mode 100644 index 00000000..5a66ca5e --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/schema.d.ts @@ -0,0 +1,53 @@ +/** OpenCode response schema: agent computes diff, returns new findings and which previous ones are resolved. */ +export declare const BUGBOT_RESPONSE_SCHEMA: { + readonly type: "object"; + readonly properties: { + readonly findings: { + readonly type: "array"; + readonly items: { + readonly type: "object"; + readonly properties: { + readonly id: { + readonly type: "string"; + readonly description: "Stable unique id for this finding (e.g.
file:line:summary)"; + }; + readonly title: { + readonly type: "string"; + readonly description: "Short title of the problem"; + }; + readonly description: { + readonly type: "string"; + readonly description: "Clear explanation of the issue"; + }; + readonly file: { + readonly type: "string"; + readonly description: "Repository-relative path when applicable"; + }; + readonly line: { + readonly type: "number"; + readonly description: "Line number when applicable"; + }; + readonly severity: { + readonly type: "string"; + readonly description: "Severity: high, medium, low, or info. Findings below the configured minimum are not published."; + }; + readonly suggestion: { + readonly type: "string"; + readonly description: "Suggested fix when applicable"; + }; + }; + readonly required: readonly ["id", "title", "description"]; + readonly additionalProperties: true; + }; + }; + readonly resolved_finding_ids: { + readonly type: "array"; + readonly items: { + readonly type: "string"; + }; + readonly description: "Ids of previously reported issues (from the list we sent) that are now fixed in the current code. Only include ids we asked you to check."; + }; + }; + readonly required: readonly ["findings"]; + readonly additionalProperties: false; +}; diff --git a/build/cli/src/usecase/steps/commit/bugbot/severity.d.ts b/build/cli/src/usecase/steps/commit/bugbot/severity.d.ts new file mode 100644 index 00000000..ae6635cc --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/severity.d.ts @@ -0,0 +1,6 @@ +export type SeverityLevel = 'info' | 'low' | 'medium' | 'high'; +/** Normalizes user input to a valid SeverityLevel; defaults to 'low' if invalid. */ +export declare function normalizeMinSeverity(value: string | undefined): SeverityLevel; +export declare function severityLevel(severity: string | undefined): number; +/** Returns true if the finding's severity is at or above the minimum threshold. 
 */ +export declare function meetsMinSeverity(findingSeverity: string | undefined, minSeverity: SeverityLevel): boolean; diff --git a/build/cli/src/usecase/steps/commit/bugbot/types.d.ts b/build/cli/src/usecase/steps/commit/bugbot/types.d.ts new file mode 100644 index 00000000..79e3ce79 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/bugbot/types.d.ts @@ -0,0 +1,35 @@ +/** Single finding from OpenCode (agent computes changes and returns these). */ +export interface BugbotFinding { + id: string; + title: string; + description: string; + file?: string; + line?: number; + severity?: string; + suggestion?: string; +} +export interface ExistingFindingInfo { + issueCommentId?: number; + prCommentId?: number; + prNumber?: number; + resolved: boolean; +} +export type ExistingByFindingId = Record<string, ExistingFindingInfo>; +export interface BugbotPrContext { + prHeadSha: string; + prFiles: Array<{ + filename: string; + status: string; + }>; + pathToFirstDiffLine: Record<string, number>; +} +export interface BugbotContext { + existingByFindingId: ExistingByFindingId; + issueComments: Array<{ + id: number; + body: string | null; + }>; + openPrNumbers: number[]; + previousFindingsBlock: string; + prContext: BugbotPrContext | null; +} diff --git a/build/cli/src/usecase/steps/commit/detect_potential_problems_use_case.d.ts b/build/cli/src/usecase/steps/commit/detect_potential_problems_use_case.d.ts new file mode 100644 index 00000000..4c622b26 --- /dev/null +++ b/build/cli/src/usecase/steps/commit/detect_potential_problems_use_case.d.ts @@ -0,0 +1,9 @@ +import { Execution } from "../../../data/model/execution"; +import { Result } from "../../../data/model/result"; +import { ParamUseCase } from "../../base/param_usecase"; +export type { BugbotFinding } from "./bugbot/types"; +export declare class DetectPotentialProblemsUseCase implements ParamUseCase<Execution, Result[]> { + taskId: string; + private aiRepository; + invoke(param: Execution): Promise<Result[]>; +} diff --git
a/build/cli/src/usecase/steps/common/__tests__/publish_resume_use_case.test.d.ts b/build/cli/src/usecase/steps/common/__tests__/publish_resume_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/common/__tests__/publish_resume_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.d.ts b/build/cli/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.d.ts b/build/cli/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.d.ts b/build/cli/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.d.ts b/build/cli/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/cli/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/cli/src/utils/constants.d.ts b/build/cli/src/utils/constants.d.ts index ddb1d79a..f9c9b80c 100644 --- 
a/build/cli/src/utils/constants.d.ts +++ b/build/cli/src/utils/constants.d.ts @@ -3,8 +3,12 @@ export declare const TITLE = "Giik"; export declare const REPO_URL = "https://github.com/landamessenger/git-board-flow"; /** Default OpenCode model: provider/modelID (e.g. opencode/kimi-k2.5-free). Reuse for CLI, action and Ai fallbacks. */ export declare const OPENCODE_DEFAULT_MODEL = "opencode/kimi-k2.5-free"; -/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow with many files. */ -export declare const OPENCODE_REQUEST_TIMEOUT_MS = 600000; +/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow (e.g. plan analyzing repo). */ +export declare const OPENCODE_REQUEST_TIMEOUT_MS = 900000; +/** Max attempts for OpenCode requests (retries on failure). Applied transparently in AiRepository. */ +export declare const OPENCODE_MAX_RETRIES = 5; +/** Delay in ms between OpenCode retry attempts. */ +export declare const OPENCODE_RETRY_DELAY_MS = 2000; export declare const DEFAULT_IMAGE_CONFIG: { issue: { automatic: string[]; @@ -61,6 +65,8 @@ export declare const INPUT_KEYS: { readonly AI_MEMBERS_ONLY: "ai-members-only"; readonly AI_IGNORE_FILES: "ai-ignore-files"; readonly AI_INCLUDE_REASONING: "ai-include-reasoning"; + readonly BUGBOT_SEVERITY: "bugbot-severity"; + readonly BUGBOT_COMMENT_LIMIT: "bugbot-comment-limit"; readonly PROJECT_IDS: "project-ids"; readonly PROJECT_COLUMN_ISSUE_CREATED: "project-column-issue-created"; readonly PROJECT_COLUMN_PULL_REQUEST_CREATED: "project-column-pull-request-created"; @@ -193,7 +199,13 @@ export declare const ACTIONS: { readonly THINK: "think_action"; readonly INITIAL_SETUP: "initial_setup"; readonly CHECK_PROGRESS: "check_progress_action"; - readonly DETECT_ERRORS: "detect_errors_action"; + readonly DETECT_POTENTIAL_PROBLEMS: "detect_potential_problems_action"; readonly RECOMMEND_STEPS: "recommend_steps_action"; }; +/** Hidden HTML comment 
prefix for bugbot findings (issue/PR comments). Format: */ +export declare const BUGBOT_MARKER_PREFIX = "gbf-bugbot"; +/** Max number of individual bugbot comments to create per issue/PR. Excess findings get one summary comment suggesting to review locally. */ +export declare const BUGBOT_MAX_COMMENTS = 20; +/** Minimum severity to publish (findings below this are dropped). Order: high > medium > low > info. */ +export declare const BUGBOT_MIN_SEVERITY: 'info' | 'low' | 'medium' | 'high'; export declare const PROMPTS: {}; diff --git a/build/cli/src/utils/content_utils.d.ts b/build/cli/src/utils/content_utils.d.ts index ae98b2d9..4a1f6ff2 100644 --- a/build/cli/src/utils/content_utils.d.ts +++ b/build/cli/src/utils/content_utils.d.ts @@ -1,3 +1,8 @@ export declare const extractVersion: (pattern: string, text: string) => string | undefined; export declare const extractReleaseType: (pattern: string, text: string) => string | undefined; +/** + * Extracts changelog content from an issue body: from the given section heading (e.g. "Changelog" or "Hotfix Solution") + * up to but not including the "Additional Context" section. Used for release/hotfix deployment bodies. + */ +export declare const extractChangelogUpToAdditionalContext: (body: string | null | undefined, sectionTitle: string) => string; export declare const injectJsonAsMarkdownBlock: (title: string, json: object) => string; diff --git a/build/cli/src/utils/opencode_server.d.ts b/build/cli/src/utils/opencode_server.d.ts index 7cdb77a6..e4b58f26 100644 --- a/build/cli/src/utils/opencode_server.d.ts +++ b/build/cli/src/utils/opencode_server.d.ts @@ -1,6 +1,7 @@ /** * Managed OpenCode server lifecycle for GitHub Actions. * Starts "npx opencode-ai serve" and stops it when the action finishes. + * If no opencode.json exists in cwd, creates one with provider timeout 10 min and removes it on stop. 
*/ import { ChildProcess } from 'child_process'; export interface ManagedOpencodeServer { diff --git a/build/github_action/index.js b/build/github_action/index.js index 87b8cb74..f850c69a 100644 --- a/build/github_action/index.js +++ b/build/github_action/index.js @@ -42119,6 +42119,11 @@ async function runGitHubAction() { .split(',') .map(path => path.trim()) .filter(path => path.length > 0); + const bugbotSeverity = getInput(constants_1.INPUT_KEYS.BUGBOT_SEVERITY) || constants_1.BUGBOT_MIN_SEVERITY; + const bugbotCommentLimitRaw = parseInt(getInput(constants_1.INPUT_KEYS.BUGBOT_COMMENT_LIMIT), 10); + const bugbotCommentLimit = Number.isNaN(bugbotCommentLimitRaw) || bugbotCommentLimitRaw < 1 + ? constants_1.BUGBOT_MAX_COMMENTS + : Math.min(bugbotCommentLimitRaw, 200); /** * Projects Details */ @@ -42434,7 +42439,7 @@ async function runGitHubAction() { const pullRequestDesiredAssigneesCount = parseInt(getInput(constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_ASSIGNEES_COUNT)) ?? 0; const pullRequestDesiredReviewersCount = parseInt(getInput(constants_1.INPUT_KEYS.PULL_REQUEST_DESIRED_REVIEWERS_COUNT)) ?? 0; const pullRequestMergeTimeout = parseInt(getInput(constants_1.INPUT_KEYS.PULL_REQUEST_MERGE_TIMEOUT)) ?? 
0; - const execution = new execution_1.Execution(debug, new single_action_1.SingleAction(singleAction, singleActionIssue, singleActionVersion, singleActionTitle, singleActionChangelog), commitPrefixBuilder, new issue_1.Issue(branchManagementAlways, reopenIssueOnPush, issueDesiredAssigneesCount), new pull_request_1.PullRequest(pullRequestDesiredAssigneesCount, pullRequestDesiredReviewersCount, pullRequestMergeTimeout), new emoji_1.Emoji(titleEmoji, branchManagementEmoji), new images_1.Images(imagesOnIssue, imagesOnPullRequest, imagesOnCommit, imagesIssueAutomatic, imagesIssueFeature, imagesIssueBugfix, imagesIssueDocs, imagesIssueChore, imagesIssueRelease, imagesIssueHotfix, imagesPullRequestAutomatic, imagesPullRequestFeature, imagesPullRequestBugfix, imagesPullRequestRelease, imagesPullRequestHotfix, imagesPullRequestDocs, imagesPullRequestChore, imagesCommitAutomatic, imagesCommitFeature, imagesCommitBugfix, imagesCommitRelease, imagesCommitHotfix, imagesCommitDocs, imagesCommitChore), new tokens_1.Tokens(token), new ai_1.Ai(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning), new labels_1.Labels(branchManagementLauncherLabel, bugLabel, bugfixLabel, hotfixLabel, enhancementLabel, featureLabel, releaseLabel, questionLabel, helpLabel, deployLabel, deployedLabel, docsLabel, documentationLabel, choreLabel, maintenanceLabel, priorityHighLabel, priorityMediumLabel, priorityLowLabel, priorityNoneLabel, sizeXxlLabel, sizeXlLabel, sizeLLabel, sizeMLabel, sizeSLabel, sizeXsLabel), new issue_types_1.IssueTypes(issueTypeTask, issueTypeTaskDescription, issueTypeTaskColor, issueTypeBug, issueTypeBugDescription, issueTypeBugColor, issueTypeFeature, issueTypeFeatureDescription, issueTypeFeatureColor, issueTypeDocumentation, issueTypeDocumentationDescription, issueTypeDocumentationColor, issueTypeMaintenance, issueTypeMaintenanceDescription, issueTypeMaintenanceColor, issueTypeHotfix, issueTypeHotfixDescription, 
issueTypeHotfixColor, issueTypeRelease, issueTypeReleaseDescription, issueTypeReleaseColor, issueTypeQuestion, issueTypeQuestionDescription, issueTypeQuestionColor, issueTypeHelp, issueTypeHelpDescription, issueTypeHelpColor), new locale_1.Locale(issueLocale, pullRequestLocale), new size_thresholds_1.SizeThresholds(new size_threshold_1.SizeThreshold(sizeXxlThresholdLines, sizeXxlThresholdFiles, sizeXxlThresholdCommits), new size_threshold_1.SizeThreshold(sizeXlThresholdLines, sizeXlThresholdFiles, sizeXlThresholdCommits), new size_threshold_1.SizeThreshold(sizeLThresholdLines, sizeLThresholdFiles, sizeLThresholdCommits), new size_threshold_1.SizeThreshold(sizeMThresholdLines, sizeMThresholdFiles, sizeMThresholdCommits), new size_threshold_1.SizeThreshold(sizeSThresholdLines, sizeSThresholdFiles, sizeSThresholdCommits), new size_threshold_1.SizeThreshold(sizeXsThresholdLines, sizeXsThresholdFiles, sizeXsThresholdCommits)), new branches_1.Branches(mainBranch, developmentBranch, featureTree, bugfixTree, hotfixTree, releaseTree, docsTree, choreTree), new release_1.Release(), new hotfix_1.Hotfix(), new workflows_1.Workflows(releaseWorkflow, hotfixWorkflow), new projects_1.Projects(projects, projectColumnIssueCreated, projectColumnPullRequestCreated, projectColumnIssueInProgress, projectColumnPullRequestInProgress), undefined, undefined); + const execution = new execution_1.Execution(debug, new single_action_1.SingleAction(singleAction, singleActionIssue, singleActionVersion, singleActionTitle, singleActionChangelog), commitPrefixBuilder, new issue_1.Issue(branchManagementAlways, reopenIssueOnPush, issueDesiredAssigneesCount), new pull_request_1.PullRequest(pullRequestDesiredAssigneesCount, pullRequestDesiredReviewersCount, pullRequestMergeTimeout), new emoji_1.Emoji(titleEmoji, branchManagementEmoji), new images_1.Images(imagesOnIssue, imagesOnPullRequest, imagesOnCommit, imagesIssueAutomatic, imagesIssueFeature, imagesIssueBugfix, imagesIssueDocs, imagesIssueChore, 
imagesIssueRelease, imagesIssueHotfix, imagesPullRequestAutomatic, imagesPullRequestFeature, imagesPullRequestBugfix, imagesPullRequestRelease, imagesPullRequestHotfix, imagesPullRequestDocs, imagesPullRequestChore, imagesCommitAutomatic, imagesCommitFeature, imagesCommitBugfix, imagesCommitRelease, imagesCommitHotfix, imagesCommitDocs, imagesCommitChore), new tokens_1.Tokens(token), new ai_1.Ai(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, bugbotSeverity, bugbotCommentLimit), new labels_1.Labels(branchManagementLauncherLabel, bugLabel, bugfixLabel, hotfixLabel, enhancementLabel, featureLabel, releaseLabel, questionLabel, helpLabel, deployLabel, deployedLabel, docsLabel, documentationLabel, choreLabel, maintenanceLabel, priorityHighLabel, priorityMediumLabel, priorityLowLabel, priorityNoneLabel, sizeXxlLabel, sizeXlLabel, sizeLLabel, sizeMLabel, sizeSLabel, sizeXsLabel), new issue_types_1.IssueTypes(issueTypeTask, issueTypeTaskDescription, issueTypeTaskColor, issueTypeBug, issueTypeBugDescription, issueTypeBugColor, issueTypeFeature, issueTypeFeatureDescription, issueTypeFeatureColor, issueTypeDocumentation, issueTypeDocumentationDescription, issueTypeDocumentationColor, issueTypeMaintenance, issueTypeMaintenanceDescription, issueTypeMaintenanceColor, issueTypeHotfix, issueTypeHotfixDescription, issueTypeHotfixColor, issueTypeRelease, issueTypeReleaseDescription, issueTypeReleaseColor, issueTypeQuestion, issueTypeQuestionDescription, issueTypeQuestionColor, issueTypeHelp, issueTypeHelpDescription, issueTypeHelpColor), new locale_1.Locale(issueLocale, pullRequestLocale), new size_thresholds_1.SizeThresholds(new size_threshold_1.SizeThreshold(sizeXxlThresholdLines, sizeXxlThresholdFiles, sizeXxlThresholdCommits), new size_threshold_1.SizeThreshold(sizeXlThresholdLines, sizeXlThresholdFiles, sizeXlThresholdCommits), new size_threshold_1.SizeThreshold(sizeLThresholdLines, sizeLThresholdFiles, 
sizeLThresholdCommits), new size_threshold_1.SizeThreshold(sizeMThresholdLines, sizeMThresholdFiles, sizeMThresholdCommits), new size_threshold_1.SizeThreshold(sizeSThresholdLines, sizeSThresholdFiles, sizeSThresholdCommits), new size_threshold_1.SizeThreshold(sizeXsThresholdLines, sizeXsThresholdFiles, sizeXsThresholdCommits)), new branches_1.Branches(mainBranch, developmentBranch, featureTree, bugfixTree, hotfixTree, releaseTree, docsTree, choreTree), new release_1.Release(), new hotfix_1.Hotfix(), new workflows_1.Workflows(releaseWorkflow, hotfixWorkflow), new projects_1.Projects(projects, projectColumnIssueCreated, projectColumnPullRequestCreated, projectColumnIssueInProgress, projectColumnPullRequestInProgress), undefined, undefined); const results = await (0, common_action_1.mainRun)(execution); await finishWithResults(execution, results); } @@ -42508,13 +42513,15 @@ const constants_1 = __nccwpck_require__(8593); * API keys are configured on the OpenCode server, not here. */ class Ai { - constructor(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning) { + constructor(opencodeServerUrl, opencodeModel, aiPullRequestDescription, aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, bugbotMinSeverity, bugbotCommentLimit) { this.opencodeServerUrl = opencodeServerUrl; this.opencodeModel = opencodeModel; this.aiPullRequestDescription = aiPullRequestDescription; this.aiMembersOnly = aiMembersOnly; this.aiIgnoreFiles = aiIgnoreFiles; this.aiIncludeReasoning = aiIncludeReasoning; + this.bugbotMinSeverity = bugbotMinSeverity; + this.bugbotCommentLimit = bugbotCommentLimit; } getOpencodeServerUrl() { return this.opencodeServerUrl; @@ -42534,6 +42541,12 @@ class Ai { getAiIncludeReasoning() { return this.aiIncludeReasoning; } + getBugbotMinSeverity() { + return this.bugbotMinSeverity; + } + getBugbotCommentLimit() { + return this.bugbotCommentLimit; + } /** * Parse "provider/model-id" into { providerID, modelID } for 
OpenCode session.prompt. * Uses OPENCODE_DEFAULT_MODEL when no model is set (e.g. opencode/kimi-k2.5-free). @@ -42983,6 +42996,9 @@ class Execution { else { this.currentConfiguration.parentBranch = this.previousConfiguration?.parentBranch; } + if (this.currentConfiguration.parentBranch === undefined && this.previousConfiguration?.parentBranch != null) { + this.currentConfiguration.parentBranch = this.previousConfiguration.parentBranch; + } if (this.isSingleAction) { /** * Nothing to do here (for now) @@ -43745,8 +43761,8 @@ class SingleAction { get isCheckProgressAction() { return this.currentSingleAction === constants_1.ACTIONS.CHECK_PROGRESS; } - get isDetectErrorsAction() { - return this.currentSingleAction === constants_1.ACTIONS.DETECT_ERRORS; + get isDetectPotentialProblemsAction() { + return this.currentSingleAction === constants_1.ACTIONS.DETECT_POTENTIAL_PROBLEMS; } get isRecommendStepsAction() { return this.currentSingleAction === constants_1.ACTIONS.RECOMMEND_STEPS; @@ -43774,7 +43790,7 @@ class SingleAction { constants_1.ACTIONS.THINK, constants_1.ACTIONS.INITIAL_SETUP, constants_1.ACTIONS.CHECK_PROGRESS, - constants_1.ACTIONS.DETECT_ERRORS, + constants_1.ACTIONS.DETECT_POTENTIAL_PROBLEMS, constants_1.ACTIONS.RECOMMEND_STEPS, ]; /** @@ -43941,10 +43957,52 @@ exports.Workflows = Workflows; "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.AiRepository = exports.TRANSLATION_RESPONSE_SCHEMA = exports.OPENCODE_AGENT_BUILD = exports.OPENCODE_AGENT_PLAN = void 0; +exports.AiRepository = exports.LANGUAGE_CHECK_RESPONSE_SCHEMA = exports.THINK_RESPONSE_SCHEMA = exports.TRANSLATION_RESPONSE_SCHEMA = exports.OPENCODE_AGENT_BUILD = exports.OPENCODE_AGENT_PLAN = void 0; exports.getSessionDiff = getSessionDiff; const constants_1 = __nccwpck_require__(8593); const logger_1 = __nccwpck_require__(8836); +function delay(ms) { + return new Promise((resolve) => setTimeout(resolve, ms)); +} +/** + * Runs an async OpenCode operation with 
retries. On failure, logs and retries up to OPENCODE_MAX_RETRIES. + * Single retry system for all OpenCode interactions: no parallel retry logic. + * + * Retries when the operation throws, including: + * - Network errors (fetch fails, connection refused, etc.) + * - HTTP errors (4xx/5xx from session create or message) + * - Timeout (OPENCODE_REQUEST_TIMEOUT_MS) + * - Empty or invalid JSON response body (parseJsonResponse throws) + * - Missing session id in create response + * - Parse failure of expected format (e.g. expectJson but text is not valid JSON) when parse is done inside the callback + */ +async function withOpenCodeRetry(fn, context) { + let lastError; + for (let attempt = 1; attempt <= constants_1.OPENCODE_MAX_RETRIES; attempt++) { + try { + return await fn(); + } + catch (error) { + lastError = error; + const message = error instanceof Error ? error.message : String(error); + const cause = error instanceof Error && error.cause instanceof Error + ? error.cause.message + : ''; + const detail = cause ? ` (cause: ${cause})` : ''; + const noResponseHint = message === 'fetch failed' + ? ' No HTTP response; connection lost or timeout. If this was before the client timeout (see log above), the OpenCode server or a proxy may have a shorter timeout.' 
+ : ''; + if (attempt < constants_1.OPENCODE_MAX_RETRIES) { + (0, logger_1.logInfo)(`OpenCode [${context}] attempt ${attempt}/${constants_1.OPENCODE_MAX_RETRIES} failed: ${message}${detail}.${noResponseHint} Retrying in ${constants_1.OPENCODE_RETRY_DELAY_MS}ms...`); + await delay(constants_1.OPENCODE_RETRY_DELAY_MS); + } + else { + (0, logger_1.logError)(`OpenCode [${context}] failed after ${constants_1.OPENCODE_MAX_RETRIES} attempts: ${message}${detail}`); + } + } + } + throw lastError; +} function createTimeoutSignal(ms) { const controller = new AbortController(); setTimeout(() => controller.abort(new Error(`OpenCode request timeout after ${ms}ms`)), ms); @@ -43953,7 +44011,121 @@ function createTimeoutSignal(ms) { function ensureNoTrailingSlash(url) { return url.replace(/\/+$/, '') || url; } -const OPENCODE_RESPONSE_LOG_MAX_LEN = 2000; +function truncate(s, maxLen) { + return s.length <= maxLen ? s : s.slice(0, maxLen) + '...'; +} +const OPENCODE_PROMPT_LOG_PREVIEW_LEN = 500; +const OPENCODE_PROMPT_LOG_FULL_LEN = 3000; +function getValidatedOpenCodeConfig(ai) { + const serverUrl = ai.getOpencodeServerUrl(); + const model = ai.getOpencodeModel(); + if (!serverUrl?.trim() || !model?.trim()) { + (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); + return null; + } + const { providerID, modelID } = ai.getOpencodeModelParts(); + return { serverUrl, providerID, modelID, model }; +} +/** + * Try to extract the first complete JSON object from text (from first `{` with balanced braces). + * Handles being inside a double-quoted string so we don't count braces there. 
+ */ +function extractFirstJsonObject(text) { + const start = text.indexOf('{'); + if (start === -1) + return null; + let depth = 1; + let inString = false; + let escape = false; + let quoteChar = '"'; + for (let i = start + 1; i < text.length; i++) { + const c = text[i]; + if (escape) { + escape = false; + continue; + } + if (c === '\\' && inString) { + escape = true; + continue; + } + if (inString) { + if (c === quoteChar) + inString = false; + continue; + } + if (c === '"' || c === "'") { + inString = true; + quoteChar = c; + continue; + } + if (c === '{') + depth++; + else if (c === '}') { + depth--; + if (depth === 0) + return text.slice(start, i + 1); + } + } + return null; +} +/** + * Parse JSON from agent response text safely. + * Tries: (1) direct parse, (2) strip markdown code fence, (3) extract first JSON object from text (model often adds prose before JSON). + * @throws Error with clear message if parsing fails + */ +function parseJsonFromAgentText(text) { + const trimmed = text.trim(); + if (!trimmed) { + throw new Error('Agent response text is empty'); + } + // 1) Direct parse + try { + return JSON.parse(trimmed); + } + catch { + // 2) Model may wrap JSON in ```json ... ``` or ``` ... ``` + const withoutFence = trimmed + .replace(/^```(?:json)?\s*\n?/i, '') + .replace(/\n?```\s*$/i, '') + .trim(); + try { + return JSON.parse(withoutFence); + } + catch { + // 3) Model may add prose before the JSON (e.g. "Based on my analysis... { ... }") + const extracted = extractFirstJsonObject(trimmed); + if (extracted) { + try { + return JSON.parse(extracted); + } + catch (e) { + const msg = e instanceof Error ? e.message : String(e); + (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): failed to parse extracted JSON. Full text length=${trimmed.length} firstChars=${JSON.stringify(trimmed.slice(0, 200))}`); + throw new Error(`Agent response is not valid JSON: ${msg}`); + } + } + const previewLen = 500; + const msg = trimmed.length > previewLen ? 
`${trimmed.slice(0, previewLen)}...` : trimmed; + const fullTruncated = trimmed.length > 3000 ? `${trimmed.slice(0, 3000)}... [total ${trimmed.length} chars]` : trimmed; + (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length} preview=${JSON.stringify(msg)}`); + (0, logger_1.logDebugInfo)(`OpenCode agent response (expectJson) full text for debugging:\n${fullTruncated}`); + throw new Error(`Agent response is not valid JSON: no JSON object found. Response starts with: ${msg.slice(0, 150)}`); + } + } +} +/** + * Extract text from OpenCode message parts by type (e.g. 'text', 'reasoning'), joined with separator. + */ +function extractPartsByType(parts, type, joinWith) { + if (!Array.isArray(parts)) + return ''; + return parts + .filter((p) => p?.type === type && typeof p.text === 'string') + .map((p) => p.text) + .join(joinWith) + .trim(); +} +const OPENCODE_RESPONSE_LOG_MAX_LEN = 80000; /** Parse response as JSON; on empty or invalid body throw a clear error with context. */ async function parseJsonResponse(res, context) { const raw = await res.text(); @@ -43975,29 +44147,33 @@ async function parseJsonResponse(res, context) { throw err; } } -/** - * Extract plain text from OpenCode message response parts (type === 'text'). - */ +/** Extract plain text from OpenCode message response parts (type === 'text'). */ function extractTextFromParts(parts) { - if (!Array.isArray(parts)) - return ''; - return parts - .filter((p) => p?.type === 'text' && typeof p.text === 'string') - .map((p) => p.text) - .join(''); + return extractPartsByType(parts, 'text', ''); +} +/** Extract reasoning from OpenCode message parts (type === 'reasoning'). */ +function extractReasoningFromParts(parts) { + return extractPartsByType(parts, 'reasoning', '\n\n'); } +/** Max length of per-part text preview in debug log (to avoid huge log lines). 
*/ +const OPENCODE_PART_PREVIEW_LEN = 80; /** - * Extract reasoning text from OpenCode message response parts (type === 'reasoning'). - * Used to include the agent's full reasoning in comments (e.g. progress detection). + * Build a short summary of OpenCode message parts for debug logs (types, text lengths, and short preview). */ -function extractReasoningFromParts(parts) { - if (!Array.isArray(parts)) - return ''; - return parts - .filter((p) => p?.type === 'reasoning' && typeof p.text === 'string') - .map((p) => p.text) - .join('\n\n') - .trim(); +function summarizePartsForLog(parts, context) { + if (!Array.isArray(parts) || parts.length === 0) { + return `${context}: 0 parts`; + } + const items = parts.map((p, i) => { + const type = p?.type ?? '(missing type)'; + const text = typeof p?.text === 'string' ? p.text : ''; + const len = text.length; + const preview = len > OPENCODE_PART_PREVIEW_LEN + ? `${text.slice(0, OPENCODE_PART_PREVIEW_LEN).replace(/\n/g, ' ')}...` + : text.replace(/\n/g, ' '); + return `[${i}] type=${type} length=${len}${preview ? ` preview=${JSON.stringify(preview)}` : ''}`; + }); + return `${context}: ${parts.length} part(s) — ${items.join(' | ')}`; } /** Default OpenCode agent for analysis/planning (read-only, no file edits). */ exports.OPENCODE_AGENT_PLAN = 'plan'; @@ -44019,57 +44195,48 @@ exports.TRANSLATION_RESPONSE_SCHEMA = { required: ['translatedText'], additionalProperties: false, }; -/** - * OpenCode HTTP API: create session and send message, return assistant parts. - * Uses fetch to avoid ESM-only SDK with ncc. 
- */ -async function opencodePrompt(baseUrl, providerID, modelID, promptText) { - const base = ensureNoTrailingSlash(baseUrl); - const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); - const createRes = await fetch(`${base}/session`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ title: 'gbf' }), - signal, - }); - if (!createRes.ok) { - const err = await createRes.text(); - throw new Error(`OpenCode session create failed: ${createRes.status} ${err}`); - } - const session = await parseJsonResponse(createRes, 'OpenCode session.create'); - const sessionId = session?.id ?? session?.data?.id; - if (!sessionId) { - throw new Error('OpenCode session.create did not return session id'); - } - const messageRes = await fetch(`${base}/session/${sessionId}/message`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - model: { providerID, modelID }, - parts: [{ type: 'text', text: promptText }], - }), - signal, - }); - if (!messageRes.ok) { - const err = await messageRes.text(); - throw new Error(`OpenCode message failed: ${messageRes.status} ${err}`); - } - const messageData = await parseJsonResponse(messageRes, 'OpenCode message'); - const parts = messageData?.parts ?? messageData?.data?.parts ?? []; - return extractTextFromParts(parts); -} +/** JSON schema for Think (Q&A) responses: single answer field. */ +exports.THINK_RESPONSE_SCHEMA = { + type: 'object', + properties: { + answer: { + type: 'string', + description: 'The concise answer to the user question. Required.', + }, + }, + required: ['answer'], + additionalProperties: false, +}; +/** JSON schema for language check: done (already in locale) or must_translate. 
*/ +exports.LANGUAGE_CHECK_RESPONSE_SCHEMA = { + type: 'object', + properties: { + status: { + type: 'string', + enum: ['done', 'must_translate'], + description: 'done if text is in the requested locale, must_translate otherwise.', + }, + }, + required: ['status'], + additionalProperties: false, +}; /** * Send a message to an OpenCode agent (e.g. "plan", "build") and wait for the full response. - * The server runs the agent loop (tools, etc.) and returns when done. - * Use this to delegate PR description, progress, error detection, recommendations, or copilot (build) to OpenCode. + * Raw call: no retries. Callers (askAgent, copilotMessage) wrap in withOpenCodeRetry. */ -async function opencodeMessageWithAgent(baseUrl, options) { +async function opencodeMessageWithAgentRaw(baseUrl, options) { + (0, logger_1.logInfo)(`OpenCode request [agent ${options.agent}] model=${options.providerID}/${options.modelID} promptLength=${options.promptText.length}`); + (0, logger_1.logInfo)(`OpenCode sending prompt (preview): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_PREVIEW_LEN)}`); + (0, logger_1.logDebugInfo)(`OpenCode prompt (full): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_FULL_LEN)}`); + (0, logger_1.logDebugInfo)(`OpenCode message body: agent=${options.agent}, model=${options.providerID}/${options.modelID}, parts[0].text length=${options.promptText.length}`); const base = ensureNoTrailingSlash(baseUrl); const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); + const sessionBody = { title: 'gbf' }; + (0, logger_1.logDebugInfo)(`OpenCode session create body: ${JSON.stringify(sessionBody)}`); const createRes = await fetch(`${base}/session`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ title: 'gbf' }), + body: JSON.stringify(sessionBody), signal, }); if (!createRes.ok) { @@ -44086,6 +44253,9 @@ async function opencodeMessageWithAgent(baseUrl, options) { model: { providerID: options.providerID, 
modelID: options.modelID }, parts: [{ type: 'text', text: options.promptText }], }; + (0, logger_1.logDebugInfo)(`OpenCode POST /session/${sessionId}/message body (keys): agent, model, parts (${body.parts.length} part(s))`); + const timeoutMin = Math.round(constants_1.OPENCODE_REQUEST_TIMEOUT_MS / 60000); + (0, logger_1.logInfo)(`OpenCode: waiting for agent "${options.agent}" message response (client timeout: ${timeoutMin} min)...`); const messageRes = await fetch(`${base}/session/${sessionId}/message`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, @@ -44098,139 +44268,117 @@ async function opencodeMessageWithAgent(baseUrl, options) { } const messageData = await parseJsonResponse(messageRes, `OpenCode agent "${options.agent}" message`); const parts = messageData?.parts ?? messageData?.data?.parts ?? []; - const text = extractTextFromParts(parts); - return { text, parts, sessionId }; + const partsArray = Array.isArray(parts) ? parts : []; + (0, logger_1.logDebugInfo)(summarizePartsForLog(partsArray, `OpenCode agent "${options.agent}" message parts`)); + const text = extractTextFromParts(partsArray); + (0, logger_1.logInfo)(`OpenCode response [agent ${options.agent}] responseLength=${text.length} sessionId=${sessionId}`); + return { text, parts: partsArray, sessionId }; } /** * Get the diff for an OpenCode session (files changed by the agent). * Call after opencodeMessageWithAgent when using the "build" agent so the user can see what was edited. + * Wrapped with retries (OPENCODE_MAX_RETRIES). 
*/ async function getSessionDiff(baseUrl, sessionId) { - const base = ensureNoTrailingSlash(baseUrl); - const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); - const res = await fetch(`${base}/session/${sessionId}/diff`, { method: 'GET', signal }); - if (!res.ok) - return []; - const raw = await res.text(); - if (!raw?.trim()) - return []; - let data; - try { - data = JSON.parse(raw); - } - catch { - return []; - } - if (Array.isArray(data)) - return data; - if (Array.isArray(data.data)) - return data.data; - return []; + return withOpenCodeRetry(async () => { + (0, logger_1.logInfo)(`OpenCode request [session diff] sessionId=${sessionId}`); + const base = ensureNoTrailingSlash(baseUrl); + const signal = createTimeoutSignal(constants_1.OPENCODE_REQUEST_TIMEOUT_MS); + const res = await fetch(`${base}/session/${sessionId}/diff`, { method: 'GET', signal }); + if (!res.ok) { + (0, logger_1.logInfo)(`OpenCode response [session diff] fileCount=0 (status ${res.status})`); + return []; + } + const raw = await res.text(); + if (!raw?.trim()) { + (0, logger_1.logInfo)('OpenCode response [session diff] fileCount=0 (empty body)'); + return []; + } + let data; + try { + data = JSON.parse(raw); + } + catch { + (0, logger_1.logInfo)('OpenCode response [session diff] fileCount=0 (invalid JSON)'); + return []; + } + const list = Array.isArray(data) + ? data + : Array.isArray(data.data) + ? 
data.data + : []; + (0, logger_1.logInfo)(`OpenCode response [session diff] fileCount=${list.length}`); + return list; + }, 'session diff'); } class AiRepository { constructor() { - this.ask = async (ai, prompt) => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); - return undefined; - } - try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - const text = await opencodePrompt(serverUrl, providerID, modelID, prompt); - return text || undefined; - } - catch (error) { - (0, logger_1.logError)(`Error querying OpenCode (${model}): ${error}`); - return undefined; - } - }; /** - * Ask an OpenCode agent (e.g. Plan) to perform a task. The server runs the full agent loop. - * Returns the final message (including reasoning in parts when includeReasoning is true). - * @param ai - AI config (server URL, model) - * @param agentId - OpenCode agent id (e.g. OPENCODE_AGENT_PLAN) - * @param prompt - User prompt - * @param options - expectJson, schema, includeReasoning - * @returns Response text, or parsed JSON when expectJson is true + * Ask an OpenCode agent (e.g. Plan) to perform a task. All calls use strict response (expectJson + schema). + * Single retry system: HTTP failures and parse failures both retry up to OPENCODE_MAX_RETRIES. */ this.askAgent = async (ai, agentId, prompt, options = {}) => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); + const config = getValidatedOpenCodeConfig(ai); + if (!config) return undefined; - } + const { serverUrl, providerID, modelID, model } = config; + const schemaName = options.schemaName ?? 'response'; + const promptText = options.expectJson && options.schema + ? 
`Respond with a single JSON object that strictly conforms to this schema (name: ${schemaName}). No other text or markdown.\n\nSchema: ${JSON.stringify(options.schema)}\n\nUser request:\n${prompt}` + : prompt; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - let promptText = prompt; - if (options.expectJson && options.schema) { - const schemaName = options.schemaName ?? 'response'; - promptText = `Respond with a single JSON object that strictly conforms to this schema (name: ${schemaName}). No other text or markdown.\n\nSchema: ${JSON.stringify(options.schema)}\n\nUser request:\n${prompt}`; - } - const { text, parts } = await opencodeMessageWithAgent(serverUrl, { - providerID, - modelID, - agent: agentId, - promptText, - }); - if (!text) - return undefined; - const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : ''; - if (options.expectJson) { - const cleaned = text.replace(/^```json\n?/, '').replace(/\n?```$/, '').trim(); - const parsed = JSON.parse(cleaned); - if (options.includeReasoning && reasoning) { - return { ...parsed, reasoning }; + return await withOpenCodeRetry(async () => { + const { text, parts } = await opencodeMessageWithAgentRaw(serverUrl, { + providerID, + modelID, + agent: agentId, + promptText, + }); + if (!text) + throw new Error('Empty response text'); + const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : ''; + if (options.expectJson && options.schema) { + const maxLogLen = 5000000; + const toLog = text.length > maxLogLen ? `${text.slice(0, maxLogLen)}\n... 
[truncated, total ${text.length} chars]` : text; + (0, logger_1.logInfo)(`OpenCode agent response (full text, expectJson=true) length=${text.length}:\n${toLog}`); + const parsed = parseJsonFromAgentText(text); + if (options.includeReasoning && reasoning) { + return { ...parsed, reasoning }; + } + return parsed; } - return parsed; - } - return text; + return text; + }, `agent ${agentId}`); } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); - const errWithCause = err; - const cause = errWithCause.cause instanceof Error - ? errWithCause.cause.message - : errWithCause.cause != null - ? String(errWithCause.cause) - : ''; - const detail = cause ? ` (${cause})` : ''; + const cause = err instanceof Error && err.cause; + const detail = cause != null ? ` (${cause instanceof Error ? cause.message : String(cause)})` : ''; (0, logger_1.logError)(`Error querying OpenCode agent ${agentId} (${model}): ${err.message}${detail}`); return undefined; } }; /** * Run the OpenCode "build" agent for the copilot command. Returns the final message and sessionId. + * Uses the same retry system (OPENCODE_MAX_RETRIES). 
*/ this.copilotMessage = async (ai, prompt) => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - (0, logger_1.logError)('Missing required AI configuration: opencode-server-url and opencode-model'); + const config = getValidatedOpenCodeConfig(ai); + if (!config) return undefined; - } + const { serverUrl, providerID, modelID, model } = config; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - const result = await opencodeMessageWithAgent(serverUrl, { + const result = await withOpenCodeRetry(() => opencodeMessageWithAgentRaw(serverUrl, { providerID, modelID, agent: exports.OPENCODE_AGENT_BUILD, promptText: prompt, - }); + }), `agent ${exports.OPENCODE_AGENT_BUILD}`); return { text: result.text, sessionId: result.sessionId }; } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); - const errWithCause = err; - const cause = errWithCause.cause instanceof Error - ? errWithCause.cause.message - : errWithCause.cause != null - ? String(errWithCause.cause) - : ''; - const detail = cause ? ` (${cause})` : ''; - (0, logger_1.logError)(`Error querying OpenCode build agent (${model}): ${err.message}${detail}`); + (0, logger_1.logError)(`Error querying OpenCode build agent (${model}): ${err.message}`); return undefined; } }; @@ -44457,7 +44605,7 @@ class BranchRepository { else { baseBranchName = hotfixBranch ?? developmentBranch; } - if (!isRenamingBranch) { + if (!isRenamingBranch || param.currentConfiguration.parentBranch === undefined) { param.currentConfiguration.parentBranch = baseBranchName; } (0, logger_1.logDebugInfo)(`============================================================================================`); @@ -45348,6 +45496,30 @@ class IssueRepository { }); (0, logger_1.logDebugInfo)(`Comment ${commentId} updated in Issue ${issueNumber}.`); }; + /** + * Lists all comments on an issue (for bugbot: find existing findings by marker). 
+ * Uses pagination to fetch every comment (default API returns only 30 per page). + */ + this.listIssueComments = async (owner, repository, issueNumber, token) => { + const octokit = github.getOctokit(token); + const all = []; + for await (const response of octokit.paginate.iterator(octokit.rest.issues.listComments, { + owner, + repo: repository, + issue_number: issueNumber, + per_page: 100, + })) { + const data = response.data || []; + for (const c of data) { + all.push({ + id: c.id, + body: c.body ?? null, + user: c.user, + }); + } + } + return all; + }; this.closeIssue = async (owner, repository, issueNumber, token) => { const octokit = github.getOctokit(token); const { data: issue } = await octokit.rest.issues.get({ @@ -46527,15 +46699,35 @@ class PullRequestRepository { }); (0, logger_1.logDebugInfo)(`Updated PR #${pullRequestNumber} description with: ${description}`); }; + /** + * Returns all users involved in review: requested (pending) + those who already submitted a review. + * Used to avoid re-requesting someone who already reviewed when ensuring desired reviewer count. 
+ */ this.getCurrentReviewers = async (owner, repository, pullNumber, token) => { const octokit = github.getOctokit(token); try { - const { data } = await octokit.rest.pulls.listRequestedReviewers({ - owner, - repo: repository, - pull_number: pullNumber, - }); - return data.users.map((user) => user.login); + const [requestedRes, reviewsRes] = await Promise.all([ + octokit.rest.pulls.listRequestedReviewers({ + owner, + repo: repository, + pull_number: pullNumber, + }), + octokit.rest.pulls.listReviews({ + owner, + repo: repository, + pull_number: pullNumber, + }), + ]); + const logins = new Set(); + for (const user of requestedRes.data.users) { + logins.add(user.login); + } + for (const review of reviewsRes.data) { + if (review.user?.login) { + logins.add(review.user.login); + } + } + return Array.from(logins); } catch (error) { (0, logger_1.logError)(`Error getting reviewers of PR: ${error}.`); @@ -46581,6 +46773,30 @@ class PullRequestRepository { return []; } }; + /** + * Returns for each changed file the first line number that appears in the diff (right side). + * Used so review comments use a line that GitHub can resolve (avoids "line could not be resolved"). + */ + this.getFilesWithFirstDiffLine = async (owner, repository, pullNumber, token) => { + const octokit = github.getOctokit(token); + try { + const { data } = await octokit.rest.pulls.listFiles({ + owner, + repo: repository, + pull_number: pullNumber, + }); + return (data || []) + .filter((f) => f.status !== 'removed' && (f.patch ?? '').length > 0) + .map((f) => { + const firstLine = PullRequestRepository.firstLineFromPatch(f.patch ?? ''); + return { path: f.filename, firstLine: firstLine ?? 
1 }; + }); + } + catch (error) { + (0, logger_1.logError)(`Error getting files with diff lines (owner=${owner}, repo=${repository}, pullNumber=${pullNumber}): ${error}.`); + return []; + } + }; this.getPullRequestChanges = async (owner, repository, pullNumber, token) => { const octokit = github.getOctokit(token); const allFiles = []; @@ -46607,6 +46823,178 @@ class PullRequestRepository { return []; } }; + /** Head commit SHA of the PR (for creating review). */ + this.getPullRequestHeadSha = async (owner, repository, pullNumber, token) => { + const octokit = github.getOctokit(token); + try { + const { data } = await octokit.rest.pulls.get({ + owner, + repo: repository, + pull_number: pullNumber, + }); + return data.head?.sha; + } + catch (error) { + (0, logger_1.logError)(`Error getting PR head SHA: ${error}.`); + return undefined; + } + }; + /** + * List all review comments on a PR (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). + * Includes node_id for GraphQL (e.g. resolve review thread). + */ + this.listPullRequestReviewComments = async (owner, repository, pullNumber, token) => { + const octokit = github.getOctokit(token); + const all = []; + try { + for await (const response of octokit.paginate.iterator(octokit.rest.pulls.listReviewComments, { + owner, + repo: repository, + pull_number: pullNumber, + per_page: 100, + })) { + const data = response.data || []; + all.push(...data.map((c) => ({ + id: c.id, + body: c.body ?? null, + path: c.path, + line: c.line ?? undefined, + node_id: c.node_id ?? undefined, + }))); + } + return all; + } + catch (error) { + (0, logger_1.logError)(`Error listing PR review comments (owner=${owner}, repo=${repository}, pullNumber=${pullNumber}): ${error}.`); + return []; + } + }; + /** + * Resolve a PR review thread (GraphQL only). Finds the thread that contains the given comment and marks it resolved. 
+ * Uses repository.pullRequest.reviewThreads because the field pullRequestReviewThread on PullRequestReviewComment was removed from the API. + * Paginates through all threads and all comments in each thread so the comment is found regardless of PR size. + * No-op if thread is already resolved. Logs and does not throw on error. + */ + this.resolvePullRequestReviewThread = async (owner, repository, pullNumber, commentNodeId, token) => { + const octokit = github.getOctokit(token); + try { + let threadId = null; + let threadsCursor = null; + outer: do { + const threadsData = await octokit.graphql(`query ($owner: String!, $repo: String!, $prNumber: Int!, $threadsAfter: String) { + repository(owner: $owner, name: $repo) { + pullRequest(number: $prNumber) { + reviewThreads(first: 100, after: $threadsAfter) { + nodes { + id + comments(first: 100) { + nodes { id } + pageInfo { hasNextPage endCursor } + } + } + pageInfo { hasNextPage endCursor } + } + } + } + }`, { owner, repo: repository, prNumber: pullNumber, threadsAfter: threadsCursor }); + const threads = threadsData?.repository?.pullRequest?.reviewThreads; + if (!threads?.nodes?.length) + break; + for (const thread of threads.nodes) { + let commentsCursor = null; + let commentNodes = thread.comments?.nodes ?? []; + let commentsPageInfo = thread.comments?.pageInfo; + do { + if (commentNodes.some((c) => c.id === commentNodeId)) { + threadId = thread.id; + break outer; + } + if (!commentsPageInfo?.hasNextPage || commentsPageInfo.endCursor == null) + break; + commentsCursor = commentsPageInfo.endCursor; + const nextComments = await octokit.graphql(`query ($threadId: ID!, $commentsAfter: String) { + node(id: $threadId) { + ... on PullRequestReviewThread { + comments(first: 100, after: $commentsAfter) { + nodes { id } + pageInfo { hasNextPage endCursor } + } + } + } + }`, { threadId: thread.id, commentsAfter: commentsCursor }); + commentNodes = nextComments?.node?.comments?.nodes ?? 
[]; + commentsPageInfo = nextComments?.node?.comments?.pageInfo ?? { hasNextPage: false, endCursor: null }; + } while (commentsPageInfo?.hasNextPage === true && commentsPageInfo?.endCursor != null); + } + const pageInfo = threads.pageInfo; + if (threadId != null || !pageInfo?.hasNextPage) + break; + threadsCursor = pageInfo.endCursor ?? null; + } while (threadsCursor != null); + if (!threadId) { + (0, logger_1.logError)(`[Bugbot] No review thread found for comment node_id=${commentNodeId}.`); + return; + } + await octokit.graphql(`mutation ($threadId: ID!) { + resolveReviewThread(input: { threadId: $threadId }) { + thread { id } + } + }`, { threadId }); + (0, logger_1.logDebugInfo)(`Resolved PR review thread ${threadId}.`); + } + catch (err) { + (0, logger_1.logError)(`[Bugbot] Error resolving PR review thread (commentNodeId=${commentNodeId}, owner=${owner}, repo=${repository}): ${err}`); + } + }; + /** + * Create a review on the PR with one or more inline comments (bugbot findings). + * Each comment requires path and line (use first file and line 1 if not specified). + */ + this.createReviewWithComments = async (owner, repository, pullNumber, commitId, comments, token) => { + if (comments.length === 0) + return; + const octokit = github.getOctokit(token); + const results = await Promise.allSettled(comments.map((c) => octokit.rest.pulls.createReviewComment({ + owner, + repo: repository, + pull_number: pullNumber, + commit_id: commitId, + path: c.path, + line: c.line, + side: 'RIGHT', + body: c.body, + }))); + let created = 0; + results.forEach((result, i) => { + if (result.status === 'fulfilled') { + created += 1; + } + else { + const c = comments[i]; + (0, logger_1.logError)(`[Bugbot] Error creating PR review comment. 
path="${c.path}", line=${c.line}, prNumber=${pullNumber}, owner=${owner}, repo=${repository}: ${result.reason}`); + } + }); + if (created > 0) { + (0, logger_1.logDebugInfo)(`Created ${created} review comment(s) on PR #${pullNumber}.`); + } + }; + /** Update an existing PR review comment (e.g. to mark finding as resolved in body). */ + this.updatePullRequestReviewComment = async (owner, repository, commentId, body, token) => { + const octokit = github.getOctokit(token); + await octokit.rest.pulls.updateReviewComment({ + owner, + repo: repository, + comment_id: commentId, + body, + }); + (0, logger_1.logDebugInfo)(`Updated review comment ${commentId}.`); + }; + } + /** First line (right side) of the first hunk per file, for valid review comment placement. */ + static firstLineFromPatch(patch) { + const match = patch.match(/^@@ -\d+,\d+ \+(\d+),\d+ @@/m); + return match ? parseInt(match[1], 10) : undefined; } } exports.PullRequestRepository = PullRequestRepository; @@ -46879,12 +47267,46 @@ exports.ConfigurationHandler = void 0; const config_1 = __nccwpck_require__(1106); const logger_1 = __nccwpck_require__(8836); const issue_content_interface_1 = __nccwpck_require__(9913); +/** Keys that must be preserved from stored config when current has undefined (e.g. when branch already existed). 
*/ +const CONFIG_KEYS_TO_PRESERVE = [ + 'parentBranch', + 'workingBranch', + 'releaseBranch', + 'hotfixBranch', + 'hotfixOriginBranch', + 'branchType', +]; class ConfigurationHandler extends issue_content_interface_1.IssueContentInterface { constructor() { super(...arguments); this.update = async (execution) => { try { - return await this.internalUpdate(execution, JSON.stringify(execution.currentConfiguration, null, 4)); + const current = execution.currentConfiguration; + const payload = { + branchType: current.branchType, + releaseBranch: current.releaseBranch, + workingBranch: current.workingBranch, + parentBranch: current.parentBranch, + hotfixOriginBranch: current.hotfixOriginBranch, + hotfixBranch: current.hotfixBranch, + results: current.results, + branchConfiguration: current.branchConfiguration, + }; + const storedRaw = await this.internalGetter(execution); + if (storedRaw != null && storedRaw.trim().length > 0) { + try { + const stored = JSON.parse(storedRaw); + for (const key of CONFIG_KEYS_TO_PRESERVE) { + if (payload[key] === undefined && stored[key] !== undefined) { + payload[key] = stored[key]; + } + } + } + catch { + /* ignore parse errors, save current as-is */ + } + } + return await this.internalUpdate(execution, JSON.stringify(payload, null, 4)); } catch (error) { (0, logger_1.logError)(`Error updating issue description: ${error}`); @@ -46941,7 +47363,6 @@ const PROGRESS_RESPONSE_SCHEMA = { required: ['progress', 'summary'], additionalProperties: false, }; -const MAX_PROGRESS_ATTEMPTS = 3; class CheckProgressUseCase { constructor() { this.taskId = 'CheckProgressUseCase'; @@ -47037,28 +47458,18 @@ class CheckProgressUseCase { const developmentBranch = param.branches.development || 'develop'; (0, logger_1.logInfo)(`📦 Progress will be assessed from workspace diff: base branch "${developmentBranch}", current branch "${branch}" (OpenCode agent will run git diff).`); const prompt = this.buildProgressPrompt(issueNumber, issueDescription, branch, 
developmentBranch); - let progress = 0; - let summary = 'Unable to determine progress.'; - let reasoning = ''; - let remaining = ''; - for (let attempt = 1; attempt <= MAX_PROGRESS_ATTEMPTS; attempt++) { - (0, logger_1.logInfo)(`🤖 Analyzing progress using OpenCode Plan agent... (attempt ${attempt}/${MAX_PROGRESS_ATTEMPTS})`); - const attemptResult = await this.fetchProgressAttempt(param.ai, prompt); - progress = attemptResult.progress; - summary = attemptResult.summary; - reasoning = attemptResult.reasoning; - remaining = attemptResult.remaining; - if (progress > 0) { - (0, logger_1.logInfo)(`✅ Progress detection completed: ${progress}%`); - break; - } - if (attempt < MAX_PROGRESS_ATTEMPTS) { - (0, logger_1.logInfo)(`⚠️ Progress returned 0% (attempt ${attempt}/${MAX_PROGRESS_ATTEMPTS}), retrying...`); - } - } - const progressFailedAfterRetries = progress === 0; - if (progressFailedAfterRetries) { - (0, logger_1.logError)(`Progress detection failed: received 0% after ${MAX_PROGRESS_ATTEMPTS} attempts. This may be due to a model error.`); + (0, logger_1.logInfo)('🤖 Analyzing progress using OpenCode Plan agent...'); + const attemptResult = await this.fetchProgressAttempt(param.ai, prompt); + const progress = attemptResult.progress; + const summary = attemptResult.summary; + const reasoning = attemptResult.reasoning; + const remaining = attemptResult.remaining; + if (progress > 0) { + (0, logger_1.logInfo)(`✅ Progress detection completed: ${progress}%`); + } + const progressFailed = progress === 0; + if (progressFailed) { + (0, logger_1.logError)('Progress detection returned 0%. This may be due to a model error or no changes detected.'); results.push(new result_1.Result({ id: this.taskId, success: false, @@ -47068,7 +47479,7 @@ class CheckProgressUseCase { summary, ], errors: [ - `Progress detection failed: received 0% after ${MAX_PROGRESS_ATTEMPTS} attempts. This may be due to a model error. 
There are changes on the branch; consider re-running the check.`, + 'Progress detection returned 0%. This may be due to a model error or no changes detected. Consider re-running the check.', ], payload: { progress: 0, @@ -47139,7 +47550,7 @@ class CheckProgressUseCase { } /** * Calls the OpenCode agent once and returns parsed progress, summary, and reasoning. - * Used inside the retry loop when progress is 0%. + * HTTP-level retries are handled by AiRepository (OPENCODE_MAX_RETRIES). */ async fetchProgressAttempt(ai, prompt) { const agentResponse = await this.aiRepository.askAgent(ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { @@ -47438,17 +47849,63 @@ class DeployedActionUseCase { `Label \`${param.labels.deployed}\` added after a success deploy.`, ], })); + const mergeResults = []; if (param.currentConfiguration.releaseBranch) { const mergeToDefaultResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.currentConfiguration.releaseBranch, param.branches.defaultBranch, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDefaultResult); + mergeResults.push(...mergeToDefaultResult); const mergeToDevelopResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.currentConfiguration.releaseBranch, param.branches.development, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDevelopResult); + mergeResults.push(...mergeToDevelopResult); } else if (param.currentConfiguration.hotfixBranch) { const mergeToDefaultResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.currentConfiguration.hotfixBranch, param.branches.defaultBranch, param.pullRequest.mergeTimeout, param.tokens.token); result.push(...mergeToDefaultResult); + mergeResults.push(...mergeToDefaultResult); const mergeToDevelopResult = await this.branchRepository.mergeBranch(param.owner, param.repo, param.branches.defaultBranch, param.branches.development, param.pullRequest.mergeTimeout, 
param.tokens.token); result.push(...mergeToDevelopResult); + mergeResults.push(...mergeToDevelopResult); + } + const mergesAttempted = mergeResults.length > 0; + const allMergesSucceeded = mergesAttempted && mergeResults.every((r) => r.success); + if (allMergesSucceeded) { + const issueNumber = Number(param.singleAction.issue); + const closed = await this.issueRepository.closeIssue(param.owner, param.repo, issueNumber, param.tokens.token); + if (closed) { + (0, logger_1.logDebugInfo)(`Issue #${issueNumber} closed after merges to default and develop.`); + result.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: true, + steps: [ + `Issue #${issueNumber} closed after merge to \`${param.branches.defaultBranch}\` and \`${param.branches.development}\`.`, + ], + })); + } + } + else { + if (mergesAttempted) { + (0, logger_1.logDebugInfo)(`Skipping issue close: one or more merges failed. Issue #${param.singleAction.issue} remains open.`); + result.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Issue #${param.singleAction.issue} was not closed because one or more merge operations failed.`, + ], + })); + } + else { + (0, logger_1.logDebugInfo)(`Skipping issue close: no release or hotfix branch configured. 
Issue #${param.singleAction.issue} remains open.`); + result.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Issue #${param.singleAction.issue} was not closed because no release or hotfix branch was configured (no merge operations were performed).`, + ], + })); + } } return result; } @@ -47468,116 +47925,6 @@ class DeployedActionUseCase { exports.DeployedActionUseCase = DeployedActionUseCase; -/***/ }), - -/***/ 938: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { - -"use strict"; - -Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.DetectErrorsUseCase = void 0; -const result_1 = __nccwpck_require__(7305); -const logger_1 = __nccwpck_require__(8836); -const issue_repository_1 = __nccwpck_require__(57); -const branch_repository_1 = __nccwpck_require__(7701); -const ai_repository_1 = __nccwpck_require__(8307); -class DetectErrorsUseCase { - constructor() { - this.taskId = 'DetectErrorsUseCase'; - this.issueRepository = new issue_repository_1.IssueRepository(); - this.branchRepository = new branch_repository_1.BranchRepository(); - this.aiRepository = new ai_repository_1.AiRepository(); - } - async invoke(param) { - (0, logger_1.logInfo)(`Executing ${this.taskId}.`); - const results = []; - try { - if (!param.ai?.getOpencodeModel() || !param.ai?.getOpencodeServerUrl()) { - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: ['Missing OPENCODE_SERVER_URL and OPENCODE_MODEL.'], - })); - return results; - } - const issueNumber = param.issueNumber; - if (issueNumber === -1) { - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: ['Issue number not found.'], - })); - return results; - } - let branch = param.commit.branch; - if (!branch) { - const branchTypes = [ - param.branches.featureTree, - param.branches.bugfixTree, - param.branches.docsTree, - param.branches.choreTree, - ]; - const 
branches = await this.branchRepository.getListOfBranches(param.owner, param.repo, param.tokens.token); - for (const type of branchTypes) { - const prefix = `${type}/${issueNumber}-`; - const found = branches.find((b) => b.indexOf(prefix) > -1); - if (found) { - branch = found; - break; - } - } - } - const developmentBranch = param.branches.development || 'develop'; - if (!branch) { - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: [`No branch found for issue #${issueNumber}.`], - })); - return results; - } - const changes = await this.branchRepository.getChanges(param.owner, param.repo, branch, developmentBranch, param.tokens.token); - const prompt = `Review the code changes in branch "${branch}" compared to "${developmentBranch}" and identify potential errors, bugs, or issues. - -**Changed files and patches:** -${changes.files - .slice(0, 30) - .map((f) => `### ${f.filename} (${f.status})\n\`\`\`diff\n${(f.patch ?? '').slice(0, 1500)}\n\`\`\``) - .join('\n\n')} - -List potential errors, bugs, or code quality issues. For each: file (if relevant), brief description, and severity if obvious. Use clear bullet points or numbered list.`; - (0, logger_1.logInfo)(`🤖 Detecting errors using OpenCode Plan agent...`); - const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt); - const report = typeof response === 'string' - ? 
response - : (response && String(response.report)) || 'No response.'; - results.push(new result_1.Result({ - id: this.taskId, - success: true, - executed: true, - steps: ['Error detection completed (OpenCode Plan agent).', report], - payload: { issueNumber, branch, developmentBranch, report }, - })); - } - catch (error) { - (0, logger_1.logError)(`Error in ${this.taskId}: ${error}`); - results.push(new result_1.Result({ - id: this.taskId, - success: false, - executed: true, - errors: [`Error in ${this.taskId}: ${error}`], - })); - } - return results; - } -} -exports.DetectErrorsUseCase = DetectErrorsUseCase; - - /***/ }), /***/ 3943: @@ -47901,6 +48248,7 @@ const logger_1 = __nccwpck_require__(8836); const check_progress_use_case_1 = __nccwpck_require__(7744); const notify_new_commit_on_issue_use_case_1 = __nccwpck_require__(8020); const check_changes_issue_size_use_case_1 = __nccwpck_require__(5863); +const detect_potential_problems_use_case_1 = __nccwpck_require__(7395); class CommitUseCase { constructor() { this.taskId = 'CommitUseCase'; @@ -47919,6 +48267,7 @@ class CommitUseCase { results.push(...(await new notify_new_commit_on_issue_use_case_1.NotifyNewCommitOnIssueUseCase().invoke(param))); results.push(...(await new check_changes_issue_size_use_case_1.CheckChangesIssueSizeUseCase().invoke(param))); results.push(...(await new check_progress_use_case_1.CheckProgressUseCase().invoke(param))); + results.push(...(await new detect_potential_problems_use_case_1.DetectPotentialProblemsUseCase().invoke(param))); } catch (error) { (0, logger_1.logError)(error); @@ -48198,8 +48547,8 @@ const create_tag_use_case_1 = __nccwpck_require__(5279); const think_use_case_1 = __nccwpck_require__(3841); const initial_setup_use_case_1 = __nccwpck_require__(3943); const check_progress_use_case_1 = __nccwpck_require__(7744); -const detect_errors_use_case_1 = __nccwpck_require__(938); const recommend_steps_use_case_1 = __nccwpck_require__(3538); +const 
detect_potential_problems_use_case_1 = __nccwpck_require__(7395); class SingleActionUseCase { constructor() { this.taskId = 'SingleActionUseCase'; @@ -48233,8 +48582,8 @@ class SingleActionUseCase { else if (param.singleAction.isCheckProgressAction) { results.push(...await new check_progress_use_case_1.CheckProgressUseCase().invoke(param)); } - else if (param.singleAction.isDetectErrorsAction) { - results.push(...await new detect_errors_use_case_1.DetectErrorsUseCase().invoke(param)); + else if (param.singleAction.isDetectPotentialProblemsAction) { + results.push(...await new detect_potential_problems_use_case_1.DetectPotentialProblemsUseCase().invoke(param)); } else if (param.singleAction.isRecommendStepsAction) { results.push(...await new recommend_steps_use_case_1.RecommendStepsUseCase().invoke(param)); @@ -48258,6 +48607,625 @@ class SingleActionUseCase { exports.SingleActionUseCase = SingleActionUseCase; +/***/ }), + +/***/ 6339: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.buildBugbotPrompt = buildBugbotPrompt; +function buildBugbotPrompt(param, context) { + const headBranch = param.commit.branch; + const baseBranch = param.currentConfiguration.parentBranch ?? param.branches.development ?? 'develop'; + const previousBlock = context.previousFindingsBlock; + const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? []; + const ignoreBlock = ignorePatterns.length > 0 + ? `\n**Files to ignore:** Do not report findings in files or paths matching these patterns: ${ignorePatterns.join(', ')}.` + : ''; + return `You are analyzing the latest code changes for potential bugs and issues. 
+ +**Repository context:** +- Owner: ${param.owner} +- Repository: ${param.repo} +- Branch (head): ${headBranch} +- Base branch: ${baseBranch} +- Issue number: ${param.issueNumber} +${ignoreBlock} + +**Your task 1 (new/current problems):** Determine what has changed in the branch "${headBranch}" compared to "${baseBranch}" (you must compute or obtain the diff yourself using the repository context above). Then identify potential bugs, logic errors, security issues, and code quality problems. Be strict and descriptive. One finding per distinct problem. Return them in the \`findings\` array (each with id, title, description; optionally file, line, severity, suggestion). Only include findings in files that are not in the ignore list above. +${previousBlock} + +**Output:** Return a JSON object with: "findings" (array of new/current problems from task 1), and if we gave you previously reported issues above, "resolved_finding_ids" (array of those ids that are now fixed or no longer apply, as per task 2).`; +} + + +/***/ }), + +/***/ 7384: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.deduplicateFindings = deduplicateFindings; +/** + * Deduplicates findings by (file, line). When two findings share the same file and line, + * keeps the first; when they have no file, groups by normalized title and keeps the first. + * This reduces noise when the agent returns near-duplicate issues. + */ +function deduplicateFindings(findings) { + const seen = new Set(); + const result = []; + for (const f of findings) { + const file = f.file?.trim() ?? ''; + const line = f.line ?? 0; + const key = file || line + ? `${file}:${line}` + : `title:${(f.title ?? 
'').toLowerCase().trim().slice(0, 80)}`; + if (seen.has(key)) + continue; + seen.add(key); + result.push(f); + } + return result; +} + + +/***/ }), + +/***/ 3770: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.fileMatchesIgnorePatterns = fileMatchesIgnorePatterns; +/** + * Returns true if the file path matches any of the ignore patterns (glob-style). + * Used to exclude findings in test files, build output, etc. + */ +function fileMatchesIgnorePatterns(filePath, ignorePatterns) { + if (!filePath || ignorePatterns.length === 0) + return false; + const normalized = filePath.trim(); + if (!normalized) + return false; + return ignorePatterns.some((pattern) => { + const p = pattern.trim(); + if (!p) + return false; + const regexPattern = p + .replace(/[.+?^${}()|[\]\\]/g, '\\$&') + .replace(/\*/g, '.*') + .replace(/\//g, '\\/'); + const regex = p.endsWith('/*') + ? new RegExp(`^${regexPattern.replace(/\\\/\.\*$/, '(\\/.*)?')}$`) + : new RegExp(`^${regexPattern}$`); + return regex.test(normalized); + }); +} + + +/***/ }), + +/***/ 9072: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.applyCommentLimit = applyCommentLimit; +const constants_1 = __nccwpck_require__(8593); +/** + * Applies the max-comments limit: returns the first N findings to publish individually, + * and overflow count + titles for a single "revisar en local" summary comment. 
+ */ +function applyCommentLimit(findings, maxComments = constants_1.BUGBOT_MAX_COMMENTS) { + if (findings.length <= maxComments) { + return { toPublish: findings, overflowCount: 0, overflowTitles: [] }; + } + const toPublish = findings.slice(0, maxComments); + const overflow = findings.slice(maxComments); + return { + toPublish, + overflowCount: overflow.length, + overflowTitles: overflow.map((f) => f.title?.trim() || f.id).filter(Boolean), + }; +} + + +/***/ }), + +/***/ 6319: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.loadBugbotContext = loadBugbotContext; +const issue_repository_1 = __nccwpck_require__(57); +const pull_request_repository_1 = __nccwpck_require__(634); +const marker_1 = __nccwpck_require__(2401); +function buildPreviousFindingsBlock(previousFindings) { + if (previousFindings.length === 0) + return ''; + const items = previousFindings + .map((p) => `---\n**Finding id (use this exact id in resolved_finding_ids if resolved/no longer applies):** \`${p.id.replace(/`/g, '\\`')}\`\n\n**Full comment as posted (including metadata at the end):**\n${p.fullBody}\n`) + .join('\n'); + return ` +**Previously reported issues (not yet marked resolved).** For each one we show the exact comment we posted (title, description, location, suggestion, and a hidden marker with the finding id at the end). + +${items} +**Your task 2:** For each finding above, analyze the current code and decide: +- If the problem **still exists** (same code or same issue present): do **not** include its id in \`resolved_finding_ids\`. +- If the problem **no longer applies** (e.g. that code was removed or refactored away): include its id in \`resolved_finding_ids\`. +- If the problem **has been fixed** (code was changed and the issue is resolved): include its id in \`resolved_finding_ids\`. 
+ +Return in \`resolved_finding_ids\` only the ids from the list above that are now fixed or no longer apply. Use the exact id shown in each "Finding id" line.`; +} +/** + * Loads all context needed for bugbot: existing findings from issue + PR comments, + * open PR numbers, and the prompt block for previously reported issues. + * Also loads PR context (head sha, files, diff lines) for the first open PR. + */ +async function loadBugbotContext(param) { + const issueNumber = param.issueNumber; + const headBranch = param.commit.branch; + const token = param.tokens.token; + const owner = param.owner; + const repo = param.repo; + const issueRepository = new issue_repository_1.IssueRepository(); + const pullRequestRepository = new pull_request_repository_1.PullRequestRepository(); + const issueComments = await issueRepository.listIssueComments(owner, repo, issueNumber, token); + const existingByFindingId = {}; + for (const c of issueComments) { + for (const { findingId, resolved } of (0, marker_1.parseMarker)(c.body)) { + if (!existingByFindingId[findingId]) { + existingByFindingId[findingId] = { issueCommentId: c.id, resolved }; + } + else { + existingByFindingId[findingId].issueCommentId = c.id; + existingByFindingId[findingId].resolved = resolved; + } + } + } + const openPrNumbers = await pullRequestRepository.getOpenPullRequestNumbersByHeadBranch(owner, repo, headBranch, token); + /** Full comment body per finding id (from PR when we don't have issue comment). */ + const prFindingIdToBody = {}; + for (const prNumber of openPrNumbers) { + const prComments = await pullRequestRepository.listPullRequestReviewComments(owner, repo, prNumber, token); + for (const c of prComments) { + const body = c.body ?? 
''; + for (const { findingId, resolved } of (0, marker_1.parseMarker)(body)) { + if (!existingByFindingId[findingId]) { + existingByFindingId[findingId] = { resolved }; + } + existingByFindingId[findingId].prCommentId = c.id; + existingByFindingId[findingId].prNumber = prNumber; + existingByFindingId[findingId].resolved = resolved; + prFindingIdToBody[findingId] = body; + } + } + } + /** Unresolved findings with full comment body (including hidden marker) for OpenCode to re-evaluate. */ + const previousFindingsForPrompt = []; + for (const [findingId, data] of Object.entries(existingByFindingId)) { + if (data.resolved) + continue; + const issueBody = issueComments.find((c) => c.id === data.issueCommentId)?.body ?? null; + const fullBody = (issueBody ?? prFindingIdToBody[findingId] ?? '').trim(); + if (fullBody) { + previousFindingsForPrompt.push({ id: findingId, fullBody }); + } + } + const previousFindingsBlock = buildPreviousFindingsBlock(previousFindingsForPrompt); + let prContext = null; + if (openPrNumbers.length > 0) { + const prHeadSha = await pullRequestRepository.getPullRequestHeadSha(owner, repo, openPrNumbers[0], token); + if (prHeadSha) { + const prFiles = await pullRequestRepository.getChangedFiles(owner, repo, openPrNumbers[0], token); + const filesWithLines = await pullRequestRepository.getFilesWithFirstDiffLine(owner, repo, openPrNumbers[0], token); + const pathToFirstDiffLine = {}; + for (const { path, firstLine } of filesWithLines) { + pathToFirstDiffLine[path] = firstLine; + } + prContext = { prHeadSha, prFiles, pathToFirstDiffLine }; + } + } + return { + existingByFindingId, + issueComments, + openPrNumbers, + previousFindingsBlock, + prContext, + }; +} + + +/***/ }), + +/***/ 61: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.markFindingsResolved = markFindingsResolved; +const issue_repository_1 = __nccwpck_require__(57); +const 
pull_request_repository_1 = __nccwpck_require__(634); +const logger_1 = __nccwpck_require__(8836); +const marker_1 = __nccwpck_require__(2401); +/** + * Marks as resolved the findings that OpenCode reported as fixed. + * Updates issue comments (with visible "Resolved" note) and PR review comments (marker only + resolve thread). + */ +async function markFindingsResolved(param) { + const { execution, context, resolvedFindingIds, normalizedResolvedIds } = param; + const { existingByFindingId, issueComments } = context; + const issueNumber = execution.issueNumber; + const token = execution.tokens.token; + const owner = execution.owner; + const repo = execution.repo; + const issueRepository = new issue_repository_1.IssueRepository(); + const pullRequestRepository = new pull_request_repository_1.PullRequestRepository(); + for (const [findingId, existing] of Object.entries(existingByFindingId)) { + const isResolvedByOpenCode = resolvedFindingIds.has(findingId) || + normalizedResolvedIds.has((0, marker_1.sanitizeFindingIdForMarker)(findingId)); + if (existing.resolved || !isResolvedByOpenCode) + continue; + const resolvedNote = '\n\n---\n**Resolved** (OpenCode confirmed fixed in latest analysis).\n'; + const markerTrue = (0, marker_1.buildMarker)(findingId, true); + const replacementWithNote = resolvedNote + markerTrue; + if (existing.issueCommentId != null) { + const comment = issueComments.find((c) => c.id === existing.issueCommentId); + if (comment == null) { + (0, logger_1.logError)(`[Bugbot] No se encontró el comentario de la issue para marcar como resuelto. findingId="${findingId}", issueCommentId=${existing.issueCommentId}, issueNumber=${issueNumber}, owner=${owner}, repo=${repo}.`); + } + else { + const resolvedBody = comment.body ?? 
''; + const { updated, replaced } = (0, marker_1.replaceMarkerInBody)(resolvedBody, findingId, true, replacementWithNote); + if (replaced) { + try { + await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, updated.trimEnd(), token); + (0, logger_1.logDebugInfo)(`Marked finding "${findingId}" as resolved on issue #${issueNumber} (comment ${existing.issueCommentId}).`); + } + catch (err) { + (0, logger_1.logError)(`[Bugbot] Error al actualizar comentario de la issue (marcar como resuelto). findingId="${findingId}", issueCommentId=${existing.issueCommentId}, issueNumber=${issueNumber}: ${err}`); + } + } + } + } + if (existing.prCommentId != null && existing.prNumber != null) { + const prCommentsList = await pullRequestRepository.listPullRequestReviewComments(owner, repo, existing.prNumber, token); + const prComment = prCommentsList.find((c) => c.id === existing.prCommentId); + if (prComment == null) { + (0, logger_1.logError)(`[Bugbot] No se encontró el comentario de la PR para marcar como resuelto. findingId="${findingId}", prCommentId=${existing.prCommentId}, prNumber=${existing.prNumber}, owner=${owner}, repo=${repo}.`); + } + else { + const prBody = prComment.body ?? ''; + const { updated, replaced } = (0, marker_1.replaceMarkerInBody)(prBody, findingId, true, markerTrue); + if (replaced) { + try { + await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, updated.trimEnd(), token); + (0, logger_1.logDebugInfo)(`Marked finding "${findingId}" as resolved on PR #${existing.prNumber} (review comment ${existing.prCommentId}).`); + if (prComment.node_id) { + await pullRequestRepository.resolvePullRequestReviewThread(owner, repo, existing.prNumber, prComment.node_id, token); + } + } + catch (err) { + (0, logger_1.logError)(`[Bugbot] Error al actualizar comentario de revisión de la PR (marcar como resuelto). 
findingId="${findingId}", prCommentId=${existing.prCommentId}, prNumber=${existing.prNumber}: ${err}`); + } + } + } + } + } +} + + +/***/ }), + +/***/ 2401: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.sanitizeFindingIdForMarker = sanitizeFindingIdForMarker; +exports.buildMarker = buildMarker; +exports.parseMarker = parseMarker; +exports.markerRegexForFinding = markerRegexForFinding; +exports.replaceMarkerInBody = replaceMarkerInBody; +exports.extractTitleFromBody = extractTitleFromBody; +exports.buildCommentBody = buildCommentBody; +const constants_1 = __nccwpck_require__(8593); +const logger_1 = __nccwpck_require__(8836); +/** Sanitize finding ID so it cannot break HTML comment syntax (e.g. -->, , newlines, quotes). */ +function sanitizeFindingIdForMarker(findingId) { + return findingId + .replace(/-->/g, '') + .replace(//g, '') + .replace(/"/g, '') + .replace(/\r\n|\r|\n/g, '') + .trim(); +} +function buildMarker(findingId, resolved) { + const safeId = sanitizeFindingIdForMarker(findingId); + return ``; +} +function parseMarker(body) { + if (!body) + return []; + const results = []; + const regex = new RegExp(``, 'g'); + let m; + while ((m = regex.exec(body)) !== null) { + results.push({ findingId: m[1], resolved: m[2] === 'true' }); + } + return results; +} +/** Regex to match the marker for a specific finding (same flexible format as parseMarker). */ +function markerRegexForFinding(findingId) { + const safeId = sanitizeFindingIdForMarker(findingId); + const escapedId = safeId.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + return new RegExp(``, 'g'); +} +/** + * Find the marker for this finding in body (using same pattern as parseMarker) and replace it. + * Returns the updated body and whether a replacement was made. Logs an error with details if no replacement occurred. 
+ */ +function replaceMarkerInBody(body, findingId, newResolved, replacement) { + const regex = markerRegexForFinding(findingId); + const newMarker = replacement ?? buildMarker(findingId, newResolved); + const updated = body.replace(regex, newMarker); + const replaced = updated !== body; + if (!replaced) { + (0, logger_1.logError)(`[Bugbot] No se pudo marcar como resuelto: no se encontró el marcador en el comentario. findingId="${findingId}", bodyLength=${body?.length ?? 0}, bodySnippet=${(body ?? '').slice(0, 200)}...`); + } + return { updated, replaced }; +} +/** Extract title from comment body (first ## line) for context when sending to OpenCode. */ +function extractTitleFromBody(body) { + if (!body) + return ''; + const match = body.match(/^##\s+(.+)$/m); + return (match?.[1] ?? '').trim(); +} +function buildCommentBody(finding, resolved) { + const severity = finding.severity ? `**Severity:** ${finding.severity}\n\n` : ''; + const fileLine = finding.file != null + ? `**Location:** \`${finding.file}${finding.line != null ? `:${finding.line}` : ''}\`\n\n` + : ''; + const suggestion = finding.suggestion + ? `**Suggested fix:**\n${finding.suggestion}\n\n` + : ''; + const resolvedNote = resolved ? '\n\n---\n**Resolved** (no longer reported in latest analysis).\n' : ''; + const marker = buildMarker(finding.id, resolved); + return `## ${finding.title} + +${severity}${fileLine}${finding.description} +${suggestion}${resolvedNote}${marker}`; +} + + +/***/ }), + +/***/ 1999: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +/** + * Path validation for AI-returned finding.file to prevent path traversal and misuse. + * Rejects paths containing '..', null bytes, or absolute paths. 
+ */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.isSafeFindingFilePath = isSafeFindingFilePath; +exports.isAllowedPathForPr = isAllowedPathForPr; +exports.resolveFindingPathForPr = resolveFindingPathForPr; +const NULL_BYTE = '\0'; +const PARENT_SEGMENT = '..'; +const SLASH = '/'; +const BACKSLASH = '\\'; +/** + * Returns true if the path is safe to use: no '..', no null bytes, not absolute. + * Does not check against a list of allowed files; use isAllowedPathForPr for that. + */ +function isSafeFindingFilePath(path) { + if (path == null || typeof path !== 'string') + return false; + const trimmed = path.trim(); + if (trimmed.length === 0) + return false; + if (trimmed.includes(NULL_BYTE)) + return false; + if (trimmed.includes(PARENT_SEGMENT)) + return false; + if (trimmed.startsWith(SLASH)) + return false; + if (/^[a-zA-Z]:[/\\]/.test(trimmed)) + return false; + if (trimmed.startsWith(BACKSLASH)) + return false; + return true; +} +/** + * Returns true if path is safe (isSafeFindingFilePath) and is in the list of PR changed files. + * Used to validate finding.file before using it for PR review comments. + */ +function isAllowedPathForPr(path, prFiles) { + if (!isSafeFindingFilePath(path)) + return false; + if (prFiles.length === 0) + return false; + const normalized = path.trim(); + return prFiles.some((f) => f.filename === normalized); +} +/** + * Resolves the file path to use for a PR review comment: finding.file if valid and in prFiles. + * Returns undefined when the finding's file is not in the PR so we do not attach the comment + * to the wrong file (e.g. the first file in the list). 
+ */ +function resolveFindingPathForPr(findingFile, prFiles) { + if (prFiles.length === 0) + return undefined; + if (isAllowedPathForPr(findingFile, prFiles)) + return findingFile.trim(); + return undefined; +} + + +/***/ }), + +/***/ 6697: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.publishFindings = publishFindings; +const issue_repository_1 = __nccwpck_require__(57); +const pull_request_repository_1 = __nccwpck_require__(634); +const logger_1 = __nccwpck_require__(8836); +const marker_1 = __nccwpck_require__(2401); +const path_validation_1 = __nccwpck_require__(1999); +/** + * Publishes current findings to issue and PR: creates or updates issue comments, + * creates or updates PR review comments (or creates new ones). + */ +async function publishFindings(param) { + const { execution, context, findings, overflowCount = 0, overflowTitles = [] } = param; + const { existingByFindingId, openPrNumbers, prContext } = context; + const issueNumber = execution.issueNumber; + const token = execution.tokens.token; + const owner = execution.owner; + const repo = execution.repo; + const issueRepository = new issue_repository_1.IssueRepository(); + const pullRequestRepository = new pull_request_repository_1.PullRequestRepository(); + const prFiles = prContext?.prFiles ?? []; + const pathToFirstDiffLine = prContext?.pathToFirstDiffLine ?? 
{}; + const prCommentsToCreate = []; + for (const finding of findings) { + const existing = existingByFindingId[finding.id]; + const commentBody = (0, marker_1.buildCommentBody)(finding, false); + if (existing?.issueCommentId != null) { + await issueRepository.updateComment(owner, repo, issueNumber, existing.issueCommentId, commentBody, token); + (0, logger_1.logDebugInfo)(`Updated bugbot comment for finding ${finding.id} on issue.`); + } + else { + await issueRepository.addComment(owner, repo, issueNumber, commentBody, token); + (0, logger_1.logDebugInfo)(`Added bugbot comment for finding ${finding.id} on issue.`); + } + if (prContext && openPrNumbers.length > 0) { + const path = (0, path_validation_1.resolveFindingPathForPr)(finding.file, prFiles); + if (path) { + const line = finding.line ?? pathToFirstDiffLine[path] ?? 1; + if (existing?.prCommentId != null && existing.prNumber === openPrNumbers[0]) { + await pullRequestRepository.updatePullRequestReviewComment(owner, repo, existing.prCommentId, commentBody, token); + } + else { + prCommentsToCreate.push({ path, line, body: commentBody }); + } + } + } + } + if (prCommentsToCreate.length > 0 && prContext && openPrNumbers.length > 0) { + await pullRequestRepository.createReviewWithComments(owner, repo, openPrNumbers[0], prContext.prHeadSha, prCommentsToCreate, token); + } + if (overflowCount > 0) { + const titlesList = overflowTitles.length > 0 + ? '\n- ' + overflowTitles.slice(0, 15).join('\n- ') + (overflowTitles.length > 15 ? `\n- ... and ${overflowTitles.length - 15} more` : '') + : ''; + const overflowBody = `## More findings (comment limit) + +There are **${overflowCount}** more finding(s) that were not published as individual comments. 
Review locally or in the full diff to see the list.${titlesList}`; + await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token); + (0, logger_1.logDebugInfo)(`Added overflow comment: ${overflowCount} additional finding(s) not published individually.`); + } +} + + +/***/ }), + +/***/ 8267: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.BUGBOT_RESPONSE_SCHEMA = void 0; +/** OpenCode response schema: agent computes diff, returns new findings and which previous ones are resolved. */ +exports.BUGBOT_RESPONSE_SCHEMA = { + type: 'object', + properties: { + findings: { + type: 'array', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Stable unique id for this finding (e.g. file:line:summary)' }, + title: { type: 'string', description: 'Short title of the problem' }, + description: { type: 'string', description: 'Clear explanation of the issue' }, + file: { type: 'string', description: 'Repository-relative path when applicable' }, + line: { type: 'number', description: 'Line number when applicable' }, + severity: { type: 'string', description: 'Severity: high, medium, low, or info. Findings below the configured minimum are not published.' }, + suggestion: { type: 'string', description: 'Suggested fix when applicable' }, + }, + required: ['id', 'title', 'description'], + additionalProperties: true, + }, + }, + resolved_finding_ids: { + type: 'array', + items: { type: 'string' }, + description: 'Ids of previously reported issues (from the list we sent) that are now fixed in the current code. 
Only include ids we asked you to check.', + }, + }, + required: ['findings'], + additionalProperties: false, +}; + + +/***/ }), + +/***/ 3109: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.normalizeMinSeverity = normalizeMinSeverity; +exports.severityLevel = severityLevel; +exports.meetsMinSeverity = meetsMinSeverity; +const VALID_SEVERITIES = ['info', 'low', 'medium', 'high']; +/** Normalizes user input to a valid SeverityLevel; defaults to 'low' if invalid. */ +function normalizeMinSeverity(value) { + if (!value) + return 'low'; + const normalized = value.toLowerCase().trim(); + return VALID_SEVERITIES.includes(normalized) ? normalized : 'low'; +} +const SEVERITY_ORDER = { + info: 0, + low: 1, + medium: 2, + high: 3, +}; +function severityLevel(severity) { + if (!severity) + return SEVERITY_ORDER.low; + const normalized = severity.toLowerCase().trim(); + return SEVERITY_ORDER[normalized] ?? SEVERITY_ORDER.low; +} +/** Returns true if the finding's severity is at or above the minimum threshold. */ +function meetsMinSeverity(findingSeverity, minSeverity) { + return severityLevel(findingSeverity) >= SEVERITY_ORDER[minSeverity]; +} + + /***/ }), /***/ 5863: @@ -48285,12 +49253,14 @@ class CheckChangesIssueSizeUseCase { (0, logger_1.logInfo)(`Executing ${this.taskId}.`); const result = []; try { - if (param.currentConfiguration.parentBranch === undefined) { - (0, logger_1.logDebugInfo)(`Parent branch is undefined.`); + const baseBranch = param.currentConfiguration.parentBranch ?? + param.branches.development ?? 
+ 'develop'; + if (!baseBranch) { + (0, logger_1.logDebugInfo)(`Parent branch could not be determined.`); return result; } const headBranch = param.commit.branch; - const baseBranch = param.currentConfiguration.parentBranch; const { size, githubSize, reason } = await this.branchRepository.getSizeCategoryAndReason(param.owner, param.repo, headBranch, baseBranch, param.sizeThresholds, param.labels, param.tokens.token); (0, logger_1.logDebugInfo)(`Size: ${size}`); (0, logger_1.logDebugInfo)(`Github Size: ${githubSize}`); @@ -48353,6 +49323,124 @@ class CheckChangesIssueSizeUseCase { exports.CheckChangesIssueSizeUseCase = CheckChangesIssueSizeUseCase; +/***/ }), + +/***/ 7395: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.DetectPotentialProblemsUseCase = void 0; +const result_1 = __nccwpck_require__(7305); +const ai_repository_1 = __nccwpck_require__(8307); +const constants_1 = __nccwpck_require__(8593); +const logger_1 = __nccwpck_require__(8836); +const build_bugbot_prompt_1 = __nccwpck_require__(6339); +const deduplicate_findings_1 = __nccwpck_require__(7384); +const file_ignore_1 = __nccwpck_require__(3770); +const path_validation_1 = __nccwpck_require__(1999); +const limit_comments_1 = __nccwpck_require__(9072); +const load_bugbot_context_use_case_1 = __nccwpck_require__(6319); +const mark_findings_resolved_use_case_1 = __nccwpck_require__(61); +const publish_findings_use_case_1 = __nccwpck_require__(6697); +const schema_1 = __nccwpck_require__(8267); +const severity_1 = __nccwpck_require__(3109); +const marker_1 = __nccwpck_require__(2401); +class DetectPotentialProblemsUseCase { + constructor() { + this.taskId = 'DetectPotentialProblemsUseCase'; + this.aiRepository = new ai_repository_1.AiRepository(); + } + async invoke(param) { + (0, logger_1.logInfo)(`Executing ${this.taskId}.`); + const results = []; + try { + if (!param.ai?.getOpencodeModel() 
|| !param.ai?.getOpencodeServerUrl()) { + (0, logger_1.logDebugInfo)('OpenCode not configured; skipping potential problems detection.'); + return results; + } + if (param.issueNumber === -1) { + (0, logger_1.logDebugInfo)('No issue number for this branch; skipping.'); + return results; + } + const context = await (0, load_bugbot_context_use_case_1.loadBugbotContext)(param); + const prompt = (0, build_bugbot_prompt_1.buildBugbotPrompt)(param, context); + (0, logger_1.logInfo)('Detecting potential problems via OpenCode (agent computes changes and checks resolved)...'); + const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: schema_1.BUGBOT_RESPONSE_SCHEMA, + schemaName: 'bugbot_findings', + }); + if (response == null || typeof response !== 'object') { + (0, logger_1.logDebugInfo)('No response from OpenCode.'); + return results; + } + const payload = response; + let findings = Array.isArray(payload.findings) ? payload.findings : []; + const resolvedFindingIdsRaw = Array.isArray(payload.resolved_finding_ids) ? payload.resolved_finding_ids : []; + const resolvedFindingIds = new Set(resolvedFindingIdsRaw); + const normalizedResolvedIds = new Set(resolvedFindingIdsRaw.map(marker_1.sanitizeFindingIdForMarker)); + const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? []; + const minSeverity = (0, severity_1.normalizeMinSeverity)(param.ai?.getBugbotMinSeverity?.()); + findings = findings.filter((f) => f.file == null || String(f.file).trim() === '' || (0, path_validation_1.isSafeFindingFilePath)(f.file)); + findings = findings.filter((f) => !(0, file_ignore_1.fileMatchesIgnorePatterns)(f.file, ignorePatterns)); + findings = findings.filter((f) => (0, severity_1.meetsMinSeverity)(f.severity, minSeverity)); + findings = (0, deduplicate_findings_1.deduplicateFindings)(findings); + const maxComments = param.ai?.getBugbotCommentLimit?.() ?? 
constants_1.BUGBOT_MAX_COMMENTS; + const { toPublish, overflowCount, overflowTitles } = (0, limit_comments_1.applyCommentLimit)(findings, maxComments); + if (toPublish.length === 0 && resolvedFindingIds.size === 0) { + (0, logger_1.logDebugInfo)('OpenCode returned no new findings (after filters) and no resolved ids.'); + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: true, + steps: ['Potential problems detection completed (no new findings, no resolved).'], + })); + return results; + } + await (0, mark_findings_resolved_use_case_1.markFindingsResolved)({ + execution: param, + context, + resolvedFindingIds, + normalizedResolvedIds, + }); + await (0, publish_findings_use_case_1.publishFindings)({ + execution: param, + context, + findings: toPublish, + overflowCount: overflowCount > 0 ? overflowCount : undefined, + overflowTitles: overflowCount > 0 ? overflowTitles : undefined, + }); + const stepParts = [`${toPublish.length} new/current finding(s) from OpenCode`]; + if (overflowCount > 0) { + stepParts.push(`${overflowCount} more not published (see summary comment)`); + } + if (resolvedFindingIds.size > 0) { + stepParts.push(`${resolvedFindingIds.size} marked as resolved by OpenCode`); + } + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: true, + steps: [`Potential problems detection completed. 
${stepParts.join('; ')}.`], + })); + } + catch (error) { + (0, logger_1.logError)(`Error in ${this.taskId}: ${error}`); + results.push(new result_1.Result({ + id: this.taskId, + success: false, + executed: true, + errors: [`Error in ${this.taskId}: ${error}`], + })); + } + return results; + } +} +exports.DetectPotentialProblemsUseCase = DetectPotentialProblemsUseCase; + + /***/ }), /***/ 8020: @@ -49213,23 +50301,26 @@ class ThinkUseCase { })); return results; } - if (!param.tokenUser?.trim()) { - (0, logger_1.logInfo)('Bot username (tokenUser) not set; skipping Think response.'); - results.push(new result_1.Result({ - id: this.taskId, - success: true, - executed: false, - })); - return results; - } - if (!commentBody.includes(`@${param.tokenUser}`)) { - (0, logger_1.logInfo)(`Comment does not mention @${param.tokenUser}; skipping.`); - results.push(new result_1.Result({ - id: this.taskId, - success: true, - executed: false, - })); - return results; + const isHelpOrQuestionIssue = param.labels.isQuestion || param.labels.isHelp; + if (!isHelpOrQuestionIssue) { + if (!param.tokenUser?.trim()) { + (0, logger_1.logInfo)('Bot username (tokenUser) not set; skipping Think response.'); + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: false, + })); + return results; + } + if (!commentBody.includes(`@${param.tokenUser}`)) { + (0, logger_1.logInfo)(`Comment does not mention @${param.tokenUser}; skipping.`); + results.push(new result_1.Result({ + id: this.taskId, + success: true, + executed: false, + })); + return results; + } } if (!param.ai.getOpencodeModel()?.trim() || !param.ai.getOpencodeServerUrl()?.trim()) { results.push(new result_1.Result({ @@ -49240,7 +50331,9 @@ class ThinkUseCase { })); return results; } - const question = commentBody.replace(new RegExp(`@${param.tokenUser}`, 'gi'), '').trim(); + const question = isHelpOrQuestionIssue + ? 
commentBody.trim() + : commentBody.replace(new RegExp(`@${param.tokenUser}`, 'gi'), '').trim(); if (!question) { results.push(new result_1.Result({ id: this.taskId, @@ -49261,8 +50354,17 @@ class ThinkUseCase { ? `\n\nContext (issue #${issueNumberForContext} description):\n${issueDescription}\n\n` : '\n\n'; const prompt = `You are a helpful assistant. Answer the following question concisely, using the context below when relevant. Do not include the question in your response.${contextBlock}Question: ${question}`; - const answer = await this.aiRepository.ask(param.ai, prompt); - if (answer === undefined || !answer.trim()) { + const response = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: ai_repository_1.THINK_RESPONSE_SCHEMA, + schemaName: 'think_response', + }); + const answer = response != null && + typeof response === 'object' && + typeof response.answer === 'string' + ? response.answer.trim() + : ''; + if (!answer) { (0, logger_1.logError)('OpenCode returned no answer for Think.'); results.push(new result_1.Result({ id: this.taskId, @@ -49849,6 +50951,7 @@ const result_1 = __nccwpck_require__(7305); const branch_repository_1 = __nccwpck_require__(7701); const content_utils_1 = __nccwpck_require__(7873); const logger_1 = __nccwpck_require__(8836); +const move_issue_to_in_progress_1 = __nccwpck_require__(8203); class DeployAddedUseCase { constructor() { this.taskId = 'DeployAddedUseCase'; @@ -49861,6 +50964,7 @@ class DeployAddedUseCase { if (param.issue.labeled && param.issue.labelAdded === param.labels.deploy) { (0, logger_1.logDebugInfo)(`Deploying requested.`); if (param.release.active && param.release.branch !== undefined) { + result.push(...await new move_issue_to_in_progress_1.MoveIssueToInProgressUseCase().invoke(param)); const sanitizedTitle = param.issue.title .replace(/\b\d+(\.\d+){2,}\b/g, '') .replace(/[^\p{L}\p{N}\p{P}\p{Z}^$\n]/gu, '') @@ -49871,13 +50975,12 @@ class 
DeployAddedUseCase { .replace(/- -/g, '-').trim() .replace(/-+/g, '-') .trim(); - const description = param.issue.body?.match(/### Changelog\n\n([\s\S]*?)(?=\n\n|$)/)?.[1]?.trim() ?? 'No changelog provided'; - const escapedDescription = description.replace(/\n/g, '\\n'); + const changelogBody = (0, content_utils_1.extractChangelogUpToAdditionalContext)(param.issue.body, 'Changelog'); const releaseUrl = `https://github.com/${param.owner}/${param.repo}/tree/${param.release.branch}`; const parameters = { version: param.release.version, title: sanitizedTitle, - changelog: escapedDescription, + changelog: changelogBody, issue: `${param.issue.number}`, }; await this.branchRepository.executeWorkflow(param.owner, param.repo, param.release.branch, param.workflows.release, parameters, param.tokens.token); @@ -49893,6 +50996,7 @@ ${(0, content_utils_1.injectJsonAsMarkdownBlock)('Workflow Parameters', paramete })); } else if (param.hotfix.active && param.hotfix.branch !== undefined) { + result.push(...await new move_issue_to_in_progress_1.MoveIssueToInProgressUseCase().invoke(param)); const sanitizedTitle = param.issue.title .replace(/\b\d+(\.\d+){2,}\b/g, '') .replace(/[^\p{L}\p{N}\p{P}\p{Z}^$\n]/gu, '') @@ -49903,16 +51007,15 @@ ${(0, content_utils_1.injectJsonAsMarkdownBlock)('Workflow Parameters', paramete .replace(/- -/g, '-').trim() .replace(/-+/g, '-') .trim(); - const description = param.issue.body?.match(/### Hotfix Solution\n\n([\s\S]*?)(?=\n\n|$)/)?.[1]?.trim() ?? 
'No changelog provided'; - const escapedDescription = description.replace(/\n/g, '\\n'); + const changelogBody = (0, content_utils_1.extractChangelogUpToAdditionalContext)(param.issue.body, 'Hotfix Solution'); const hotfixUrl = `https://github.com/${param.owner}/${param.repo}/tree/${param.hotfix.branch}`; const parameters = { version: param.hotfix.version, title: sanitizedTitle, - changelog: escapedDescription, + changelog: changelogBody, issue: param.issue.number, }; - await this.branchRepository.executeWorkflow(param.owner, param.repo, param.hotfix.branch, param.workflows.release, parameters, param.tokens.token); + await this.branchRepository.executeWorkflow(param.owner, param.repo, param.hotfix.branch, param.workflows.hotfix, parameters, param.tokens.token); result.push(new result_1.Result({ id: this.taskId, success: true, @@ -50770,8 +51873,17 @@ If you'd like this comment to be translated again, please delete the entire comm The text is: ${commentBody} `; - let result = await this.aiRepository.ask(param.ai, prompt); - if (result === "done") { + const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA, + schemaName: 'language_check_response', + }); + const status = checkResponse != null && + typeof checkResponse === 'object' && + typeof checkResponse.status === 'string' + ? 
checkResponse.status + : ''; + if (status === 'done') { results.push(new result_1.Result({ id: this.taskId, success: true, @@ -51178,9 +52290,7 @@ class SyncSizeAndProgressLabelsFromIssueToPrUseCase { id: this.taskId, success: true, executed: true, - steps: [ - `Size and progress labels copied from issue #${param.issueNumber} to this PR.`, - ], + steps: [], })); } catch (error) { @@ -51286,7 +52396,7 @@ class UpdatePullRequestDescriptionUseCase { id: this.taskId, success: true, executed: true, - steps: [`The description has been updated with AI-generated content (OpenCode Plan agent).`], + steps: [], })); } catch (error) { @@ -51383,8 +52493,17 @@ If you'd like this comment to be translated again, please delete the entire comm The text is: ${commentBody} `; - let result = await this.aiRepository.ask(param.ai, prompt); - if (result === "done") { + const checkResponse = await this.aiRepository.askAgent(param.ai, ai_repository_1.OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: ai_repository_1.LANGUAGE_CHECK_RESPONSE_SCHEMA, + schemaName: 'language_check_response', + }); + const status = checkResponse != null && + typeof checkResponse === 'object' && + typeof checkResponse.status === 'string' + ? 
checkResponse.status + : ''; + if (status === 'done') { results.push(new result_1.Result({ id: this.taskId, success: true, @@ -51445,14 +52564,18 @@ exports.CheckPullRequestCommentLanguageUseCase = CheckPullRequestCommentLanguage "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.PROMPTS = exports.ACTIONS = exports.ERRORS = exports.INPUT_KEYS = exports.WORKFLOW_ACTIVE_STATUSES = exports.WORKFLOW_STATUS = exports.DEFAULT_IMAGE_CONFIG = exports.OPENCODE_REQUEST_TIMEOUT_MS = exports.OPENCODE_DEFAULT_MODEL = exports.REPO_URL = exports.TITLE = exports.COMMAND = void 0; +exports.PROMPTS = exports.BUGBOT_MIN_SEVERITY = exports.BUGBOT_MAX_COMMENTS = exports.BUGBOT_MARKER_PREFIX = exports.ACTIONS = exports.ERRORS = exports.INPUT_KEYS = exports.WORKFLOW_ACTIVE_STATUSES = exports.WORKFLOW_STATUS = exports.DEFAULT_IMAGE_CONFIG = exports.OPENCODE_RETRY_DELAY_MS = exports.OPENCODE_MAX_RETRIES = exports.OPENCODE_REQUEST_TIMEOUT_MS = exports.OPENCODE_DEFAULT_MODEL = exports.REPO_URL = exports.TITLE = exports.COMMAND = void 0; exports.COMMAND = 'giik'; exports.TITLE = 'Giik'; exports.REPO_URL = 'https://github.com/landamessenger/git-board-flow'; /** Default OpenCode model: provider/modelID (e.g. opencode/kimi-k2.5-free). Reuse for CLI, action and Ai fallbacks. */ exports.OPENCODE_DEFAULT_MODEL = 'opencode/kimi-k2.5-free'; -/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow with many files. */ -exports.OPENCODE_REQUEST_TIMEOUT_MS = 600000; +/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow (e.g. plan analyzing repo). */ +exports.OPENCODE_REQUEST_TIMEOUT_MS = 900000; +/** Max attempts for OpenCode requests (retries on failure). Applied transparently in AiRepository. */ +exports.OPENCODE_MAX_RETRIES = 5; +/** Delay in ms between OpenCode retry attempts. 
*/ +exports.OPENCODE_RETRY_DELAY_MS = 2000; exports.DEFAULT_IMAGE_CONFIG = { issue: { automatic: [ @@ -51659,6 +52782,8 @@ exports.INPUT_KEYS = { AI_MEMBERS_ONLY: 'ai-members-only', AI_IGNORE_FILES: 'ai-ignore-files', AI_INCLUDE_REASONING: 'ai-include-reasoning', + BUGBOT_SEVERITY: 'bugbot-severity', + BUGBOT_COMMENT_LIMIT: 'bugbot-comment-limit', // Projects PROJECT_IDS: 'project-ids', PROJECT_COLUMN_ISSUE_CREATED: 'project-column-issue-created', @@ -51803,9 +52928,15 @@ exports.ACTIONS = { THINK: 'think_action', INITIAL_SETUP: 'initial_setup', CHECK_PROGRESS: 'check_progress_action', - DETECT_ERRORS: 'detect_errors_action', + DETECT_POTENTIAL_PROBLEMS: 'detect_potential_problems_action', RECOMMEND_STEPS: 'recommend_steps_action', }; +/** Hidden HTML comment prefix for bugbot findings (issue/PR comments). Format: */ +exports.BUGBOT_MARKER_PREFIX = 'gbf-bugbot'; +/** Max number of individual bugbot comments to create per issue/PR. Excess findings get one summary comment suggesting to review locally. */ +exports.BUGBOT_MAX_COMMENTS = 20; +/** Minimum severity to publish (findings below this are dropped). Order: high > medium > low > info. */ +exports.BUGBOT_MIN_SEVERITY = 'low'; exports.PROMPTS = {}; @@ -51817,7 +52948,7 @@ exports.PROMPTS = {}; "use strict"; Object.defineProperty(exports, "__esModule", ({ value: true })); -exports.injectJsonAsMarkdownBlock = exports.extractReleaseType = exports.extractVersion = void 0; +exports.injectJsonAsMarkdownBlock = exports.extractChangelogUpToAdditionalContext = exports.extractReleaseType = exports.extractVersion = void 0; const extractVersion = (pattern, text) => { const versionPattern = new RegExp(`###\\s*${pattern}\\s+(\\d+\\.\\d+\\.\\d+)`, 'i'); const match = text.match(versionPattern); @@ -51830,6 +52961,22 @@ const extractReleaseType = (pattern, text) => { return match ? 
match[1] : undefined; }; exports.extractReleaseType = extractReleaseType; +/** + * Extracts changelog content from an issue body: from the given section heading (e.g. "Changelog" or "Hotfix Solution") + * up to but not including the "Additional Context" section. Used for release/hotfix deployment bodies. + */ +const extractChangelogUpToAdditionalContext = (body, sectionTitle) => { + if (body == null || body === '') { + return 'No changelog provided'; + } + const escaped = sectionTitle.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const pattern = new RegExp(`(?:###|##)\\s*${escaped}\\s*\\n\\n([\\s\\S]*?)` + + `(?=\\n(?:###|##)\\s*Additional Context\\s*|$)`, 'i'); + const match = body.match(pattern); + const content = match?.[1]?.trim(); + return content ?? 'No changelog provided'; +}; +exports.extractChangelogUpToAdditionalContext = extractChangelogUpToAdditionalContext; const injectJsonAsMarkdownBlock = (title, json) => { const formattedJson = JSON.stringify(json, null, 4) // Pretty-print the JSON with 4 spaces. .split('\n') // Split into lines. @@ -52036,23 +53183,74 @@ function logSingleLine(message) { /***/ }), /***/ 1942: -/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { "use strict"; /** * Managed OpenCode server lifecycle for GitHub Actions. * Starts "npx opencode-ai serve" and stops it when the action finishes. + * If no opencode.json exists in cwd, creates one with provider timeout 10 min and removes it on stop. */ +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; Object.defineProperty(exports, "__esModule", ({ value: true })); exports.startOpencodeServer = startOpencodeServer; exports.stopOpencodeServer = stopOpencodeServer; const child_process_1 = __nccwpck_require__(2081); +const promises_1 = __nccwpck_require__(3292); +const path_1 = __importDefault(__nccwpck_require__(1017)); const logger_1 = __nccwpck_require__(8836); const DEFAULT_PORT = 4096; const HEALTH_PATH = '/global/health'; const POLL_INTERVAL_MS = 500; const STARTUP_TIMEOUT_MS = 120000; // 2 min (first npx download can be slow) +const OPENCODE_CONFIG_FILENAME = 'opencode.json'; +/** Provider request timeout in ms (10 min). OpenCode default is 5 min; we need longer for plan agent. */ +const OPENCODE_PROVIDER_TIMEOUT_MS = 600000; +/** + * If opencode.json does not exist in cwd, create it with provider timeout (10 min). + * OpenCode merges configs; this file will set provider.opencode.options.timeout so long requests don't get cut at 5 min. + */ +async function ensureOpencodeConfig(cwd) { + const configPath = path_1.default.join(cwd, OPENCODE_CONFIG_FILENAME); + try { + await (0, promises_1.access)(configPath); + return { created: false, configPath }; + } + catch { + // File does not exist; create minimal config for provider timeout + } + const config = { + $schema: 'https://opencode.ai/config.json', + provider: { + opencode: { + options: { + timeout: OPENCODE_PROVIDER_TIMEOUT_MS, + }, + }, + }, + }; + await (0, promises_1.writeFile)(configPath, JSON.stringify(config, null, 2), 'utf8'); + (0, logger_1.logInfo)(`Created ${OPENCODE_CONFIG_FILENAME} with provider timeout ${OPENCODE_PROVIDER_TIMEOUT_MS / 60000} min (will remove on server stop).`); + return { created: true, configPath }; +} +/** + * Remove opencode.json if we created it (so we don't leave a temporary file in the repo). 
+ */ +async function removeOpencodeConfigIfCreated(result) { + if (!result.created) + return; + try { + await (0, promises_1.unlink)(result.configPath); + (0, logger_1.logInfo)(`Removed temporary ${OPENCODE_CONFIG_FILENAME}.`); + } + catch (e) { + const msg = e instanceof Error ? e.message : String(e); + (0, logger_1.logError)(`Failed to remove temporary ${OPENCODE_CONFIG_FILENAME}: ${msg}`); + } +} /** * Wait until OpenCode server responds to /global/health or timeout. */ @@ -52088,6 +53286,7 @@ async function startOpencodeServer(options) { const hostname = options?.hostname ?? '127.0.0.1'; const cwd = options?.cwd ?? process.cwd(); const baseUrl = `http://${hostname}:${port}`; + const configResult = await ensureOpencodeConfig(cwd); (0, logger_1.logInfo)(`Starting OpenCode server at ${baseUrl} (this may take a moment on first run)...`); const child = (0, child_process_1.spawn)('npx', ['-y', 'opencode-ai', 'serve', '--port', String(port), '--hostname', hostname], { cwd, @@ -52095,7 +53294,10 @@ async function startOpencodeServer(options) { stdio: ['ignore', 'pipe', 'pipe'], shell: false, }); - const stop = () => stopOpencodeServer(child); + const stop = async () => { + await stopOpencodeServer(child); + await removeOpencodeConfigIfCreated(configResult); + }; // Ensure we don't leave the process running if our process exits const onExit = () => { child.kill('SIGTERM'); @@ -52355,6 +53557,14 @@ module.exports = require("fs"); /***/ }), +/***/ 3292: +/***/ ((module) => { + +"use strict"; +module.exports = require("fs/promises"); + +/***/ }), + /***/ 3685: /***/ ((module) => { diff --git a/build/github_action/src/data/model/ai.d.ts b/build/github_action/src/data/model/ai.d.ts index d6ea5394..d45b1069 100644 --- a/build/github_action/src/data/model/ai.d.ts +++ b/build/github_action/src/data/model/ai.d.ts @@ -10,13 +10,17 @@ export declare class Ai { private aiMembersOnly; private aiIgnoreFiles; private aiIncludeReasoning; - constructor(opencodeServerUrl: string, 
opencodeModel: string, aiPullRequestDescription: boolean, aiMembersOnly: boolean, aiIgnoreFiles: string[], aiIncludeReasoning: boolean); + private bugbotMinSeverity; + private bugbotCommentLimit; + constructor(opencodeServerUrl: string, opencodeModel: string, aiPullRequestDescription: boolean, aiMembersOnly: boolean, aiIgnoreFiles: string[], aiIncludeReasoning: boolean, bugbotMinSeverity: string, bugbotCommentLimit: number); getOpencodeServerUrl(): string; getOpencodeModel(): string; getAiPullRequestDescription(): boolean; getAiMembersOnly(): boolean; getAiIgnoreFiles(): string[]; getAiIncludeReasoning(): boolean; + getBugbotMinSeverity(): string; + getBugbotCommentLimit(): number; /** * Parse "provider/model-id" into { providerID, modelID } for OpenCode session.prompt. * Uses OPENCODE_DEFAULT_MODEL when no model is set (e.g. opencode/kimi-k2.5-free). diff --git a/build/github_action/src/data/model/single_action.d.ts b/build/github_action/src/data/model/single_action.d.ts index 6ecdd44b..5ec86825 100644 --- a/build/github_action/src/data/model/single_action.d.ts +++ b/build/github_action/src/data/model/single_action.d.ts @@ -26,7 +26,7 @@ export declare class SingleAction { get isThinkAction(): boolean; get isInitialSetupAction(): boolean; get isCheckProgressAction(): boolean; - get isDetectErrorsAction(): boolean; + get isDetectPotentialProblemsAction(): boolean; get isRecommendStepsAction(): boolean; get enabledSingleAction(): boolean; get validSingleAction(): boolean; diff --git a/build/github_action/src/data/repository/__tests__/ai_repository.test.d.ts b/build/github_action/src/data/repository/__tests__/ai_repository.test.d.ts index 5c108792..9b53426a 100644 --- a/build/github_action/src/data/repository/__tests__/ai_repository.test.d.ts +++ b/build/github_action/src/data/repository/__tests__/ai_repository.test.d.ts @@ -1,6 +1,7 @@ /** * Integration-style tests for AiRepository with mocked fetch. 
* Covers edge cases for the OpenCode-based architecture: missing config, - * session/message failures, empty/invalid responses, JSON parsing, reasoning, getSessionDiff. + * session/message failures, empty/invalid responses, JSON parsing, reasoning, getSessionDiff, + * and retry behavior (OPENCODE_MAX_RETRIES). */ export {}; diff --git a/build/github_action/src/data/repository/ai_repository.d.ts b/build/github_action/src/data/repository/ai_repository.d.ts index e2bb1067..e9c48bd4 100644 --- a/build/github_action/src/data/repository/ai_repository.d.ts +++ b/build/github_action/src/data/repository/ai_repository.d.ts @@ -19,6 +19,31 @@ export declare const TRANSLATION_RESPONSE_SCHEMA: { readonly required: readonly ["translatedText"]; readonly additionalProperties: false; }; +/** JSON schema for Think (Q&A) responses: single answer field. */ +export declare const THINK_RESPONSE_SCHEMA: { + readonly type: "object"; + readonly properties: { + readonly answer: { + readonly type: "string"; + readonly description: "The concise answer to the user question. Required."; + }; + }; + readonly required: readonly ["answer"]; + readonly additionalProperties: false; +}; +/** JSON schema for language check: done (already in locale) or must_translate. */ +export declare const LANGUAGE_CHECK_RESPONSE_SCHEMA: { + readonly type: "object"; + readonly properties: { + readonly status: { + readonly type: "string"; + readonly enum: readonly ["done", "must_translate"]; + readonly description: "done if text is in the requested locale, must_translate otherwise."; + }; + }; + readonly required: readonly ["status"]; + readonly additionalProperties: false; +}; export interface AskAgentOptions { /** Request JSON response and parse it. If schema provided, include it in the prompt. */ expectJson?: boolean; @@ -37,22 +62,18 @@ export interface OpenCodeFileDiff { /** * Get the diff for an OpenCode session (files changed by the agent). 
* Call after opencodeMessageWithAgent when using the "build" agent so the user can see what was edited. + * Wrapped with retries (OPENCODE_MAX_RETRIES). */ export declare function getSessionDiff(baseUrl: string, sessionId: string): Promise; export declare class AiRepository { - ask: (ai: Ai, prompt: string) => Promise; /** - * Ask an OpenCode agent (e.g. Plan) to perform a task. The server runs the full agent loop. - * Returns the final message (including reasoning in parts when includeReasoning is true). - * @param ai - AI config (server URL, model) - * @param agentId - OpenCode agent id (e.g. OPENCODE_AGENT_PLAN) - * @param prompt - User prompt - * @param options - expectJson, schema, includeReasoning - * @returns Response text, or parsed JSON when expectJson is true + * Ask an OpenCode agent (e.g. Plan) to perform a task. All calls use strict response (expectJson + schema). + * Single retry system: HTTP failures and parse failures both retry up to OPENCODE_MAX_RETRIES. */ askAgent: (ai: Ai, agentId: string, prompt: string, options?: AskAgentOptions) => Promise | undefined>; /** * Run the OpenCode "build" agent for the copilot command. Returns the final message and sessionId. + * Uses the same retry system (OPENCODE_MAX_RETRIES). 
*/ copilotMessage: (ai: Ai, prompt: string) => Promise<{ text: string; diff --git a/build/github_action/src/data/repository/branch_repository.d.ts b/build/github_action/src/data/repository/branch_repository.d.ts index f65ea00a..e8965846 100644 --- a/build/github_action/src/data/repository/branch_repository.d.ts +++ b/build/github_action/src/data/repository/branch_repository.d.ts @@ -33,7 +33,7 @@ export declare class BranchRepository { totalCommits: number; files: { filename: string; - status: "added" | "removed" | "modified" | "renamed" | "copied" | "changed" | "unchanged"; + status: "modified" | "added" | "removed" | "renamed" | "copied" | "changed" | "unchanged"; additions: number; deletions: number; changes: number; diff --git a/build/github_action/src/data/repository/issue_repository.d.ts b/build/github_action/src/data/repository/issue_repository.d.ts index 85aa697f..dbd004e0 100644 --- a/build/github_action/src/data/repository/issue_repository.d.ts +++ b/build/github_action/src/data/repository/issue_repository.d.ts @@ -39,6 +39,17 @@ export declare class IssueRepository { getHeadBranch: (owner: string, repository: string, issueNumber: number, token: string) => Promise; addComment: (owner: string, repository: string, issueNumber: number, comment: string, token: string) => Promise; updateComment: (owner: string, repository: string, issueNumber: number, commentId: number, comment: string, token: string) => Promise; + /** + * Lists all comments on an issue (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). 
+ */ + listIssueComments: (owner: string, repository: string, issueNumber: number, token: string) => Promise>; closeIssue: (owner: string, repository: string, issueNumber: number, token: string) => Promise; openIssue: (owner: string, repository: string, issueNumber: number, token: string) => Promise; getCurrentAssignees: (owner: string, repository: string, issueNumber: number, token: string) => Promise; diff --git a/build/github_action/src/data/repository/pull_request_repository.d.ts b/build/github_action/src/data/repository/pull_request_repository.d.ts index 4118cf6c..228713db 100644 --- a/build/github_action/src/data/repository/pull_request_repository.d.ts +++ b/build/github_action/src/data/repository/pull_request_repository.d.ts @@ -7,12 +7,26 @@ export declare class PullRequestRepository { isLinked: (pullRequestUrl: string) => Promise; updateBaseBranch: (owner: string, repository: string, pullRequestNumber: number, branch: string, token: string) => Promise; updateDescription: (owner: string, repository: string, pullRequestNumber: number, description: string, token: string) => Promise; + /** + * Returns all users involved in review: requested (pending) + those who already submitted a review. + * Used to avoid re-requesting someone who already reviewed when ensuring desired reviewer count. + */ getCurrentReviewers: (owner: string, repository: string, pullNumber: number, token: string) => Promise; addReviewersToPullRequest: (owner: string, repository: string, pullNumber: number, reviewers: string[], token: string) => Promise; getChangedFiles: (owner: string, repository: string, pullNumber: number, token: string) => Promise<{ filename: string; status: string; }[]>; + /** First line (right side) of the first hunk per file, for valid review comment placement. */ + private static firstLineFromPatch; + /** + * Returns for each changed file the first line number that appears in the diff (right side). 
+ * Used so review comments use a line that GitHub can resolve (avoids "line could not be resolved"). + */ + getFilesWithFirstDiffLine: (owner: string, repository: string, pullNumber: number, token: string) => Promise>; getPullRequestChanges: (owner: string, repository: string, pullNumber: number, token: string) => Promise>; + /** Head commit SHA of the PR (for creating review). */ + getPullRequestHeadSha: (owner: string, repository: string, pullNumber: number, token: string) => Promise; + /** + * List all review comments on a PR (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). + * Includes node_id for GraphQL (e.g. resolve review thread). + */ + listPullRequestReviewComments: (owner: string, repository: string, pullNumber: number, token: string) => Promise>; + /** + * Resolve a PR review thread (GraphQL only). Finds the thread that contains the given comment and marks it resolved. + * Uses repository.pullRequest.reviewThreads because the field pullRequestReviewThread on PullRequestReviewComment was removed from the API. + * Paginates through all threads and all comments in each thread so the comment is found regardless of PR size. + * No-op if thread is already resolved. Logs and does not throw on error. + */ + resolvePullRequestReviewThread: (owner: string, repository: string, pullNumber: number, commentNodeId: string, token: string) => Promise; + /** + * Create a review on the PR with one or more inline comments (bugbot findings). + * Each comment requires path and line (use first file and line 1 if not specified). + */ + createReviewWithComments: (owner: string, repository: string, pullNumber: number, commitId: string, comments: Array<{ + path: string; + line: number; + body: string; + }>, token: string) => Promise; + /** Update an existing PR review comment (e.g. to mark finding as resolved in body). 
*/ + updatePullRequestReviewComment: (owner: string, repository: string, commentId: number, body: string, token: string) => Promise; } diff --git a/build/github_action/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts b/build/github_action/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts index 01edf0e5..812db253 100644 --- a/build/github_action/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts +++ b/build/github_action/src/usecase/actions/__tests__/check_progress_use_case.test.d.ts @@ -1,6 +1,6 @@ /** * Integration-style tests for CheckProgressUseCase with the OpenCode-based flow. * Covers edge cases: missing AI config, no issue/branch/description, AI returns undefined/invalid - * progress, retries when progress 0%, success path with label updates. + * progress, progress 0% (single call; HTTP retries are in AiRepository), success path with label updates. */ export {}; diff --git a/build/github_action/src/usecase/actions/__tests__/deployed_action_use_case.test.d.ts b/build/github_action/src/usecase/actions/__tests__/deployed_action_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/actions/__tests__/deployed_action_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/actions/check_progress_use_case.d.ts b/build/github_action/src/usecase/actions/check_progress_use_case.d.ts index f3d3f26a..2c536409 100644 --- a/build/github_action/src/usecase/actions/check_progress_use_case.d.ts +++ b/build/github_action/src/usecase/actions/check_progress_use_case.d.ts @@ -10,7 +10,7 @@ export declare class CheckProgressUseCase implements ParamUseCase; /** * Calls the OpenCode agent once and returns parsed progress, summary, and reasoning. - * Used inside the retry loop when progress is 0%. + * HTTP-level retries are handled by AiRepository (OPENCODE_MAX_RETRIES). 
*/ private fetchProgressAttempt; /** diff --git a/build/github_action/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.d.ts b/build/github_action/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.d.ts b/build/github_action/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.d.ts new file mode 100644 index 00000000..a68dd59d --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.d.ts @@ -0,0 +1,6 @@ +/** + * Unit tests for DetectPotentialProblemsUseCase (bugbot on push). + * Covers: skip when OpenCode/issue missing, prompt with/without previous findings, + * new findings (add/update issue and PR comments), resolved_finding_ids, errors. + */ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.d.ts new file mode 100644 index 00000000..fd8207cb --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for deduplicateFindings: dedupe by (file, line) or by title when no location. 
+ */ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.d.ts new file mode 100644 index 00000000..e8076137 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for file_ignore: fileMatchesIgnorePatterns (glob-style path matching). + */ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.d.ts new file mode 100644 index 00000000..8bead7b4 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for applyCommentLimit: max comments and overflow titles. + */ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/__tests__/severity.test.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/severity.test.d.ts new file mode 100644 index 00000000..12b0c054 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/__tests__/severity.test.d.ts @@ -0,0 +1,4 @@ +/** + * Unit tests for bugbot severity helpers: normalizeMinSeverity, severityLevel, meetsMinSeverity. 
+ */ +export {}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/build_bugbot_prompt.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/build_bugbot_prompt.d.ts new file mode 100644 index 00000000..9c6bc28c --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/build_bugbot_prompt.d.ts @@ -0,0 +1,3 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +export declare function buildBugbotPrompt(param: Execution, context: BugbotContext): string; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/deduplicate_findings.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/deduplicate_findings.d.ts new file mode 100644 index 00000000..856de1f9 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/deduplicate_findings.d.ts @@ -0,0 +1,7 @@ +import type { BugbotFinding } from "./types"; +/** + * Deduplicates findings by (file, line). When two findings share the same file and line, + * keeps the first; when they have no file, groups by normalized title and keeps the first. + * This reduces noise when the agent returns near-duplicate issues. + */ +export declare function deduplicateFindings(findings: BugbotFinding[]): BugbotFinding[]; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/file_ignore.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/file_ignore.d.ts new file mode 100644 index 00000000..f32bd91d --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/file_ignore.d.ts @@ -0,0 +1,5 @@ +/** + * Returns true if the file path matches any of the ignore patterns (glob-style). + * Used to exclude findings in test files, build output, etc. 
+ */ +export declare function fileMatchesIgnorePatterns(filePath: string | undefined, ignorePatterns: string[]): boolean; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/limit_comments.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/limit_comments.d.ts new file mode 100644 index 00000000..006c7a05 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/limit_comments.d.ts @@ -0,0 +1,14 @@ +import type { BugbotFinding } from "./types"; +export interface ApplyLimitResult { + /** Findings to publish as individual comments (up to maxComments). */ + toPublish: BugbotFinding[]; + /** Number of findings not published as individual comments. */ + overflowCount: number; + /** Titles of overflow findings (for the summary comment). */ + overflowTitles: string[]; +} +/** + * Applies the max-comments limit: returns the first N findings to publish individually, + * and overflow count + titles for a single summary comment suggesting to review locally. + */ +export declare function applyCommentLimit(findings: BugbotFinding[], maxComments?: number): ApplyLimitResult; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.d.ts new file mode 100644 index 00000000..361f5940 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.d.ts @@ -0,0 +1,8 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +/** + * Loads all context needed for bugbot: existing findings from issue + PR comments, + * open PR numbers, and the prompt block for previously reported issues. + * Also loads PR context (head sha, files, diff lines) for the first open PR.
+ */ +export declare function loadBugbotContext(param: Execution): Promise; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.d.ts new file mode 100644 index 00000000..93448758 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.d.ts @@ -0,0 +1,13 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +export interface MarkFindingsResolvedParam { + execution: Execution; + context: BugbotContext; + resolvedFindingIds: Set; + normalizedResolvedIds: Set; +} +/** + * Marks as resolved the findings that OpenCode reported as fixed. + * Updates issue comments (with visible "Resolved" note) and PR review comments (marker only + resolve thread). + */ +export declare function markFindingsResolved(param: MarkFindingsResolvedParam): Promise; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/marker.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/marker.d.ts new file mode 100644 index 00000000..316074ba --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/marker.d.ts @@ -0,0 +1,21 @@ +import type { BugbotFinding } from "./types"; +/** Sanitize finding ID so it cannot break HTML comment syntax (e.g. -->, , newlines, quotes). */ +export declare function sanitizeFindingIdForMarker(findingId: string): string; +export declare function buildMarker(findingId: string, resolved: boolean): string; +export declare function parseMarker(body: string | null): Array<{ + findingId: string; + resolved: boolean; +}>; +/** Regex to match the marker for a specific finding (same flexible format as parseMarker). */ +export declare function markerRegexForFinding(findingId: string): RegExp; +/** + * Find the marker for this finding in body (using same pattern as parseMarker) and replace it. 
+ * Returns the updated body and whether a replacement was made. Logs an error with details if no replacement occurred. + */ +export declare function replaceMarkerInBody(body: string, findingId: string, newResolved: boolean, replacement?: string): { + updated: string; + replaced: boolean; +}; +/** Extract title from comment body (first ## line) for context when sending to OpenCode. */ +export declare function extractTitleFromBody(body: string | null): string; +export declare function buildCommentBody(finding: BugbotFinding, resolved: boolean): string; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/path_validation.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/path_validation.d.ts new file mode 100644 index 00000000..ca19c32e --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/path_validation.d.ts @@ -0,0 +1,25 @@ +/** + * Path validation for AI-returned finding.file to prevent path traversal and misuse. + * Rejects paths containing '..', null bytes, or absolute paths. + */ +/** + * Returns true if the path is safe to use: no '..', no null bytes, not absolute. + * Does not check against a list of allowed files; use isAllowedPathForPr for that. + */ +export declare function isSafeFindingFilePath(path: string | undefined): boolean; +/** + * Returns true if path is safe (isSafeFindingFilePath) and is in the list of PR changed files. + * Used to validate finding.file before using it for PR review comments. + */ +export declare function isAllowedPathForPr(path: string | undefined, prFiles: Array<{ + filename: string; +}>): boolean; +/** + * Resolves the file path to use for a PR review comment: finding.file if valid and in prFiles. + * Returns undefined when the finding's file is not in the PR so we do not attach the comment + * to the wrong file (e.g. the first file in the list). 
+ */ +export declare function resolveFindingPathForPr(findingFile: string | undefined, prFiles: Array<{ + filename: string; + status: string; +}>): string | undefined; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts new file mode 100644 index 00000000..e9270fbb --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/publish_findings_use_case.d.ts @@ -0,0 +1,16 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; +import type { BugbotFinding } from "./types"; +export interface PublishFindingsParam { + execution: Execution; + context: BugbotContext; + findings: BugbotFinding[]; + /** When findings were limited by max comments, add one summary comment with this overflow info. */ + overflowCount?: number; + overflowTitles?: string[]; +} +/** + * Publishes current findings to issue and PR: creates or updates issue comments, + * creates or updates PR review comments (or creates new ones). + */ +export declare function publishFindings(param: PublishFindingsParam): Promise; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/schema.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/schema.d.ts new file mode 100644 index 00000000..5a66ca5e --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/schema.d.ts @@ -0,0 +1,53 @@ +/** OpenCode response schema: agent computes diff, returns new findings and which previous ones are resolved. */ +export declare const BUGBOT_RESPONSE_SCHEMA: { + readonly type: "object"; + readonly properties: { + readonly findings: { + readonly type: "array"; + readonly items: { + readonly type: "object"; + readonly properties: { + readonly id: { + readonly type: "string"; + readonly description: "Stable unique id for this finding (e.g. 
file:line:summary)"; + }; + readonly title: { + readonly type: "string"; + readonly description: "Short title of the problem"; + }; + readonly description: { + readonly type: "string"; + readonly description: "Clear explanation of the issue"; + }; + readonly file: { + readonly type: "string"; + readonly description: "Repository-relative path when applicable"; + }; + readonly line: { + readonly type: "number"; + readonly description: "Line number when applicable"; + }; + readonly severity: { + readonly type: "string"; + readonly description: "Severity: high, medium, low, or info. Findings below the configured minimum are not published."; + }; + readonly suggestion: { + readonly type: "string"; + readonly description: "Suggested fix when applicable"; + }; + }; + readonly required: readonly ["id", "title", "description"]; + readonly additionalProperties: true; + }; + }; + readonly resolved_finding_ids: { + readonly type: "array"; + readonly items: { + readonly type: "string"; + }; + readonly description: "Ids of previously reported issues (from the list we sent) that are now fixed in the current code. Only include ids we asked you to check."; + }; + }; + readonly required: readonly ["findings"]; + readonly additionalProperties: false; +}; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/severity.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/severity.d.ts new file mode 100644 index 00000000..ae6635cc --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/severity.d.ts @@ -0,0 +1,6 @@ +export type SeverityLevel = 'info' | 'low' | 'medium' | 'high'; +/** Normalizes user input to a valid SeverityLevel; defaults to 'low' if invalid. */ +export declare function normalizeMinSeverity(value: string | undefined): SeverityLevel; +export declare function severityLevel(severity: string | undefined): number; +/** Returns true if the finding's severity is at or above the minimum threshold. 
*/ +export declare function meetsMinSeverity(findingSeverity: string | undefined, minSeverity: SeverityLevel): boolean; diff --git a/build/github_action/src/usecase/steps/commit/bugbot/types.d.ts b/build/github_action/src/usecase/steps/commit/bugbot/types.d.ts new file mode 100644 index 00000000..79e3ce79 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/bugbot/types.d.ts @@ -0,0 +1,35 @@ +/** Single finding from OpenCode (agent computes changes and returns these). */ +export interface BugbotFinding { + id: string; + title: string; + description: string; + file?: string; + line?: number; + severity?: string; + suggestion?: string; +} +export interface ExistingFindingInfo { + issueCommentId?: number; + prCommentId?: number; + prNumber?: number; + resolved: boolean; +} +export type ExistingByFindingId = Record; +export interface BugbotPrContext { + prHeadSha: string; + prFiles: Array<{ + filename: string; + status: string; + }>; + pathToFirstDiffLine: Record; +} +export interface BugbotContext { + existingByFindingId: ExistingByFindingId; + issueComments: Array<{ + id: number; + body: string | null; + }>; + openPrNumbers: number[]; + previousFindingsBlock: string; + prContext: BugbotPrContext | null; +} diff --git a/build/github_action/src/usecase/steps/commit/detect_potential_problems_use_case.d.ts b/build/github_action/src/usecase/steps/commit/detect_potential_problems_use_case.d.ts new file mode 100644 index 00000000..4c622b26 --- /dev/null +++ b/build/github_action/src/usecase/steps/commit/detect_potential_problems_use_case.d.ts @@ -0,0 +1,9 @@ +import { Execution } from "../../../data/model/execution"; +import { Result } from "../../../data/model/result"; +import { ParamUseCase } from "../../base/param_usecase"; +export type { BugbotFinding } from "./bugbot/types"; +export declare class DetectPotentialProblemsUseCase implements ParamUseCase { + taskId: string; + private aiRepository; + invoke(param: Execution): Promise; +} diff --git 
a/build/github_action/src/usecase/steps/common/__tests__/publish_resume_use_case.test.d.ts b/build/github_action/src/usecase/steps/common/__tests__/publish_resume_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/common/__tests__/publish_resume_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.d.ts b/build/github_action/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.d.ts b/build/github_action/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.d.ts b/build/github_action/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.d.ts @@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.d.ts b/build/github_action/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.d.ts new file mode 100644 index 00000000..cb0ff5c3 --- /dev/null +++ b/build/github_action/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.d.ts 
@@ -0,0 +1 @@ +export {}; diff --git a/build/github_action/src/utils/constants.d.ts b/build/github_action/src/utils/constants.d.ts index ddb1d79a..f9c9b80c 100644 --- a/build/github_action/src/utils/constants.d.ts +++ b/build/github_action/src/utils/constants.d.ts @@ -3,8 +3,12 @@ export declare const TITLE = "Giik"; export declare const REPO_URL = "https://github.com/landamessenger/git-board-flow"; /** Default OpenCode model: provider/modelID (e.g. opencode/kimi-k2.5-free). Reuse for CLI, action and Ai fallbacks. */ export declare const OPENCODE_DEFAULT_MODEL = "opencode/kimi-k2.5-free"; -/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow with many files. */ -export declare const OPENCODE_REQUEST_TIMEOUT_MS = 600000; +/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow (e.g. plan analyzing repo). */ +export declare const OPENCODE_REQUEST_TIMEOUT_MS = 900000; +/** Max attempts for OpenCode requests (retries on failure). Applied transparently in AiRepository. */ +export declare const OPENCODE_MAX_RETRIES = 5; +/** Delay in ms between OpenCode retry attempts. 
*/ +export declare const OPENCODE_RETRY_DELAY_MS = 2000; export declare const DEFAULT_IMAGE_CONFIG: { issue: { automatic: string[]; @@ -61,6 +65,8 @@ export declare const INPUT_KEYS: { readonly AI_MEMBERS_ONLY: "ai-members-only"; readonly AI_IGNORE_FILES: "ai-ignore-files"; readonly AI_INCLUDE_REASONING: "ai-include-reasoning"; + readonly BUGBOT_SEVERITY: "bugbot-severity"; + readonly BUGBOT_COMMENT_LIMIT: "bugbot-comment-limit"; readonly PROJECT_IDS: "project-ids"; readonly PROJECT_COLUMN_ISSUE_CREATED: "project-column-issue-created"; readonly PROJECT_COLUMN_PULL_REQUEST_CREATED: "project-column-pull-request-created"; @@ -193,7 +199,13 @@ export declare const ACTIONS: { readonly THINK: "think_action"; readonly INITIAL_SETUP: "initial_setup"; readonly CHECK_PROGRESS: "check_progress_action"; - readonly DETECT_ERRORS: "detect_errors_action"; + readonly DETECT_POTENTIAL_PROBLEMS: "detect_potential_problems_action"; readonly RECOMMEND_STEPS: "recommend_steps_action"; }; +/** Hidden HTML comment prefix for bugbot findings (issue/PR comments). Format: */ +export declare const BUGBOT_MARKER_PREFIX = "gbf-bugbot"; +/** Max number of individual bugbot comments to create per issue/PR. Excess findings get one summary comment suggesting to review locally. */ +export declare const BUGBOT_MAX_COMMENTS = 20; +/** Minimum severity to publish (findings below this are dropped). Order: high > medium > low > info. 
*/ +export declare const BUGBOT_MIN_SEVERITY: 'info' | 'low' | 'medium' | 'high'; export declare const PROMPTS: {}; diff --git a/build/github_action/src/utils/content_utils.d.ts b/build/github_action/src/utils/content_utils.d.ts index ae98b2d9..4a1f6ff2 100644 --- a/build/github_action/src/utils/content_utils.d.ts +++ b/build/github_action/src/utils/content_utils.d.ts @@ -1,3 +1,8 @@ export declare const extractVersion: (pattern: string, text: string) => string | undefined; export declare const extractReleaseType: (pattern: string, text: string) => string | undefined; +/** + * Extracts changelog content from an issue body: from the given section heading (e.g. "Changelog" or "Hotfix Solution") + * up to but not including the "Additional Context" section. Used for release/hotfix deployment bodies. + */ +export declare const extractChangelogUpToAdditionalContext: (body: string | null | undefined, sectionTitle: string) => string; export declare const injectJsonAsMarkdownBlock: (title: string, json: object) => string; diff --git a/build/github_action/src/utils/opencode_server.d.ts b/build/github_action/src/utils/opencode_server.d.ts index 7cdb77a6..e4b58f26 100644 --- a/build/github_action/src/utils/opencode_server.d.ts +++ b/build/github_action/src/utils/opencode_server.d.ts @@ -1,6 +1,7 @@ /** * Managed OpenCode server lifecycle for GitHub Actions. * Starts "npx opencode-ai serve" and stops it when the action finishes. + * If no opencode.json exists in cwd, creates one with provider timeout 10 min and removes it on stop. 
*/ import { ChildProcess } from 'child_process'; export interface ManagedOpencodeServer { diff --git a/docs.json b/docs.json index dd919588..8803f8c7 100644 --- a/docs.json +++ b/docs.json @@ -46,14 +46,18 @@ "group": "Pull Requests", "tab": "pull-requests", "pages": [ - { "title": "Overview", "href": "/pull-requests", "icon": "book" } + { "title": "Overview", "href": "/pull-requests", "icon": "book" }, + { "title": "Configuration", "href": "/pull-requests/configuration", "icon": "gear" }, + { "title": "AI PR description", "href": "/pull-requests/ai-description", "icon": "file-text" } ] }, { "group": "Single Actions", "tab": "single-action", "pages": [ - { "title": "Overview", "href": "/single-actions", "icon": "play" } + { "title": "Overview", "href": "/single-actions", "icon": "play" }, + { "title": "Configuration", "href": "/single-actions/configuration", "icon": "gear" }, + { "title": "Workflow & CLI", "href": "/single-actions/workflow-and-cli", "icon": "terminal" } ] }, { diff --git a/docs/authentication.mdx b/docs/authentication.mdx index 2376f16d..f4bb58b9 100644 --- a/docs/authentication.mdx +++ b/docs/authentication.mdx @@ -7,6 +7,8 @@ Git Board Flow requires a fine-grained personal access token to perform certain Originally, the workflow also made use of the GITHUB_TOKEN for some basic tasks executed within the workflow's scope. However, to simplify the configuration and maintain a single unified bot, the use of this token has been removed, leaving only the necessary PAT. +**When the event actor is the same as the token user**: If the user who triggers the workflow (e.g. the person who opened the issue or pushed the branch) is the same as the account that owns the PAT, the action **only runs single actions** (if one is requested) or **skips** the normal issue/PR/push pipelines. This avoids the bot reacting to its own actions. Use a dedicated bot account (different from the actor) if you want full pipeline behavior on every event. 
+ Choose which account will be used to create your PAT. This account will act as your bot. @@ -88,9 +90,6 @@ Git Board Flow requires a fine-grained personal access token to perform certain issues: types: [opened, reopened, edited, labeled, unlabeled, assigned, unassigned] - concurrency: - group: ${{ github.workflow }}-${{ github.event.issue.number || github.ref }} - jobs: git-board-issues: name: Git Board - Issue diff --git a/docs/configuration.mdx b/docs/configuration.mdx index caa81647..10334018 100644 --- a/docs/configuration.mdx +++ b/docs/configuration.mdx @@ -5,142 +5,118 @@ description: Detailed configuration options for Git Board Flow # Configuration Options -Git Board Flow provides extensive configuration options to customize your workflow. Below you'll find detailed information about each configuration option available. - -## Authentication - -- `token`: GitHub token for repository operations (required) - -## AI (OpenCode) - -- `opencode-server-url`: OpenCode server URL for AI operations (default: "http://localhost:4096"). See [OpenCode (AI)](/opencode-integration). -- `opencode-model`: Model in provider/model format, e.g. 
`opencode/kimi-k2.5` or `anthropic/claude-3-5-sonnet` (default: "opencode/kimi-k2.5") -- `opencode-start-server`: If true, the action starts an OpenCode server at job start and stops it at job end; no separate install needed (default: "false") - -## Branch Management - -- `branch-management-launcher-label`: Label to trigger branch management (default: "branched") -- `branch-management-always`: Ignore launcher label requirement (default: "false") -- `branch-management-emoji`: Emoji for branched issues (default: "🧑‍💻") - -## Branch Types and Labels - -- `main-branch`: Main branch name (default: "master") -- `development-branch`: Development branch name (default: "develop") -- `feature-tree`: Feature branch prefix (default: "feature") -- `bugfix-tree`: Bugfix branch prefix (default: "bugfix") -- `hotfix-tree`: Hotfix branch prefix (default: "hotfix") -- `release-tree`: Release branch prefix (default: "release") -- `docs-tree`: Documentation branch prefix (default: "docs") -- `chore-tree`: Chore branch prefix (default: "chore") - -## Issue Labels - -- `feature-label`: Feature branch label (default: "feature") -- `bugfix-label`: Bugfix branch label (default: "bugfix") -- `hotfix-label`: Hotfix branch label (default: "hotfix") -- `release-label`: Release branch label (default: "release") -- `docs-label`: Documentation label (default: "docs") -- `documentation-label`: Alternative documentation label (default: "documentation") -- `chore-label`: Chore label (default: "chore") -- `maintenance-label`: Maintenance label (default: "maintenance") -- `bug-label`: Bug type label (default: "bug") -- `enhancement-label`: Enhancement type label (default: "enhancement") -- `question-label`: Question label (default: "question") -- `help-label`: Help request label (default: "help") -- `deploy-label`: Deploy action label (default: "deploy") -- `deployed-label`: Deployed status label (default: "deployed") - -## Size Labels and Thresholds - -### Size Labels -- `size-xxl-label`: XXL size 
label (default: "size: XXL") -- `size-xl-label`: XL size label (default: "size: XL") -- `size-l-label`: L size label (default: "size: L") -- `size-m-label`: M size label (default: "size: M") -- `size-s-label`: S size label (default: "size: S") -- `size-xs-label`: XS size label (default: "size: XS") - -### Size Thresholds -Each size has three threshold parameters: -- `size-{size}-threshold-lines`: Number of lines threshold -- `size-{size}-threshold-files`: Number of files threshold -- `size-{size}-threshold-commits`: Number of commits threshold - -Default thresholds: -- XXL: 1000 lines, 20 files, 10 commits -- XL: 500 lines, 10 files, 5 commits -- L: 250 lines, 5 files, 3 commits -- M: 100 lines, 3 files, 2 commits -- S: 50 lines, 2 files, 1 commit -- XS: 25 lines, 1 file, 1 commit - -## Project Integration - -- `project-ids`: Comma-separated list of GitHub Project IDs to link issues and PRs -- `project-column-issue-created`: Project column for new issues (default: "Todo") -- `project-column-pull-request-created`: Project column for new PRs (default: "In Progress") -- `project-column-issue-in-progress`: Project column for in-progress issues (default: "In Progress") -- `project-column-pull-request-in-progress`: Project column for in-progress PRs (default: "In Progress") -- `desired-assignees-count`: Number of assignees for issues/PRs (default: 1, max: 10) -- `desired-reviewers-count`: Number of reviewers for PRs (default: 1, max: 15) - -## Customization - -- `emoji-labeled-title`: Enable emoji titles based on labels (default: "true") -- `commit-prefix-transforms`: Comma-separated list of transforms for commit prefix from branch name (e.g. "replace-slash", "kebab-case"). See README for full list. 
-- `reopen-issue-on-push`: Reopen closed issues on new commits (default: "true") -- `merge-timeout`: Timeout for merge operations in seconds (default: 600) - -## Workflow Files - -- `release-workflow`: Release workflow filename (default: "release_workflow.yml") -- `hotfix-workflow`: Hotfix workflow filename (default: "hotfix_workflow.yml") - -## Single Actions - -- `single-action`: Single action to run (e.g. `check_progress_action`, `think_action`, `create_release`). See [Single Actions](/single-actions). -- `single-action-issue`: Issue number for actions that require one (e.g. `check_progress_action`, `deployed_action`) -- `single-action-version`: Version for `create_release` or `create_tag` -- `single-action-title`: Title for `create_release` -- `single-action-changelog`: Changelog body for `create_release` - -## Image Configuration - -### Image Display Settings -- `images-on-issue`: Enable images in issue comments (default: "true") -- `images-on-pull-request`: Enable images in PR comments (default: "true") -- `images-on-commit`: Enable images in commit comments (default: "true") - -### Issue Images -- `images-issue-automatic`: Image URLs for automatic action issue comments -- `images-issue-feature`: Image URLs for feature action issue comments -- `images-issue-bugfix`: Image URLs for bugfix action issue comments -- `images-issue-docs`: Image URLs for docs action issue comments -- `images-issue-chore`: Image URLs for chore action issue comments -- `images-issue-hotfix`: Image URLs for hotfix action issue comments -- `images-issue-release`: Image URLs for release action issue comments - -### Pull Request Images -- `images-pull-request-automatic`: Image URLs for automatic action PR comments -- `images-pull-request-feature`: Image URLs for feature action PR comments -- `images-pull-request-bugfix`: Image URLs for bugfix action PR comments -- `images-pull-request-docs`: Image URLs for docs action PR comments -- `images-pull-request-chore`: Image URLs for chore action 
PR comments -- `images-pull-request-hotfix`: Image URLs for hotfix action PR comments -- `images-pull-request-release`: Image URLs for release action PR comments - -### Commit Images -- `images-commit-automatic`: Image URLs for automatic action commit comments -- `images-commit-feature`: Image URLs for feature action commit comments -- `images-commit-bugfix`: Image URLs for bugfix action commit comments -- `images-commit-docs`: Image URLs for docs action commit comments -- `images-commit-chore`: Image URLs for chore action commit comments -- `images-commit-hotfix`: Image URLs for hotfix action commit comments -- `images-commit-release`: Image URLs for release action commit comments - -## AI Features - -- `ai-pull-request-description`: Enable AI-powered automatic updates for pull request descriptions (default: "false"). When enabled, the OpenCode Plan agent fills your repository's pull request template (`.github/pull_request_template.md`) using the issue description and the branch diff. See [Pull Requests → AI-generated PR description](/pull-requests#ai-generated-pr-description). -- `ai-ignore-files`: Comma-separated list of files to ignore for AI operations (e.g. progress detection, error detection; not used for PR description, where the agent computes the diff in the workspace). -- `ai-members-only`: Restrict AI features to only organization/project members (default: "true"); when true, AI PR description is skipped if the PR author is not a member. \ No newline at end of file +Git Board Flow provides extensive configuration options to customize your workflow. Use the tabs below to jump to the category you need. + + + + ## Authentication + + - `token`: GitHub token for repository operations (required) + + ## AI (OpenCode) + + - `opencode-server-url`: OpenCode server URL for AI operations (default: "http://localhost:4096"). See [OpenCode (AI)](/opencode-integration). + - `opencode-model`: Model in provider/model format, e.g. 
`opencode/kimi-k2.5-free` or `anthropic/claude-3-5-sonnet` (default: "opencode/kimi-k2.5-free") + - `opencode-start-server`: If true, the action starts an OpenCode server at job start and stops it at job end; no separate install needed (default: "true") + + ## AI Features + + - `ai-pull-request-description`: Enable AI-powered automatic updates for pull request descriptions (default: "true"). When enabled, the OpenCode Plan agent fills your repository's pull request template (`.github/pull_request_template.md`) using the issue description and the branch diff. See [Pull Requests → AI-generated PR description](/pull-requests/ai-description). + - `issues-locale`: Target locale for issue comments (default: "en-US"). When comments are in another language, OpenCode translates them to this locale. See [OpenCode → Comment translation](/opencode-integration#how-comment-translation-works). + - `pull-requests-locale`: Target locale for PR review comments (default: "en-US"). Same translation behavior as `issues-locale` but for PR comments. + - `ai-ignore-files`: Comma-separated list of paths to ignore for AI operations (e.g. progress detection, Bugbot; not used for PR description, where the agent computes the diff in the workspace). + - `bugbot-severity`: Minimum severity for Bugbot findings to report: `info`, `low`, `medium`, or `high` (default: `low`). Findings below this threshold are not posted on the issue or PR. + - `bugbot-comment-limit`: Maximum number of findings to publish as individual comments on the issue and PR (default: `20`). Extra findings are summarized in a single overflow comment. Clamped between 1 and 200. + - `ai-members-only`: Restrict AI features to only organization/project members (default: "false"); when true, AI PR description is skipped if the PR author is not a member. + - `ai-include-reasoning`: Include reasoning or chain-of-thought in AI responses when supported by the model (default: "true"). 
+ + + + ## Branch Management + + - `branch-management-launcher-label`: Label to trigger branch management (default: "branched") + - `branch-management-always`: Ignore launcher label requirement (default: "false") + - `branch-management-emoji`: Emoji for branched issues (default: "🧑‍💻") + + ## Branch Types + + - `main-branch`: Main branch name (default: "master") + - `development-branch`: Development branch name (default: "develop") + - `feature-tree`: Feature branch prefix (default: "feature") + - `bugfix-tree`: Bugfix branch prefix (default: "bugfix") + - `hotfix-tree`: Hotfix branch prefix (default: "hotfix") + - `release-tree`: Release branch prefix (default: "release") + - `docs-tree`: Documentation branch prefix (default: "docs") + - `chore-tree`: Chore branch prefix (default: "chore") + + ## Issue Labels + + - `feature-label`: Feature branch label (default: "feature") + - `bugfix-label`: Bugfix branch label (default: "bugfix") + - `hotfix-label`: Hotfix branch label (default: "hotfix") + - `release-label`: Release branch label (default: "release") + - `docs-label`: Documentation label (default: "docs") + - `documentation-label`: Alternative documentation label (default: "documentation") + - `chore-label`: Chore label (default: "chore") + - `maintenance-label`: Maintenance label (default: "maintenance") + - `bug-label`: Bug type label (default: "bug") + - `enhancement-label`: Enhancement type label (default: "enhancement") + - `question-label`: Question label (default: "question") + - `help-label`: Help request label (default: "help") + - `deploy-label`: Deploy action label (default: "deploy") + - `deployed-label`: Deployed status label (default: "deployed") + - `priority-high-label` through `priority-none-label`: Priority labels (default: "priority: high", etc.) + + ## Size Labels and Thresholds + + **Size labels:** `size-xxl-label`, `size-xl-label`, `size-l-label`, `size-m-label`, `size-s-label`, `size-xs-label` (defaults: "size: XXL", etc.) 
+ + **Thresholds** (each size has three): `size-{size}-threshold-lines`, `size-{size}-threshold-files`, `size-{size}-threshold-commits` + + Default thresholds: XXL 1000/20/10, XL 500/10/5, L 250/5/3, M 100/3/2, S 50/2/1, XS 25/1/1 (lines/files/commits). + + + + ## Project Integration + + - `project-ids`: Comma-separated list of GitHub Project IDs to link issues and PRs + - `project-column-issue-created`: Project column for new issues (default: "Todo") + - `project-column-pull-request-created`: Project column for new PRs (default: "In Progress") + - `project-column-issue-in-progress`: Project column for in-progress issues (default: "In Progress") + - `project-column-pull-request-in-progress`: Project column for in-progress PRs (default: "In Progress") + - `desired-assignees-count`: Number of assignees for issues/PRs (default: 1, max: 10) + - `desired-reviewers-count`: Number of reviewers for PRs (default: 1, max: 15) + + ## Customization + + - `emoji-labeled-title`: Enable emoji titles based on labels (default: "true") + - `commit-prefix-transforms`: Comma-separated list of transforms for commit prefix from branch name (e.g. "replace-slash", "kebab-case"). See README for full list. + - `reopen-issue-on-push`: Reopen closed issues on new commits (default: "true") + - `merge-timeout`: Timeout for merge operations in seconds (default: 600) + + ## Workflow Files + + - `release-workflow`: Release workflow filename (default: "release_workflow.yml") + - `hotfix-workflow`: Hotfix workflow filename (default: "hotfix_workflow.yml") + + + + ## Single Actions + + - `single-action`: Single action to run (e.g. `check_progress_action`, `think_action`, `create_release`). See [Single Actions](/single-actions). + - `single-action-issue`: Issue number for actions that require one (e.g. 
`check_progress_action`, `deployed_action`) + - `single-action-version`: Version for `create_release` or `create_tag` + - `single-action-title`: Title for `create_release` + - `single-action-changelog`: Changelog body for `create_release` + + ## Image Configuration + + **Display:** `images-on-issue`, `images-on-pull-request`, `images-on-commit` (default: "true") + + **Issue images:** `images-issue-automatic`, `images-issue-feature`, `images-issue-bugfix`, `images-issue-docs`, `images-issue-chore`, `images-issue-hotfix`, `images-issue-release` + + **Pull request images:** `images-pull-request-automatic`, `images-pull-request-feature`, `images-pull-request-bugfix`, `images-pull-request-docs`, `images-pull-request-chore`, `images-pull-request-hotfix`, `images-pull-request-release` + + **Commit images:** `images-commit-automatic`, `images-commit-feature`, `images-commit-bugfix`, `images-commit-docs`, `images-commit-chore`, `images-commit-hotfix`, `images-commit-release` + + diff --git a/docs/features.mdx b/docs/features.mdx index f3600ea2..991f5db8 100644 --- a/docs/features.mdx +++ b/docs/features.mdx @@ -7,6 +7,18 @@ description: Complete reference of what the Git Board Flow GitHub Action does This page describes **every function** the Git Board Flow GitHub Action provides: workflow-triggered behavior (issues, pull requests, pushes) and **single actions** you can run on demand. + + + When you open, edit, or label issues: branch creation, project linking, assignees, issue type, size labels, and comments. + + + When PRs are opened or updated: link to issue, project column, reviewers, AI description, size/progress sync. + + + On every push: commit notifications on the issue, prefix check, reopen option, size & progress labels (with OpenCode). Details below. 
+ + + --- ## Workflow-triggered behavior @@ -39,7 +51,7 @@ When the workflow runs on `pull_request` (opened, edited, etc.): | **Project linking** | Adds the PR to the configured GitHub Projects and moves it to the configured column. | | **Reviewers** | Assigns up to `desired-reviewers-count` reviewers. | | **Priority & size** | Applies priority and size checks (labels and thresholds). | -| **AI PR description** | If `ai-pull-request-description` is true and OpenCode is configured, generates or updates the PR description by filling the repo's `.github/pull_request_template.md` from the issue and the branch diff (OpenCode Plan agent). See [Pull Requests → AI-generated PR description](/pull-requests#ai-generated-pr-description). | +| **AI PR description** | If `ai-pull-request-description` is true and OpenCode is configured, generates or updates the PR description by filling the repo's `.github/pull_request_template.md` from the issue and the branch diff (OpenCode Plan agent). See [Pull Requests → AI-generated PR description](/pull-requests/ai-description). | | **Comments & images** | Posts comments with optional images per branch type. | ### 3. Push events (`on: push`) @@ -52,6 +64,7 @@ When the workflow runs on `push` (e.g. to any branch): | **Commit prefix check** | Warns if commit messages do not follow the prefix derived from the branch name (using `commit-prefix-transforms`). | | **Reopen issue** | If `reopen-issue-on-push` is true, reopens the issue when new commits are pushed to its branch. | | **Size & progress** | Computes size (XS–XXL) and progress (0–100%) from the branch diff; updates the **issue** and any **open PRs** for that branch with the same labels. Requires OpenCode for progress. No separate workflow is needed. 
| +| **Bugbot (potential problems)** | OpenCode analyzes the branch vs base and reports findings as **comments on the issue** and **review comments on open PRs**; updates issue comments when findings are resolved and **marks PR review threads as resolved** when applicable. Configurable via `bugbot-severity` and `ai-ignore-files`. See [Issues](/issues#bugbot-potential-problems) and [Pull Requests](/pull-requests#bugbot-potential-problems). | | **Comments & images** | Posts commit summary comments with optional images. | --- @@ -63,7 +76,7 @@ When you set `single-action` (and, when required, `single-action-issue`, `single | Single action value | Inputs required | Description | |--------------------|----------------|-------------| | **`check_progress_action`** | `single-action-issue` | Runs progress check on demand (e.g. without pushing). Progress is normally updated automatically on every push; use this to re-run the check or when no push workflow is configured. | -| **`detect_errors_action`** | `single-action-issue` | Uses OpenCode Plan to detect potential errors in the branch vs base; posts a comment on the issue. | +| **`detect_potential_problems_action`** | `single-action-issue` | Bugbot: OpenCode analyzes the branch vs base; reports findings as comments on the issue and as PR review comments; updates issue comments and marks PR review threads as resolved when findings are fixed. | | **`recommend_steps_action`** | `single-action-issue` | Uses OpenCode Plan to recommend implementation steps from the issue description; posts a comment on the issue. | | **`think_action`** | — | Uses OpenCode Plan for deep code analysis and change proposals (reasoning over the codebase). No issue required. | | **`initial_setup`** | — | Performs initial setup steps (e.g. for repo or project). No issue required. | @@ -83,16 +96,58 @@ All AI features go through **OpenCode** (one server URL + model). 
You can use 75 | Feature | Where it runs | Description | |--------|----------------|-------------| | **Check progress** | Push (commit) pipeline; optional single action `check_progress_action` / CLI `check-progress` | On every push, OpenCode Plan compares issue vs branch diff and updates the progress label on the issue and on any open PRs for that branch. You can also run it on demand via single action or CLI. | -| **Think / reasoning** | Single action `think_action` | Deep code analysis and change proposals (OpenCode Plan agent). | +| **Bugbot (potential problems)** | Push (commit) pipeline; optional single action `detect_potential_problems_action` / CLI `detect-potential-problems` | Analyzes branch vs base and posts findings as **comments on the issue** and **review comments on open PRs**; updates issue comments and marks PR review threads as resolved when findings are fixed. Configurable: `bugbot-severity`, `ai-ignore-files`. | +| **Think / reasoning** | Issue/PR comment pipeline; single action `think_action` | Deep code analysis and change proposals (OpenCode Plan agent). On comments: answers when mentioned (or on any comment for question/help issues). | +| **Comment translation** | Issue comment; PR review comment | Translates comments to the configured locale (`issues-locale`, `pull-requests-locale`) when they are written in another language. | | **AI PR description** | Pull request pipeline | Fills the repo's `.github/pull_request_template.md` from issue and branch diff (OpenCode Plan agent). | | **Copilot** | CLI `giik copilot` | Code analysis and file edits via OpenCode Build agent. | -| **Error detection** | Single action / CLI | Detects potential bugs and issues in the codebase (OpenCode Plan agent). | | **Recommend steps** | Single action / CLI | Suggests implementation steps from the issue description (OpenCode Plan agent). 
| Configuration: `opencode-server-url`, `opencode-model`, and optionally `opencode-start-server` (action starts and stops OpenCode in the job). See [OpenCode (AI)](/opencode-integration). --- +## Workflow concurrency and sequential execution + + + **Sequential runs:** When a new run starts, the action **waits** for any previous run of the **same workflow name** to finish. Runs of the same workflow (e.g. "Git Board Flow - Issue") execute one after another instead of in parallel or being cancelled — something GitHub does not offer natively. + + +GitHub's native [concurrency](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency) lets you cancel in-progress runs when a new one starts (`cancel-in-progress: true`). Git Board Flow adds **sequential execution** for the same workflow: when a new run starts, the action waits for any previous run of the same workflow name to finish before doing its work. + +### How it works + +1. At the start of each run (except welcome/single-action-only flows), the action calls the GitHub API to list workflow runs for the repo. +2. It filters runs that have the **same workflow name** as the current run, a **lower run ID** (i.e. started earlier), and status `in_progress` or `queued`. +3. If any such run exists, the action waits 2 seconds and checks again, up to a long timeout (~4000 seconds). +4. When no previous run of the same workflow is active, the action continues. + +So you get a **per-workflow queue**: multiple triggers for the same workflow (e.g. many issues labeled at once) will run sequentially. 
+ +### Example + +```yaml +name: Git Board Flow - Issue + +on: + issues: + types: [opened, edited, labeled, unlabeled] + +jobs: + git-board-flow-issues: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: landamessenger/git-board-flow@master + with: + token: ${{ secrets.PAT }} + project-ids: '2,3' +``` + +If you prefer to cancel the previous run when a new one is triggered (e.g. for PRs, so only the latest run matters), use `cancel-in-progress: true` and the same `concurrency.group` per PR or issue. + +--- + ## Other capabilities - **Commit prefix transforms**: `commit-prefix-transforms` defines how branch names are turned into commit prefixes (e.g. `replace-slash`, `kebab-case`). Used for validation and comments. diff --git a/docs/how-to-use.mdx b/docs/how-to-use.mdx index e9691743..37a0759a 100644 --- a/docs/how-to-use.mdx +++ b/docs/how-to-use.mdx @@ -29,20 +29,22 @@ Follow these steps to set up Git Board Flow in your repository: - Create issue templates to standardize task creation: - - Define templates for different types of issues (features, bugs, docs, releases, hotfixes etc.) - - Include necessary sections like Description, Acceptance Criteria, and Technical Details - - Include predefined labels on these templates. - - The action will use these templates to automatically create appropriately named branches and set up tracking. + Create issue templates to standardize task creation (in `.github/ISSUE_TEMPLATE/`): + - **Feature**: `feature_request.yml` — use labels such as `enhancement`, `feature`; add `branched` when the user should get a branch, or set `branch-management-always: true`. + - **Bugfix**: `bug_report.yml` — use labels `bug`, `bugfix`; add `branched` if needed. + - **Docs**: `doc_update.yml` — use labels `documentation`, `docs`. + - **Chore / maintenance**: `chore_task.yml` — use labels `chore`, `maintenance`. + - **Hotfix**: `hotfix.yml` — use labels `hotfix`, `branched` (branch is created from main at latest tag). 
+ - **Release**: `release.yml` — use labels `release`, `branched` (branch is created from develop). + - **Help**: `help_request.yml` — for support requests (no branch). + Include the labels that match the [labels table](/issues#labels-by-issue-type-and-flow) so the action creates the correct branches and applies the right workflow. - Add the GitHub Action to your repository by creating a workflow file per section: - 1. Configure the behavior on each push on `.github/workflows/gbf_commit.yml` - 2. Configure the issues behavior on `.github/workflows/gbf_issue.yml` - 3. Pull Request configuration must be on `.github/workflows/gbf_pr.yml` - - These workflows will then automatically manage everything under the hood, creating branches, updating project boards, and maintaining the relationship between issues and code. + Add the GitHub Action to your repository by creating three workflow files (you can choose any names; common choices are `gbf_commit.yml`, `gbf_issue.yml`, `gbf_pr.yml` or names like "Git Board Flow - Commit", etc.): + 1. **Push (commit)** — runs on `push` to any branch: notifies issues of new commits, updates size and progress labels. Create e.g. `.github/workflows/gbf_commit.yml`. + 2. **Issues** — runs on `issues` (opened, edited, labeled, unlabeled): creates branches, links to projects, assignees. Create e.g. `.github/workflows/gbf_issue.yml`. + 3. **Pull requests** — runs on `pull_request`: links PR to issue, project, reviewers. Create e.g. `.github/workflows/gbf_pr.yml`. + See the [README](https://github.com/landamessenger/git-board-flow#quick-start) for full YAML examples. These workflows will then automatically manage branches, project boards, and the relationship between issues and code. \ No newline at end of file diff --git a/docs/index.mdx b/docs/index.mdx index 6e7cfb04..4b659cea 100644 --- a/docs/index.mdx +++ b/docs/index.mdx @@ -13,6 +13,27 @@ This repository itself utilizes this GitHub Action for task management. 
You can Experience seamless project management, automated branch handling, and enhanced team collaboration. Start optimizing your development process today and take your Git workflow to the next level! ✨ + + + Step-by-step setup for issues, pull requests, and push workflows. + + + Complete reference: workflow triggers, single actions, and AI (OpenCode). + + + PAT setup, permissions, and token best practices. + + + All inputs: branches, labels, projects, images, and more. + + + Run on-demand: check progress, think, create release, deployed, etc. + + + Common issues and solutions. + + + ## Motivation I've always felt that while GitHub is an outstanding platform for code management, its issue tracking and project boards — despite their powerful capabilities — are often underused or overlooked. Many teams and individual developers tend to focus primarily on version control and pull requests, missing out on the full potential of these features for organizing and streamlining their workflows. 
@@ -29,12 +50,14 @@ For a complete list of what the action does (workflow triggers and single action - Automated issue tracking and monitoring - Seamless integration with GitHub Projects - Issue assignment and label management +- **Bugbot**: AI-reported potential problems as comments on the issue, updated when findings are resolved ### Pull Request Features - Automatic PR linking to issues - Branch status tracking - PR review process automation - Commit monitoring and updates +- **Bugbot**: Potential problems as PR review comments, with threads marked as resolved when fixed - Efficient PR lifecycle management ### Project Integration @@ -46,6 +69,6 @@ For a complete list of what the action does (workflow triggers and single action ### Performance - Fast execution -- Concurrent workflow handling +- **Sequential workflow handling** — The action waits for previous runs of the same workflow to finish, so you can avoid cancellations and run workflows one after another (see [Features → Workflow concurrency](/features#workflow-concurrency-and-sequential-execution)). - Efficient resource utilization - Reliable automation \ No newline at end of file diff --git a/docs/integration-testing-agent-workflows.md b/docs/integration-testing-agent-workflows.md deleted file mode 100644 index 212e6c3b..00000000 --- a/docs/integration-testing-agent-workflows.md +++ /dev/null @@ -1,91 +0,0 @@ -# Final integration testing and validation of agent workflows within GitHub Actions context - -This document describes how to run **integration testing and validation** of the OpenCode-based agent workflows when the action runs **inside GitHub Actions** (real events: push, pull_request, issue, issue_comment, pull_request_review_comment). 
- -## Overview of agent workflows - -| Workflow file | Trigger | Agent use | OpenCode usage | -|---------------|---------|-----------|----------------| -| **gbf_commit.yml** | `push` (all branches except master/develop) | Progress detection | `CheckProgressUseCase` → `askAgent(plan)` | -| **gbf_pull_request.yml** | `pull_request` (opened, synchronize, etc.) | PR description (optional) | `UpdatePullRequestDescriptionUseCase` → `askAgent(plan)` when `ai-pull-request-description: true` | -| **gbf_issue.yml** | `issues` (opened, edited, labeled, …) | No agent in core flow | — | -| **gbf_issue_comment.yml** | `issue_comment` (created, edited) | Think + comment language/translation | `ThinkUseCase` → `ask()`; `CheckIssueCommentLanguageUseCase` → `ask()` + `askAgent(plan)` | -| **gbf_pull_request_review_comment.yml** | `pull_request_review_comment` | Comment language/translation | `CheckPullRequestCommentLanguageUseCase` → `ask()` + `askAgent(plan)` | - -Single actions (`check-progress`, `detect-errors`, `recommend-steps`) also use `askAgent(plan)` when run from the action (e.g. via `single-action` input or CLI). - -## Prerequisites for agent workflows in GitHub Actions - -1. **OpenCode server reachable from the runner** - - **Option A – Managed server in the job**: Set `opencode-start-server: true` in the action. The action will start `npx opencode-ai serve` in the job and stop it when the job ends. Provide provider API keys (e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`) as repository or organization secrets and pass them as `env` to the job. - - **Option B – External server**: Set `opencode-server-url` (e.g. from a secret like `secrets.OPENCODE_SERVER_URL`) so the runner can reach your OpenCode instance. - -2. **Model** - - Set `opencode-model` (e.g. via `vars.OPENCODE_MODEL`) to a valid provider/model (e.g. `opencode/kimi-k2.5-free`, `anthropic/claude-3-5-sonnet`). - -3. **Token and project (as for non-AI flows)** - - `token`: PAT with repo and project permissions. 
- - `project-ids`: If you use project automation. - -4. **Feature flags (where applicable)** - - Commit (progress): No extra flag; progress runs on every push when AI is configured. - - Pull request description: Set `ai-pull-request-description: true` in the workflow that uses the action for PRs. - -## Validation checklist (per workflow) - -Use this checklist to validate each agent workflow **in a real GitHub repo** where the action is used. - -### 1. Commit workflow (progress detection) - -- [ ] **Config**: Workflow passes `opencode-model` (and either `opencode-server-url` or `opencode-start-server: true` with API keys in env). -- [ ] **Trigger**: Push to a branch that matches an issue (e.g. `feature/123-title` for issue `#123`). -- [ ] **Expected**: Job runs; issue and any open PRs for that branch get a progress label (e.g. `25%`, `50%`) and a comment with summary/reasoning (if enabled). -- [ ] **Failure cases**: Missing AI config → action reports “Missing required AI configuration” and does not call OpenCode. No branch for issue → clear error in logs. - -### 2. Pull request workflow (AI description) - -- [ ] **Config**: `ai-pull-request-description: true`, plus `opencode-model` and OpenCode server (URL or start-server). -- [ ] **Trigger**: Open a PR (or push to an existing PR) whose head branch is linked to an issue. -- [ ] **Expected**: PR description is generated or updated by the Plan agent. -- [ ] **Failure cases**: Missing AI config → description step is skipped or fails with a clear message. - -### 3. Issue comment workflow (think + language) - -- [ ] **Config**: `opencode-model` and OpenCode server (URL or start-server). No extra flag for think/language. -- [ ] **Trigger**: Create or edit a comment on an issue. -- [ ] **Expected**: Think use case and/or language/translation use case run; comments may be updated (e.g. translation) when conditions are met. -- [ ] **Failure cases**: Missing AI config → those steps fail gracefully (error result, no crash). 
- -### 4. Pull request review comment workflow (language) - -- [ ] **Config**: Same as issue comment (OpenCode URL/model or start-server). -- [ ] **Trigger**: Create or edit a review comment on a PR. -- [ ] **Expected**: Language/translation logic runs; comment may be updated when applicable. -- [ ] **Failure cases**: Same as issue comment. - -## How to run final integration tests (manual) - -1. **Use a test repository** where Git Board Flow is already configured (workflows present, PAT and optional OpenCode URL/model in vars/secrets). -2. **Commit workflow** - - Create an issue (e.g. #123), then a branch `feature/123-test-progress`, push commits, and push to that branch. Check the “Git Board - Commit” run and the issue/PR for progress labels and comments. -3. **Pull request workflow** - - With `ai-pull-request-description: true`, open a PR from a branch linked to an issue. Confirm the PR description is filled/updated by the action. -4. **Issue comment workflow** - - Add or edit a comment on an issue and confirm the “Git Board - Issue Comment” job runs and that think/language steps behave as expected (e.g. no crashes, optional translation). -5. **Pull request review comment workflow** - - Add or edit a review comment on a PR and confirm the “Git Board - Pull Request Review Comment” job runs and language steps behave as expected. - -## Optional: automated smoke validation in CI - -The repository includes a workflow that runs the action in a "smoke" mode to ensure it starts correctly in the GitHub Actions context. This does **not** replace the manual checks above (which require real issues, branches, and PRs). See [.github/workflows/validate_agent_workflows.yml](../.github/workflows/validate_agent_workflows.yml): - -- **On push to `test/agent-smoke` or `test/agent-validation`**: Runs the action with `opencode-start-server: true` (and optional API keys from secrets). The run may log "Issue number not found. Skipping." 
if the branch is not linked to an issue—that is acceptable for a smoke run. -- **On workflow_dispatch**: Runs build, tests, and lint only (no action run with a real event). Use this to validate the codebase without triggering agent flows. - -## Success criteria (summary) - -- All workflows that use the agent have `opencode-model` and a reachable OpenCode server (URL or start-server). -- Commit runs update progress labels and post expected comments when a branch exists for the issue. -- PR runs with `ai-pull-request-description: true` update PR descriptions. -- Issue comment and PR review comment runs execute think/language steps without crashing; errors are reported as results, not unhandled exceptions. -- Logs clearly indicate “Missing required AI configuration” when OpenCode is not configured, and agent steps are skipped or return structured errors instead of failing the job unexpectedly. diff --git a/docs/issues/configuration.mdx b/docs/issues/configuration.mdx index b98fa430..0d5fc2c9 100644 --- a/docs/issues/configuration.mdx +++ b/docs/issues/configuration.mdx @@ -39,12 +39,12 @@ The following parameters can be configured in the workflow: - `feature-label`: Label to manage feature branches (default: "feature") #### Size Labels -- `size-xxl-label`: Label to indicate a task of size XXL (default: "size xxl") -- `size-xl-label`: Label to indicate a task of size XL (default: "size xl") -- `size-l-label`: Label to indicate a task of size L (default: "size l") -- `size-m-label`: Label to indicate a task of size M (default: "size m") -- `size-s-label`: Label to indicate a task of size S (default: "size s") -- `size-xs-label`: Label to indicate a task of size XS (default: "size xs") +- `size-xxl-label`: Label to indicate a task of size XXL (default: "size: XXL") +- `size-xl-label`: Label to indicate a task of size XL (default: "size: XL") +- `size-l-label`: Label to indicate a task of size L (default: "size: L") +- `size-m-label`: Label to indicate a task of size M 
(default: "size: M") +- `size-s-label`: Label to indicate a task of size S (default: "size: S") +- `size-xs-label`: Label to indicate a task of size XS (default: "size: XS") #### Size Thresholds ##### XXL Thresholds @@ -126,11 +126,11 @@ The following parameters can be configured in the workflow: #### AI (OpenCode) - `opencode-server-url`: OpenCode server URL (default: "http://localhost:4096"). See [OpenCode (AI)](/opencode-integration). -- `opencode-model`: Model in provider/model format (default: "opencode/kimi-k2.5") +- `opencode-model`: Model in provider/model format (default: "opencode/kimi-k2.5-free") - `opencode-start-server`: If true, the action starts and stops OpenCode in the job (default: "false") - `ai-pull-request-description`: Enable AI-powered PR descriptions (default: "false") - `ai-ignore-files`: Comma-separated list of files to ignore for AI operations -- `ai-members-only`: Restrict AI features to organization/project members (default: "true") +- `ai-members-only`: Restrict AI features to organization/project members (default: "false") #### Workflow Configuration - `release-workflow`: Release workflow for running release deploys (default: "release_workflow.yml") diff --git a/docs/issues/index.mdx b/docs/issues/index.mdx index 68a8ad1e..83c85635 100644 --- a/docs/issues/index.mdx +++ b/docs/issues/index.mdx @@ -3,7 +3,26 @@ title: Issues description: Boosted and connected issues. --- -Git Board Flow automates issue tracking, ensuring smooth branch management and seamless project integration. +Git Board Flow automates issue tracking, ensuring smooth branch management and seamless project integration. + +## Labels by issue type and flow + +Use these labels so the action creates the right branches and applies the right behavior. You can configure all label names via [Issues Configuration](/issues/configuration). 
+ +| Flow | Required / optional labels | Branch created from | Notes | +|------|----------------------------|---------------------|--------| +| **Feature** | `feature`; optionally `branched` (or set `branch-management-always: true`) | `development-branch` | New functionality. | +| **Bugfix** | `bugfix`; optionally `branched` (or `branch-management-always: true`) | `development-branch` | Bug fixes on develop. | +| **Docs** | `docs` or `documentation`; optionally `branched` (or `branch-management-always: true`) | `development-branch` | Documentation tasks. | +| **Chore** | `chore` or `maintenance`; optionally `branched` (or `branch-management-always: true`) | `development-branch` | Maintenance, refactors, dependencies. | +| **Hotfix** | `hotfix` (branch is created without needing `branched`; templates often include `branched` too) | `main-branch` (from latest tag) | Urgent production fix. Add `deploy` to trigger deploy workflow. Only org/repo members can create hotfix issues (others are closed). | +| **Release** | `release` (branch is created without needing `branched`; templates often include `branched` too) | `development-branch` | New version release. Add `deploy` to trigger release workflow. Only org/repo members can create release issues (others are closed). | +| **Deploy** | `deploy` on the issue | — | Triggers the workflow defined by `release-workflow` or `hotfix-workflow`. | +| **Deployed** | `deployed` (added by action after deploy success) | — | Marks the issue as deployed; used for auto-close and state updates. | + +Other labels: `bug` / `enhancement` (issue type), `question` / `help` (no branch), `priority: high` / `medium` / `low`, `size: XS` … `size: XXL`. See [Configuration](/configuration). 
+ +For **step-by-step flows** (how branches are created, naming, source branch, deploy trigger, and templates), see the issue type pages: [Feature](/issues/type/feature), [Bugfix](/issues/type/bugfix), [Docs](/issues/type/docs), [Chore](/issues/type/chore), [Hotfix](/issues/type/hotfix), [Release](/issues/type/release). To enable the GitHub Action for issues, create a workflow with the following configuration: @@ -14,17 +33,15 @@ on: issues: types: [opened, reopened, edited, labeled, unlabeled, assigned, unassigned] -concurrency: - group: ${{ github.workflow }}-${{ github.event.issue.number || github.ref }} - jobs: git-board-issues: name: Git Board - Issue runs-on: ubuntu-latest steps: - - uses: landamessenger/git-board-flow@v1 + - uses: actions/checkout@v4 + - uses: landamessenger/git-board-flow@master with: - project-ids: 1,2 + project-ids: '1,2' token: ${{ secrets.PAT }} ``` @@ -126,6 +143,10 @@ Many developers are familiar with the Git-Flow methodology, but that doesn’t p Issues take time to be resolved, and interest in their progress increases. Therefore, any changes in the branches created by the issue will be notified as comments, providing real-time feedback on the issue's progress. +### Bugbot (potential problems) {#bugbot-potential-problems} + +When the **push** workflow runs (or you run the single action `detect_potential_problems_action` with `single-action-issue`), OpenCode analyzes the branch vs the base and reports potential problems (bugs, risks, improvements) as **comments on the issue**. Each finding appears as a comment with title, severity, and optional file/line. If a previously reported finding is later fixed, the action **updates** that comment (e.g. marks it as resolved) so the issue stays in sync. Findings are also posted as **review comments on open PRs** for the same branch; see [Pull Requests → Bugbot](/pull-requests#bugbot-potential-problems). 
You can set a minimum severity with `bugbot-severity` and exclude paths with `ai-ignore-files`; see [Configuration](/configuration). + ### Auto-Closure Forget about closing issues when development is complete, Git-Board-Flow will automatically close them once the branches created by the issue are successfully merged. diff --git a/docs/issues/type/hotfix.mdx b/docs/issues/type/hotfix.mdx index 186d077c..78e1e21a 100644 --- a/docs/issues/type/hotfix.mdx +++ b/docs/issues/type/hotfix.mdx @@ -91,10 +91,6 @@ This workflow ensures that critical fixes reach production quickly while maintai required: true default: '-1' - concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - jobs: deploy: name: Git Board - Issue diff --git a/docs/issues/type/release.mdx b/docs/issues/type/release.mdx index 316d7a65..94b6f330 100644 --- a/docs/issues/type/release.mdx +++ b/docs/issues/type/release.mdx @@ -91,10 +91,6 @@ This workflow ensures that releases are properly planned, tested, and deployed w required: true default: '-1' - concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - jobs: deploy: name: Git Board - Issue @@ -119,14 +115,14 @@ This workflow ensures that releases are properly planned, tested, and deployed w ## Example -For a hotfix issue #123 fixing a critical login bug: +For a release issue #456 releasing version 1.3.0: -1. Hotfix branch created: `hotfix/1.2.4` -2. Bugfix branch created: `bugfix/123-fix-critical-login` -3. Fix implemented and tested on bugfix branch -4. PR merged into `hotfix/1.2.4` -5. Version 1.2.4 deployed to production -6. Changes merged to `master` and `develop` +1. Release branch created: `release/1.3.0` +2. Feature branch created: `feature/456-finalize-changelog` +3. Changes applied and tested on feature branch +4. PR merged into `release/1.3.0` +5. 
Version 1.3.0 deployed to production (add `deploy` label to the issue) +6. Release branch merged to `master` (with tag) and back into `develop` ## Label diff --git a/docs/opencode-integration.mdx b/docs/opencode-integration.mdx index a8939417..c3973412 100644 --- a/docs/opencode-integration.mdx +++ b/docs/opencode-integration.mdx @@ -16,8 +16,126 @@ Git Board Flow uses **OpenCode** for all AI-backed features: code analysis, prog ## Requirements 1. **OpenCode server** must be running and reachable (e.g. `http://localhost:4096` or your deployed URL). -2. **Model** in `provider/model` format (e.g. `opencode/kimi-k2.5`, `anthropic/claude-3-5-sonnet`). -3. **API keys** are configured on the OpenCode server (not in this action). Use OpenCode's auth/config to add provider keys. +2. **Model** in `provider/model` format (e.g. `opencode/kimi-k2.5-free`, `anthropic/claude-3-5-sonnet`). +3. **API keys** are configured on the OpenCode server (not in this action). OpenCode reads them from **environment variables** (and optionally from `~/.local/share/opencode/auth.json` if you use the `/connect` command in the TUI). When the action starts the server with `opencode-start-server: true`, it passes the job's `env` to the OpenCode process, so any provider key you set in the workflow is available to OpenCode. + +### How OpenCode expects provider credentials + +OpenCode reads provider API keys and options from **environment variables** (and optionally from `~/.local/share/opencode/auth.json` when using the `/connect` command in the TUI). + + + **In GitHub Actions with `opencode-start-server: true`:** the action starts a **headless** OpenCode server (`opencode serve`). There is no TUI and **`/connect` is not available** during the run. Credentials must be provided **only via environment variables** in the job's `env` (e.g. secrets). The action passes the job's `env` to the OpenCode process so any variable you set in the workflow is available to OpenCode. 
+ + +You can also reference env vars in an `opencode.json` config via `{env:VAR_NAME}` (see [OpenCode Config – Variables](https://opencode.ai/docs/config#env-vars)). + + + Some providers are typically configured in OpenCode via the interactive `/connect` command (TUI). If you need those in CI and cannot pass credentials by env: + + 1. **Self-hosted runner with OpenCode running locally** – Run OpenCode on a machine (or container) that you control, use the TUI there to run `/connect` and add your providers. Point the action at that server with `opencode-server-url` (and leave `opencode-start-server: false`). Use a [self-hosted GitHub Actions runner](https://docs.github.com/en/actions/using-github-hosted-runners/using-self-hosted-runners) on the same network so the job can reach that OpenCode instance. + 2. **Another GitHub Action that connects to those providers** – Use a different action or workflow that starts or configures an OpenCode-compatible server (or gateway) with access to the providers you need, then set `opencode-server-url` in this action to that service. + + In both cases you use **your own** OpenCode server (or a server set up by another action) instead of `opencode-start-server: true`, so credentials are managed outside this action. + + +#### Provider credentials reference + +The following table lists the environment variables OpenCode uses for each provider, as documented in [OpenCode Providers](https://opencode.ai/docs/providers). Set the ones required by your chosen provider in the job's `env` (e.g. GitHub Actions `env:` or secrets). 
+ +**Single API-key providers** + +| Provider | Environment variable | Notes | +|----------|----------------------|--------| +| OpenAI | `OPENAI_API_KEY` | | +| Anthropic | `ANTHROPIC_API_KEY` | | +| OpenRouter | `OPENROUTER_API_KEY` | | +| OpenCode Zen | (via `/connect` or API key in TUI) | [opencode.ai/auth](https://opencode.ai/auth) | +| Groq | `GROQ_API_KEY` | | +| DeepSeek | (via `/connect`; store key in auth or config) | | +| 302.AI | (via `/connect`) | | +| Baseten | (via `/connect`) | | +| Cerebras | (via `/connect`) | | +| Cloudflare AI Gateway | See multi-var below | | +| Cortecs | (via `/connect`) | | +| Deep Infra | (via `/connect`) | | +| Fireworks AI | (via `/connect`) | | +| Helicone | (via `/connect`) | | +| Hugging Face | (via `/connect`) | | +| IO.NET | (via `/connect`) | | +| Moonshot AI | (via `/connect`) | | +| MiniMax | (via `/connect`) | | +| Nebius Token Factory | (via `/connect`) | | +| Ollama Cloud | (via `/connect`) | | + +Many of the “via `/connect`” providers also accept an API key from config using `{env:PROVIDER_API_KEY}` in `opencode.json`; the exact env name may follow the provider’s SDK (e.g. `FIREWORKS_API_KEY`). For CI with `opencode-start-server: true`, prefer setting the key in the job's `env` and, if needed, defining the provider in `opencode.json` with `"apiKey": "{env:YOUR_SECRET_ENV}"`. If a provider cannot be configured via env and you need `/connect`, see [If you need `/connect` or providers not exposed via env](#if-you-need-connect-or-providers-not-exposed-via-env) above. + +**Multi-variable / special auth** + +| Provider | Environment variables | Notes | +|----------|------------------------|--------| +| **Amazon Bedrock** | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`
or `AWS_PROFILE`
or `AWS_BEARER_TOKEN_BEDROCK`
optional: `AWS_REGION` | IAM, profile, or Bedrock bearer token. [Docs](https://opencode.ai/docs/providers#amazon-bedrock). | +| **Azure OpenAI** | API key via `/connect` or config
`AZURE_RESOURCE_NAME` | Resource name is part of the endpoint URL. | +| **Azure Cognitive Services** | API key via `/connect` or config
`AZURE_COGNITIVE_SERVICES_RESOURCE_NAME` | | +| **Cloudflare AI Gateway** | `CLOUDFLARE_ACCOUNT_ID`
`CLOUDFLARE_GATEWAY_ID`
`CLOUDFLARE_API_TOKEN` | | +| **Google Vertex AI** | `GOOGLE_CLOUD_PROJECT`
optional: `VERTEX_LOCATION`
`GOOGLE_APPLICATION_CREDENTIALS` (path to service account JSON) | Or use `gcloud auth application-default login`. | +| **GitLab Duo** | `GITLAB_TOKEN` (Personal Access Token)
optional: `GITLAB_INSTANCE_URL`, `GITLAB_AI_GATEWAY_URL` | Self-hosted: set `GITLAB_INSTANCE_URL`. | + +**Local / no API key** + +| Provider | Configuration | Notes | +|----------|---------------|--------| +| Ollama | `opencode.json` with `options.baseURL` (e.g. `http://localhost:11434/v1`) | No env key; local server. | +| LM Studio | Same: `baseURL` (e.g. `http://127.0.0.1:1234/v1`) | | +| llama.cpp (llama-server) | Same: `baseURL` (e.g. `http://127.0.0.1:8080/v1`) | | + +#### Using local providers (Ollama, LM Studio, etc.) with this action + +This action does **not** require API keys or block any provider. Local providers (Ollama, LM Studio, llama.cpp) work the same way: you set `opencode-model` to `provider/model-id` (e.g. `ollama/llama2`, or the model ID you use in LM Studio). No credentials are needed. + +- **With `opencode-start-server: true`:** The action starts OpenCode with the repo as the working directory, so OpenCode loads config from the project (e.g. `opencode.json` in the repo root). Add a `provider` entry for Ollama/LM Studio/llama.cpp with the right `baseURL` in `opencode.json`. The runner must have the local server (Ollama, LM Studio, etc.) **already running** and reachable at that URL—for example on a [self-hosted runner](https://docs.github.com/en/actions/using-github-hosted-runners/using-self-hosted-runners) where Ollama/LM Studio is installed, or in a job that starts the local server in a previous step and then runs this action. +- **With your own OpenCode server (`opencode-server-url`):** Point the action at a server that already has the local provider configured in its OpenCode config. No API key is required for that provider. 
+ +Example `opencode.json` in the repo (for `opencode-start-server: true` with Ollama on the runner): + +```json +{ + "$schema": "https://opencode.ai/config.json", + "provider": { + "ollama": { + "npm": "@ai-sdk/openai-compatible", + "name": "Ollama (local)", + "options": { "baseURL": "http://localhost:11434/v1" }, + "models": { "llama2": { "name": "Llama 2" } } + } + } +} +``` + +Then set `opencode-model: 'ollama/llama2'` (or the model ID you defined). See [OpenCode Providers – Ollama](https://opencode.ai/docs/providers#ollama) for details. + +**Example: GitHub Actions with multiple providers** + +Set the variables for the provider you use in the workflow `env`. Only one primary provider is needed for the model you choose: + +```yaml +- uses: landamessenger/git-board-flow@master + env: + # Option A: Anthropic + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + # Option B: OpenAI + # OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + # Option C: OpenRouter (many models) + # OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} + # Option D: Amazon Bedrock + # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + # AWS_REGION: us-east-1 + with: + opencode-start-server: true + opencode-model: 'anthropic/claude-3-5-sonnet' +``` + +For the full list of providers and any new env var names, see the official [OpenCode Providers](https://opencode.ai/docs/providers) documentation. ## Configuration @@ -26,8 +144,8 @@ Git Board Flow uses **OpenCode** for all AI-backed features: code analysis, prog | Input | Description | Default | |-------|-------------|--------| | `opencode-server-url` | OpenCode server URL | `http://localhost:4096` | -| `opencode-model` | Model in `provider/model` format | `opencode/kimi-k2.5` | -| `opencode-start-server` | If `true`, the action starts an OpenCode server at the beginning of the job and stops it when the job ends. No need to install or run OpenCode yourself. 
Requires provider API keys (e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`) as GitHub secrets. | `false` | +| `opencode-model` | Model in `provider/model` format | `opencode/kimi-k2.5-free` | +| `opencode-start-server` | If `true`, the action starts an OpenCode server at the beginning of the job and stops it when the job ends. No need to install or run OpenCode yourself. Pass provider API keys via the job's `env` (see [How OpenCode expects provider credentials](#how-opencode-expects-provider-credentials)). | `true` | + Example (using your own OpenCode server): @@ -38,15 +156,15 @@ Example (using your own OpenCode server): opencode-model: 'anthropic/claude-3-5-sonnet' ``` -Example (action starts and stops OpenCode for you; no separate server needed). Pass the provider API key via `env` so the OpenCode server can use it (e.g. `OPENAI_API_KEY`, `OPENROUTER_API_KEY`, `ANTHROPIC_API_KEY`): +Example (action starts and stops OpenCode for you; no separate server needed). Set the provider API key in `env` using the variable name OpenCode expects (e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `OPENROUTER_API_KEY`): ```yaml - uses: landamessenger/git-board-flow@master env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} # or OPENROUTER_API_KEY, etc. + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} # or OPENAI_API_KEY, OPENROUTER_API_KEY, etc. with: opencode-start-server: true - opencode-model: 'opencode/kimi-k2.5' + opencode-model: 'anthropic/claude-3-5-sonnet' ``` ### Environment variables (CLI / local) @@ -64,7 +182,7 @@ For the `copilot` command: ## Running OpenCode -1. **GitHub Actions – managed server (easiest)**: Set `opencode-start-server: true`. The action will start an OpenCode server at the beginning of the job (`npx opencode-ai serve` on port 4096), wait until it is healthy, run the rest of the job using that server, and stop the server when the job ends. You do not need to install or run OpenCode yourself. Pass provider API keys via env (e.g.
`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`) as GitHub secrets. +1. **GitHub Actions – managed server (easiest)**: Set `opencode-start-server: true`. The action will start an OpenCode server at the beginning of the job (`npx opencode-ai serve` on port 4096), wait until it is healthy, run the rest of the job using that server, and stop the server when the job ends. You do not need to install or run OpenCode yourself. Set the provider credentials in the job's `env` using the variable names OpenCode expects (see [Provider credentials reference](#provider-credentials-reference) above). 2. **Local / self-hosted**: Install OpenCode and run the server, e.g.: ```bash @@ -78,13 +196,27 @@ For the `copilot` command: - **AI pull request description** – Generates PR descriptions from issue and diff. - **Think / reasoning** – Deep code analysis and change proposals (OpenCode Plan agent). +- **Comment translation** – Automatically translates issue and PR review comments to the configured locale (e.g. English, Spanish) when they are written in another language. Uses `issues-locale` and `pull-requests-locale` inputs. - **Check progress** – Progress detection from branch vs issue description (OpenCode Plan agent). +- **Bugbot (potential problems)** – Analyzes branch vs base and posts findings as **comments on the issue** and **review comments on the PR**; updates issue comments and marks PR review threads as resolved when the model reports fixes. Runs on push or via single action / CLI. Configure with `bugbot-severity` (minimum severity: `info`, `low`, `medium`, `high`) and `ai-ignore-files` (paths to exclude). - **Copilot** – Code analysis and manipulation agent (OpenCode Build agent). -- **Error detection** – Potential bugs and issues in the codebase (OpenCode Plan agent). - **Recommend steps** – Suggests implementation steps from the issue description (OpenCode Plan agent). All of these use the same OpenCode server and model configuration. 
+## How Think works on issue comments + +When someone comments on an **issue** or **PR review**, OpenCode can reply with AI-generated answers (Think feature). The trigger depends on the issue type: + +- **Issues labeled `question` or `help`**: OpenCode responds to **any comment** on the issue. No mention required — if the user needs help, simply add a comment and OpenCode will answer. +- **Other issues**: You must **mention the bot user** (the user of the PAT) in the comment, e.g. `@your-bot-user how do I configure X?` Only then does OpenCode respond. + +This lets question/help issues behave as a support channel where users can ask without knowing the bot's username. + +## How comment translation works + +When someone comments on an **issue** or **PR review**, the action checks if the text is in the configured locale (`issues-locale` for issues, `pull-requests-locale` for PRs). If the comment is in another language, OpenCode **translates it** and updates the comment with the translation (appending the original text below). A hidden marker prevents re-translating the same comment. To force a new translation, delete the comment and post again. + ## How "check progress" works (e.g. "Progress 30%" in the issue) Progress is updated **automatically on every push**: when the **commit (push) workflow** runs (e.g. `on: push`), the action computes size and progress from the branch diff, updates the progress label on the **issue**, and applies the same label to any **open PRs** for that branch. No separate "check progress" workflow is required. @@ -99,6 +231,18 @@ You can also run progress check **on demand** with `single-action: check_progres 4. **Result** – The progress label is set on the issue (and on any open PRs for that branch). When run from a workflow, **PublishResultUseCase** posts a comment on the issue with the percentage and summary. 
+## How Bugbot works (potential problems) + +Bugbot runs when the **push (commit) workflow** runs, or on demand via **single action** `detect_potential_problems_action` (with `single-action-issue`) or the CLI **`detect-potential-problems -i <issue_number>`**. + +1. **Trigger** – Push to a branch linked to an issue, or a workflow/CLI run with the single action and issue number. +2. **Analysis** – OpenCode Plan compares the branch diff vs the base and returns a list of findings (title, severity, file, line, description). It also receives previously reported findings (from issue and PR comments) and can mark some as resolved. +3. **Issue** – New findings are posted as **comments on the issue**; when a finding is resolved, the corresponding comment is updated (e.g. "Resolved"). +4. **Pull request** – For each finding, the action posts a **review comment** on the PR at the right file/line. When OpenCode reports a finding as resolved, the action **marks that review thread as resolved**. +5. **Config** – Use `bugbot-severity` (e.g. `medium`) so only findings at or above that severity are posted; use `ai-ignore-files` to exclude paths from analysis and reporting. + +See [Issues → Bugbot](/issues#bugbot-potential-problems) and [Pull Requests → Bugbot](/pull-requests#bugbot-potential-problems) for more. + ## Can we avoid `opencode-server-url` and use a "master" OpenCode server? **Current situation** @@ -120,7 +264,7 @@ You can also run progress check **on demand** with `single-action: check_progres Use `provider/model` as in OpenCode's config, for example: -- `opencode/kimi-k2.5` (free, Kimi K2.5) +- `opencode/kimi-k2.5-free` (free, Kimi K2.5) - `openai/gpt-4o-mini` - `openai/gpt-4o` - `anthropic/claude-3-5-sonnet-20241022` @@ -132,4 +276,4 @@ Check OpenCode's docs or `/config/providers` on your server for the exact model - **"Missing required AI configuration"**: Set `opencode-server-url` and `opencode-model` (or env vars).
- **Connection errors**: Ensure the OpenCode server is running and reachable from the runner (network/firewall, correct URL and port). -- **Auth errors**: Configure provider API keys in OpenCode (e.g. via OpenCode UI or config), not in this action. +- **Auth errors**: Ensure the provider API key is set in the environment with the name OpenCode expects (e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `OPENROUTER_API_KEY`). When using `opencode-start-server: true`, pass it via the job's `env`. See [OpenCode Providers](https://opencode.ai/docs/providers) for other providers. diff --git a/docs/pull-requests.mdx b/docs/pull-requests.mdx deleted file mode 100644 index a2ea90db..00000000 --- a/docs/pull-requests.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Pull Requests -description: How Git Board Flow handles pull requests ---- - -# Pull Request Management - -When your workflow runs on `pull_request` events (e.g. opened, edited, labeled, unlabeled), Git Board Flow performs a set of actions so that PRs stay linked to issues, projects, and team workflows. - -## What the action does on pull requests - -| Capability | Description | -|------------|-------------| -| **PR–issue linking** | Links the pull request to the issue associated with its branch (from the branch name, e.g. `feature/123-title`) and posts a comment on the PR. | -| **Project linking** | Adds the PR to the configured GitHub Projects (`project-ids`) and moves it to the configured column (e.g. "In Progress"). | -| **Reviewers** | Assigns up to `desired-reviewers-count` reviewers. | -| **Priority & size** | Applies priority labels and size labels (XS–XXL) based on configured thresholds (lines, files, commits). | -| **AI-generated PR description** | When enabled, generates or updates the PR description using OpenCode and your repo's PR template. See [AI-generated PR description](#ai-generated-pr-description) below. 
| -| **Comments & images** | Posts a comment with optional images per branch type (feature, bugfix, docs, chore, hotfix, release). | - -For all configuration options, see [Configuration](/configuration). For a high-level list of features, see [Features & Capabilities](/features). - ---- - -## AI-generated PR description - -When `ai-pull-request-description` is `true` and [OpenCode](/opencode-integration) is configured (`opencode-server-url`, `opencode-model`), the action can **generate or update the pull request description** automatically using the OpenCode Plan agent. - -### How it works - -1. The action determines the PR's **base** and **head** branch (target and source branch). -2. The OpenCode Plan agent runs in the repository workspace. It: - - Reads the repository's **pull request template** (see below). - - Computes the **diff** between base and head (e.g. `git diff base..head`) to understand what changed. - - Uses the **issue description** (from the issue linked to the PR branch) as context. -3. The agent **fills the template** with a structured description: summary, scope of changes, technical details, how to test, breaking changes, deployment notes, etc., following the same sections and format as your template. -4. The action writes the result to the PR body (prefixed with the issue number for reference). - -No pre-computed file list or patches are sent from the action; the agent has access to the workspace and computes the diff itself, similar to the [check progress](/single-actions#single-actions) flow. - -### PR template as example for the AI - -The AI is instructed to use your repository's **pull request template** as the structure for the description. You should define: - -- **`.github/pull_request_template.md`** — This file is read by the OpenCode agent and used as the **skeleton** to fill. 
The agent keeps the same headings, bullet lists, checkboxes (`- [ ]`, `- [x]`), and separators, and fills each section with content derived from the diff and the issue. - -If you don't have a template, the agent will still produce a structured description, but defining a template ensures consistent, professional PR descriptions that match your team's expectations (e.g. Summary, Related Issues, Scope of Changes, Technical Details, How to Test, Breaking Changes, Deployment Notes, etc.). - -**Recommendation:** Add a `.github/pull_request_template.md` in your repo with the sections you want (summary, scope, testing, breaking changes, etc.). The AI will use it as a guide and fill it with the information from the issue and the branch diff. - -### When the AI description runs - -- The PR must have an **issue linked** (branch name follows the pattern that includes the issue number, e.g. `feature/123-add-feature`). -- The linked issue must have a **non-empty description** (used as context). -- If `ai-members-only` is enabled (default), the PR author must be a **project/org member**; otherwise the step is skipped. -- The action runs on the same `pull_request` events as the rest of the PR pipeline (e.g. opened, edited). - -To enable: set `ai-pull-request-description: true` in your workflow and configure OpenCode. See [Configuration → AI Features](/configuration#ai-features) and [OpenCode (AI)](/opencode-integration). 
- ---- - -## Example workflow - -```yaml -name: Git Board Flow - Pull Request - -on: - pull_request: - types: [opened, edited, labeled, unlabeled] - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - git-board-flow-pull-requests: - name: Git Board Flow - Pull Request - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - - - name: Git Board Flow - Pull Request - uses: landamessenger/git-board-flow@master - with: - token: ${{ secrets.PAT }} - project-ids: '2,3' - commit-prefix-transforms: 'replace-slash' - # Optional: enable AI-generated PR descriptions (requires OpenCode) - ai-pull-request-description: true - opencode-server-url: ${{ secrets.OPENCODE_SERVER_URL }} - opencode-model: 'anthropic/claude-3-5-sonnet' -``` - -For all options, see [Configuration](/configuration) and [Features & Capabilities](/features). diff --git a/docs/pull-requests/ai-description.mdx b/docs/pull-requests/ai-description.mdx new file mode 100644 index 00000000..d4c77fa8 --- /dev/null +++ b/docs/pull-requests/ai-description.mdx @@ -0,0 +1,57 @@ +--- +title: AI-Generated PR Description +description: How OpenCode fills your pull request template from the issue and branch diff +--- + +# AI-Generated PR Description + +When `ai-pull-request-description` is `true` and [OpenCode](/opencode-integration) is configured (`opencode-server-url`, `opencode-model`), the action can **generate or update the pull request description** automatically using the OpenCode Plan agent. + +## How it works + +1. The action determines the PR's **base** and **head** branch (target and source branch). +2. The OpenCode Plan agent runs in the repository workspace. It: + - Reads the repository's **pull request template** (see below). + - Computes the **diff** between base and head (e.g. `git diff base..head`) to understand what changed. 
+ - Uses the **issue description** (from the issue linked to the PR branch) as context. +3. The agent **fills the template** with a structured description: summary, scope of changes, technical details, how to test, breaking changes, deployment notes, etc., following the same sections and format as your template. +4. The action writes the result to the PR body (prefixed with the issue number for reference). + +No pre-computed file list or patches are sent from the action; the agent has access to the workspace and computes the diff itself, similar to the [check progress](/single-actions) flow. + +## PR template as example for the AI + +The AI is instructed to use your repository's **pull request template** as the structure for the description. You should define: + +- **`.github/pull_request_template.md`** — This file is read by the OpenCode agent and used as the **skeleton** to fill. The agent keeps the same headings, bullet lists, checkboxes (`- [ ]`, `- [x]`), and separators, and fills each section with content derived from the diff and the issue. + +If you don't have a template, the agent will still produce a structured description, but defining a template ensures consistent, professional PR descriptions that match your team's expectations (e.g. Summary, Related Issues, Scope of Changes, Technical Details, How to Test, Breaking Changes, Deployment Notes, etc.). + + + **Recommendation:** Add a `.github/pull_request_template.md` in your repo with the sections you want (summary, scope, testing, breaking changes, etc.). The AI will use it as a guide and fill it with the information from the issue and the branch diff. + + +## When the AI description runs + + + - The PR must have an **issue linked** (branch name follows the pattern that includes the issue number, e.g. `feature/123-add-feature`). + - The linked issue must have a **non-empty description** (used as context). 
+ - If `ai-members-only` is enabled (default: false), the PR author must be a **project/org member**; otherwise the step is skipped. + - The action runs on the same `pull_request` events as the rest of the PR pipeline (e.g. opened, edited). + + +## Enable in your workflow + +Set `ai-pull-request-description: true` and configure OpenCode in your workflow: + +```yaml +- uses: landamessenger/git-board-flow@master + with: + token: ${{ secrets.PAT }} + project-ids: '2,3' + ai-pull-request-description: true + opencode-server-url: ${{ secrets.OPENCODE_SERVER_URL }} + opencode-model: 'anthropic/claude-3-5-sonnet' +``` + +See [OpenCode (AI)](/opencode-integration) for provider setup and [Configuration](/configuration) for all AI-related inputs. diff --git a/docs/pull-requests/configuration.mdx b/docs/pull-requests/configuration.mdx new file mode 100644 index 00000000..edb21a34 --- /dev/null +++ b/docs/pull-requests/configuration.mdx @@ -0,0 +1,46 @@ +--- +title: Pull Requests Configuration +description: Configuration options specific to pull request workflows +--- + +# Pull Requests Configuration + +These inputs apply when the action runs on `pull_request` events. For the complete list of all action inputs, see [Configuration](/configuration). 
+ +## Project and assignees + +| Input | Description | Default | +|-------|-------------|---------| +| `project-ids` | Comma-separated GitHub Project IDs to link PRs to | — | +| `project-column-pull-request-created` | Column for newly created/linked PRs | "In Progress" | +| `project-column-pull-request-in-progress` | Column for in-progress PRs | "In Progress" | +| `desired-reviewers-count` | Number of reviewers to assign (max: 15) | 1 | + +## AI (PR description and comments) + +| Input | Description | Default | +|-------|-------------|---------| +| `ai-pull-request-description` | Enable AI-generated PR descriptions (requires OpenCode) | "true" | +| `pull-requests-locale` | Target locale for PR review comment translation | "en-US" | +| `ai-members-only` | Restrict AI PR description to org/project members only | "false" | + +See [AI PR description](/pull-requests/ai-description) for how the description is generated and [OpenCode (AI)](/opencode-integration) for server and model setup. + +## Images on pull requests + +| Input | Description | Default | +|-------|-------------|---------| +| `images-on-pull-request` | Enable images in PR comments | "true" | +| `images-pull-request-automatic` | Image URLs for automatic PR comments | (built-in) | +| `images-pull-request-feature` | Image URLs for feature PR comments | (built-in) | +| `images-pull-request-bugfix` | Image URLs for bugfix PR comments | (built-in) | +| … | Same pattern for `docs`, `chore`, `hotfix`, `release` | — | + +## Other + +| Input | Description | Default | +|-------|-------------|---------| +| `commit-prefix-transforms` | Transforms for commit prefix from branch name (e.g. `replace-slash`) | "replace-slash" | +| `token` | GitHub PAT (required for linking and project updates) | — | + +For size thresholds, priority labels, and branch naming (used when the action infers the issue from the branch), see [Configuration](/configuration). 
diff --git a/docs/pull-requests/index.mdx b/docs/pull-requests/index.mdx new file mode 100644 index 00000000..5faa7012 --- /dev/null +++ b/docs/pull-requests/index.mdx @@ -0,0 +1,57 @@ +--- +title: Pull Requests +description: How Git Board Flow handles pull requests +--- + +# Pull Request Management + +When your workflow runs on `pull_request` events (e.g. opened, edited, labeled, unlabeled), Git Board Flow performs a set of actions so that PRs stay linked to issues, projects, and team workflows. + +## Enable the action for pull requests + +Create a workflow file (e.g. `.github/workflows/gbf_pull_request.yml`) that runs on `pull_request`: + +```yaml +name: Git Board Flow - Pull Request + +on: + pull_request: + types: [opened, edited, labeled, unlabeled] + +jobs: + git-board-flow-pull-requests: + name: Git Board Flow - Pull Request + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: landamessenger/git-board-flow@master + with: + token: ${{ secrets.PAT }} + project-ids: '2,3' + commit-prefix-transforms: 'replace-slash' +``` + + + For **AI-generated PR descriptions**, add `ai-pull-request-description: true` and configure [OpenCode](/opencode-integration). See [AI PR description](/pull-requests/ai-description) for details. + + +## What the action does on pull requests + +| Capability | Description | +|------------|-------------| +| **PR–issue linking** | Links the pull request to the issue associated with its branch (from the branch name, e.g. `feature/123-title`) and posts a comment on the PR. | +| **Project linking** | Adds the PR to the configured GitHub Projects (`project-ids`) and moves it to the configured column (e.g. "In Progress"). | +| **Reviewers** | Assigns up to `desired-reviewers-count` reviewers. | +| **Priority & size** | Applies priority labels and size labels (XS–XXL) based on configured thresholds (lines, files, commits). 
| +| **AI-generated PR description** | When enabled, generates or updates the PR description using OpenCode and your repo's PR template. See [AI PR description](/pull-requests/ai-description). | +| **Comments & images** | Posts a comment with optional images per branch type (feature, bugfix, docs, chore, hotfix, release). | + +## Bugbot (potential problems) {#bugbot-potential-problems} + +When the **push** workflow runs (or the single action `detect_potential_problems_action`), OpenCode analyzes the branch vs the base and posts **review comments** on the PR at the relevant file and line for each finding (potential bugs, risks, or improvements). When OpenCode later reports a finding as resolved (e.g. after code changes), the action **marks that review thread as resolved**, so the PR review reflects the current status. Findings are also summarized as **comments on the linked issue**; see [Issues → Bugbot](/issues#bugbot-potential-problems). Configure minimum severity with `bugbot-severity` and excluded paths with `ai-ignore-files` in [Configuration](/configuration). + +## Next steps + +- **[Configuration](/pull-requests/configuration)** — PR-specific inputs (reviewers, columns, images, AI). +- **[AI PR description](/pull-requests/ai-description)** — How the AI fills your PR template from the issue and diff. +- [Full configuration reference](/configuration) — All action inputs. diff --git a/docs/single-actions/configuration.mdx b/docs/single-actions/configuration.mdx new file mode 100644 index 00000000..736e55b8 --- /dev/null +++ b/docs/single-actions/configuration.mdx @@ -0,0 +1,66 @@ +--- +title: Single Actions Configuration +description: Inputs for running single actions in Git Board Flow +--- + +# Single Actions Configuration + +These inputs are used when you run the action in single-action mode. For the complete list of all action inputs, see [Configuration](/configuration). 
+ +## Required for single-action mode + +| Input | Description | Example | +|-------|-------------|---------| +| `single-action` | The action to run | `check_progress_action`, `think_action`, `create_release`, etc. | +| `token` | GitHub PAT (required for almost all single actions) | `${{ secrets.PAT }}` | + +## When the action needs an issue + +For `check_progress_action`, `detect_potential_problems_action`, `recommend_steps_action`, and `deployed_action`: + +| Input | Description | Example | +|-------|-------------|---------| +| `single-action-issue` | Issue number to run the action on | `'123'` | + +## When the action needs a version (release or tag) + +For `create_release` and `create_tag`: + +| Input | Description | Example | +|-------|-------------|---------| +| `single-action-version` | Version string (e.g. semver) | `'1.2.0'` | + +For **`create_release`** only: + +| Input | Description | Example | +|-------|-------------|---------| +| `single-action-title` | Release title | `'Release 1.2.0'` | +| `single-action-changelog` | Changelog or release notes body (markdown) | Multi-line string or file content | + +## Example: workflow with single action + +```yaml +- uses: landamessenger/git-board-flow@master + with: + token: ${{ secrets.PAT }} + single-action: check_progress_action + single-action-issue: '123' +``` + +For release: + +```yaml +- uses: landamessenger/git-board-flow@master + with: + token: ${{ secrets.PAT }} + single-action: create_release + single-action-version: '1.2.0' + single-action-title: 'Release 1.2.0' + single-action-changelog: | + ## New features + - Added X + ## Fixes + - Fixed Y +``` + +See [Workflow & CLI](/single-actions/workflow-and-cli) for more examples and CLI equivalents. 
diff --git a/docs/single-actions.mdx b/docs/single-actions/index.mdx similarity index 55% rename from docs/single-actions.mdx rename to docs/single-actions/index.mdx index 9304661f..a409e6ff 100644 --- a/docs/single-actions.mdx +++ b/docs/single-actions/index.mdx @@ -7,32 +7,26 @@ description: Run one-off actions on demand (check progress, think, create releas When you set the `single-action` input (and any required targets such as `single-action-issue` or `single-action-version`), Git Board Flow runs **only** that action and skips the normal issue, pull request, and push pipelines. -## Usage - -In your workflow, pass the action name and any required inputs: - -```yaml -- uses: landamessenger/git-board-flow@master - with: - token: ${{ secrets.PAT }} - single-action: check_progress_action - single-action-issue: '123' -``` - ## Available single actions | Action | Inputs | Description | |--------|--------|-------------| | `check_progress_action` | `single-action-issue` | Runs progress check on demand. Progress is normally updated automatically on every push (commit workflow); use this to re-run without pushing or when you don't use the push workflow. | -| `detect_errors_action` | `single-action-issue` | Detects potential errors in the branch vs base using OpenCode Plan agent. | +| `detect_potential_problems_action` | `single-action-issue` | Bugbot: detects potential problems in the branch vs base; reports on issue and PR, marks resolved when fixed (OpenCode). | | `recommend_steps_action` | `single-action-issue` | Recommends implementation steps for the issue based on its description (OpenCode Plan). | -| `think_action` | — | Deep code analysis and change proposals (OpenCode). No issue required. | -| `initial_setup` | — | Initial setup steps. No issue required. | +| `think_action` | — | Deep code analysis and change proposals (OpenCode Plan). No issue required; use from CLI with a question (`think -q "..."`) or from a workflow that provides context. 
| +| `initial_setup` | — | Performs initial setup steps (e.g. for repo or project). No issue required. | | `create_release` | `single-action-version`, `single-action-title`, `single-action-changelog` | Creates a GitHub release. | | `create_tag` | `single-action-version` | Creates a Git tag. | -| `publish_github_action` | — | Publishes or updates the GitHub Action. | +| `publish_github_action` | — | Publishes or updates the GitHub Action (e.g. versioning, release). | | `deployed_action` | `single-action-issue` | Marks the issue as deployed; updates labels and project state. | -Actions that **fail the job** if the last step fails: `publish_github_action`, `create_release`, `deployed_action`, `create_tag`. + + **Actions that fail the job** if the last step fails: `publish_github_action`, `create_release`, `deployed_action`, `create_tag`. The workflow will be marked as failed so you can act on it. + + +## Next steps -For full details and how each feature works, see [Features & Capabilities](/features). +- **[Configuration](/single-actions/configuration)** — Inputs for single actions (`single-action`, `single-action-issue`, `single-action-version`, etc.). +- **[Workflow & CLI](/single-actions/workflow-and-cli)** — How to run from a workflow and from the `giik` CLI. +- [Features & Capabilities](/features) — How each action fits into the full feature set. diff --git a/docs/single-actions/workflow-and-cli.mdx b/docs/single-actions/workflow-and-cli.mdx new file mode 100644 index 00000000..9414eef9 --- /dev/null +++ b/docs/single-actions/workflow-and-cli.mdx @@ -0,0 +1,72 @@ +--- +title: Workflow & CLI +description: Run single actions from GitHub Actions workflows and from the giik CLI +--- + +# Workflow & CLI + +You can run single actions in two ways: from a **GitHub Actions workflow** (e.g. a scheduled job or manual trigger) or from the **`giik` CLI** locally. 
+ +## From a GitHub Actions workflow + +Add a job that sets `single-action` and any required inputs (`single-action-issue`, `single-action-version`, etc.): + +```yaml +jobs: + run-check-progress: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: landamessenger/git-board-flow@master + with: + token: ${{ secrets.PAT }} + single-action: check_progress_action + single-action-issue: '123' +``` + +Use `workflow_dispatch` to run on demand, or trigger from another event. See [Configuration](/single-actions/configuration) for all inputs. + +## From the CLI (`giik`) + +Build the CLI from the git-board-flow repo, then run it from **any repo** that has `origin` pointing to GitHub (or from the same repo): + +```bash +nvm use 20 +npm install +npm run build +# From the repo you want to analyze: +node build/cli/index.js <command> [options] +# Or if giik is on PATH: +giik <command> [options] +``` + +### Commands that mirror single actions + +| CLI command | Single action equivalent | Required option | Description | +|-------------|---------------------------|-----------------|-------------| +| `setup` | `initial_setup` | — | Labels, issue types, verify access | +| `check-progress` | `check_progress_action` | `-i <issue>` | Progress check on demand | +| `detect-potential-problems` | `detect_potential_problems_action` | `-i <issue>` [`-b <branch>`] | Bugbot: detect potential problems, report on issue and PR (OpenCode) | +| `recommend-steps` | `recommend_steps_action` | `-i <issue>` | Recommend steps from issue (OpenCode) | +| `think` | `think_action` | `-q "<question>"` | Deep code analysis (OpenCode) | +| `copilot` | (CLI only) | `-p "<prompt>"` | AI assistant: analyze/modify code (OpenCode Build) | + +Common options: `-t` / `--token` (PAT), `-d` / `--debug`, `--opencode-server-url`, `--opencode-model`. You can also set `PERSONAL_ACCESS_TOKEN` and OpenCode vars in a `.env` file (do not commit it). 
+ +### Examples + +```bash +# Progress check for issue 123 +node build/cli/index.js check-progress -i 123 -t $PAT + +# Detect potential problems (bugbot) for issue 456 on current branch +node build/cli/index.js detect-potential-problems -i 456 -t $PAT + +# Think / reasoning with a question +node build/cli/index.js think -q "Where is authentication validated?" -t $PAT + +# Copilot (CLI-only, no workflow equivalent) +node build/cli/index.js copilot -p "Explain the main function" -t $PAT +``` + +For a step-by-step guide to testing the OpenCode Plan flows locally, see [Testing OpenCode Plan Locally](/testing-opencode-plan-locally). diff --git a/docs/support.mdx b/docs/support.mdx index bd5b79dd..1a3a4449 100644 --- a/docs/support.mdx +++ b/docs/support.mdx @@ -14,15 +14,21 @@ Learn how to get help with Git Board Flow and contribute to its development. - Check the [configuration guide](/configuration) and [Features & Capabilities](/features) ### Issue Reporting -When reporting issues: - -1. Search existing issues first -2. Include: - - Detailed description - - Steps to reproduce - - Expected vs actual behavior - - Relevant logs and configurations - - Environment details + +When reporting issues, follow these steps: + + + + Check [existing issues](https://github.com/landamessenger/git-board-flow/issues) to avoid duplicates. 
+ + + - Detailed description + - Steps to reproduce + - Expected vs actual behavior + - Relevant logs and configurations + - Environment details + + ### Community Support - Use GitHub Discussions for questions diff --git a/docs/testing-opencode-plan-locally.mdx b/docs/testing-opencode-plan-locally.mdx index 79bbd1ce..c91e280a 100644 --- a/docs/testing-opencode-plan-locally.mdx +++ b/docs/testing-opencode-plan-locally.mdx @@ -1,11 +1,11 @@ --- title: Testing OpenCode Plan Locally -description: Step-by-step guide to test check-progress, detect-errors, and recommend-steps locally using the CLI +description: Step-by-step guide to test check-progress, detect-potential-problems, and recommend-steps locally using the CLI --- # Testing OpenCode Plan Locally -This guide walks you through testing the OpenCode Plan flows (`check-progress`, `detect-errors`, `recommend-steps`) locally using the `giik` CLI, without running GitHub Actions. +This guide walks you through testing the OpenCode Plan flows (`check-progress`, `detect-potential-problems`, `recommend-steps`) locally using the `giik` CLI, without running GitHub Actions. ## Prerequisites @@ -55,15 +55,16 @@ Options: - `-b, --branch ` – Branch name (optional; will try to determine from issue) - `-t, --token ` – PAT (or use `PERSONAL_ACCESS_TOKEN` env) - `--opencode-server-url ` – Override OpenCode URL -- `--opencode-model ` – Override model (e.g. `opencode/kimi-k2.5`, `anthropic/claude-3-5-sonnet`) +- `--opencode-model ` – Override model (e.g. `opencode/kimi-k2.5-free`, `anthropic/claude-3-5-sonnet`) - `-d, --debug` – Verbose logs -### Detect errors +### Detect potential problems (Bugbot) -Detects potential errors in the branch vs base using OpenCode Plan: +Analyzes the branch vs base and reports findings as **comments on the issue** and **review comments on open PRs**; updates issue comments and marks PR review threads as resolved when findings are fixed. 
```bash -node build/cli/index.js detect-errors -i -t +node build/cli/index.js detect-potential-problems -i -t +# Optional: -b (defaults to current git branch) ``` ### Recommend steps @@ -76,12 +77,16 @@ node build/cli/index.js recommend-steps -i -t ## 4. Optional: .env File + + You can store your token and OpenCode settings in a `.env` file so you don't need to pass `-t` and OpenCode options every time. + + Create a `.env` file in the repo root (do **not** commit it): ```bash PERSONAL_ACCESS_TOKEN=ghp_xxxx OPENCODE_SERVER_URL=http://localhost:4096 -OPENCODE_MODEL=opencode/kimi-k2.5 +OPENCODE_MODEL=opencode/kimi-k2.5-free ``` Then you can run without passing `-t` every time: diff --git a/docs/troubleshooting.mdx b/docs/troubleshooting.mdx index b21013d7..1f8d0d8a 100644 --- a/docs/troubleshooting.mdx +++ b/docs/troubleshooting.mdx @@ -5,120 +5,87 @@ description: Common issues and solutions when using Git Board Flow # Troubleshooting -This guide helps you resolve common issues you might encounter while using Git Board Flow. - -## Branch Creation Issues - -### Branch Creation Fails - -**Symptoms:** -- Error messages during branch creation -- Branches not being created automatically -- Permission denied errors - -**Solutions:** -1. Verify token permissions: - - Ensure the `token` (PAT) has repo and project access - - Check token scope and validity - -2. Check label configuration: - - Confirm labels exist in repository - - Verify label names match configuration - - Check label triggers are working - -3. Validate branch names: - - Ensure branch names are valid - - Check for naming conflicts - - Verify branch prefix configuration - -## Project Integration Issues - -### Project Linking Problems - -**Symptoms:** -- Issues not appearing in projects -- PRs not being linked automatically -- Project automation not working - -**Solutions:** -1. 
Verify project configuration: - - Use `project-ids` (comma-separated project IDs), not URLs - - Ensure projects exist and the token has access - - Verify organization project permissions if applicable - -2. Check token scope: - - Confirm project access permissions - - Verify organization access if needed - - Check token expiration - -3. Project visibility: - - Check project visibility settings - - Verify organization permissions - - Ensure user has project access - -## Workflow Timeouts - -### Workflow Execution Issues - -**Symptoms:** -- Workflows timing out -- Long execution times -- Failed workflow runs - -**Solutions:** -1. Adjust timeout settings: - - Increase `merge-timeout` if needed - - Check workflow step timeouts - - Monitor execution times - -2. Network connectivity: - - Check GitHub API status - - Verify network access - - Monitor rate limits - -3. Workflow triggers: - - Verify event triggers - - Check workflow conditions - - Monitor concurrent executions - -## Common Error Messages - -### Understanding and Fixing Errors - -1. "Permission Denied" - - Check token permissions - - Verify repository access - - Review security settings - -2. "Resource Not Found" - - Verify resource exists - - Check access permissions - - Validate URLs and paths - -3. "Invalid Configuration" - - Review workflow syntax - - Check configuration values - - Validate input parameters - -## OpenCode (AI) issues - -- **"Missing required AI configuration"**: Set `opencode-server-url` and `opencode-model` (or use `opencode-start-server: true` and pass provider API keys as secrets). See [OpenCode (AI)](/opencode-integration). -- **Connection errors**: Ensure the OpenCode server is reachable from the runner (URL, port, network/firewall). If using `opencode-start-server: true`, check that the job has network access to download `opencode-ai` and that API keys are set. -- **Invalid JSON response**: If the AI returns malformed JSON (e.g. 
for progress/error detection), the model may not follow the schema. Try a different model or check the OpenCode logs. - -## CLI issues - -- **"Git repository not found"**: Ensure you're in a directory with `git` initialized and `remote.origin.url` pointing to a GitHub repository (e.g. `github.com/owner/repo`). -- **"Please provide a prompt using -p or --prompt"**: The `copilot` command requires a prompt. Use `-p "your prompt"` or `--prompt "your prompt"`. -- **"Please provide an issue number using -i or --issue"**: Commands like `check-progress`, `detect-errors`, and `recommend-steps` require an issue number with `-i `. - -## Getting Help - -If you're still experiencing issues: - -1. Check the [GitHub repository](https://github.com/landamessenger/git-board-flow) for known issues -2. Review the latest documentation -3. Open a new issue with: - - Detailed description - - Steps to reproduce - - Relevant logs and configurations \ No newline at end of file +This guide helps you resolve common issues you might encounter while using Git Board Flow. Expand the section that matches your problem. + + + + **Symptoms:** + - Error messages during branch creation + - Branches not being created automatically + - Permission denied errors + + **Solutions:** + 1. **Token permissions:** Ensure the `token` (PAT) has repo and project access; check token scope and validity. + 2. **Label configuration:** Confirm labels exist in the repository, verify label names match your config, and check that label triggers are firing. + 3. **Branch names:** Ensure branch names are valid, check for naming conflicts, and verify branch prefix configuration (e.g. `feature-tree`, `bugfix-tree`). + + + + **Symptoms:** + - Issues not appearing in projects + - PRs not being linked automatically + - Project automation not working + + **Solutions:** + 1. **Project configuration:** Use `project-ids` (comma-separated project IDs), not URLs. Ensure projects exist and the token has access. 
For org projects, verify organization project permissions. + 2. **Token scope:** Confirm project access permissions, verify organization access if needed, and check token expiration. + 3. **Visibility:** Check project visibility settings and ensure the bot has access. + + + + **Symptoms:** + - Workflows timing out + - Long execution times + - Failed workflow runs + + **Solutions:** + 1. **Timeouts:** Increase `merge-timeout` if needed; check workflow step timeouts and monitor execution times. + 2. **Network:** Check GitHub API status, verify network access, and monitor rate limits. + 3. **Triggers:** Verify event triggers, workflow conditions, and concurrent executions. + + + + **Symptoms:** + - The workflow runs but nothing happens (no branches created, no project linking, no comments). + - Logs say something like "User from token (...) matches actor. Ignoring." + + **Cause:** When the **event actor** (the user who triggered the event: e.g. who opened the issue, who pushed, who opened the PR) is the **same** as the user who owns the PAT (`token`), the action intentionally skips the normal issue, PR, and push pipelines. It will only run a **single action** if you passed `single-action`; otherwise it exits without doing anything. + + **Solutions:** + 1. Use a **dedicated bot account** for the PAT (e.g. `giik-bot`) so that the actor (developer) is different from the token user. Then the full pipelines run as expected. + 2. If you want to run only specific actions when you are the actor, use `single-action` (e.g. `check_progress_action`, `deployed_action`) so the action runs that single action and exits. + + See [Authentication](/authentication) for token setup and the note on token user vs. actor. + + + + **"Permission Denied"** — Check token permissions, verify repository access, and review security settings. + + **"Resource Not Found"** — Verify the resource exists, check access permissions, and validate URLs and paths. 
+ + **"Invalid Configuration"** — Review workflow syntax, check configuration values, and validate input parameters. + + + + - **"Missing required AI configuration"**: Set `opencode-server-url` and `opencode-model` (or use `opencode-start-server: true` and pass provider API keys as secrets). See [OpenCode (AI)](/opencode-integration). + - **Connection errors**: Ensure the OpenCode server is reachable from the runner (URL, port, network/firewall). If using `opencode-start-server: true`, check that the job has network access to download `opencode-ai` and that API keys are set. + - **Invalid JSON response**: If the AI returns malformed JSON (e.g. for progress/error detection), the model may not follow the schema. Try a different model or check the OpenCode logs. + + + + - **"Git repository not found"**: Ensure you're in a directory with `git` initialized and `remote.origin.url` pointing to a GitHub repository (e.g. `github.com/owner/repo`). + - **"Please provide a prompt using -p or --prompt"**: The `copilot` command requires a prompt. Use `-p "your prompt"` or `--prompt "your prompt"`. + - **"Please provide an issue number using -i or --issue"**: Commands like `check-progress`, `detect-potential-problems`, and `recommend-steps` require an issue number with `-i `. + + + + If you're still experiencing issues: + + 1. Check the [GitHub repository](https://github.com/landamessenger/git-board-flow) for known issues. + 2. Review the latest documentation. + 3. 
Open a new issue with: + - Detailed description + - Steps to reproduce + - Relevant logs and configurations + + diff --git a/package-lock.json b/package-lock.json index cbf9c963..a2b89a8a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -38,6 +38,7 @@ "eslint": "^9.15.0", "jest": "^30.2.0", "ts-jest": "^29.4.5", + "ts-node": "^10.9.2", "typescript": "^5.2.2", "typescript-eslint": "^8.15.0" } @@ -847,6 +848,28 @@ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@emnapi/core": { "version": "1.7.1", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.7.1.tgz", @@ -2107,6 +2130,30 @@ "@sinonjs/commons": "^3.0.1" } }, + "node_modules/@tsconfig/node10": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", + "integrity": "sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": 
"https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, "node_modules/@tybys/wasm-util": { "version": "0.10.1", "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", @@ -2876,6 +2923,18 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/agent-base": { "version": "7.1.3", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", @@ -2971,6 +3030,12 @@ "node": ">= 8" } }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true + }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -3590,6 +3655,12 @@ "node": ">=10.0.0" } }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -3673,6 +3744,15 @@ "node": ">=8" } }, + "node_modules/diff": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.4.tgz", + "integrity": "sha512-X07nttJQkwkfKfvTPG/KSnE2OMdcUCao6+eXF3wmnIQRn2aPAHH3VxDbDOdegkd6JbPsXqShpvEOHfAT+nCNwQ==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, "node_modules/docker-modem": { "version": "5.0.6", "resolved": "https://registry.npmjs.org/docker-modem/-/docker-modem-5.0.6.tgz", @@ -6949,6 +7029,49 @@ "node": ">=10" } }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, 
"node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", @@ -7162,6 +7285,12 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true + }, "node_modules/v8-to-istanbul": { "version": "9.3.0", "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", @@ -7401,6 +7530,15 @@ "node": ">=12" } }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/src/actions/github_action.ts b/src/actions/github_action.ts index 33cfab2a..69d1c8ee 100644 --- a/src/actions/github_action.ts +++ b/src/actions/github_action.ts @@ -22,7 +22,7 @@ import { Workflows } from '../data/model/workflows'; import { ProjectRepository } from '../data/repository/project_repository'; import { PublishResultUseCase } from '../usecase/steps/common/publish_resume_use_case'; import { StoreConfigurationUseCase } from '../usecase/steps/common/store_configuration_use_case'; -import { DEFAULT_IMAGE_CONFIG, INPUT_KEYS, OPENCODE_DEFAULT_MODEL } from '../utils/constants'; +import { BUGBOT_MAX_COMMENTS, BUGBOT_MIN_SEVERITY, DEFAULT_IMAGE_CONFIG, INPUT_KEYS, OPENCODE_DEFAULT_MODEL } from '../utils/constants'; import { logError, logInfo } from '../utils/logger'; import { startOpencodeServer, type ManagedOpencodeServer } from '../utils/opencode_server'; import { mainRun } from './common_action'; @@ -71,6 +71,12 @@ export 
async function runGitHubAction(): Promise { .split(',') .map(path => path.trim()) .filter(path => path.length > 0); + const bugbotSeverity = getInput(INPUT_KEYS.BUGBOT_SEVERITY) || BUGBOT_MIN_SEVERITY; + const bugbotCommentLimitRaw = parseInt(getInput(INPUT_KEYS.BUGBOT_COMMENT_LIMIT), 10); + const bugbotCommentLimit = + Number.isNaN(bugbotCommentLimitRaw) || bugbotCommentLimitRaw < 1 + ? BUGBOT_MAX_COMMENTS + : Math.min(bugbotCommentLimitRaw, 200); /** * Projects Details @@ -511,6 +517,8 @@ export async function runGitHubAction(): Promise { aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, + bugbotSeverity, + bugbotCommentLimit, ), new Labels( branchManagementLauncherLabel, diff --git a/src/actions/local_action.ts b/src/actions/local_action.ts index c6f470d7..88d27387 100644 --- a/src/actions/local_action.ts +++ b/src/actions/local_action.ts @@ -20,7 +20,7 @@ import { Tokens } from '../data/model/tokens'; import { Welcome } from '../data/model/welcome'; import { Workflows } from '../data/model/workflows'; import { ProjectRepository } from '../data/repository/project_repository'; -import { DEFAULT_IMAGE_CONFIG, INPUT_KEYS, OPENCODE_DEFAULT_MODEL, TITLE } from '../utils/constants'; +import { BUGBOT_MAX_COMMENTS, BUGBOT_MIN_SEVERITY, DEFAULT_IMAGE_CONFIG, INPUT_KEYS, OPENCODE_DEFAULT_MODEL, TITLE } from '../utils/constants'; import { logInfo } from '../utils/logger'; import { getActionInputsWithDefaults } from '../utils/yml_utils'; import { mainRun } from './common_action'; @@ -72,6 +72,13 @@ export async function runLocalAction( .split(',') .map(path => path.trim()) .filter(path => path.length > 0); + const bugbotSeverity = (additionalParams[INPUT_KEYS.BUGBOT_SEVERITY] ?? actionInputs[INPUT_KEYS.BUGBOT_SEVERITY]) || BUGBOT_MIN_SEVERITY; + const bugbotCommentLimitRaw = additionalParams[INPUT_KEYS.BUGBOT_COMMENT_LIMIT] ?? actionInputs[INPUT_KEYS.BUGBOT_COMMENT_LIMIT]; + const bugbotCommentLimitNum = typeof bugbotCommentLimitRaw === 'number' ? 
bugbotCommentLimitRaw : parseInt(String(bugbotCommentLimitRaw ?? ''), 10); + const bugbotCommentLimit = + Number.isNaN(bugbotCommentLimitNum) || bugbotCommentLimitNum < 1 + ? BUGBOT_MAX_COMMENTS + : Math.min(bugbotCommentLimitNum, 200); /** * Projects Details @@ -514,6 +521,8 @@ export async function runLocalAction( aiMembersOnly, aiIgnoreFiles, aiIncludeReasoning, + bugbotSeverity, + bugbotCommentLimit, ), new Labels( branchManagementLauncherLabel, diff --git a/src/cli.ts b/src/cli.ts index 6dc9cc9c..fc8c25e3 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -31,6 +31,15 @@ function getGitInfo() { } } +/** Get current git branch (for CLI commands that need a branch when -b is omitted). */ +function getCurrentBranch(): string { + try { + return execSync('git rev-parse --abbrev-ref HEAD').toString().trim() || 'main'; + } catch { + return 'main'; + } +} + /** * Run the thinking AI scenario for deep code analysis and proposals. */ @@ -182,7 +191,7 @@ program } try { - const ai = new Ai(serverUrl, model, false, false, [], false); + const ai = new Ai(serverUrl, model, false, false, [], false, 'low', 20); const aiRepository = new AiRepository(); const result = await aiRepository.copilotMessage(ai, prompt); @@ -311,11 +320,11 @@ program }); /** - * Detect potential errors in the branch for an issue (vs base branch). + * Recommend implementation steps for an issue based on its description. 
*/ program - .command('detect-errors') - .description(`${TITLE} - Detect potential errors in the branch (vs base) using OpenCode Plan agent`) + .command('recommend-steps') + .description(`${TITLE} - Recommend steps to implement an issue (OpenCode Plan agent)`) .option('-i, --issue ', 'Issue number (required)', '') .option('-d, --debug', 'Debug mode', false) .option('-t, --token ', 'Personal access token', process.env.PERSONAL_ACCESS_TOKEN) @@ -336,7 +345,7 @@ program // eslint-disable-next-line @typescript-eslint/no-explicit-any -- CLI options map to action inputs const params: any = { [INPUT_KEYS.DEBUG]: options.debug?.toString() ?? 'false', - [INPUT_KEYS.SINGLE_ACTION]: ACTIONS.DETECT_ERRORS, + [INPUT_KEYS.SINGLE_ACTION]: ACTIONS.RECOMMEND_STEPS, [INPUT_KEYS.SINGLE_ACTION_ISSUE]: parseInt(issueNumber), [INPUT_KEYS.TOKEN]: options.token || process.env.PERSONAL_ACCESS_TOKEN, [INPUT_KEYS.OPENCODE_SERVER_URL]: options.opencodeServerUrl || process.env.OPENCODE_SERVER_URL || 'http://127.0.0.1:4096', @@ -344,18 +353,20 @@ program repo: { owner: gitInfo.owner, repo: gitInfo.repo }, issue: { number: parseInt(issueNumber) }, }; - params[INPUT_KEYS.WELCOME_TITLE] = '🔍 Error detection'; - params[INPUT_KEYS.WELCOME_MESSAGES] = [`Detecting errors for issue #${issueNumber} in ${gitInfo.owner}/${gitInfo.repo}...`]; + params[INPUT_KEYS.WELCOME_TITLE] = '📋 Recommend steps'; + params[INPUT_KEYS.WELCOME_MESSAGES] = [`Recommending steps for issue #${issueNumber} in ${gitInfo.owner}/${gitInfo.repo}...`]; await runLocalAction(params); }); /** - * Recommend implementation steps for an issue based on its description. + * Detect potential problems (bugbot): OpenCode analyzes branch vs base, reports findings + * as comments on the issue and open PR. Previously reported findings can be marked resolved. 
*/ program - .command('recommend-steps') - .description(`${TITLE} - Recommend steps to implement an issue (OpenCode Plan agent)`) + .command('detect-potential-problems') + .description(`${TITLE} - Detect potential problems in the branch (bugbot): report as comments on issue and PR`) .option('-i, --issue ', 'Issue number (required)', '') + .option('-b, --branch ', 'Branch name (optional, defaults to current git branch)', '') .option('-d, --debug', 'Debug mode', false) .option('-t, --token ', 'Personal access token', process.env.PERSONAL_ACCESS_TOKEN) .option('--opencode-server-url ', 'OpenCode server URL', process.env.OPENCODE_SERVER_URL || 'http://127.0.0.1:4096') @@ -372,20 +383,34 @@ program console.log('❌ Provide a valid issue number with -i or --issue'); return; } + const branch = (cleanArg(options.branch) || getCurrentBranch()).trim() || 'main'; // eslint-disable-next-line @typescript-eslint/no-explicit-any -- CLI options map to action inputs const params: any = { [INPUT_KEYS.DEBUG]: options.debug?.toString() ?? 
'false', - [INPUT_KEYS.SINGLE_ACTION]: ACTIONS.RECOMMEND_STEPS, + [INPUT_KEYS.SINGLE_ACTION]: ACTIONS.DETECT_POTENTIAL_PROBLEMS, [INPUT_KEYS.SINGLE_ACTION_ISSUE]: parseInt(issueNumber), [INPUT_KEYS.TOKEN]: options.token || process.env.PERSONAL_ACCESS_TOKEN, [INPUT_KEYS.OPENCODE_SERVER_URL]: options.opencodeServerUrl || process.env.OPENCODE_SERVER_URL || 'http://127.0.0.1:4096', [INPUT_KEYS.OPENCODE_MODEL]: options.opencodeModel || process.env.OPENCODE_MODEL || OPENCODE_DEFAULT_MODEL, repo: { owner: gitInfo.owner, repo: gitInfo.repo }, issue: { number: parseInt(issueNumber) }, + commits: { ref: `refs/heads/${branch}` }, }; - params[INPUT_KEYS.WELCOME_TITLE] = '📋 Recommend steps'; - params[INPUT_KEYS.WELCOME_MESSAGES] = [`Recommending steps for issue #${issueNumber} in ${gitInfo.owner}/${gitInfo.repo}...`]; - await runLocalAction(params); + params[INPUT_KEYS.WELCOME_TITLE] = '🐛 Detect potential problems (bugbot)'; + params[INPUT_KEYS.WELCOME_MESSAGES] = [ + `Detecting potential problems for issue #${issueNumber} on branch ${branch} in ${gitInfo.owner}/${gitInfo.repo}...`, + ]; + try { + await runLocalAction(params); + process.exit(0); + } catch (err) { + const error = err instanceof Error ? 
err : new Error(String(err)); + console.error('❌ Error running detect-potential-problems:', error.message); + if (options.debug) { + console.error(err); + } + process.exit(1); + } }); /** diff --git a/src/data/model/ai.ts b/src/data/model/ai.ts index ddb663fd..eec9a2d2 100644 --- a/src/data/model/ai.ts +++ b/src/data/model/ai.ts @@ -12,6 +12,8 @@ export class Ai { private aiMembersOnly: boolean; private aiIgnoreFiles: string[]; private aiIncludeReasoning: boolean; + private bugbotMinSeverity: string; + private bugbotCommentLimit: number; constructor( opencodeServerUrl: string, @@ -19,7 +21,9 @@ export class Ai { aiPullRequestDescription: boolean, aiMembersOnly: boolean, aiIgnoreFiles: string[], - aiIncludeReasoning: boolean + aiIncludeReasoning: boolean, + bugbotMinSeverity: string, + bugbotCommentLimit: number ) { this.opencodeServerUrl = opencodeServerUrl; this.opencodeModel = opencodeModel; @@ -27,6 +31,8 @@ export class Ai { this.aiMembersOnly = aiMembersOnly; this.aiIgnoreFiles = aiIgnoreFiles; this.aiIncludeReasoning = aiIncludeReasoning; + this.bugbotMinSeverity = bugbotMinSeverity; + this.bugbotCommentLimit = bugbotCommentLimit; } getOpencodeServerUrl(): string { @@ -53,6 +59,14 @@ export class Ai { return this.aiIncludeReasoning; } + getBugbotMinSeverity(): string { + return this.bugbotMinSeverity; + } + + getBugbotCommentLimit(): number { + return this.bugbotCommentLimit; + } + /** * Parse "provider/model-id" into { providerID, modelID } for OpenCode session.prompt. * Uses OPENCODE_DEFAULT_MODEL when no model is set (e.g. opencode/kimi-k2.5-free). 
diff --git a/src/data/model/execution.ts b/src/data/model/execution.ts index a9a270d2..c5c672d6 100644 --- a/src/data/model/execution.ts +++ b/src/data/model/execution.ts @@ -337,6 +337,10 @@ export class Execution { this.currentConfiguration.parentBranch = this.previousConfiguration?.parentBranch } + if (this.currentConfiguration.parentBranch === undefined && this.previousConfiguration?.parentBranch != null) { + this.currentConfiguration.parentBranch = this.previousConfiguration.parentBranch; + } + if (this.isSingleAction) { /** * Nothing to do here (for now) diff --git a/src/data/model/single_action.ts b/src/data/model/single_action.ts index f9b34a88..567d4307 100644 --- a/src/data/model/single_action.ts +++ b/src/data/model/single_action.ts @@ -11,7 +11,7 @@ export class SingleAction { ACTIONS.THINK, ACTIONS.INITIAL_SETUP, ACTIONS.CHECK_PROGRESS, - ACTIONS.DETECT_ERRORS, + ACTIONS.DETECT_POTENTIAL_PROBLEMS, ACTIONS.RECOMMEND_STEPS, ]; /** @@ -72,8 +72,8 @@ export class SingleAction { return this.currentSingleAction === ACTIONS.CHECK_PROGRESS; } - get isDetectErrorsAction(): boolean { - return this.currentSingleAction === ACTIONS.DETECT_ERRORS; + get isDetectPotentialProblemsAction(): boolean { + return this.currentSingleAction === ACTIONS.DETECT_POTENTIAL_PROBLEMS; } get isRecommendStepsAction(): boolean { diff --git a/src/data/repository/__tests__/ai_repository.test.ts b/src/data/repository/__tests__/ai_repository.test.ts index 88f6da6c..a4afa88f 100644 --- a/src/data/repository/__tests__/ai_repository.test.ts +++ b/src/data/repository/__tests__/ai_repository.test.ts @@ -1,21 +1,24 @@ /** * Integration-style tests for AiRepository with mocked fetch. * Covers edge cases for the OpenCode-based architecture: missing config, - * session/message failures, empty/invalid responses, JSON parsing, reasoning, getSessionDiff. + * session/message failures, empty/invalid responses, JSON parsing, reasoning, getSessionDiff, + * and retry behavior (OPENCODE_MAX_RETRIES). 
*/ +import { OPENCODE_MAX_RETRIES, OPENCODE_RETRY_DELAY_MS } from '../../../utils/constants'; import { AiRepository, getSessionDiff } from '../ai_repository'; import { Ai } from '../../model/ai'; jest.mock('../../../utils/logger', () => ({ logDebugInfo: jest.fn(), logError: jest.fn(), + logInfo: jest.fn(), })); const mockFetch = jest.fn(); function createAi(serverUrl = 'http://localhost:4096', model = 'opencode/kimi-k2.5') { - return new Ai(serverUrl, model, false, false, [], false); + return new Ai(serverUrl, model, false, false, [], false, 'low', 20); } describe('AiRepository', () => { @@ -33,147 +36,58 @@ describe('AiRepository', () => { jest.useRealTimers(); }); - describe('ask', () => { + describe('askAgent', () => { it('returns undefined when server URL is missing', async () => { const ai = createAi('', 'opencode/model'); - const result = await repo.ask(ai, 'Hello'); - expect(result).toBeUndefined(); - expect(mockFetch).not.toHaveBeenCalled(); - }); - - it('returns undefined when model is empty', async () => { - const ai = createAi('http://localhost:4096', ''); - const result = await repo.ask(ai, 'Hello'); + const result = await repo.askAgent(ai, 'plan', 'Assess progress', {}); expect(result).toBeUndefined(); expect(mockFetch).not.toHaveBeenCalled(); }); - it('returns undefined when session create fails', async () => { - const ai = createAi(); - mockFetch.mockResolvedValueOnce({ ok: false, status: 500, text: async () => 'Server error' }); - const result = await repo.ask(ai, 'Hello'); - expect(result).toBeUndefined(); - expect(mockFetch).toHaveBeenCalledTimes(1); - }); - - it('returns undefined when message request fails', async () => { - const ai = createAi(); - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: async () => JSON.stringify({ id: 'sess-1' }), - }) - .mockResolvedValueOnce({ ok: false, status: 502, text: async () => 'Bad gateway' }); - const result = await repo.ask(ai, 'Hello'); - expect(result).toBeUndefined(); - 
expect(mockFetch).toHaveBeenCalledTimes(2); - }); - - it('returns undefined when response body is empty', async () => { + it('returns undefined when session create fails after all retries', async () => { const ai = createAi(); - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: async () => JSON.stringify({ id: 'sess-1' }), - }) - .mockResolvedValueOnce({ - ok: true, - status: 200, - text: async () => '', - }); - const result = await repo.ask(ai, 'Hello'); + mockFetch.mockResolvedValue({ ok: false, status: 503, text: async () => 'Unavailable' }); + const promise = repo.askAgent(ai, 'plan', 'Prompt', {}); + await jest.advanceTimersByTimeAsync((OPENCODE_MAX_RETRIES - 1) * OPENCODE_RETRY_DELAY_MS); + const result = await promise; expect(result).toBeUndefined(); + expect(mockFetch).toHaveBeenCalledTimes(OPENCODE_MAX_RETRIES); }); - it('returns undefined when message response is invalid JSON', async () => { + it('returns undefined when agent message request fails after all retries', async () => { const ai = createAi(); - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: async () => JSON.stringify({ id: 'sess-1' }), - }) - .mockResolvedValueOnce({ - ok: true, - status: 200, - text: async () => 'not json', - }); - const result = await repo.ask(ai, 'Hello'); + const sessionOk = { ok: true, text: async () => JSON.stringify({ id: 's1' }) }; + const messageFail = { ok: false, status: 500, text: async () => 'Agent error' }; + for (let i = 0; i < OPENCODE_MAX_RETRIES; i++) { + mockFetch.mockResolvedValueOnce(sessionOk).mockResolvedValueOnce(messageFail); + } + const promise = repo.askAgent(ai, 'plan', 'Prompt', {}); + await jest.advanceTimersByTimeAsync((OPENCODE_MAX_RETRIES - 1) * OPENCODE_RETRY_DELAY_MS); + const result = await promise; expect(result).toBeUndefined(); + expect(mockFetch).toHaveBeenCalledTimes(OPENCODE_MAX_RETRIES * 2); }); - it('returns extracted text from parts on success', async () => { + it('returns plain text when expectJson is false', async 
() => { const ai = createAi(); mockFetch .mockResolvedValueOnce({ ok: true, - text: async () => JSON.stringify({ id: 'sess-1' }), + text: async () => JSON.stringify({ id: 's1' }), }) .mockResolvedValueOnce({ ok: true, status: 200, text: async () => JSON.stringify({ - parts: [ - { type: 'text', text: 'Hello back' }, - { type: 'other', data: 'ignored' }, - ], + parts: [{ type: 'text', text: 'Just a string response' }], }), }); - const result = await repo.ask(ai, 'Hello'); - expect(result).toBe('Hello back'); - expect(mockFetch).toHaveBeenCalledTimes(2); - }); - - it('handles session response with data.id', async () => { - const ai = createAi(); - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: async () => JSON.stringify({ data: { id: 'sess-alt' } }), - }) - .mockResolvedValueOnce({ - ok: true, - status: 200, - text: async () => JSON.stringify({ parts: [{ type: 'text', text: 'OK' }] }), - }); - const result = await repo.ask(ai, 'Hi'); - expect(result).toBe('OK'); - expect(mockFetch).toHaveBeenNthCalledWith( - 2, - 'http://localhost:4096/session/sess-alt/message', - expect.any(Object) - ); - }); - }); - - describe('askAgent', () => { - it('returns undefined when server URL is missing', async () => { - const ai = createAi('', 'opencode/model'); - const result = await repo.askAgent(ai, 'plan', 'Assess progress', {}); - expect(result).toBeUndefined(); - expect(mockFetch).not.toHaveBeenCalled(); - }); - - it('returns undefined when session create fails', async () => { - const ai = createAi(); - mockFetch.mockResolvedValueOnce({ ok: false, status: 503, text: async () => 'Unavailable' }); const result = await repo.askAgent(ai, 'plan', 'Prompt', {}); - expect(result).toBeUndefined(); - }); - - it('returns undefined when agent message request fails', async () => { - const ai = createAi(); - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: async () => JSON.stringify({ id: 's1' }), - }) - .mockResolvedValueOnce({ ok: false, status: 500, text: async () => 'Agent 
error' }); - const result = await repo.askAgent(ai, 'plan', 'Prompt', {}); - expect(result).toBeUndefined(); + expect(result).toBe('Just a string response'); }); - it('returns plain text when expectJson is false', async () => { + it('returns parsed JSON when expectJson is true', async () => { const ai = createAi(); mockFetch .mockResolvedValueOnce({ @@ -185,14 +99,17 @@ describe('AiRepository', () => { status: 200, text: async () => JSON.stringify({ - parts: [{ type: 'text', text: 'Just a string response' }], + parts: [{ type: 'text', text: '{"progress": 75, "summary": "Almost done"}' }], }), }); - const result = await repo.askAgent(ai, 'plan', 'Prompt', {}); - expect(result).toBe('Just a string response'); + const result = await repo.askAgent(ai, 'plan', 'Assess', { + expectJson: true, + schema: { type: 'object', properties: { progress: {}, summary: {} } }, + }); + expect(result).toEqual({ progress: 75, summary: 'Almost done' }); }); - it('returns parsed JSON when expectJson is true', async () => { + it('strips markdown code block from JSON when expectJson is true', async () => { const ai = createAi(); mockFetch .mockResolvedValueOnce({ @@ -204,17 +121,22 @@ describe('AiRepository', () => { status: 200, text: async () => JSON.stringify({ - parts: [{ type: 'text', text: '{"progress": 75, "summary": "Almost done"}' }], + parts: [ + { + type: 'text', + text: '```json\n{"progress": 100, "summary": "Done"}\n```', + }, + ], }), }); const result = await repo.askAgent(ai, 'plan', 'Assess', { expectJson: true, - schema: { type: 'object', properties: { progress: {}, summary: {} } }, + schema: {}, }); - expect(result).toEqual({ progress: 75, summary: 'Almost done' }); + expect(result).toEqual({ progress: 100, summary: 'Done' }); }); - it('strips markdown code block from JSON when expectJson is true', async () => { + it('parses JSON when agent adds prose before the JSON object (extract first {})', async () => { const ai = createAi(); mockFetch .mockResolvedValueOnce({ @@ 
-229,7 +151,7 @@ describe('AiRepository', () => { parts: [ { type: 'text', - text: '```json\n{"progress": 100, "summary": "Done"}\n```', + text: 'Based on my comprehensive analysis of the code changes between the branches, here is the assessment.\n\n{"progress": 80, "summary": "Almost done", "remaining": "Final review"}', }, ], }), @@ -238,7 +160,7 @@ describe('AiRepository', () => { expectJson: true, schema: {}, }); - expect(result).toEqual({ progress: 100, summary: 'Done' }); + expect(result).toEqual({ progress: 80, summary: 'Almost done', remaining: 'Final review' }); }); it('includes reasoning in result when includeReasoning is true', async () => { @@ -271,23 +193,54 @@ describe('AiRepository', () => { }); }); - it('returns undefined when expectJson is true but response is invalid JSON', async () => { + it('returns undefined when expectJson is true but response is invalid JSON after all retries', async () => { const ai = createAi(); + const sessionOk = { ok: true, text: async () => JSON.stringify({ id: 's1' }) }; + const messageInvalidJson = { + ok: true, + status: 200, + text: async () => + JSON.stringify({ + parts: [{ type: 'text', text: 'not valid json at all' }], + }), + }; + for (let i = 0; i < OPENCODE_MAX_RETRIES; i++) { + mockFetch.mockResolvedValueOnce(sessionOk).mockResolvedValueOnce(messageInvalidJson); + } + const promise = repo.askAgent(ai, 'plan', 'Assess', { expectJson: true, schema: {} }); + await jest.advanceTimersByTimeAsync((OPENCODE_MAX_RETRIES - 1) * OPENCODE_RETRY_DELAY_MS); + const result = await promise; + expect(result).toBeUndefined(); + expect(mockFetch).toHaveBeenCalledTimes(OPENCODE_MAX_RETRIES * 2); + }); + + it('succeeds on parse retry when first response is invalid JSON and second is valid', async () => { + const ai = createAi(); + const sessionOk = { ok: true, text: async () => JSON.stringify({ id: 's1' }) }; mockFetch + .mockResolvedValueOnce(sessionOk) .mockResolvedValueOnce({ ok: true, - text: async () => JSON.stringify({ id: 
's1' }), + status: 200, + text: async () => + JSON.stringify({ + parts: [{ type: 'text', text: 'not valid json' }], + }), }) + .mockResolvedValueOnce(sessionOk) .mockResolvedValueOnce({ ok: true, status: 200, text: async () => JSON.stringify({ - parts: [{ type: 'text', text: 'not valid json at all' }], + parts: [{ type: 'text', text: '{"progress": 80, "summary": "Done"}' }], }), }); - const result = await repo.askAgent(ai, 'plan', 'Assess', { expectJson: true, schema: {} }); - expect(result).toBeUndefined(); + const promise = repo.askAgent(ai, 'plan', 'Assess', { expectJson: true, schema: {} }); + await jest.advanceTimersByTimeAsync(OPENCODE_RETRY_DELAY_MS); + const result = await promise; + expect(result).toEqual({ progress: 80, summary: 'Done' }); + expect(mockFetch).toHaveBeenCalledTimes(4); }); it('removes trailing slash from server URL', async () => { @@ -315,16 +268,18 @@ describe('AiRepository', () => { expect(mockFetch).not.toHaveBeenCalled(); }); - it('returns undefined when build agent request fails', async () => { + it('returns undefined when build agent request fails after all retries', async () => { const ai = createAi(); - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: async () => JSON.stringify({ id: 's1' }), - }) - .mockResolvedValueOnce({ ok: false, status: 500, text: async () => 'Error' }); - const result = await repo.copilotMessage(ai, 'Edit file'); + const sessionOk = { ok: true, text: async () => JSON.stringify({ id: 's1' }) }; + const messageFail = { ok: false, status: 500, text: async () => 'Error' }; + for (let i = 0; i < OPENCODE_MAX_RETRIES; i++) { + mockFetch.mockResolvedValueOnce(sessionOk).mockResolvedValueOnce(messageFail); + } + const promise = repo.copilotMessage(ai, 'Edit file'); + await jest.advanceTimersByTimeAsync((OPENCODE_MAX_RETRIES - 1) * OPENCODE_RETRY_DELAY_MS); + const result = await promise; expect(result).toBeUndefined(); + expect(mockFetch).toHaveBeenCalledTimes(OPENCODE_MAX_RETRIES * 2); }); it('returns text 
and sessionId on success', async () => { diff --git a/src/data/repository/ai_repository.ts b/src/data/repository/ai_repository.ts index e4a61d20..20e8d5a5 100644 --- a/src/data/repository/ai_repository.ts +++ b/src/data/repository/ai_repository.ts @@ -1,7 +1,57 @@ -import { OPENCODE_REQUEST_TIMEOUT_MS } from '../../utils/constants'; -import { logDebugInfo, logError } from '../../utils/logger'; +import { + OPENCODE_MAX_RETRIES, + OPENCODE_REQUEST_TIMEOUT_MS, + OPENCODE_RETRY_DELAY_MS, +} from '../../utils/constants'; +import { logDebugInfo, logError, logInfo } from '../../utils/logger'; import { Ai } from '../model/ai'; +function delay(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Runs an async OpenCode operation with retries. On failure, logs and retries up to OPENCODE_MAX_RETRIES. + * Single retry system for all OpenCode interactions: no parallel retry logic. + * + * Retries when the operation throws, including: + * - Network errors (fetch fails, connection refused, etc.) + * - HTTP errors (4xx/5xx from session create or message) + * - Timeout (OPENCODE_REQUEST_TIMEOUT_MS) + * - Empty or invalid JSON response body (parseJsonResponse throws) + * - Missing session id in create response + * - Parse failure of expected format (e.g. expectJson but text is not valid JSON) when parse is done inside the callback + */ +async function withOpenCodeRetry(fn: () => Promise, context: string): Promise { + let lastError: unknown; + for (let attempt = 1; attempt <= OPENCODE_MAX_RETRIES; attempt++) { + try { + return await fn(); + } catch (error) { + lastError = error; + const message = error instanceof Error ? error.message : String(error); + const cause = + error instanceof Error && (error as Error & { cause?: unknown }).cause instanceof Error + ? (error as Error & { cause: Error }).cause.message + : ''; + const detail = cause ? ` (cause: ${cause})` : ''; + const noResponseHint = + message === 'fetch failed' + ? 
' No HTTP response; connection lost or timeout. If this was before the client timeout (see log above), the OpenCode server or a proxy may have a shorter timeout.' + : ''; + if (attempt < OPENCODE_MAX_RETRIES) { + logInfo( + `OpenCode [${context}] attempt ${attempt}/${OPENCODE_MAX_RETRIES} failed: ${message}${detail}.${noResponseHint} Retrying in ${OPENCODE_RETRY_DELAY_MS}ms...` + ); + await delay(OPENCODE_RETRY_DELAY_MS); + } else { + logError(`OpenCode [${context}] failed after ${OPENCODE_MAX_RETRIES} attempts: ${message}${detail}`); + } + } + } + throw lastError; +} + function createTimeoutSignal(ms: number): AbortSignal { const controller = new AbortController(); setTimeout(() => controller.abort(new Error(`OpenCode request timeout after ${ms}ms`)), ms); @@ -12,7 +62,133 @@ function ensureNoTrailingSlash(url: string): string { return url.replace(/\/+$/, '') || url; } -const OPENCODE_RESPONSE_LOG_MAX_LEN = 2000; +function truncate(s: string, maxLen: number): string { + return s.length <= maxLen ? s : s.slice(0, maxLen) + '...'; +} + +const OPENCODE_PROMPT_LOG_PREVIEW_LEN = 500; +const OPENCODE_PROMPT_LOG_FULL_LEN = 3000; + +/** Result of validating AI config for OpenCode calls. null when invalid. */ +interface OpenCodeConfig { + serverUrl: string; + providerID: string; + modelID: string; + model: string; +} + +function getValidatedOpenCodeConfig(ai: Ai): OpenCodeConfig | null { + const serverUrl = ai.getOpencodeServerUrl(); + const model = ai.getOpencodeModel(); + if (!serverUrl?.trim() || !model?.trim()) { + logError('Missing required AI configuration: opencode-server-url and opencode-model'); + return null; + } + const { providerID, modelID } = ai.getOpencodeModelParts(); + return { serverUrl, providerID, modelID, model }; +} + +/** + * Try to extract the first complete JSON object from text (from first `{` with balanced braces). + * Handles being inside a double-quoted string so we don't count braces there. 
+ */ +function extractFirstJsonObject(text: string): string | null { + const start = text.indexOf('{'); + if (start === -1) return null; + let depth = 1; + let inString = false; + let escape = false; + let quoteChar = '"'; + for (let i = start + 1; i < text.length; i++) { + const c = text[i]; + if (escape) { + escape = false; + continue; + } + if (c === '\\' && inString) { + escape = true; + continue; + } + if (inString) { + if (c === quoteChar) inString = false; + continue; + } + if (c === '"' || c === "'") { + inString = true; + quoteChar = c; + continue; + } + if (c === '{') depth++; + else if (c === '}') { + depth--; + if (depth === 0) return text.slice(start, i + 1); + } + } + return null; +} + +/** + * Parse JSON from agent response text safely. + * Tries: (1) direct parse, (2) strip markdown code fence, (3) extract first JSON object from text (model often adds prose before JSON). + * @throws Error with clear message if parsing fails + */ +function parseJsonFromAgentText(text: string): Record { + const trimmed = text.trim(); + if (!trimmed) { + throw new Error('Agent response text is empty'); + } + // 1) Direct parse + try { + return JSON.parse(trimmed) as Record; + } catch { + // 2) Model may wrap JSON in ```json ... ``` or ``` ... ``` + const withoutFence = trimmed + .replace(/^```(?:json)?\s*\n?/i, '') + .replace(/\n?```\s*$/i, '') + .trim(); + try { + return JSON.parse(withoutFence) as Record; + } catch { + // 3) Model may add prose before the JSON (e.g. "Based on my analysis... { ... }") + const extracted = extractFirstJsonObject(trimmed); + if (extracted) { + try { + return JSON.parse(extracted) as Record; + } catch (e) { + const msg = e instanceof Error ? e.message : String(e); + logDebugInfo( + `OpenCode agent response (expectJson): failed to parse extracted JSON. 
Full text length=${trimmed.length} firstChars=${JSON.stringify(trimmed.slice(0, 200))}` + ); + throw new Error(`Agent response is not valid JSON: ${msg}`); + } + } + const previewLen = 500; + const msg = trimmed.length > previewLen ? `${trimmed.slice(0, previewLen)}...` : trimmed; + const fullTruncated = trimmed.length > 3000 ? `${trimmed.slice(0, 3000)}... [total ${trimmed.length} chars]` : trimmed; + logDebugInfo( + `OpenCode agent response (expectJson): no JSON object found. length=${trimmed.length} preview=${JSON.stringify(msg)}` + ); + logDebugInfo(`OpenCode agent response (expectJson) full text for debugging:\n${fullTruncated}`); + throw new Error( + `Agent response is not valid JSON: no JSON object found. Response starts with: ${msg.slice(0, 150)}` + ); + } + } +} + +/** + * Extract text from OpenCode message parts by type (e.g. 'text', 'reasoning'), joined with separator. + */ +function extractPartsByType(parts: unknown, type: string, joinWith: string): string { + if (!Array.isArray(parts)) return ''; + return (parts as Array<{ type?: string; text?: string }>) + .filter((p) => p?.type === type && typeof p.text === 'string') + .map((p) => p.text as string) + .join(joinWith) + .trim(); +} + +const OPENCODE_RESPONSE_LOG_MAX_LEN = 80000; /** Parse response as JSON; on empty or invalid body throw a clear error with context. */ async function parseJsonResponse(res: Response, context: string): Promise { @@ -39,28 +215,37 @@ async function parseJsonResponse(res: Response, context: string): Promise } } -/** - * Extract plain text from OpenCode message response parts (type === 'text'). - */ +/** Extract plain text from OpenCode message response parts (type === 'text'). 
*/ function extractTextFromParts(parts: unknown): string { - if (!Array.isArray(parts)) return ''; - return (parts as Array<{ type?: string; text?: string }>) - .filter((p) => p?.type === 'text' && typeof p.text === 'string') - .map((p) => p.text as string) - .join(''); + return extractPartsByType(parts, 'text', ''); } +/** Extract reasoning from OpenCode message parts (type === 'reasoning'). */ +function extractReasoningFromParts(parts: unknown): string { + return extractPartsByType(parts, 'reasoning', '\n\n'); +} + +/** Max length of per-part text preview in debug log (to avoid huge log lines). */ +const OPENCODE_PART_PREVIEW_LEN = 80; + /** - * Extract reasoning text from OpenCode message response parts (type === 'reasoning'). - * Used to include the agent's full reasoning in comments (e.g. progress detection). + * Build a short summary of OpenCode message parts for debug logs (types, text lengths, and short preview). */ -function extractReasoningFromParts(parts: unknown): string { - if (!Array.isArray(parts)) return ''; - return (parts as Array<{ type?: string; text?: string }>) - .filter((p) => p?.type === 'reasoning' && typeof p.text === 'string') - .map((p) => p.text as string) - .join('\n\n') - .trim(); +function summarizePartsForLog(parts: unknown[], context: string): string { + if (!Array.isArray(parts) || parts.length === 0) { + return `${context}: 0 parts`; + } + const items = (parts as Array<{ type?: string; text?: string }>).map((p, i) => { + const type = p?.type ?? '(missing type)'; + const text = typeof p?.text === 'string' ? p.text : ''; + const len = text.length; + const preview = + len > OPENCODE_PART_PREVIEW_LEN + ? `${text.slice(0, OPENCODE_PART_PREVIEW_LEN).replace(/\n/g, ' ')}...` + : text.replace(/\n/g, ' '); + return `[${i}] type=${type} length=${len}${preview ? 
` preview=${JSON.stringify(preview)}` : ''}`; + }); + return `${context}: ${parts.length} part(s) — ${items.join(' | ')}`; } /** Default OpenCode agent for analysis/planning (read-only, no file edits). */ @@ -87,56 +272,32 @@ export const TRANSLATION_RESPONSE_SCHEMA = { additionalProperties: false, } as const; -/** - * OpenCode HTTP API: create session and send message, return assistant parts. - * Uses fetch to avoid ESM-only SDK with ncc. - */ -async function opencodePrompt( - baseUrl: string, - providerID: string, - modelID: string, - promptText: string -): Promise { - const base = ensureNoTrailingSlash(baseUrl); - const signal = createTimeoutSignal(OPENCODE_REQUEST_TIMEOUT_MS); - const createRes = await fetch(`${base}/session`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ title: 'gbf' }), - signal, - }); - if (!createRes.ok) { - const err = await createRes.text(); - throw new Error(`OpenCode session create failed: ${createRes.status} ${err}`); - } - const session = await parseJsonResponse<{ id?: string; data?: { id?: string } }>( - createRes, - 'OpenCode session.create' - ); - const sessionId = session?.id ?? session?.data?.id; - if (!sessionId) { - throw new Error('OpenCode session.create did not return session id'); - } - const messageRes = await fetch(`${base}/session/${sessionId}/message`, { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - model: { providerID, modelID }, - parts: [{ type: 'text', text: promptText }], - }), - signal, - }); - if (!messageRes.ok) { - const err = await messageRes.text(); - throw new Error(`OpenCode message failed: ${messageRes.status} ${err}`); - } - const messageData = await parseJsonResponse<{ parts?: unknown[]; data?: { parts?: unknown[] } }>( - messageRes, - 'OpenCode message' - ); - const parts = messageData?.parts ?? messageData?.data?.parts ?? 
[]; - return extractTextFromParts(parts); -} +/** JSON schema for Think (Q&A) responses: single answer field. */ +export const THINK_RESPONSE_SCHEMA = { + type: 'object', + properties: { + answer: { + type: 'string', + description: 'The concise answer to the user question. Required.', + }, + }, + required: ['answer'], + additionalProperties: false, +} as const; + +/** JSON schema for language check: done (already in locale) or must_translate. */ +export const LANGUAGE_CHECK_RESPONSE_SCHEMA = { + type: 'object', + properties: { + status: { + type: 'string', + enum: ['done', 'must_translate'], + description: 'done if text is in the requested locale, must_translate otherwise.', + }, + }, + required: ['status'], + additionalProperties: false, +} as const; export interface AskAgentOptions { /** Request JSON response and parse it. If schema provided, include it in the prompt. */ @@ -156,10 +317,9 @@ interface OpenCodeAgentMessageResult { /** * Send a message to an OpenCode agent (e.g. "plan", "build") and wait for the full response. - * The server runs the agent loop (tools, etc.) and returns when done. - * Use this to delegate PR description, progress, error detection, recommendations, or copilot (build) to OpenCode. + * Raw call: no retries. Callers (askAgent, copilotMessage) wrap in withOpenCodeRetry. 
*/ -async function opencodeMessageWithAgent( +async function opencodeMessageWithAgentRaw( baseUrl: string, options: { providerID: string; @@ -168,12 +328,22 @@ async function opencodeMessageWithAgent( promptText: string; } ): Promise { + logInfo( + `OpenCode request [agent ${options.agent}] model=${options.providerID}/${options.modelID} promptLength=${options.promptText.length}` + ); + logInfo(`OpenCode sending prompt (preview): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_PREVIEW_LEN)}`); + logDebugInfo(`OpenCode prompt (full): ${truncate(options.promptText, OPENCODE_PROMPT_LOG_FULL_LEN)}`); + logDebugInfo( + `OpenCode message body: agent=${options.agent}, model=${options.providerID}/${options.modelID}, parts[0].text length=${options.promptText.length}` + ); const base = ensureNoTrailingSlash(baseUrl); const signal = createTimeoutSignal(OPENCODE_REQUEST_TIMEOUT_MS); + const sessionBody = { title: 'gbf' }; + logDebugInfo(`OpenCode session create body: ${JSON.stringify(sessionBody)}`); const createRes = await fetch(`${base}/session`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ title: 'gbf' }), + body: JSON.stringify(sessionBody), signal, }); if (!createRes.ok) { @@ -193,6 +363,9 @@ async function opencodeMessageWithAgent( model: { providerID: options.providerID, modelID: options.modelID }, parts: [{ type: 'text', text: options.promptText }], }; + logDebugInfo(`OpenCode POST /session/${sessionId}/message body (keys): agent, model, parts (${(body.parts as unknown[]).length} part(s))`); + const timeoutMin = Math.round(OPENCODE_REQUEST_TIMEOUT_MS / 60_000); + logInfo(`OpenCode: waiting for agent "${options.agent}" message response (client timeout: ${timeoutMin} min)...`); const messageRes = await fetch(`${base}/session/${sessionId}/message`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, @@ -208,8 +381,13 @@ async function opencodeMessageWithAgent( `OpenCode agent "${options.agent}" message` ); 
const parts = messageData?.parts ?? messageData?.data?.parts ?? []; - const text = extractTextFromParts(parts); - return { text, parts, sessionId }; + const partsArray = Array.isArray(parts) ? parts : []; + logDebugInfo(summarizePartsForLog(partsArray, `OpenCode agent "${options.agent}" message parts`)); + const text = extractTextFromParts(partsArray); + logInfo( + `OpenCode response [agent ${options.agent}] responseLength=${text.length} sessionId=${sessionId}` + ); + return { text, parts: partsArray, sessionId }; } /** File diff from OpenCode GET /session/:id/diff */ @@ -222,55 +400,47 @@ export interface OpenCodeFileDiff { /** * Get the diff for an OpenCode session (files changed by the agent). * Call after opencodeMessageWithAgent when using the "build" agent so the user can see what was edited. + * Wrapped with retries (OPENCODE_MAX_RETRIES). */ export async function getSessionDiff( baseUrl: string, sessionId: string ): Promise { - const base = ensureNoTrailingSlash(baseUrl); - const signal = createTimeoutSignal(OPENCODE_REQUEST_TIMEOUT_MS); - const res = await fetch(`${base}/session/${sessionId}/diff`, { method: 'GET', signal }); - if (!res.ok) return []; - const raw = await res.text(); - if (!raw?.trim()) return []; - let data: OpenCodeFileDiff[] | { data?: OpenCodeFileDiff[] }; - try { - data = JSON.parse(raw) as OpenCodeFileDiff[] | { data?: OpenCodeFileDiff[] }; - } catch { - return []; - } - if (Array.isArray(data)) return data; - if (Array.isArray((data as { data?: OpenCodeFileDiff[] }).data)) - return (data as { data: OpenCodeFileDiff[] }).data; - return []; -} - -export class AiRepository { - ask = async (ai: Ai, prompt: string): Promise => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - logError('Missing required AI configuration: opencode-server-url and opencode-model'); - return undefined; + return withOpenCodeRetry(async () => { + logInfo(`OpenCode request [session diff] 
sessionId=${sessionId}`); + const base = ensureNoTrailingSlash(baseUrl); + const signal = createTimeoutSignal(OPENCODE_REQUEST_TIMEOUT_MS); + const res = await fetch(`${base}/session/${sessionId}/diff`, { method: 'GET', signal }); + if (!res.ok) { + logInfo(`OpenCode response [session diff] fileCount=0 (status ${res.status})`); + return []; } + const raw = await res.text(); + if (!raw?.trim()) { + logInfo('OpenCode response [session diff] fileCount=0 (empty body)'); + return []; + } + let data: OpenCodeFileDiff[] | { data?: OpenCodeFileDiff[] }; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - const text = await opencodePrompt(serverUrl, providerID, modelID, prompt); - return text || undefined; - } catch (error) { - logError(`Error querying OpenCode (${model}): ${error}`); - return undefined; + data = JSON.parse(raw) as OpenCodeFileDiff[] | { data?: OpenCodeFileDiff[] }; + } catch { + logInfo('OpenCode response [session diff] fileCount=0 (invalid JSON)'); + return []; } - }; + const list = Array.isArray(data) + ? data + : Array.isArray((data as { data?: OpenCodeFileDiff[] }).data) + ? (data as { data: OpenCodeFileDiff[] }).data + : []; + logInfo(`OpenCode response [session diff] fileCount=${list.length}`); + return list; + }, 'session diff'); +} +export class AiRepository { /** - * Ask an OpenCode agent (e.g. Plan) to perform a task. The server runs the full agent loop. - * Returns the final message (including reasoning in parts when includeReasoning is true). - * @param ai - AI config (server URL, model) - * @param agentId - OpenCode agent id (e.g. OPENCODE_AGENT_PLAN) - * @param prompt - User prompt - * @param options - expectJson, schema, includeReasoning - * @returns Response text, or parsed JSON when expectJson is true + * Ask an OpenCode agent (e.g. Plan) to perform a task. All calls use strict response (expectJson + schema). + * Single retry system: HTTP failures and parse failures both retry up to OPENCODE_MAX_RETRIES. 
*/ askAgent = async ( ai: Ai, @@ -278,46 +448,40 @@ export class AiRepository { prompt: string, options: AskAgentOptions = {} ): Promise | undefined> => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - logError('Missing required AI configuration: opencode-server-url and opencode-model'); - return undefined; - } + const config = getValidatedOpenCodeConfig(ai); + if (!config) return undefined; + const { serverUrl, providerID, modelID, model } = config; + const schemaName = options.schemaName ?? 'response'; + const promptText = + options.expectJson && options.schema + ? `Respond with a single JSON object that strictly conforms to this schema (name: ${schemaName}). No other text or markdown.\n\nSchema: ${JSON.stringify(options.schema)}\n\nUser request:\n${prompt}` + : prompt; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - let promptText = prompt; - if (options.expectJson && options.schema) { - const schemaName = options.schemaName ?? 'response'; - promptText = `Respond with a single JSON object that strictly conforms to this schema (name: ${schemaName}). No other text or markdown.\n\nSchema: ${JSON.stringify(options.schema)}\n\nUser request:\n${prompt}`; - } - const { text, parts } = await opencodeMessageWithAgent(serverUrl, { - providerID, - modelID, - agent: agentId, - promptText, - }); - if (!text) return undefined; - const reasoning = options.includeReasoning ? 
extractReasoningFromParts(parts) : ''; - if (options.expectJson) { - const cleaned = text.replace(/^```json\n?/, '').replace(/\n?```$/, '').trim(); - const parsed = JSON.parse(cleaned) as Record; - if (options.includeReasoning && reasoning) { - return { ...parsed, reasoning }; + return await withOpenCodeRetry(async () => { + const { text, parts } = await opencodeMessageWithAgentRaw(serverUrl, { + providerID, + modelID, + agent: agentId, + promptText, + }); + if (!text) throw new Error('Empty response text'); + const reasoning = options.includeReasoning ? extractReasoningFromParts(parts) : ''; + if (options.expectJson && options.schema) { + const maxLogLen = 5000000; + const toLog = text.length > maxLogLen ? `${text.slice(0, maxLogLen)}\n... [truncated, total ${text.length} chars]` : text; + logInfo(`OpenCode agent response (full text, expectJson=true) length=${text.length}:\n${toLog}`); + const parsed = parseJsonFromAgentText(text); + if (options.includeReasoning && reasoning) { + return { ...parsed, reasoning }; + } + return parsed; } - return parsed; - } - return text; + return text; + }, `agent ${agentId}`); } catch (error: unknown) { const err = error instanceof Error ? error : new Error(String(error)); - const errWithCause = err as Error & { cause?: unknown }; - const cause = - errWithCause.cause instanceof Error - ? errWithCause.cause.message - : errWithCause.cause != null - ? String(errWithCause.cause) - : ''; - const detail = cause ? ` (${cause})` : ''; + const cause = err instanceof Error && (err as Error & { cause?: unknown }).cause; + const detail = cause != null ? ` (${cause instanceof Error ? cause.message : String(cause)})` : ''; logError(`Error querying OpenCode agent ${agentId} (${model}): ${err.message}${detail}`); return undefined; } @@ -325,37 +489,30 @@ export class AiRepository { /** * Run the OpenCode "build" agent for the copilot command. Returns the final message and sessionId. + * Uses the same retry system (OPENCODE_MAX_RETRIES). 
*/ copilotMessage = async ( ai: Ai, prompt: string ): Promise<{ text: string; sessionId: string } | undefined> => { - const serverUrl = ai.getOpencodeServerUrl(); - const model = ai.getOpencodeModel(); - if (!serverUrl || !model) { - logError('Missing required AI configuration: opencode-server-url and opencode-model'); - return undefined; - } + const config = getValidatedOpenCodeConfig(ai); + if (!config) return undefined; + const { serverUrl, providerID, modelID, model } = config; try { - const { providerID, modelID } = ai.getOpencodeModelParts(); - const result = await opencodeMessageWithAgent(serverUrl, { - providerID, - modelID, - agent: OPENCODE_AGENT_BUILD, - promptText: prompt, - }); + const result = await withOpenCodeRetry( + () => + opencodeMessageWithAgentRaw(serverUrl, { + providerID, + modelID, + agent: OPENCODE_AGENT_BUILD, + promptText: prompt, + }), + `agent ${OPENCODE_AGENT_BUILD}` + ); return { text: result.text, sessionId: result.sessionId }; } catch (error: unknown) { const err = error instanceof Error ? error : new Error(String(error)); - const errWithCause = err as Error & { cause?: unknown }; - const cause = - errWithCause.cause instanceof Error - ? errWithCause.cause.message - : errWithCause.cause != null - ? String(errWithCause.cause) - : ''; - const detail = cause ? ` (${cause})` : ''; - logError(`Error querying OpenCode build agent (${model}): ${err.message}${detail}`); + logError(`Error querying OpenCode build agent (${model}): ${err.message}`); return undefined; } }; diff --git a/src/data/repository/branch_repository.ts b/src/data/repository/branch_repository.ts index 80baf5f1..edc62ebe 100644 --- a/src/data/repository/branch_repository.ts +++ b/src/data/repository/branch_repository.ts @@ -153,8 +153,8 @@ export class BranchRepository { success: true, executed: false, }) - ) - return result + ); + return result; } const branchTypes = [ @@ -213,7 +213,7 @@ export class BranchRepository { baseBranchName = hotfixBranch ?? 
developmentBranch; } - if (!isRenamingBranch) { + if (!isRenamingBranch || param.currentConfiguration.parentBranch === undefined) { param.currentConfiguration.parentBranch = baseBranchName; } diff --git a/src/data/repository/issue_repository.ts b/src/data/repository/issue_repository.ts index c8e21331..fd264305 100644 --- a/src/data/repository/issue_repository.ts +++ b/src/data/repository/issue_repository.ts @@ -534,6 +534,36 @@ export class IssueRepository { logDebugInfo(`Comment ${commentId} updated in Issue ${issueNumber}.`); } + /** + * Lists all comments on an issue (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). + */ + listIssueComments = async ( + owner: string, + repository: string, + issueNumber: number, + token: string, + ): Promise> => { + const octokit = github.getOctokit(token); + const all: Array<{ id: number; body: string | null; user?: { login?: string } }> = []; + for await (const response of octokit.paginate.iterator(octokit.rest.issues.listComments, { + owner, + repo: repository, + issue_number: issueNumber, + per_page: 100, + })) { + const data = response.data || []; + for (const c of data) { + all.push({ + id: c.id, + body: c.body ?? null, + user: c.user as { login?: string } | undefined, + }); + } + } + return all; + }; + closeIssue = async ( owner: string, repository: string, diff --git a/src/data/repository/pull_request_repository.ts b/src/data/repository/pull_request_repository.ts index dc573ba1..811b21e9 100644 --- a/src/data/repository/pull_request_repository.ts +++ b/src/data/repository/pull_request_repository.ts @@ -71,6 +71,10 @@ export class PullRequestRepository { logDebugInfo(`Updated PR #${pullRequestNumber} description with: ${description}`); } + /** + * Returns all users involved in review: requested (pending) + those who already submitted a review. + * Used to avoid re-requesting someone who already reviewed when ensuring desired reviewer count. 
+ */ getCurrentReviewers = async ( owner: string, repository: string, @@ -80,13 +84,29 @@ export class PullRequestRepository { const octokit = github.getOctokit(token); try { - const {data} = await octokit.rest.pulls.listRequestedReviewers({ - owner, - repo: repository, - pull_number: pullNumber, - }); + const [requestedRes, reviewsRes] = await Promise.all([ + octokit.rest.pulls.listRequestedReviewers({ + owner, + repo: repository, + pull_number: pullNumber, + }), + octokit.rest.pulls.listReviews({ + owner, + repo: repository, + pull_number: pullNumber, + }), + ]); - return data.users.map((user) => user.login); + const logins = new Set(); + for (const user of requestedRes.data.users) { + logins.add(user.login); + } + for (const review of reviewsRes.data) { + if (review.user?.login) { + logins.add(review.user.login); + } + } + return Array.from(logins); } catch (error) { logError(`Error getting reviewers of PR: ${error}.`); return []; @@ -148,6 +168,41 @@ export class PullRequestRepository { } }; + /** First line (right side) of the first hunk per file, for valid review comment placement. */ + private static firstLineFromPatch(patch: string): number | undefined { + const match = patch.match(/^@@ -\d+,\d+ \+(\d+),\d+ @@/m); + return match ? parseInt(match[1], 10) : undefined; + } + + /** + * Returns for each changed file the first line number that appears in the diff (right side). + * Used so review comments use a line that GitHub can resolve (avoids "line could not be resolved"). + */ + getFilesWithFirstDiffLine = async ( + owner: string, + repository: string, + pullNumber: number, + token: string + ): Promise> => { + const octokit = github.getOctokit(token); + try { + const { data } = await octokit.rest.pulls.listFiles({ + owner, + repo: repository, + pull_number: pullNumber, + }); + return (data || []) + .filter((f) => f.status !== 'removed' && (f.patch ?? '').length > 0) + .map((f) => { + const firstLine = PullRequestRepository.firstLineFromPatch(f.patch ?? 
''); + return { path: f.filename, firstLine: firstLine ?? 1 }; + }); + } catch (error) { + logError(`Error getting files with diff lines (owner=${owner}, repo=${repository}, pullNumber=${pullNumber}): ${error}.`); + return []; + } + }; + getPullRequestChanges = async ( owner: string, repository: string, @@ -186,4 +241,241 @@ export class PullRequestRepository { return []; } }; + + /** Head commit SHA of the PR (for creating review). */ + getPullRequestHeadSha = async ( + owner: string, + repository: string, + pullNumber: number, + token: string + ): Promise => { + const octokit = github.getOctokit(token); + try { + const { data } = await octokit.rest.pulls.get({ + owner, + repo: repository, + pull_number: pullNumber, + }); + return data.head?.sha; + } catch (error) { + logError(`Error getting PR head SHA: ${error}.`); + return undefined; + } + }; + + /** + * List all review comments on a PR (for bugbot: find existing findings by marker). + * Uses pagination to fetch every comment (default API returns only 30 per page). + * Includes node_id for GraphQL (e.g. resolve review thread). + */ + listPullRequestReviewComments = async ( + owner: string, + repository: string, + pullNumber: number, + token: string + ): Promise> => { + const octokit = github.getOctokit(token); + const all: Array<{ id: number; body: string | null; path?: string; line?: number; node_id?: string }> = []; + try { + for await (const response of octokit.paginate.iterator(octokit.rest.pulls.listReviewComments, { + owner, + repo: repository, + pull_number: pullNumber, + per_page: 100, + })) { + const data = response.data || []; + all.push( + ...data.map((c: { id: number; body: string | null; path?: string; line?: number; node_id?: string }) => ({ + id: c.id, + body: c.body ?? null, + path: c.path, + line: c.line ?? undefined, + node_id: c.node_id ?? 
undefined, + })) + ); + } + return all; + } catch (error) { + logError(`Error listing PR review comments (owner=${owner}, repo=${repository}, pullNumber=${pullNumber}): ${error}.`); + return []; + } + }; + + /** + * Resolve a PR review thread (GraphQL only). Finds the thread that contains the given comment and marks it resolved. + * Uses repository.pullRequest.reviewThreads because the field pullRequestReviewThread on PullRequestReviewComment was removed from the API. + * Paginates through all threads and all comments in each thread so the comment is found regardless of PR size. + * No-op if thread is already resolved. Logs and does not throw on error. + */ + resolvePullRequestReviewThread = async ( + owner: string, + repository: string, + pullNumber: number, + commentNodeId: string, + token: string + ): Promise => { + const octokit = github.getOctokit(token); + try { + type ThreadNode = { + id: string; + comments: { nodes: Array<{ id: string }>; pageInfo: { hasNextPage: boolean; endCursor: string | null } }; + }; + type ThreadsResult = { + repository?: { + pullRequest?: { + reviewThreads?: { + nodes: ThreadNode[]; + pageInfo: { hasNextPage: boolean; endCursor: string | null }; + }; + }; + }; + }; + type ThreadCommentsResult = { + node?: { + comments: { nodes: Array<{ id: string }>; pageInfo: { hasNextPage: boolean; endCursor: string | null } }; + }; + }; + + let threadId: string | null = null; + let threadsCursor: string | null = null; + + outer: do { + const threadsData: ThreadsResult = await octokit.graphql( + `query ($owner: String!, $repo: String!, $prNumber: Int!, $threadsAfter: String) { + repository(owner: $owner, name: $repo) { + pullRequest(number: $prNumber) { + reviewThreads(first: 100, after: $threadsAfter) { + nodes { + id + comments(first: 100) { + nodes { id } + pageInfo { hasNextPage endCursor } + } + } + pageInfo { hasNextPage endCursor } + } + } + } + }`, + { owner, repo: repository, prNumber: pullNumber, threadsAfter: threadsCursor } + ); + 
const threads = threadsData?.repository?.pullRequest?.reviewThreads as + | { nodes: ThreadNode[]; pageInfo: { hasNextPage: boolean; endCursor: string | null } } + | undefined; + if (!threads?.nodes?.length) break; + + for (const thread of threads.nodes) { + let commentsCursor: string | null = null; + let commentNodes = thread.comments?.nodes ?? []; + let commentsPageInfo = thread.comments?.pageInfo; + + do { + if (commentNodes.some((c: { id: string }) => c.id === commentNodeId)) { + threadId = thread.id; + break outer; + } + if (!commentsPageInfo?.hasNextPage || commentsPageInfo.endCursor == null) break; + commentsCursor = commentsPageInfo.endCursor; + const nextComments = await octokit.graphql( + `query ($threadId: ID!, $commentsAfter: String) { + node(id: $threadId) { + ... on PullRequestReviewThread { + comments(first: 100, after: $commentsAfter) { + nodes { id } + pageInfo { hasNextPage endCursor } + } + } + } + }`, + { threadId: thread.id, commentsAfter: commentsCursor } + ); + commentNodes = nextComments?.node?.comments?.nodes ?? []; + commentsPageInfo = nextComments?.node?.comments?.pageInfo ?? { hasNextPage: false, endCursor: null }; + } while (commentsPageInfo?.hasNextPage === true && commentsPageInfo?.endCursor != null); + } + + const pageInfo: { hasNextPage: boolean; endCursor: string | null } = threads.pageInfo; + if (threadId != null || !pageInfo?.hasNextPage) break; + threadsCursor = pageInfo.endCursor ?? null; + } while (threadsCursor != null); + + if (!threadId) { + logError(`[Bugbot] No review thread found for comment node_id=${commentNodeId}.`); + return; + } + await octokit.graphql<{ resolveReviewThread?: { thread?: { id: string } } }>( + `mutation ($threadId: ID!) 
{ + resolveReviewThread(input: { threadId: $threadId }) { + thread { id } + } + }`, + { threadId } + ); + logDebugInfo(`Resolved PR review thread ${threadId}.`); + } catch (err) { + logError(`[Bugbot] Error resolving PR review thread (commentNodeId=${commentNodeId}, owner=${owner}, repo=${repository}): ${err}`); + } + }; + + /** + * Create a review on the PR with one or more inline comments (bugbot findings). + * Each comment requires path and line (use first file and line 1 if not specified). + */ + createReviewWithComments = async ( + owner: string, + repository: string, + pullNumber: number, + commitId: string, + comments: Array<{ path: string; line: number; body: string }>, + token: string + ): Promise => { + if (comments.length === 0) return; + const octokit = github.getOctokit(token); + const results = await Promise.allSettled( + comments.map((c) => + octokit.rest.pulls.createReviewComment({ + owner, + repo: repository, + pull_number: pullNumber, + commit_id: commitId, + path: c.path, + line: c.line, + side: 'RIGHT', + body: c.body, + }) + ) + ); + let created = 0; + results.forEach((result, i) => { + if (result.status === 'fulfilled') { + created += 1; + } else { + const c = comments[i]; + logError( + `[Bugbot] Error creating PR review comment. path="${c.path}", line=${c.line}, prNumber=${pullNumber}, owner=${owner}, repo=${repository}: ${result.reason}` + ); + } + }); + if (created > 0) { + logDebugInfo(`Created ${created} review comment(s) on PR #${pullNumber}.`); + } + }; + + /** Update an existing PR review comment (e.g. to mark finding as resolved in body). 
*/ + updatePullRequestReviewComment = async ( + owner: string, + repository: string, + commentId: number, + body: string, + token: string + ): Promise => { + const octokit = github.getOctokit(token); + await octokit.rest.pulls.updateReviewComment({ + owner, + repo: repository, + comment_id: commentId, + body, + }); + logDebugInfo(`Updated review comment ${commentId}.`); + }; } \ No newline at end of file diff --git a/src/manager/description/configuration_handler.ts b/src/manager/description/configuration_handler.ts index 5c78adcd..d9f99157 100644 --- a/src/manager/description/configuration_handler.ts +++ b/src/manager/description/configuration_handler.ts @@ -3,6 +3,16 @@ import { Execution } from "../../data/model/execution"; import { logError } from "../../utils/logger"; import { IssueContentInterface } from "./base/issue_content_interface"; +/** Keys that must be preserved from stored config when current has undefined (e.g. when branch already existed). */ +const CONFIG_KEYS_TO_PRESERVE = [ + 'parentBranch', + 'workingBranch', + 'releaseBranch', + 'hotfixBranch', + 'hotfixOriginBranch', + 'branchType', +] as const; + export class ConfigurationHandler extends IssueContentInterface { get id(): string { return 'configuration' @@ -14,7 +24,33 @@ export class ConfigurationHandler extends IssueContentInterface { update = async (execution: Execution) => { try { - return await this.internalUpdate(execution, JSON.stringify(execution.currentConfiguration, null, 4)) + const current = execution.currentConfiguration; + const payload: Record = { + branchType: current.branchType, + releaseBranch: current.releaseBranch, + workingBranch: current.workingBranch, + parentBranch: current.parentBranch, + hotfixOriginBranch: current.hotfixOriginBranch, + hotfixBranch: current.hotfixBranch, + results: current.results, + branchConfiguration: current.branchConfiguration, + }; + + const storedRaw = await this.internalGetter(execution); + if (storedRaw != null && storedRaw.trim().length > 
0) { + try { + const stored = JSON.parse(storedRaw) as Record; + for (const key of CONFIG_KEYS_TO_PRESERVE) { + if (payload[key] === undefined && stored[key] !== undefined) { + payload[key] = stored[key]; + } + } + } catch { + /* ignore parse errors, save current as-is */ + } + } + + return await this.internalUpdate(execution, JSON.stringify(payload, null, 4)); } catch (error) { logError(`Error updating issue description: ${error}`); return undefined; diff --git a/src/usecase/actions/__tests__/check_progress_use_case.test.ts b/src/usecase/actions/__tests__/check_progress_use_case.test.ts index 14bee1b1..7d309cfa 100644 --- a/src/usecase/actions/__tests__/check_progress_use_case.test.ts +++ b/src/usecase/actions/__tests__/check_progress_use_case.test.ts @@ -1,7 +1,7 @@ /** * Integration-style tests for CheckProgressUseCase with the OpenCode-based flow. * Covers edge cases: missing AI config, no issue/branch/description, AI returns undefined/invalid - * progress, retries when progress 0%, success path with label updates. + * progress, progress 0% (single call; HTTP retries are in AiRepository), success path with label updates. 
*/ import { CheckProgressUseCase } from '../check_progress_use_case'; @@ -65,7 +65,7 @@ function baseParam(overrides: Record = {}): Execution { repo: 'repo', issueNumber: 123, tokens: { token: 'token' }, - ai: new Ai('http://localhost:4096', 'opencode/kimi-k2.5', false, false, [], false), + ai: new Ai('http://localhost:4096', 'opencode/kimi-k2.5', false, false, [], false, 'low', 20), commit: { branch: 'feature/123-add-feature' }, branches, ...overrides, @@ -88,7 +88,7 @@ describe('CheckProgressUseCase', () => { it('returns error when AI config is missing (no server URL)', async () => { const param = baseParam({ - ai: new Ai('', 'opencode/model', false, false, [], false), + ai: new Ai('', 'opencode/model', false, false, [], false, 'low', 20), }); const results = await useCase.invoke(param); @@ -102,7 +102,7 @@ describe('CheckProgressUseCase', () => { it('returns error when AI config is missing (no model)', async () => { const param = baseParam({ - ai: new Ai('http://localhost:4096', '', false, false, [], false), + ai: new Ai('http://localhost:4096', '', false, false, [], false, 'low', 20), }); const results = await useCase.invoke(param); @@ -179,26 +179,25 @@ describe('CheckProgressUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(false); - expect(results[0].errors?.some((e) => String(e).includes('Progress detection failed: received 0% after 3 attempts'))).toBe(true); - expect(mockAskAgent).toHaveBeenCalledTimes(3); + expect(results[0].errors?.some((e) => String(e).includes('Progress detection returned 0%'))).toBe(true); + expect(mockAskAgent).toHaveBeenCalledTimes(1); }); - it('retries up to MAX_PROGRESS_ATTEMPTS when progress is 0%', async () => { + it('returns error when progress is 0% (single call; HTTP retries are in AiRepository)', async () => { mockGetDescription.mockResolvedValue('Issue body'); - mockAskAgent - .mockResolvedValueOnce({ progress: 0, summary: 'No progress' }) - .mockResolvedValueOnce({ progress: 0, summary: 
'Still none' }) - .mockResolvedValueOnce({ progress: 50, summary: 'Half done' }); + mockAskAgent.mockResolvedValue({ progress: 0, summary: 'No progress yet' }); mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([]); const results = await useCase.invoke(baseParam()); - expect(results[0].success).toBe(true); - expect(results[0].payload).toMatchObject({ progress: 50 }); - expect(mockAskAgent).toHaveBeenCalledTimes(3); + expect(results[0].success).toBe(false); + expect(results[0].payload).toMatchObject({ progress: 0 }); + expect(results[0].errors?.some((e) => String(e).includes('Progress detection returned 0%'))).toBe(true); + expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockSetProgressLabel).not.toHaveBeenCalled(); }); - it('treats negative progress as 0% and returns failure-after-retries (no label set)', async () => { + it('treats negative progress as 0% and returns failure (no label set)', async () => { mockGetDescription.mockResolvedValue('Issue body'); mockAskAgent.mockResolvedValue({ progress: -10, summary: 'Invalid' }); mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([]); diff --git a/src/usecase/actions/__tests__/deployed_action_use_case.test.ts b/src/usecase/actions/__tests__/deployed_action_use_case.test.ts new file mode 100644 index 00000000..8b541dcb --- /dev/null +++ b/src/usecase/actions/__tests__/deployed_action_use_case.test.ts @@ -0,0 +1,300 @@ +import { DeployedActionUseCase } from '../deployed_action_use_case'; +import { Result } from '../../../data/model/result'; +import type { Execution } from '../../../data/model/execution'; + +jest.mock('../../../utils/logger', () => ({ + logInfo: jest.fn(), + logDebugInfo: jest.fn(), + logError: jest.fn(), +})); + +const mockSetLabels = jest.fn(); +const mockCloseIssue = jest.fn(); +jest.mock('../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + setLabels: mockSetLabels, + closeIssue: mockCloseIssue, + })), +})); + +const 
mockMergeBranch = jest.fn(); +jest.mock('../../../data/repository/branch_repository', () => ({ + BranchRepository: jest.fn().mockImplementation(() => ({ + mergeBranch: mockMergeBranch, + })), +})); + +function baseParam(overrides: Record = {}): Execution { + return { + owner: 'owner', + repo: 'repo', + tokens: { token: 'token' }, + labels: { + isDeploy: true, + isDeployed: false, + deploy: 'deploy', + deployed: 'deployed', + currentIssueLabels: ['deploy', 'feature'], + }, + singleAction: { issue: 42 }, + currentConfiguration: { + releaseBranch: undefined as string | undefined, + hotfixBranch: undefined as string | undefined, + }, + branches: { + defaultBranch: 'main', + development: 'develop', + }, + pullRequest: { mergeTimeout: 60 }, + ...overrides, + } as unknown as Execution; +} + +function successResult(step: string): Result[] { + return [ + new Result({ + id: 'branch_repository', + success: true, + executed: true, + steps: [step], + }), + ]; +} + +function failureResult(step: string): Result[] { + return [ + new Result({ + id: 'branch_repository', + success: false, + executed: true, + steps: [step], + }), + ]; +} + +describe('DeployedActionUseCase', () => { + let useCase: DeployedActionUseCase; + + beforeEach(() => { + useCase = new DeployedActionUseCase(); + mockSetLabels.mockResolvedValue(undefined); + mockCloseIssue.mockResolvedValue(true); + mockSetLabels.mockClear(); + mockCloseIssue.mockClear(); + mockMergeBranch.mockClear(); + }); + + it('returns failure when there is no deploy label', async () => { + const param = baseParam({ + labels: { + isDeploy: false, + isDeployed: false, + deploy: 'deploy', + deployed: 'deployed', + currentIssueLabels: ['feature'], + }, + }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].steps[0]).toContain('no `deploy` label'); + expect(mockSetLabels).not.toHaveBeenCalled(); + expect(mockMergeBranch).not.toHaveBeenCalled(); + 
expect(mockCloseIssue).not.toHaveBeenCalled(); + }); + + it('returns failure when deployed label is already set', async () => { + const param = baseParam({ + labels: { + isDeploy: true, + isDeployed: true, + deploy: 'deploy', + deployed: 'deployed', + currentIssueLabels: ['deploy', 'deployed'], + }, + }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].steps[0]).toContain('already set'); + expect(mockSetLabels).not.toHaveBeenCalled(); + expect(mockMergeBranch).not.toHaveBeenCalled(); + expect(mockCloseIssue).not.toHaveBeenCalled(); + }); + + it('updates labels but does not close issue when no release or hotfix branch (no merges)', async () => { + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(mockSetLabels).toHaveBeenCalledWith( + 'owner', + 'repo', + 42, + expect.arrayContaining(['feature', 'deployed']), + 'token' + ); + expect(mockMergeBranch).not.toHaveBeenCalled(); + expect(mockCloseIssue).not.toHaveBeenCalled(); + expect(results.some((r) => r.steps?.some((s) => s.includes('Label') && s.includes('deployed')))).toBe(true); + expect(results.some((r) => r.steps?.some((s) => s.includes('not closed because no release or hotfix branch was configured')))).toBe(true); + }); + + it('with releaseBranch: merges both branches and closes issue when all merges succeed', async () => { + mockMergeBranch + .mockResolvedValueOnce(successResult('Merged release into main')) + .mockResolvedValueOnce(successResult('Merged release into develop')); + const param = baseParam({ + currentConfiguration: { + releaseBranch: 'release/1.0.0', + hotfixBranch: undefined, + }, + }); + + const results = await useCase.invoke(param); + + expect(mockMergeBranch).toHaveBeenCalledTimes(2); + expect(mockMergeBranch).toHaveBeenNthCalledWith( + 1, + 'owner', + 'repo', + 'release/1.0.0', + 'main', + 60, + 'token' + ); + expect(mockMergeBranch).toHaveBeenNthCalledWith( 
+ 2, + 'owner', + 'repo', + 'release/1.0.0', + 'develop', + 60, + 'token' + ); + expect(mockCloseIssue).toHaveBeenCalledWith('owner', 'repo', 42, 'token'); + expect(results.some((r) => r.steps?.some((s) => s.includes('closed after merge')))).toBe(true); + }); + + it('with hotfixBranch: merges both branches and closes issue when all merges succeed', async () => { + mockMergeBranch + .mockResolvedValueOnce(successResult('Merged hotfix into main')) + .mockResolvedValueOnce(successResult('Merged main into develop')); + const param = baseParam({ + currentConfiguration: { + releaseBranch: undefined, + hotfixBranch: 'hotfix/1.0.1', + }, + }); + + const results = await useCase.invoke(param); + + expect(mockMergeBranch).toHaveBeenCalledTimes(2); + expect(mockMergeBranch).toHaveBeenNthCalledWith( + 1, + 'owner', + 'repo', + 'hotfix/1.0.1', + 'main', + 60, + 'token' + ); + expect(mockMergeBranch).toHaveBeenNthCalledWith( + 2, + 'owner', + 'repo', + 'main', + 'develop', + 60, + 'token' + ); + expect(mockCloseIssue).toHaveBeenCalledWith('owner', 'repo', 42, 'token'); + expect(results.some((r) => r.steps?.some((s) => s.includes('closed after merge')))).toBe(true); + }); + + it('with releaseBranch: does not close issue when first merge fails', async () => { + mockMergeBranch + .mockResolvedValueOnce(failureResult('Failed to merge into main')) + .mockResolvedValueOnce(successResult('Merged into develop')); + const param = baseParam({ + currentConfiguration: { + releaseBranch: 'release/1.0.0', + hotfixBranch: undefined, + }, + }); + + const results = await useCase.invoke(param); + + expect(mockMergeBranch).toHaveBeenCalledTimes(2); + expect(mockCloseIssue).not.toHaveBeenCalled(); + expect(results.some((r) => r.success === false && r.steps?.some((s) => s.includes('not closed because one or more merge operations failed')))).toBe(true); + }); + + it('with releaseBranch: does not close issue when second merge fails', async () => { + mockMergeBranch + 
.mockResolvedValueOnce(successResult('Merged into main')) + .mockResolvedValueOnce(failureResult('Failed to merge into develop')); + const param = baseParam({ + currentConfiguration: { + releaseBranch: 'release/1.0.0', + hotfixBranch: undefined, + }, + }); + + const results = await useCase.invoke(param); + + expect(mockCloseIssue).not.toHaveBeenCalled(); + expect(results.some((r) => r.steps?.some((s) => s.includes('not closed because one or more merge operations failed')))).toBe(true); + }); + + it('with hotfixBranch: does not close issue when one merge fails', async () => { + mockMergeBranch + .mockResolvedValueOnce(successResult('Merged hotfix into main')) + .mockResolvedValueOnce(failureResult('Failed to merge main into develop')); + const param = baseParam({ + currentConfiguration: { + releaseBranch: undefined, + hotfixBranch: 'hotfix/1.0.1', + }, + }); + + const results = await useCase.invoke(param); + + expect(mockCloseIssue).not.toHaveBeenCalled(); + expect(results.some((r) => r.steps?.some((s) => s.includes('not closed because one or more merge operations failed')))).toBe(true); + }); + + it('pushes merge results into returned array (release path)', async () => { + mockMergeBranch + .mockResolvedValueOnce(successResult('Step A')) + .mockResolvedValueOnce(successResult('Step B')); + const param = baseParam({ + currentConfiguration: { + releaseBranch: 'release/1.0.0', + hotfixBranch: undefined, + }, + }); + + const results = await useCase.invoke(param); + + const mergeSteps = results.flatMap((r) => r.steps).filter((s) => s === 'Step A' || s === 'Step B'); + expect(mergeSteps).toContain('Step A'); + expect(mergeSteps).toContain('Step B'); + }); + + it('when setLabels throws, returns error result and does not call merge or close', async () => { + mockSetLabels.mockRejectedValueOnce(new Error('API error')); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results.some((r) => r.success === false)).toBe(true); + 
expect(results.some((r) => r.steps?.some((s) => s.includes('assign members to issue')))).toBe(true); + expect(mockMergeBranch).not.toHaveBeenCalled(); + expect(mockCloseIssue).not.toHaveBeenCalled(); + }); +}); diff --git a/src/usecase/actions/check_progress_use_case.ts b/src/usecase/actions/check_progress_use_case.ts index bf80c47d..12b8c4c9 100644 --- a/src/usecase/actions/check_progress_use_case.ts +++ b/src/usecase/actions/check_progress_use_case.ts @@ -19,8 +19,6 @@ const PROGRESS_RESPONSE_SCHEMA = { additionalProperties: false, } as const; -const MAX_PROGRESS_ATTEMPTS = 3; - interface ProgressAttemptResult { progress: number; summary: string; @@ -155,30 +153,20 @@ export class CheckProgressUseCase implements ParamUseCase { const prompt = this.buildProgressPrompt(issueNumber, issueDescription, branch, developmentBranch); - let progress = 0; - let summary = 'Unable to determine progress.'; - let reasoning = ''; - let remaining = ''; - - for (let attempt = 1; attempt <= MAX_PROGRESS_ATTEMPTS; attempt++) { - logInfo(`🤖 Analyzing progress using OpenCode Plan agent... 
(attempt ${attempt}/${MAX_PROGRESS_ATTEMPTS})`); - const attemptResult = await this.fetchProgressAttempt(param.ai, prompt); - progress = attemptResult.progress; - summary = attemptResult.summary; - reasoning = attemptResult.reasoning; - remaining = attemptResult.remaining; - if (progress > 0) { - logInfo(`✅ Progress detection completed: ${progress}%`); - break; - } - if (attempt < MAX_PROGRESS_ATTEMPTS) { - logInfo(`⚠️ Progress returned 0% (attempt ${attempt}/${MAX_PROGRESS_ATTEMPTS}), retrying...`); - } + logInfo('🤖 Analyzing progress using OpenCode Plan agent...'); + const attemptResult = await this.fetchProgressAttempt(param.ai, prompt); + const progress = attemptResult.progress; + const summary = attemptResult.summary; + const reasoning = attemptResult.reasoning; + const remaining = attemptResult.remaining; + + if (progress > 0) { + logInfo(`✅ Progress detection completed: ${progress}%`); } - const progressFailedAfterRetries = progress === 0; - if (progressFailedAfterRetries) { - logError(`Progress detection failed: received 0% after ${MAX_PROGRESS_ATTEMPTS} attempts. This may be due to a model error.`); + const progressFailed = progress === 0; + if (progressFailed) { + logError('Progress detection returned 0%. This may be due to a model error or no changes detected.'); results.push( new Result({ id: this.taskId, @@ -189,7 +177,7 @@ export class CheckProgressUseCase implements ParamUseCase { summary, ], errors: [ - `Progress detection failed: received 0% after ${MAX_PROGRESS_ATTEMPTS} attempts. This may be due to a model error. There are changes on the branch; consider re-running the check.`, + 'Progress detection returned 0%. This may be due to a model error or no changes detected. Consider re-running the check.', ], payload: { progress: 0, @@ -294,7 +282,7 @@ export class CheckProgressUseCase implements ParamUseCase { /** * Calls the OpenCode agent once and returns parsed progress, summary, and reasoning. - * Used inside the retry loop when progress is 0%. 
+ * HTTP-level retries are handled by AiRepository (OPENCODE_MAX_RETRIES). */ private async fetchProgressAttempt(ai: Ai, prompt: string): Promise { const agentResponse = await this.aiRepository.askAgent( diff --git a/src/usecase/actions/deployed_action_use_case.ts b/src/usecase/actions/deployed_action_use_case.ts index 20a9afd4..917bab84 100644 --- a/src/usecase/actions/deployed_action_use_case.ts +++ b/src/usecase/actions/deployed_action_use_case.ts @@ -69,6 +69,8 @@ export class DeployedActionUseCase implements ParamUseCase }) ); + const mergeResults: Result[] = []; + if (param.currentConfiguration.releaseBranch) { const mergeToDefaultResult = await this.branchRepository.mergeBranch( param.owner, @@ -79,6 +81,7 @@ export class DeployedActionUseCase implements ParamUseCase param.tokens.token, ); result.push(...mergeToDefaultResult); + mergeResults.push(...mergeToDefaultResult); const mergeToDevelopResult = await this.branchRepository.mergeBranch( param.owner, @@ -89,6 +92,7 @@ export class DeployedActionUseCase implements ParamUseCase param.tokens.token, ); result.push(...mergeToDevelopResult); + mergeResults.push(...mergeToDevelopResult); } else if (param.currentConfiguration.hotfixBranch) { const mergeToDefaultResult = await this.branchRepository.mergeBranch( param.owner, @@ -99,6 +103,7 @@ export class DeployedActionUseCase implements ParamUseCase param.tokens.token, ); result.push(...mergeToDefaultResult); + mergeResults.push(...mergeToDefaultResult); const mergeToDevelopResult = await this.branchRepository.mergeBranch( param.owner, @@ -109,8 +114,66 @@ export class DeployedActionUseCase implements ParamUseCase param.tokens.token, ); result.push(...mergeToDevelopResult); + mergeResults.push(...mergeToDevelopResult); } - + + const mergesAttempted = mergeResults.length > 0; + const allMergesSucceeded = + mergesAttempted && mergeResults.every((r) => r.success); + + if (allMergesSucceeded) { + const issueNumber = Number(param.singleAction.issue); + const closed = 
await this.issueRepository.closeIssue( + param.owner, + param.repo, + issueNumber, + param.tokens.token, + ); + if (closed) { + logDebugInfo(`Issue #${issueNumber} closed after merges to default and develop.`); + result.push( + new Result({ + id: this.taskId, + success: true, + executed: true, + steps: [ + `Issue #${issueNumber} closed after merge to \`${param.branches.defaultBranch}\` and \`${param.branches.development}\`.`, + ], + }) + ); + } + } else { + if (mergesAttempted) { + logDebugInfo( + `Skipping issue close: one or more merges failed. Issue #${param.singleAction.issue} remains open.` + ); + result.push( + new Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Issue #${param.singleAction.issue} was not closed because one or more merge operations failed.`, + ], + }) + ); + } else { + logDebugInfo( + `Skipping issue close: no release or hotfix branch configured. Issue #${param.singleAction.issue} remains open.` + ); + result.push( + new Result({ + id: this.taskId, + success: false, + executed: true, + steps: [ + `Issue #${param.singleAction.issue} was not closed because no release or hotfix branch was configured (no merge operations were performed).`, + ], + }) + ); + } + } + return result; } catch (error) { logError(error); diff --git a/src/usecase/actions/detect_errors_use_case.ts b/src/usecase/actions/detect_errors_use_case.ts deleted file mode 100644 index a97a511e..00000000 --- a/src/usecase/actions/detect_errors_use_case.ts +++ /dev/null @@ -1,138 +0,0 @@ -import { Execution } from '../../data/model/execution'; -import { Result } from '../../data/model/result'; -import { logError, logInfo } from '../../utils/logger'; -import { ParamUseCase } from '../base/param_usecase'; -import { IssueRepository } from '../../data/repository/issue_repository'; -import { BranchRepository } from '../../data/repository/branch_repository'; -import { AiRepository, OPENCODE_AGENT_PLAN } from '../../data/repository/ai_repository'; - -export class 
DetectErrorsUseCase implements ParamUseCase { - taskId: string = 'DetectErrorsUseCase'; - private issueRepository: IssueRepository = new IssueRepository(); - private branchRepository: BranchRepository = new BranchRepository(); - private aiRepository: AiRepository = new AiRepository(); - - async invoke(param: Execution): Promise { - logInfo(`Executing ${this.taskId}.`); - - const results: Result[] = []; - - try { - if (!param.ai?.getOpencodeModel() || !param.ai?.getOpencodeServerUrl()) { - results.push( - new Result({ - id: this.taskId, - success: false, - executed: true, - errors: ['Missing OPENCODE_SERVER_URL and OPENCODE_MODEL.'], - }) - ); - return results; - } - - const issueNumber = param.issueNumber; - if (issueNumber === -1) { - results.push( - new Result({ - id: this.taskId, - success: false, - executed: true, - errors: ['Issue number not found.'], - }) - ); - return results; - } - - let branch: string | undefined = param.commit.branch; - if (!branch) { - const branchTypes = [ - param.branches.featureTree, - param.branches.bugfixTree, - param.branches.docsTree, - param.branches.choreTree, - ]; - const branches = await this.branchRepository.getListOfBranches( - param.owner, - param.repo, - param.tokens.token - ); - for (const type of branchTypes) { - const prefix = `${type}/${issueNumber}-`; - const found = branches.find((b) => b.indexOf(prefix) > -1); - if (found) { - branch = found; - break; - } - } - } - - const developmentBranch = param.branches.development || 'develop'; - if (!branch) { - results.push( - new Result({ - id: this.taskId, - success: false, - executed: true, - errors: [`No branch found for issue #${issueNumber}.`], - }) - ); - return results; - } - - const changes = await this.branchRepository.getChanges( - param.owner, - param.repo, - branch, - developmentBranch, - param.tokens.token - ); - - const prompt = `Review the code changes in branch "${branch}" compared to "${developmentBranch}" and identify potential errors, bugs, or issues. 
- -**Changed files and patches:** -${changes.files - .slice(0, 30) - .map( - (f) => - `### ${f.filename} (${f.status})\n\`\`\`diff\n${(f.patch ?? '').slice(0, 1500)}\n\`\`\`` - ) - .join('\n\n')} - -List potential errors, bugs, or code quality issues. For each: file (if relevant), brief description, and severity if obvious. Use clear bullet points or numbered list.`; - - logInfo(`🤖 Detecting errors using OpenCode Plan agent...`); - const response = await this.aiRepository.askAgent( - param.ai, - OPENCODE_AGENT_PLAN, - prompt - ); - - const report = - typeof response === 'string' - ? response - : (response && String((response as Record).report)) || 'No response.'; - - results.push( - new Result({ - id: this.taskId, - success: true, - executed: true, - steps: ['Error detection completed (OpenCode Plan agent).', report], - payload: { issueNumber, branch, developmentBranch, report }, - }) - ); - } catch (error) { - logError(`Error in ${this.taskId}: ${error}`); - results.push( - new Result({ - id: this.taskId, - success: false, - executed: true, - errors: [`Error in ${this.taskId}: ${error}`], - }) - ); - } - - return results; - } -} diff --git a/src/usecase/commit_use_case.ts b/src/usecase/commit_use_case.ts index 3052bbda..807cef75 100644 --- a/src/usecase/commit_use_case.ts +++ b/src/usecase/commit_use_case.ts @@ -5,6 +5,7 @@ import { ParamUseCase } from "./base/param_usecase"; import { CheckProgressUseCase } from "./actions/check_progress_use_case"; import { NotifyNewCommitOnIssueUseCase } from "./steps/commit/notify_new_commit_on_issue_use_case"; import { CheckChangesIssueSizeUseCase } from "./steps/commit/check_changes_issue_size_use_case"; +import { DetectPotentialProblemsUseCase } from "./steps/commit/detect_potential_problems_use_case"; export class CommitUseCase implements ParamUseCase { taskId: string = 'CommitUseCase'; @@ -26,6 +27,7 @@ export class CommitUseCase implements ParamUseCase { results.push(...(await new 
NotifyNewCommitOnIssueUseCase().invoke(param))); results.push(...(await new CheckChangesIssueSizeUseCase().invoke(param))); results.push(...(await new CheckProgressUseCase().invoke(param))); + results.push(...(await new DetectPotentialProblemsUseCase().invoke(param))); } catch (error) { logError(error); results.push( diff --git a/src/usecase/single_action_use_case.ts b/src/usecase/single_action_use_case.ts index a9b98063..b4079a14 100644 --- a/src/usecase/single_action_use_case.ts +++ b/src/usecase/single_action_use_case.ts @@ -9,8 +9,8 @@ import { CreateTagUseCase } from "./actions/create_tag_use_case"; import { ThinkUseCase } from "./steps/common/think_use_case"; import { InitialSetupUseCase } from "./actions/initial_setup_use_case"; import { CheckProgressUseCase } from "./actions/check_progress_use_case"; -import { DetectErrorsUseCase } from "./actions/detect_errors_use_case"; import { RecommendStepsUseCase } from "./actions/recommend_steps_use_case"; +import { DetectPotentialProblemsUseCase } from "./steps/commit/detect_potential_problems_use_case"; export class SingleActionUseCase implements ParamUseCase { taskId: string = 'SingleActionUseCase'; @@ -39,8 +39,8 @@ export class SingleActionUseCase implements ParamUseCase { results.push(...await new InitialSetupUseCase().invoke(param)); } else if (param.singleAction.isCheckProgressAction) { results.push(...await new CheckProgressUseCase().invoke(param)); - } else if (param.singleAction.isDetectErrorsAction) { - results.push(...await new DetectErrorsUseCase().invoke(param)); + } else if (param.singleAction.isDetectPotentialProblemsAction) { + results.push(...await new DetectPotentialProblemsUseCase().invoke(param)); } else if (param.singleAction.isRecommendStepsAction) { results.push(...await new RecommendStepsUseCase().invoke(param)); } diff --git a/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.ts b/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.ts new file 
mode 100644 index 00000000..deba23e6 --- /dev/null +++ b/src/usecase/steps/commit/__tests__/check_changes_issue_size_use_case.test.ts @@ -0,0 +1,144 @@ +import { CheckChangesIssueSizeUseCase } from '../check_changes_issue_size_use_case'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logDebugInfo: jest.fn(), + logError: jest.fn(), +})); + +const mockGetSizeCategoryAndReason = jest.fn(); +const mockSetLabels = jest.fn(); +const mockGetLabels = jest.fn(); +const mockSetTaskSize = jest.fn(); +const mockGetOpenPullRequestNumbersByHeadBranch = jest.fn(); + +jest.mock('../../../../data/repository/branch_repository', () => ({ + BranchRepository: jest.fn().mockImplementation(() => ({ + getSizeCategoryAndReason: mockGetSizeCategoryAndReason, + })), +})); +jest.mock('../../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + setLabels: mockSetLabels, + getLabels: mockGetLabels, + })), +})); +jest.mock('../../../../data/repository/project_repository', () => ({ + ProjectRepository: jest.fn().mockImplementation(() => ({ + setTaskSize: mockSetTaskSize, + })), +})); +jest.mock('../../../../data/repository/pull_request_repository', () => ({ + PullRequestRepository: jest.fn().mockImplementation(() => ({ + getOpenPullRequestNumbersByHeadBranch: mockGetOpenPullRequestNumbersByHeadBranch, + })), +})); + +function baseParam(overrides: Record = {}) { + return { + owner: 'o', + repo: 'r', + issueNumber: 42, + tokens: { token: 't' }, + commit: { branch: 'feature/42-foo' }, + currentConfiguration: { parentBranch: 'develop' }, + sizeThresholds: {}, + labels: { + sizedLabelOnIssue: 'size: M', + currentIssueLabels: ['feature', 'size: M'], + sizeLabels: ['size: XS', 'size: S', 'size: M', 'size: L', 'size: XL', 'size: XXL'], + }, + project: { getProjects: () => [] }, + ...overrides, + } as unknown as Parameters[0]; +} + +describe('CheckChangesIssueSizeUseCase', () => { + let useCase: CheckChangesIssueSizeUseCase; 
+ + beforeEach(() => { + useCase = new CheckChangesIssueSizeUseCase(); + mockGetSizeCategoryAndReason.mockReset(); + mockSetLabels.mockReset(); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([]); + }); + + it('uses branches.development or "develop" as base when parentBranch is undefined', async () => { + mockGetSizeCategoryAndReason.mockResolvedValue({ + size: 'size: M', + githubSize: 'M', + reason: 'Within limits', + }); + const param = baseParam({ + currentConfiguration: { parentBranch: undefined }, + branches: { development: 'develop' }, + } as Record); + + const results = await useCase.invoke(param); + + expect(mockGetSizeCategoryAndReason).toHaveBeenCalledWith( + 'o', + 'r', + 'feature/42-foo', + 'develop', + expect.anything(), + expect.anything(), + 't' + ); + expect(results.length).toBeGreaterThan(0); + }); + + it('returns success executed true when size equals sizedLabelOnIssue (no change)', async () => { + mockGetSizeCategoryAndReason.mockResolvedValue({ + size: 'size: M', + githubSize: 'M', + reason: 'Within limits', + }); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(mockSetLabels).not.toHaveBeenCalled(); + }); + + it('returns success executed true and updates labels when size differs from sizedLabelOnIssue', async () => { + mockGetSizeCategoryAndReason.mockResolvedValue({ + size: 'size: L', + githubSize: 'L', + reason: 'Many lines changed', + }); + mockSetLabels.mockResolvedValue(undefined); + mockSetTaskSize.mockResolvedValue(undefined); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps?.some((s) => s.includes('size: L') && s.includes('resized'))).toBe(true); + expect(mockSetLabels).toHaveBeenCalledWith( + 
'o', + 'r', + 42, + expect.arrayContaining(['feature', 'size: L']), + 't' + ); + }); + + it('returns failure when getSizeCategoryAndReason throws', async () => { + mockGetSizeCategoryAndReason.mockRejectedValue(new Error('API error')); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].steps).toContain( + 'Tried to check the size of the changes, but there was a problem.' + ); + }); +}); diff --git a/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.ts b/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.ts new file mode 100644 index 00000000..37194755 --- /dev/null +++ b/src/usecase/steps/commit/__tests__/detect_potential_problems_use_case.test.ts @@ -0,0 +1,724 @@ +/** + * Unit tests for DetectPotentialProblemsUseCase (bugbot on push). + * Covers: skip when OpenCode/issue missing, prompt with/without previous findings, + * new findings (add/update issue and PR comments), resolved_finding_ids, errors. 
+ */ + +import { DetectPotentialProblemsUseCase } from '../detect_potential_problems_use_case'; +import { Ai } from '../../../../data/model/ai'; +import type { Execution } from '../../../../data/model/execution'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logError: jest.fn(), + logDebugInfo: jest.fn(), +})); + +const mockListIssueComments = jest.fn(); +const mockAddComment = jest.fn(); +const mockUpdateComment = jest.fn(); +jest.mock('../../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + listIssueComments: mockListIssueComments, + addComment: mockAddComment, + updateComment: mockUpdateComment, + })), +})); + +const mockGetOpenPullRequestNumbersByHeadBranch = jest.fn(); +const mockListPullRequestReviewComments = jest.fn(); +const mockGetPullRequestHeadSha = jest.fn(); +const mockGetChangedFiles = jest.fn(); +const mockGetFilesWithFirstDiffLine = jest.fn(); +const mockCreateReviewWithComments = jest.fn(); +const mockUpdatePullRequestReviewComment = jest.fn(); +const mockResolvePullRequestReviewThread = jest.fn(); +jest.mock('../../../../data/repository/pull_request_repository', () => ({ + PullRequestRepository: jest.fn().mockImplementation(() => ({ + getOpenPullRequestNumbersByHeadBranch: mockGetOpenPullRequestNumbersByHeadBranch, + listPullRequestReviewComments: mockListPullRequestReviewComments, + getPullRequestHeadSha: mockGetPullRequestHeadSha, + getChangedFiles: mockGetChangedFiles, + getFilesWithFirstDiffLine: mockGetFilesWithFirstDiffLine, + createReviewWithComments: mockCreateReviewWithComments, + updatePullRequestReviewComment: mockUpdatePullRequestReviewComment, + resolvePullRequestReviewThread: mockResolvePullRequestReviewThread, + })), +})); + +const mockAskAgent = jest.fn(); +jest.mock('../../../../data/repository/ai_repository', () => ({ + AiRepository: jest.fn().mockImplementation(() => ({ + askAgent: mockAskAgent, + })), + OPENCODE_AGENT_PLAN: 'plan', +})); + 
+function baseParam(overrides: Record = {}): Execution { + return { + owner: 'owner', + repo: 'repo', + issueNumber: 42, + tokens: { token: 'token' }, + commit: { branch: 'feature/42-add-feature' }, + currentConfiguration: { parentBranch: 'develop' }, + branches: { development: 'develop' }, + ai: new Ai('http://localhost:4096', 'opencode/model', false, false, [], false, 'low', 20), + ...overrides, + } as unknown as Execution; +} + +describe('DetectPotentialProblemsUseCase', () => { + let useCase: DetectPotentialProblemsUseCase; + + beforeEach(() => { + useCase = new DetectPotentialProblemsUseCase(); + mockListIssueComments.mockReset(); + mockAddComment.mockReset(); + mockUpdateComment.mockReset(); + mockGetOpenPullRequestNumbersByHeadBranch.mockReset(); + mockListPullRequestReviewComments.mockReset(); + mockGetPullRequestHeadSha.mockReset(); + mockGetChangedFiles.mockReset(); + mockGetFilesWithFirstDiffLine.mockReset(); + mockCreateReviewWithComments.mockReset(); + mockUpdatePullRequestReviewComment.mockReset(); + mockResolvePullRequestReviewThread.mockReset(); + mockAskAgent.mockReset(); + + mockListIssueComments.mockResolvedValue([]); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([]); + mockGetFilesWithFirstDiffLine.mockResolvedValue([]); + }); + + it('returns empty results when OpenCode is not configured (no server URL)', async () => { + const param = baseParam({ + ai: new Ai('', 'opencode/model', false, false, [], false, 'low', 20), + }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(0); + expect(mockListIssueComments).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); + }); + + it('returns empty results when OpenCode is not configured (no model)', async () => { + const param = baseParam({ + ai: new Ai('http://localhost:4096', '', false, false, [], false, 'low', 20), + }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(0); + 
expect(mockAskAgent).not.toHaveBeenCalled(); + }); + + it('returns empty results when ai is undefined', async () => { + const param = baseParam({ ai: undefined }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(0); + expect(mockAskAgent).not.toHaveBeenCalled(); + }); + + it('returns empty results when issue number is -1', async () => { + const param = baseParam({ issueNumber: -1 }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(0); + expect(mockListIssueComments).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); + }); + + it('returns empty results when askAgent returns null', async () => { + mockAskAgent.mockResolvedValue(null); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(0); + expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAddComment).not.toHaveBeenCalled(); + }); + + it('returns empty results when askAgent returns a string (non-object)', async () => { + mockAskAgent.mockResolvedValue('plain text'); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(0); + expect(mockAddComment).not.toHaveBeenCalled(); + }); + + it('returns success with no-new-findings when response has no findings array', async () => { + mockAskAgent.mockResolvedValue({ other: 'data' }); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].steps?.[0]).toContain('no new findings, no resolved'); + expect(mockAddComment).not.toHaveBeenCalled(); + }); + + it('returns success with "no new findings, no resolved" when findings and resolved_finding_ids are empty', async () => { + mockAskAgent.mockResolvedValue({ findings: [], resolved_finding_ids: [] }); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + 
expect(results[0].executed).toBe(true); + expect(results[0].steps?.[0]).toContain('no new findings, no resolved'); + expect(mockAddComment).not.toHaveBeenCalled(); + expect(mockUpdateComment).not.toHaveBeenCalled(); + }); + + it('calls listIssueComments and askAgent with repo context and no previous block when no comments', async () => { + mockAskAgent.mockResolvedValue({ findings: [], resolved_finding_ids: [] }); + + await useCase.invoke(baseParam()); + + expect(mockListIssueComments).toHaveBeenCalledWith('owner', 'repo', 42, 'token'); + expect(mockAskAgent).toHaveBeenCalledTimes(1); + const prompt = mockAskAgent.mock.calls[0][2]; + expect(prompt).toContain('Owner: owner'); + expect(prompt).toContain('Repository: repo'); + expect(prompt).toContain('feature/42-add-feature'); + expect(prompt).toContain('develop'); + expect(prompt).not.toContain('Previously reported issues'); + }); + + it('when OpenCode returns one finding, adds comment on issue and does not update', async () => { + const finding = { + id: 'src/foo.ts:10:possible-null', + title: 'Possible null dereference', + description: 'Variable x may be null here.', + }; + mockAskAgent.mockResolvedValue({ findings: [finding] }); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].steps?.[0]).toContain('1 new/current finding(s)'); + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment).toHaveBeenCalledWith('owner', 'repo', 42, expect.any(String), 'token'); + expect(mockAddComment.mock.calls[0][3]).toContain('Possible null dereference'); + expect(mockAddComment.mock.calls[0][3]).toContain('gbf-bugbot'); + expect(mockAddComment.mock.calls[0][3]).toContain('finding_id:"src/foo.ts:10:possible-null"'); + expect(mockUpdateComment).not.toHaveBeenCalled(); + }); + + it('when OpenCode returns one finding and there is an open PR, creates review comments', async () => { + const finding = { + id: 
'src/bar.ts:5:missing-check', + title: 'Missing validation', + description: 'Add null check.', + file: 'src/bar.ts', + line: 5, + }; + mockAskAgent.mockResolvedValue({ findings: [finding] }); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([100]); + mockGetPullRequestHeadSha.mockResolvedValue('abc123'); + mockGetChangedFiles.mockResolvedValue([{ filename: 'src/bar.ts', status: 'modified' }]); + mockListPullRequestReviewComments.mockResolvedValue([]); + + await useCase.invoke(baseParam()); + + expect(mockCreateReviewWithComments).toHaveBeenCalledTimes(1); + expect(mockCreateReviewWithComments).toHaveBeenCalledWith( + 'owner', + 'repo', + 100, + 'abc123', + expect.arrayContaining([ + expect.objectContaining({ + path: 'src/bar.ts', + line: 5, + body: expect.stringContaining('Missing validation'), + }), + ]), + 'token' + ); + }); + + it('when finding already has issue comment, updates instead of adding', async () => { + const finding = { + id: 'existing-finding-id', + title: 'Existing problem', + description: 'Still there.', + }; + mockListIssueComments.mockResolvedValue([ + { + id: 999, + body: `## Existing problem\n\nDetails.\n\n`, + user: { login: 'bot' }, + }, + ]); + mockAskAgent.mockResolvedValue({ findings: [finding] }); + + await useCase.invoke(baseParam()); + + expect(mockUpdateComment).toHaveBeenCalledWith('owner', 'repo', 42, 999, expect.any(String), 'token'); + expect(mockAddComment).not.toHaveBeenCalled(); + }); + + it('when previous unresolved finding exists, prompt includes it and resolved_finding_ids marks it resolved', async () => { + mockListIssueComments.mockResolvedValue([ + { + id: 888, + body: `## Old bug\n\nDescription.\n\n`, + user: { login: 'bot' }, + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: ['old-bug-id'], + }); + + await useCase.invoke(baseParam()); + + const prompt = mockAskAgent.mock.calls[0][2]; + expect(prompt).toContain('Previously reported issues'); + 
expect(prompt).toContain('old-bug-id'); + expect(prompt).toContain('Old bug'); + + expect(mockUpdateComment).toHaveBeenCalledWith( + 'owner', + 'repo', + 42, + 888, + expect.stringContaining('Resolved'), + 'token' + ); + expect(mockUpdateComment.mock.calls[0][4]).toContain('resolved:true'); + }); + + it('when OpenCode returns resolved_finding_ids, updates PR review comment to resolved', async () => { + mockListIssueComments.mockResolvedValue([]); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([50]); + mockListPullRequestReviewComments.mockResolvedValue([ + { + id: 777, + body: `## PR finding\n\n`, + path: 'src/a.ts', + line: 1, + node_id: 'PRRC_node_777', + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: ['pr-finding'], + }); + + await useCase.invoke(baseParam()); + + expect(mockUpdatePullRequestReviewComment).toHaveBeenCalledWith( + 'owner', + 'repo', + 777, + expect.stringContaining('resolved:true'), + 'token' + ); + expect(mockResolvePullRequestReviewThread).toHaveBeenCalledWith( + 'owner', + 'repo', + 50, + 'PRRC_node_777', + 'token' + ); + }); + + it('does not mark as resolved when finding id is not in resolved_finding_ids', async () => { + mockListIssueComments.mockResolvedValue([ + { + id: 666, + body: `## Unfixed\n\n`, + user: {}, + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: [], // not including unfixed-id + }); + + await useCase.invoke(baseParam()); + + expect(mockUpdateComment).not.toHaveBeenCalled(); + }); + + it('returns failure result when askAgent throws', async () => { + mockAskAgent.mockRejectedValue(new Error('OpenCode timeout')); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].executed).toBe(true); + expect(results[0].errors?.some((e) => String(e).includes('DetectPotentialProblemsUseCase'))).toBe(true); + expect(results[0].errors?.some((e) => 
String(e).includes('OpenCode timeout'))).toBe(true); + }); + + it('step message includes both findings count and resolved count when both present', async () => { + mockAskAgent.mockResolvedValue({ + findings: [ + { id: 'new-1', title: 'New', description: 'D' }, + ], + resolved_finding_ids: ['old-1'], + }); + mockListIssueComments.mockResolvedValue([ + { id: 1, body: '', user: {} }, + ]); + + const results = await useCase.invoke(baseParam()); + + expect(results[0].success).toBe(true); + expect(results[0].steps?.[0]).toMatch(/1 new\/current finding\(s\).*1 marked as resolved/); + }); + + it('when there are no open PRs, does not call createReviewWithComments or getPullRequestHeadSha', async () => { + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([]); + mockAskAgent.mockResolvedValue({ + findings: [{ id: 'f1', title: 'T', description: 'D' }], + }); + + await useCase.invoke(baseParam()); + + expect(mockGetPullRequestHeadSha).not.toHaveBeenCalled(); + expect(mockCreateReviewWithComments).not.toHaveBeenCalled(); + expect(mockAddComment).toHaveBeenCalledTimes(1); + }); + + it('when finding has no file/line, no PR review comment is created (only issue comment)', async () => { + mockAskAgent.mockResolvedValue({ + findings: [{ id: 'no-loc', title: 'General issue', description: 'No location.' 
}], + }); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([200]); + mockGetPullRequestHeadSha.mockResolvedValue('sha1'); + mockGetChangedFiles.mockResolvedValue([{ filename: 'lib/helper.ts', status: 'modified' }]); + mockListPullRequestReviewComments.mockResolvedValue([]); + + await useCase.invoke(baseParam()); + + expect(mockAddComment).toHaveBeenCalledWith('owner', 'repo', 42, expect.any(String), 'token'); + expect(mockCreateReviewWithComments).not.toHaveBeenCalled(); + }); + + it('when existing finding has prCommentId for same PR, updates review comment instead of creating', async () => { + const finding = { + id: 'same-pr-finding', + title: 'Same', + description: 'Desc', + file: 'x.ts', + line: 1, + }; + mockListIssueComments.mockResolvedValue([]); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([60]); + mockListPullRequestReviewComments.mockResolvedValue([ + { + id: 555, + body: `## Same\n\n`, + path: 'x.ts', + line: 1, + }, + ]); + mockGetPullRequestHeadSha.mockResolvedValue('sha2'); + mockGetChangedFiles.mockResolvedValue([{ filename: 'x.ts', status: 'modified' }]); + mockAskAgent.mockResolvedValue({ findings: [finding] }); + + await useCase.invoke(baseParam()); + + expect(mockUpdatePullRequestReviewComment).toHaveBeenCalledWith( + 'owner', + 'repo', + 555, + expect.stringContaining('Same'), + 'token' + ); + expect(mockCreateReviewWithComments).not.toHaveBeenCalled(); + }); + + it('uses branches.development when currentConfiguration.parentBranch is undefined', async () => { + mockAskAgent.mockResolvedValue({ findings: [], resolved_finding_ids: [] }); + const param = baseParam({ + currentConfiguration: { parentBranch: undefined }, + branches: { development: 'main' }, + }); + + await useCase.invoke(param); + + const prompt = mockAskAgent.mock.calls[0][2]; + expect(prompt).toContain('Base branch: main'); + }); + + it('extracts title from comment body (## line) for previous findings in prompt', async () => { + 
mockListIssueComments.mockResolvedValue([ + { + id: 111, + body: `## Extracted Title Here\n\nSome body.\n\n`, + user: {}, + }, + ]); + mockAskAgent.mockResolvedValue({ findings: [], resolved_finding_ids: [] }); + + await useCase.invoke(baseParam()); + + const prompt = mockAskAgent.mock.calls[0][2]; + expect(prompt).toContain('Extracted Title Here'); + expect(prompt).toContain('ex-id'); + }); + + it('treats non-array findings as empty and returns success with no new findings', async () => { + mockAskAgent.mockResolvedValue({ findings: 'not-array' }); + + const results = await useCase.invoke(baseParam()); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].steps?.[0]).toContain('no new findings, no resolved'); + expect(mockAddComment).not.toHaveBeenCalled(); + }); + + it('does not update comment to resolved when already resolved in marker', async () => { + mockListIssueComments.mockResolvedValue([ + { + id: 222, + body: `## Already resolved\n\n`, + user: {}, + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: ['done-id'], // OpenCode says resolved again + }); + + await useCase.invoke(baseParam()); + + expect(mockUpdateComment).not.toHaveBeenCalled(); + }); + + describe('marker replacement (regex-based, tolerates format variations)', () => { + it('replaces marker in issue comment when marker has extra whitespace', async () => { + mockListIssueComments.mockResolvedValue([ + { + id: 333, + body: `## Whitespace variant\n\n`, + user: { login: 'bot' }, + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: ['spacey-id'], + }); + + await useCase.invoke(baseParam()); + + expect(mockUpdateComment).toHaveBeenCalledTimes(1); + expect(mockUpdateComment).toHaveBeenCalledWith( + 'owner', + 'repo', + 42, + 333, + expect.any(String), + 'token' + ); + const updatedBody = mockUpdateComment.mock.calls[0][4]; + expect(updatedBody).toContain('resolved:true'); + 
expect(updatedBody).toContain('**Resolved** (OpenCode confirmed fixed in latest analysis)'); + expect(updatedBody).toContain('gbf-bugbot'); + }); + + it('replaces marker in PR review comment when marker has extra whitespace', async () => { + mockListIssueComments.mockResolvedValue([]); + mockGetOpenPullRequestNumbersByHeadBranch.mockResolvedValue([80]); + mockListPullRequestReviewComments + .mockResolvedValueOnce([ + { + id: 444, + body: `## PR spacey\n\n`, + path: 'src/b.ts', + line: 1, + }, + ]) + .mockResolvedValueOnce([ + { + id: 444, + body: `## PR spacey\n\n`, + path: 'src/b.ts', + line: 1, + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: ['pr-spacey-id'], + }); + + await useCase.invoke(baseParam()); + + expect(mockUpdatePullRequestReviewComment).toHaveBeenCalledTimes(1); + const updatedBody = mockUpdatePullRequestReviewComment.mock.calls[0][3]; + expect(updatedBody).toContain('resolved:true'); + }); + + it('replaces marker when finding id contains regex-special characters', async () => { + const findingId = 'src/utils (helper).ts:10:possible-null'; + mockListIssueComments.mockResolvedValue([ + { + id: 555, + body: `## Regex id\n\n`, + user: {}, + }, + ]); + mockAskAgent.mockResolvedValue({ + findings: [], + resolved_finding_ids: [findingId], + }); + + await useCase.invoke(baseParam()); + + expect(mockUpdateComment).toHaveBeenCalledTimes(1); + const updatedBody = mockUpdateComment.mock.calls[0][4]; + expect(updatedBody).toContain('resolved:true'); + expect(updatedBody).toContain(findingId); + }); + + it('sanitizes finding id so HTML comment-breaking chars do not appear in marker', async () => { + const findingWithBadChars = 'file.ts:1:bad-->id\nhere'; + mockAskAgent.mockResolvedValue({ + findings: [ + { + id: findingWithBadChars, + title: 'Sanitized ID', + description: 'Finding with unsafe ID chars.', + }, + ], + }); + + await useCase.invoke(baseParam()); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + const body = 
mockAddComment.mock.calls[0][3]; + expect(body).toContain('gbf-bugbot'); + const markerMatch = body.match(/'); + expect(storedId).not.toContain(''); + expect(storedId).not.toContain('\n'); + expect(storedId).toBe('file.ts:1:badidwithnewlinehere'); + expect(body).toMatch(//); + }); + }); + + describe('bugbot pipeline: severity, ignore paths, limit', () => { + it('filters out findings below bugbot-severity (minSeverity)', async () => { + const param = baseParam({ + ai: new Ai('http://localhost:4096', 'opencode/model', false, false, [], false, 'medium', 20), + }); + mockAskAgent.mockResolvedValue({ + findings: [ + { id: 'low-1', title: 'Low severity', description: 'D', severity: 'low' }, + { id: 'high-1', title: 'High severity', description: 'D', severity: 'high' }, + ], + resolved_finding_ids: [], + }); + + await useCase.invoke(param); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment.mock.calls[0][3]).toContain('High severity'); + expect(mockAddComment.mock.calls[0][3]).not.toContain('Low severity'); + }); + + it('filters out findings with unsafe file path (path traversal, null byte, absolute)', async () => { + mockAskAgent.mockResolvedValue({ + findings: [ + { id: 'safe', title: 'Safe', description: 'D', file: 'src/foo.ts' }, + { id: 'traversal', title: 'Bad', description: 'D', file: '../../../etc/passwd' }, + { id: 'absolute', title: 'Absolute', description: 'D', file: '/etc/passwd' }, + ], + resolved_finding_ids: [], + }); + + await useCase.invoke(baseParam()); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment.mock.calls[0][3]).toContain('Safe'); + expect(mockAddComment.mock.calls[0][3]).not.toContain('Bad'); + expect(mockAddComment.mock.calls[0][3]).not.toContain('Absolute'); + }); + + it('filters out findings in ai-ignore-files paths', async () => { + const param = baseParam({ + ai: new Ai( + 'http://localhost:4096', + 'opencode/model', + false, + false, + ['src/ignored/*', '**/build/**'], + false, + 'low', 
+ 20 + ), + }); + mockAskAgent.mockResolvedValue({ + findings: [ + { id: 'ignored-1', title: 'In ignored dir', description: 'D', file: 'src/ignored/foo.ts' }, + { id: 'ok-1', title: 'Not ignored', description: 'D', file: 'src/app/bar.ts' }, + ], + resolved_finding_ids: [], + }); + + await useCase.invoke(param); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment.mock.calls[0][3]).toContain('Not ignored'); + expect(mockAddComment.mock.calls[0][3]).not.toContain('In ignored dir'); + }); + + it('when findings exceed limit, publishes max then one overflow summary comment on issue', async () => { + const manyFindings = Array.from({ length: 22 }, (_, i) => ({ + id: `f${i}`, + title: `Finding ${i}`, + description: 'Desc', + })); + mockAskAgent.mockResolvedValue({ + findings: manyFindings, + resolved_finding_ids: [], + }); + + await useCase.invoke(baseParam()); + + expect(mockAddComment).toHaveBeenCalled(); + const bodies = mockAddComment.mock.calls.map((c) => c[3] as string); + const overflowComment = bodies.find( + (b) => b.includes('More findings (comment limit)') || b.includes('more finding(s)') + ); + expect(overflowComment).toBeDefined(); + expect(overflowComment).toContain('more finding(s)'); + const findingComments = bodies.filter((b) => b.includes('gbf-bugbot') && b.includes('finding_id')); + expect(findingComments.length).toBe(20); + }); + + it('deduplicates findings by file:line before publishing', async () => { + mockAskAgent.mockResolvedValue({ + findings: [ + { id: 'first', title: 'First', description: 'D', file: 'src/same.ts', line: 5 }, + { id: 'second', title: 'Second', description: 'D', file: 'src/same.ts', line: 5 }, + ], + resolved_finding_ids: [], + }); + + await useCase.invoke(baseParam()); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment.mock.calls[0][3]).toContain('First'); + expect(mockAddComment.mock.calls[0][3]).not.toContain('Second'); + }); + }); +}); diff --git 
a/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.ts b/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.ts new file mode 100644 index 00000000..88fff198 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/__tests__/deduplicate_findings.test.ts @@ -0,0 +1,78 @@ +/** + * Unit tests for deduplicateFindings: dedupe by (file, line) or by title when no location. + */ + +import { deduplicateFindings } from '../deduplicate_findings'; +import type { BugbotFinding } from '../types'; + +function finding(overrides: Partial = {}): BugbotFinding { + return { + id: 'id-1', + title: 'Title', + description: 'Desc', + ...overrides, + }; +} + +describe('deduplicateFindings', () => { + it('returns empty array when input is empty', () => { + expect(deduplicateFindings([])).toEqual([]); + }); + + it('returns same array when no duplicates', () => { + const list = [ + finding({ id: 'a', file: 'a.ts', line: 1 }), + finding({ id: 'b', file: 'b.ts', line: 2 }), + ]; + expect(deduplicateFindings(list)).toEqual(list); + }); + + it('deduplicates by file:line (keeps first)', () => { + const list = [ + finding({ id: 'first', title: 'First', file: 'src/foo.ts', line: 10 }), + finding({ id: 'second', title: 'Second', file: 'src/foo.ts', line: 10 }), + ]; + const result = deduplicateFindings(list); + expect(result).toHaveLength(1); + expect(result[0].id).toBe('first'); + expect(result[0].title).toBe('First'); + }); + + it('deduplicates by normalized title when file/line missing (keeps first)', () => { + const list = [ + finding({ id: 'x', title: 'Same Title', description: 'A' }), + finding({ id: 'y', title: 'Same Title', description: 'B' }), + ]; + const result = deduplicateFindings(list); + expect(result).toHaveLength(1); + expect(result[0].id).toBe('x'); + }); + + it('uses first 80 chars of title for title-based key', () => { + const longTitle = 'A'.repeat(100); + const list = [ + finding({ id: '1', title: longTitle }), + finding({ id: '2', title: longTitle + 
' different tail' }), + ]; + const result = deduplicateFindings(list); + expect(result).toHaveLength(1); + }); + + it('trims file and uses line 0 when line undefined', () => { + const list = [ + finding({ id: 'a', file: ' p.ts ', line: undefined }), + finding({ id: 'b', file: 'p.ts', line: 0 }), + ]; + const result = deduplicateFindings(list); + expect(result).toHaveLength(1); + }); + + it('different file or line keeps both', () => { + const list = [ + finding({ id: '1', file: 'a.ts', line: 1 }), + finding({ id: '2', title: 'Other', file: 'a.ts', line: 2 }), + finding({ id: '3', title: 'Other', file: 'b.ts', line: 1 }), + ]; + expect(deduplicateFindings(list)).toHaveLength(3); + }); +}); diff --git a/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.ts b/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.ts new file mode 100644 index 00000000..cc8439e6 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/__tests__/file_ignore.test.ts @@ -0,0 +1,62 @@ +/** + * Unit tests for file_ignore: fileMatchesIgnorePatterns (glob-style path matching). 
+ */ + +import { fileMatchesIgnorePatterns } from '../file_ignore'; + +describe('fileMatchesIgnorePatterns', () => { + it('returns false when filePath is undefined or empty', () => { + expect(fileMatchesIgnorePatterns(undefined, ['src/**'])).toBe(false); + expect(fileMatchesIgnorePatterns('', ['*.test.ts'])).toBe(false); + expect(fileMatchesIgnorePatterns(' ', ['x'])).toBe(false); + }); + + it('returns false when ignorePatterns is empty', () => { + expect(fileMatchesIgnorePatterns('src/foo.ts', [])).toBe(false); + }); + + it('matches exact path', () => { + expect(fileMatchesIgnorePatterns('src/foo.ts', ['src/foo.ts'])).toBe(true); + expect(fileMatchesIgnorePatterns('src/foo.ts', ['other.ts'])).toBe(false); + }); + + it('matches glob * (any characters)', () => { + // * becomes .* so it matches across path segments + expect(fileMatchesIgnorePatterns('src/foo.test.ts', ['*.test.ts'])).toBe(true); + expect(fileMatchesIgnorePatterns('foo.test.ts', ['*.test.ts'])).toBe(true); + expect(fileMatchesIgnorePatterns('bar.test.ts', ['*.test.ts'])).toBe(true); + expect(fileMatchesIgnorePatterns('bar.spec.ts', ['*.test.ts'])).toBe(false); + }); + + it('matches pattern with path segments', () => { + expect(fileMatchesIgnorePatterns('src/utils/helper.ts', ['src/utils/*'])).toBe(true); + expect(fileMatchesIgnorePatterns('src/utils/helper.ts', ['src/*'])).toBe(true); + }); + + it('matches **/ style (directory prefix with /*)', () => { + // Implementation: pattern ending with /* becomes (\/.*)? 
so "src/utils/*" matches "src/utils" and "src/utils/anything" + expect(fileMatchesIgnorePatterns('src/utils/helper.ts', ['src/utils/*'])).toBe(true); + expect(fileMatchesIgnorePatterns('src/utils/deep/helper.ts', ['src/utils/*'])).toBe(true); + }); + + it('trims file path and patterns', () => { + expect(fileMatchesIgnorePatterns(' src/foo.ts ', [' src/foo.ts '])).toBe(true); + }); + + it('returns true if any pattern matches', () => { + expect( + fileMatchesIgnorePatterns('src/bar.ts', ['*.test.ts', 'src/bar.ts', 'other']) + ).toBe(true); + }); + + it('returns false if no pattern matches', () => { + expect( + fileMatchesIgnorePatterns('src/bar.ts', ['*.test.ts', 'build/*', 'docs/*']) + ).toBe(false); + }); + + it('escapes regex-special chars in pattern (literal match)', () => { + expect(fileMatchesIgnorePatterns('src/file (1).ts', ['src/file (1).ts'])).toBe(true); + expect(fileMatchesIgnorePatterns('src/file (2).ts', ['src/file (1).ts'])).toBe(false); + }); +}); diff --git a/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.ts b/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.ts new file mode 100644 index 00000000..731d639f --- /dev/null +++ b/src/usecase/steps/commit/bugbot/__tests__/limit_comments.test.ts @@ -0,0 +1,66 @@ +/** + * Unit tests for applyCommentLimit: max comments and overflow titles. 
+ */ + +import { BUGBOT_MAX_COMMENTS } from '../../../../../utils/constants'; +import { applyCommentLimit } from '../limit_comments'; +import type { BugbotFinding } from '../types'; + +function finding(id: string, title: string): BugbotFinding { + return { id, title, description: 'D' }; +} + +describe('applyCommentLimit', () => { + it('returns all findings when within limit', () => { + const list = [finding('1', 'A'), finding('2', 'B')]; + const result = applyCommentLimit(list); + expect(result.toPublish).toEqual(list); + expect(result.overflowCount).toBe(0); + expect(result.overflowTitles).toEqual([]); + }); + + it('returns all findings when exactly at limit', () => { + const list = Array.from({ length: BUGBOT_MAX_COMMENTS }, (_, i) => + finding(`id-${i}`, `Title ${i}`) + ); + const result = applyCommentLimit(list); + expect(result.toPublish).toHaveLength(BUGBOT_MAX_COMMENTS); + expect(result.overflowCount).toBe(0); + expect(result.overflowTitles).toEqual([]); + }); + + it('splits when over limit: toPublish first N, overflow rest', () => { + const limit = 5; + const list = Array.from({ length: 8 }, (_, i) => finding(`id-${i}`, `Title ${i}`)); + const result = applyCommentLimit(list, limit); + expect(result.toPublish).toHaveLength(limit); + expect(result.toPublish.map((f) => f.id)).toEqual(['id-0', 'id-1', 'id-2', 'id-3', 'id-4']); + expect(result.overflowCount).toBe(3); + expect(result.overflowTitles).toEqual(['Title 5', 'Title 6', 'Title 7']); + }); + + it('uses custom maxComments when provided', () => { + const list = [finding('1', 'A'), finding('2', 'B'), finding('3', 'C')]; + const result = applyCommentLimit(list, 2); + expect(result.toPublish).toHaveLength(2); + expect(result.overflowCount).toBe(1); + expect(result.overflowTitles).toEqual(['C']); + }); + + it('overflowTitles uses id when title is missing or empty', () => { + const list = [ + finding('id-with-title', 'Has Title'), + { id: 'id-no-title', title: '', description: 'x' } as BugbotFinding, + ]; + 
const result = applyCommentLimit(list, 1); + expect(result.overflowTitles).toContain('id-no-title'); + }); + + it('trims title in overflowTitles', () => { + const list = [ + finding('1', ' Trimmed '), + ]; + const result = applyCommentLimit(list, 0); + expect(result.overflowTitles).toEqual(['Trimmed']); + }); +}); diff --git a/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.ts b/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.ts new file mode 100644 index 00000000..47ccc5fa --- /dev/null +++ b/src/usecase/steps/commit/bugbot/__tests__/path_validation.test.ts @@ -0,0 +1,100 @@ +import { + isSafeFindingFilePath, + isAllowedPathForPr, + resolveFindingPathForPr, +} from '../path_validation'; + +describe('path_validation', () => { + describe('isSafeFindingFilePath', () => { + it('returns false for undefined or null', () => { + expect(isSafeFindingFilePath(undefined)).toBe(false); + expect(isSafeFindingFilePath(null as unknown as string)).toBe(false); + }); + + it('returns false for empty or whitespace-only', () => { + expect(isSafeFindingFilePath('')).toBe(false); + expect(isSafeFindingFilePath(' ')).toBe(false); + }); + + it('returns false for path containing null byte', () => { + expect(isSafeFindingFilePath('src/foo\x00.ts')).toBe(false); + expect(isSafeFindingFilePath('\x00etc/passwd')).toBe(false); + }); + + it('returns false for path containing ..', () => { + expect(isSafeFindingFilePath('../foo.ts')).toBe(false); + expect(isSafeFindingFilePath('src/../etc/passwd')).toBe(false); + expect(isSafeFindingFilePath('..')).toBe(false); + }); + + it('returns false for absolute paths', () => { + expect(isSafeFindingFilePath('/etc/passwd')).toBe(false); + expect(isSafeFindingFilePath('/src/foo.ts')).toBe(false); + expect(isSafeFindingFilePath('C:\\Users\\file.ts')).toBe(false); + expect(isSafeFindingFilePath('\\Windows\\path')).toBe(false); + }); + + it('returns true for relative paths without ..', () => { + 
expect(isSafeFindingFilePath('src/foo.ts')).toBe(true); + expect(isSafeFindingFilePath('lib/utils/helper.ts')).toBe(true); + expect(isSafeFindingFilePath('file.ts')).toBe(true); + expect(isSafeFindingFilePath(' src/bar.ts ')).toBe(true); + }); + }); + + describe('isAllowedPathForPr', () => { + const prFiles = [ + { filename: 'src/foo.ts', status: 'modified' }, + { filename: 'lib/bar.ts', status: 'added' }, + ]; + + it('returns false when path is unsafe', () => { + expect(isAllowedPathForPr('../../../etc/passwd', prFiles)).toBe(false); + expect(isAllowedPathForPr('/absolute', prFiles)).toBe(false); + }); + + it('returns false when path is not in prFiles', () => { + expect(isAllowedPathForPr('other/file.ts', prFiles)).toBe(false); + }); + + it('returns true when path is safe and in prFiles', () => { + expect(isAllowedPathForPr('src/foo.ts', prFiles)).toBe(true); + expect(isAllowedPathForPr(' src/foo.ts ', prFiles)).toBe(true); + expect(isAllowedPathForPr('lib/bar.ts', prFiles)).toBe(true); + }); + + it('returns false when prFiles is empty', () => { + expect(isAllowedPathForPr('src/foo.ts', [])).toBe(false); + }); + }); + + describe('resolveFindingPathForPr', () => { + const prFiles = [ + { filename: 'src/foo.ts', status: 'modified' }, + { filename: 'lib/bar.ts', status: 'added' }, + ]; + + it('returns finding file when valid and in prFiles', () => { + expect(resolveFindingPathForPr('src/foo.ts', prFiles)).toBe('src/foo.ts'); + expect(resolveFindingPathForPr('lib/bar.ts', prFiles)).toBe('lib/bar.ts'); + }); + + it('returns undefined when finding file is invalid (no fallback to wrong file)', () => { + expect(resolveFindingPathForPr('../../../etc/passwd', prFiles)).toBeUndefined(); + expect(resolveFindingPathForPr('/etc/passwd', prFiles)).toBeUndefined(); + }); + + it('returns undefined when finding file is not in prFiles', () => { + expect(resolveFindingPathForPr('other/file.ts', prFiles)).toBeUndefined(); + }); + + it('returns undefined when finding file is 
undefined', () => { + expect(resolveFindingPathForPr(undefined, prFiles)).toBeUndefined(); + }); + + it('returns undefined when prFiles is empty', () => { + expect(resolveFindingPathForPr('src/foo.ts', [])).toBeUndefined(); + expect(resolveFindingPathForPr(undefined, [])).toBeUndefined(); + }); + }); +}); diff --git a/src/usecase/steps/commit/bugbot/__tests__/severity.test.ts b/src/usecase/steps/commit/bugbot/__tests__/severity.test.ts new file mode 100644 index 00000000..9936ab31 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/__tests__/severity.test.ts @@ -0,0 +1,91 @@ +/** + * Unit tests for bugbot severity helpers: normalizeMinSeverity, severityLevel, meetsMinSeverity. + */ + +import { normalizeMinSeverity, severityLevel, meetsMinSeverity } from '../severity'; + +describe('normalizeMinSeverity', () => { + it('returns "low" when value is undefined', () => { + expect(normalizeMinSeverity(undefined)).toBe('low'); + }); + + it('returns "low" when value is empty string', () => { + expect(normalizeMinSeverity('')).toBe('low'); + }); + + it('returns the level when value is valid (case-insensitive)', () => { + expect(normalizeMinSeverity('info')).toBe('info'); + expect(normalizeMinSeverity('INFO')).toBe('info'); + expect(normalizeMinSeverity('low')).toBe('low'); + expect(normalizeMinSeverity('Low')).toBe('low'); + expect(normalizeMinSeverity('medium')).toBe('medium'); + expect(normalizeMinSeverity('MEDIUM')).toBe('medium'); + expect(normalizeMinSeverity('high')).toBe('high'); + expect(normalizeMinSeverity('High')).toBe('high'); + }); + + it('trims whitespace', () => { + expect(normalizeMinSeverity(' medium ')).toBe('medium'); + }); + + it('returns "low" when value is invalid', () => { + expect(normalizeMinSeverity('critical')).toBe('low'); + expect(normalizeMinSeverity('unknown')).toBe('low'); + expect(normalizeMinSeverity('1')).toBe('low'); + }); +}); + +describe('severityLevel', () => { + it('returns numeric order for valid severities', () => { + 
expect(severityLevel('info')).toBe(0); + expect(severityLevel('low')).toBe(1); + expect(severityLevel('medium')).toBe(2); + expect(severityLevel('high')).toBe(3); + }); + + it('returns low (1) when severity is undefined or empty', () => { + expect(severityLevel(undefined)).toBe(1); + expect(severityLevel('')).toBe(1); + }); + + it('returns low for unknown severity', () => { + expect(severityLevel('critical')).toBe(1); + }); + + it('is case-insensitive', () => { + expect(severityLevel('HIGH')).toBe(3); + expect(severityLevel(' Medium ')).toBe(2); + }); +}); + +describe('meetsMinSeverity', () => { + it('returns true when finding severity equals min', () => { + expect(meetsMinSeverity('low', 'low')).toBe(true); + expect(meetsMinSeverity('medium', 'medium')).toBe(true); + expect(meetsMinSeverity('high', 'high')).toBe(true); + }); + + it('returns true when finding severity is above min', () => { + expect(meetsMinSeverity('high', 'low')).toBe(true); + expect(meetsMinSeverity('high', 'medium')).toBe(true); + expect(meetsMinSeverity('medium', 'low')).toBe(true); + expect(meetsMinSeverity('medium', 'info')).toBe(true); + }); + + it('returns false when finding severity is below min', () => { + expect(meetsMinSeverity('info', 'low')).toBe(false); + expect(meetsMinSeverity('low', 'medium')).toBe(false); + expect(meetsMinSeverity('medium', 'high')).toBe(false); + }); + + it('treats undefined finding severity as low', () => { + expect(meetsMinSeverity(undefined, 'info')).toBe(true); // low >= info + expect(meetsMinSeverity(undefined, 'low')).toBe(true); + expect(meetsMinSeverity(undefined, 'medium')).toBe(false); + }); + + it('handles case-insensitive finding severity', () => { + expect(meetsMinSeverity('HIGH', 'medium')).toBe(true); + expect(meetsMinSeverity('Low', 'medium')).toBe(false); + }); +}); diff --git a/src/usecase/steps/commit/bugbot/build_bugbot_prompt.ts b/src/usecase/steps/commit/bugbot/build_bugbot_prompt.ts new file mode 100644 index 00000000..46e67576 --- 
/dev/null +++ b/src/usecase/steps/commit/bugbot/build_bugbot_prompt.ts @@ -0,0 +1,28 @@ +import type { Execution } from "../../../../data/model/execution"; +import type { BugbotContext } from "./types"; + +export function buildBugbotPrompt(param: Execution, context: BugbotContext): string { + const headBranch = param.commit.branch; + const baseBranch = param.currentConfiguration.parentBranch ?? param.branches.development ?? 'develop'; + const previousBlock = context.previousFindingsBlock; + const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? []; + const ignoreBlock = + ignorePatterns.length > 0 + ? `\n**Files to ignore:** Do not report findings in files or paths matching these patterns: ${ignorePatterns.join(', ')}.` + : ''; + + return `You are analyzing the latest code changes for potential bugs and issues. + +**Repository context:** +- Owner: ${param.owner} +- Repository: ${param.repo} +- Branch (head): ${headBranch} +- Base branch: ${baseBranch} +- Issue number: ${param.issueNumber} +${ignoreBlock} + +**Your task 1 (new/current problems):** Determine what has changed in the branch "${headBranch}" compared to "${baseBranch}" (you must compute or obtain the diff yourself using the repository context above). Then identify potential bugs, logic errors, security issues, and code quality problems. Be strict and descriptive. One finding per distinct problem. Return them in the \`findings\` array (each with id, title, description; optionally file, line, severity, suggestion). Only include findings in files that are not in the ignore list above. 
+${previousBlock} + +**Output:** Return a JSON object with: "findings" (array of new/current problems from task 1), and if we gave you previously reported issues above, "resolved_finding_ids" (array of those ids that are now fixed or no longer apply, as per task 2).`; +} diff --git a/src/usecase/steps/commit/bugbot/deduplicate_findings.ts b/src/usecase/steps/commit/bugbot/deduplicate_findings.ts new file mode 100644 index 00000000..9a5b62af --- /dev/null +++ b/src/usecase/steps/commit/bugbot/deduplicate_findings.ts @@ -0,0 +1,25 @@ +import type { BugbotFinding } from "./types"; + +/** + * Deduplicates findings by (file, line). When two findings share the same file and line, + * keeps the first; when they have no file, groups by normalized title and keeps the first. + * This reduces noise when the agent returns near-duplicate issues. + */ +export function deduplicateFindings(findings: BugbotFinding[]): BugbotFinding[] { + const seen = new Set(); + const result: BugbotFinding[] = []; + + for (const f of findings) { + const file = f.file?.trim() ?? ''; + const line = f.line ?? 0; + const key = + file || line + ? `${file}:${line}` + : `title:${(f.title ?? '').toLowerCase().trim().slice(0, 80)}`; + if (seen.has(key)) continue; + seen.add(key); + result.push(f); + } + + return result; +} diff --git a/src/usecase/steps/commit/bugbot/file_ignore.ts b/src/usecase/steps/commit/bugbot/file_ignore.ts new file mode 100644 index 00000000..bf806a74 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/file_ignore.ts @@ -0,0 +1,22 @@ +/** + * Returns true if the file path matches any of the ignore patterns (glob-style). + * Used to exclude findings in test files, build output, etc. 
+ */ +export function fileMatchesIgnorePatterns(filePath: string | undefined, ignorePatterns: string[]): boolean { + if (!filePath || ignorePatterns.length === 0) return false; + const normalized = filePath.trim(); + if (!normalized) return false; + + return ignorePatterns.some((pattern) => { + const p = pattern.trim(); + if (!p) return false; + const regexPattern = p + .replace(/[.+?^${}()|[\]\\]/g, '\\$&') + .replace(/\*/g, '.*') + .replace(/\//g, '\\/'); + const regex = p.endsWith('/*') + ? new RegExp(`^${regexPattern.replace(/\\\/\.\*$/, '(\\/.*)?')}$`) + : new RegExp(`^${regexPattern}$`); + return regex.test(normalized); + }); +} diff --git a/src/usecase/steps/commit/bugbot/limit_comments.ts b/src/usecase/steps/commit/bugbot/limit_comments.ts new file mode 100644 index 00000000..10f1e893 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/limit_comments.ts @@ -0,0 +1,31 @@ +import { BUGBOT_MAX_COMMENTS } from "../../../../utils/constants"; +import type { BugbotFinding } from "./types"; + +export interface ApplyLimitResult { + /** Findings to publish as individual comments (up to maxComments). */ + toPublish: BugbotFinding[]; + /** Number of findings not published as individual comments. */ + overflowCount: number; + /** Titles of overflow findings (for the summary comment). */ + overflowTitles: string[]; +} + +/** + * Applies the max-comments limit: returns the first N findings to publish individually, + * and overflow count + titles for a single "revisar en local" summary comment. 
+ */ +export function applyCommentLimit( + findings: BugbotFinding[], + maxComments: number = BUGBOT_MAX_COMMENTS +): ApplyLimitResult { + if (findings.length <= maxComments) { + return { toPublish: findings, overflowCount: 0, overflowTitles: [] }; + } + const toPublish = findings.slice(0, maxComments); + const overflow = findings.slice(maxComments); + return { + toPublish, + overflowCount: overflow.length, + overflowTitles: overflow.map((f) => f.title?.trim() || f.id).filter(Boolean), + }; +} diff --git a/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.ts b/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.ts new file mode 100644 index 00000000..fd9ab7e5 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/load_bugbot_context_use_case.ts @@ -0,0 +1,134 @@ +import type { Execution } from "../../../../data/model/execution"; +import { IssueRepository } from "../../../../data/repository/issue_repository"; +import { PullRequestRepository } from "../../../../data/repository/pull_request_repository"; +import type { BugbotContext, ExistingByFindingId } from "./types"; +import { parseMarker } from "./marker"; + +function buildPreviousFindingsBlock(previousFindings: Array<{ id: string; fullBody: string }>): string { + if (previousFindings.length === 0) return ''; + const items = previousFindings + .map( + (p) => + `---\n**Finding id (use this exact id in resolved_finding_ids if resolved/no longer applies):** \`${p.id.replace(/`/g, '\\`')}\`\n\n**Full comment as posted (including metadata at the end):**\n${p.fullBody}\n` + ) + .join('\n'); + return ` +**Previously reported issues (not yet marked resolved).** For each one we show the exact comment we posted (title, description, location, suggestion, and a hidden marker with the finding id at the end). 
+ +${items} +**Your task 2:** For each finding above, analyze the current code and decide: +- If the problem **still exists** (same code or same issue present): do **not** include its id in \`resolved_finding_ids\`. +- If the problem **no longer applies** (e.g. that code was removed or refactored away): include its id in \`resolved_finding_ids\`. +- If the problem **has been fixed** (code was changed and the issue is resolved): include its id in \`resolved_finding_ids\`. + +Return in \`resolved_finding_ids\` only the ids from the list above that are now fixed or no longer apply. Use the exact id shown in each "Finding id" line.`; +} + +/** + * Loads all context needed for bugbot: existing findings from issue + PR comments, + * open PR numbers, and the prompt block for previously reported issues. + * Also loads PR context (head sha, files, diff lines) for the first open PR. + */ +export async function loadBugbotContext(param: Execution): Promise { + const issueNumber = param.issueNumber; + const headBranch = param.commit.branch; + const token = param.tokens.token; + const owner = param.owner; + const repo = param.repo; + + const issueRepository = new IssueRepository(); + const pullRequestRepository = new PullRequestRepository(); + + const issueComments = await issueRepository.listIssueComments(owner, repo, issueNumber, token); + const existingByFindingId: ExistingByFindingId = {}; + for (const c of issueComments) { + for (const { findingId, resolved } of parseMarker(c.body)) { + if (!existingByFindingId[findingId]) { + existingByFindingId[findingId] = { issueCommentId: c.id, resolved }; + } else { + existingByFindingId[findingId].issueCommentId = c.id; + existingByFindingId[findingId].resolved = resolved; + } + } + } + + const openPrNumbers = await pullRequestRepository.getOpenPullRequestNumbersByHeadBranch( + owner, + repo, + headBranch, + token + ); + + /** Full comment body per finding id (from PR when we don't have issue comment). 
*/ + const prFindingIdToBody: Record = {}; + for (const prNumber of openPrNumbers) { + const prComments = await pullRequestRepository.listPullRequestReviewComments( + owner, + repo, + prNumber, + token + ); + for (const c of prComments) { + const body = c.body ?? ''; + for (const { findingId, resolved } of parseMarker(body)) { + if (!existingByFindingId[findingId]) { + existingByFindingId[findingId] = { resolved }; + } + existingByFindingId[findingId].prCommentId = c.id; + existingByFindingId[findingId].prNumber = prNumber; + existingByFindingId[findingId].resolved = resolved; + prFindingIdToBody[findingId] = body; + } + } + } + + /** Unresolved findings with full comment body (including hidden marker) for OpenCode to re-evaluate. */ + const previousFindingsForPrompt: Array<{ id: string; fullBody: string }> = []; + for (const [findingId, data] of Object.entries(existingByFindingId)) { + if (data.resolved) continue; + const issueBody = issueComments.find((c) => c.id === data.issueCommentId)?.body ?? null; + const fullBody = (issueBody ?? prFindingIdToBody[findingId] ?? 
'').trim(); + if (fullBody) { + previousFindingsForPrompt.push({ id: findingId, fullBody }); + } + } + + const previousFindingsBlock = buildPreviousFindingsBlock(previousFindingsForPrompt); + + let prContext: BugbotContext['prContext'] = null; + if (openPrNumbers.length > 0) { + const prHeadSha = await pullRequestRepository.getPullRequestHeadSha( + owner, + repo, + openPrNumbers[0], + token + ); + if (prHeadSha) { + const prFiles = await pullRequestRepository.getChangedFiles( + owner, + repo, + openPrNumbers[0], + token + ); + const filesWithLines = await pullRequestRepository.getFilesWithFirstDiffLine( + owner, + repo, + openPrNumbers[0], + token + ); + const pathToFirstDiffLine: Record = {}; + for (const { path, firstLine } of filesWithLines) { + pathToFirstDiffLine[path] = firstLine; + } + prContext = { prHeadSha, prFiles, pathToFirstDiffLine }; + } + } + + return { + existingByFindingId, + issueComments, + openPrNumbers, + previousFindingsBlock, + prContext, + }; +} diff --git a/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.ts b/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.ts new file mode 100644 index 00000000..e32622bd --- /dev/null +++ b/src/usecase/steps/commit/bugbot/mark_findings_resolved_use_case.ts @@ -0,0 +1,123 @@ +import type { Execution } from "../../../../data/model/execution"; +import { IssueRepository } from "../../../../data/repository/issue_repository"; +import { PullRequestRepository } from "../../../../data/repository/pull_request_repository"; +import { logDebugInfo, logError } from "../../../../utils/logger"; +import type { BugbotContext } from "./types"; +import { buildMarker, replaceMarkerInBody, sanitizeFindingIdForMarker } from "./marker"; + +export interface MarkFindingsResolvedParam { + execution: Execution; + context: BugbotContext; + resolvedFindingIds: Set; + normalizedResolvedIds: Set; +} + +/** + * Marks as resolved the findings that OpenCode reported as fixed. 
+ * Updates issue comments (with visible "Resolved" note) and PR review comments (marker only + resolve thread). + */ +export async function markFindingsResolved(param: MarkFindingsResolvedParam): Promise { + const { execution, context, resolvedFindingIds, normalizedResolvedIds } = param; + const { existingByFindingId, issueComments } = context; + const issueNumber = execution.issueNumber; + const token = execution.tokens.token; + const owner = execution.owner; + const repo = execution.repo; + + const issueRepository = new IssueRepository(); + const pullRequestRepository = new PullRequestRepository(); + + for (const [findingId, existing] of Object.entries(existingByFindingId)) { + const isResolvedByOpenCode = + resolvedFindingIds.has(findingId) || + normalizedResolvedIds.has(sanitizeFindingIdForMarker(findingId)); + if (existing.resolved || !isResolvedByOpenCode) continue; + + const resolvedNote = '\n\n---\n**Resolved** (OpenCode confirmed fixed in latest analysis).\n'; + const markerTrue = buildMarker(findingId, true); + const replacementWithNote = resolvedNote + markerTrue; + + if (existing.issueCommentId != null) { + const comment = issueComments.find((c) => c.id === existing.issueCommentId); + if (comment == null) { + logError( + `[Bugbot] No se encontró el comentario de la issue para marcar como resuelto. findingId="${findingId}", issueCommentId=${existing.issueCommentId}, issueNumber=${issueNumber}, owner=${owner}, repo=${repo}.` + ); + } else { + const resolvedBody = comment.body ?? 
''; + const { updated, replaced } = replaceMarkerInBody( + resolvedBody, + findingId, + true, + replacementWithNote + ); + if (replaced) { + try { + await issueRepository.updateComment( + owner, + repo, + issueNumber, + existing.issueCommentId, + updated.trimEnd(), + token + ); + logDebugInfo(`Marked finding "${findingId}" as resolved on issue #${issueNumber} (comment ${existing.issueCommentId}).`); + } catch (err) { + logError( + `[Bugbot] Error al actualizar comentario de la issue (marcar como resuelto). findingId="${findingId}", issueCommentId=${existing.issueCommentId}, issueNumber=${issueNumber}: ${err}` + ); + } + } + } + } + if (existing.prCommentId != null && existing.prNumber != null) { + const prCommentsList = await pullRequestRepository.listPullRequestReviewComments( + owner, + repo, + existing.prNumber, + token + ); + const prComment = prCommentsList.find((c) => c.id === existing.prCommentId); + if (prComment == null) { + logError( + `[Bugbot] No se encontró el comentario de la PR para marcar como resuelto. findingId="${findingId}", prCommentId=${existing.prCommentId}, prNumber=${existing.prNumber}, owner=${owner}, repo=${repo}.` + ); + } else { + const prBody = prComment.body ?? ''; + const { updated, replaced } = replaceMarkerInBody( + prBody, + findingId, + true, + markerTrue + ); + if (replaced) { + try { + await pullRequestRepository.updatePullRequestReviewComment( + owner, + repo, + existing.prCommentId, + updated.trimEnd(), + token + ); + logDebugInfo( + `Marked finding "${findingId}" as resolved on PR #${existing.prNumber} (review comment ${existing.prCommentId}).` + ); + if (prComment.node_id) { + await pullRequestRepository.resolvePullRequestReviewThread( + owner, + repo, + existing.prNumber, + prComment.node_id, + token + ); + } + } catch (err) { + logError( + `[Bugbot] Error al actualizar comentario de revisión de la PR (marcar como resuelto). 
findingId="${findingId}", prCommentId=${existing.prCommentId}, prNumber=${existing.prNumber}: ${err}` + ); + } + } + } + } + } +} diff --git a/src/usecase/steps/commit/bugbot/marker.ts b/src/usecase/steps/commit/bugbot/marker.ts new file mode 100644 index 00000000..fa28170a --- /dev/null +++ b/src/usecase/steps/commit/bugbot/marker.ts @@ -0,0 +1,90 @@ +import { BUGBOT_MARKER_PREFIX } from "../../../../utils/constants"; +import { logError } from "../../../../utils/logger"; +import type { BugbotFinding } from "./types"; + +/** Sanitize finding ID so it cannot break HTML comment syntax (e.g. -->, , newlines, quotes). */ +export function sanitizeFindingIdForMarker(findingId: string): string { + return findingId + .replace(/-->/g, '') + .replace(//g, '') + .replace(/"/g, '') + .replace(/\r\n|\r|\n/g, '') + .trim(); +} + +export function buildMarker(findingId: string, resolved: boolean): string { + const safeId = sanitizeFindingIdForMarker(findingId); + return ``; +} + +export function parseMarker(body: string | null): Array<{ findingId: string; resolved: boolean }> { + if (!body) return []; + const results: Array<{ findingId: string; resolved: boolean }> = []; + const regex = new RegExp( + ``, + 'g' + ); + let m: RegExpExecArray | null; + while ((m = regex.exec(body)) !== null) { + results.push({ findingId: m[1], resolved: m[2] === 'true' }); + } + return results; +} + +/** Regex to match the marker for a specific finding (same flexible format as parseMarker). */ +export function markerRegexForFinding(findingId: string): RegExp { + const safeId = sanitizeFindingIdForMarker(findingId); + const escapedId = safeId.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + return new RegExp( + ``, + 'g' + ); +} + +/** + * Find the marker for this finding in body (using same pattern as parseMarker) and replace it. + * Returns the updated body and whether a replacement was made. Logs an error with details if no replacement occurred. 
+ */ +export function replaceMarkerInBody( + body: string, + findingId: string, + newResolved: boolean, + replacement?: string +): { updated: string; replaced: boolean } { + const regex = markerRegexForFinding(findingId); + const newMarker = replacement ?? buildMarker(findingId, newResolved); + const updated = body.replace(regex, newMarker); + const replaced = updated !== body; + if (!replaced) { + logError( + `[Bugbot] No se pudo marcar como resuelto: no se encontró el marcador en el comentario. findingId="${findingId}", bodyLength=${body?.length ?? 0}, bodySnippet=${(body ?? '').slice(0, 200)}...` + ); + } + return { updated, replaced }; +} + +/** Extract title from comment body (first ## line) for context when sending to OpenCode. */ +export function extractTitleFromBody(body: string | null): string { + if (!body) return ''; + const match = body.match(/^##\s+(.+)$/m); + return (match?.[1] ?? '').trim(); +} + +export function buildCommentBody(finding: BugbotFinding, resolved: boolean): string { + const severity = finding.severity ? `**Severity:** ${finding.severity}\n\n` : ''; + const fileLine = + finding.file != null + ? `**Location:** \`${finding.file}${finding.line != null ? `:${finding.line}` : ''}\`\n\n` + : ''; + const suggestion = finding.suggestion + ? `**Suggested fix:**\n${finding.suggestion}\n\n` + : ''; + const resolvedNote = resolved ? '\n\n---\n**Resolved** (no longer reported in latest analysis).\n' : ''; + const marker = buildMarker(finding.id, resolved); + return `## ${finding.title} + +${severity}${fileLine}${finding.description} +${suggestion}${resolvedNote}${marker}`; +} diff --git a/src/usecase/steps/commit/bugbot/path_validation.ts b/src/usecase/steps/commit/bugbot/path_validation.ts new file mode 100644 index 00000000..368de89b --- /dev/null +++ b/src/usecase/steps/commit/bugbot/path_validation.ts @@ -0,0 +1,53 @@ +/** + * Path validation for AI-returned finding.file to prevent path traversal and misuse. 
+ * Rejects paths containing '..', null bytes, or absolute paths. + */ + +const NULL_BYTE = '\0'; +const PARENT_SEGMENT = '..'; +const SLASH = '/'; +const BACKSLASH = '\\'; + +/** + * Returns true if the path is safe to use: no '..', no null bytes, not absolute. + * Does not check against a list of allowed files; use isAllowedPathForPr for that. + */ +export function isSafeFindingFilePath(path: string | undefined): boolean { + if (path == null || typeof path !== 'string') return false; + const trimmed = path.trim(); + if (trimmed.length === 0) return false; + if (trimmed.includes(NULL_BYTE)) return false; + if (trimmed.includes(PARENT_SEGMENT)) return false; + if (trimmed.startsWith(SLASH)) return false; + if (/^[a-zA-Z]:[/\\]/.test(trimmed)) return false; + if (trimmed.startsWith(BACKSLASH)) return false; + return true; +} + +/** + * Returns true if path is safe (isSafeFindingFilePath) and is in the list of PR changed files. + * Used to validate finding.file before using it for PR review comments. + */ +export function isAllowedPathForPr( + path: string | undefined, + prFiles: Array<{ filename: string }> +): boolean { + if (!isSafeFindingFilePath(path)) return false; + if (prFiles.length === 0) return false; + const normalized = path!.trim(); + return prFiles.some((f) => f.filename === normalized); +} + +/** + * Resolves the file path to use for a PR review comment: finding.file if valid and in prFiles. + * Returns undefined when the finding's file is not in the PR so we do not attach the comment + * to the wrong file (e.g. the first file in the list). 
+ */ +export function resolveFindingPathForPr( + findingFile: string | undefined, + prFiles: Array<{ filename: string; status: string }> +): string | undefined { + if (prFiles.length === 0) return undefined; + if (isAllowedPathForPr(findingFile, prFiles)) return findingFile!.trim(); + return undefined; +} diff --git a/src/usecase/steps/commit/bugbot/publish_findings_use_case.ts b/src/usecase/steps/commit/bugbot/publish_findings_use_case.ts new file mode 100644 index 00000000..2421d798 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/publish_findings_use_case.ts @@ -0,0 +1,98 @@ +import type { Execution } from "../../../../data/model/execution"; +import { IssueRepository } from "../../../../data/repository/issue_repository"; +import { PullRequestRepository } from "../../../../data/repository/pull_request_repository"; +import { logDebugInfo } from "../../../../utils/logger"; +import type { BugbotContext } from "./types"; +import type { BugbotFinding } from "./types"; +import { buildCommentBody } from "./marker"; +import { resolveFindingPathForPr } from "./path_validation"; + +export interface PublishFindingsParam { + execution: Execution; + context: BugbotContext; + findings: BugbotFinding[]; + /** When findings were limited by max comments, add one summary comment with this overflow info. */ + overflowCount?: number; + overflowTitles?: string[]; +} + +/** + * Publishes current findings to issue and PR: creates or updates issue comments, + * creates or updates PR review comments (or creates new ones). 
+ */ +export async function publishFindings(param: PublishFindingsParam): Promise { + const { execution, context, findings, overflowCount = 0, overflowTitles = [] } = param; + const { existingByFindingId, openPrNumbers, prContext } = context; + const issueNumber = execution.issueNumber; + const token = execution.tokens.token; + const owner = execution.owner; + const repo = execution.repo; + + const issueRepository = new IssueRepository(); + const pullRequestRepository = new PullRequestRepository(); + + const prFiles = prContext?.prFiles ?? []; + const pathToFirstDiffLine = prContext?.pathToFirstDiffLine ?? {}; + const prCommentsToCreate: Array<{ path: string; line: number; body: string }> = []; + + for (const finding of findings) { + const existing = existingByFindingId[finding.id]; + const commentBody = buildCommentBody(finding, false); + + if (existing?.issueCommentId != null) { + await issueRepository.updateComment( + owner, + repo, + issueNumber, + existing.issueCommentId, + commentBody, + token + ); + logDebugInfo(`Updated bugbot comment for finding ${finding.id} on issue.`); + } else { + await issueRepository.addComment(owner, repo, issueNumber, commentBody, token); + logDebugInfo(`Added bugbot comment for finding ${finding.id} on issue.`); + } + + if (prContext && openPrNumbers.length > 0) { + const path = resolveFindingPathForPr(finding.file, prFiles); + if (path) { + const line = finding.line ?? pathToFirstDiffLine[path] ?? 
1; + if (existing?.prCommentId != null && existing.prNumber === openPrNumbers[0]) { + await pullRequestRepository.updatePullRequestReviewComment( + owner, + repo, + existing.prCommentId, + commentBody, + token + ); + } else { + prCommentsToCreate.push({ path, line, body: commentBody }); + } + } + } + } + + if (prCommentsToCreate.length > 0 && prContext && openPrNumbers.length > 0) { + await pullRequestRepository.createReviewWithComments( + owner, + repo, + openPrNumbers[0], + prContext.prHeadSha, + prCommentsToCreate, + token + ); + } + + if (overflowCount > 0) { + const titlesList = + overflowTitles.length > 0 + ? '\n- ' + overflowTitles.slice(0, 15).join('\n- ') + (overflowTitles.length > 15 ? `\n- ... and ${overflowTitles.length - 15} more` : '') + : ''; + const overflowBody = `## More findings (comment limit) + +There are **${overflowCount}** more finding(s) that were not published as individual comments. Review locally or in the full diff to see the list.${titlesList}`; + await issueRepository.addComment(owner, repo, issueNumber, overflowBody, token); + logDebugInfo(`Added overflow comment: ${overflowCount} additional finding(s) not published individually.`); + } +} diff --git a/src/usecase/steps/commit/bugbot/schema.ts b/src/usecase/steps/commit/bugbot/schema.ts new file mode 100644 index 00000000..94c5e90a --- /dev/null +++ b/src/usecase/steps/commit/bugbot/schema.ts @@ -0,0 +1,31 @@ +/** OpenCode response schema: agent computes diff, returns new findings and which previous ones are resolved. */ +export const BUGBOT_RESPONSE_SCHEMA = { + type: 'object', + properties: { + findings: { + type: 'array', + items: { + type: 'object', + properties: { + id: { type: 'string', description: 'Stable unique id for this finding (e.g. 
file:line:summary)' }, + title: { type: 'string', description: 'Short title of the problem' }, + description: { type: 'string', description: 'Clear explanation of the issue' }, + file: { type: 'string', description: 'Repository-relative path when applicable' }, + line: { type: 'number', description: 'Line number when applicable' }, + severity: { type: 'string', description: 'Severity: high, medium, low, or info. Findings below the configured minimum are not published.' }, + suggestion: { type: 'string', description: 'Suggested fix when applicable' }, + }, + required: ['id', 'title', 'description'], + additionalProperties: true, + }, + }, + resolved_finding_ids: { + type: 'array', + items: { type: 'string' }, + description: + 'Ids of previously reported issues (from the list we sent) that are now fixed in the current code. Only include ids we asked you to check.', + }, + }, + required: ['findings'], + additionalProperties: false, +} as const; diff --git a/src/usecase/steps/commit/bugbot/severity.ts b/src/usecase/steps/commit/bugbot/severity.ts new file mode 100644 index 00000000..0dbd9f2f --- /dev/null +++ b/src/usecase/steps/commit/bugbot/severity.ts @@ -0,0 +1,31 @@ +export type SeverityLevel = 'info' | 'low' | 'medium' | 'high'; + +const VALID_SEVERITIES: SeverityLevel[] = ['info', 'low', 'medium', 'high']; + +/** Normalizes user input to a valid SeverityLevel; defaults to 'low' if invalid. */ +export function normalizeMinSeverity(value: string | undefined): SeverityLevel { + if (!value) return 'low'; + const normalized = value.toLowerCase().trim() as SeverityLevel; + return VALID_SEVERITIES.includes(normalized) ? normalized : 'low'; +} + +const SEVERITY_ORDER: Record = { + info: 0, + low: 1, + medium: 2, + high: 3, +}; + +export function severityLevel(severity: string | undefined): number { + if (!severity) return SEVERITY_ORDER.low; + const normalized = severity.toLowerCase().trim() as SeverityLevel; + return SEVERITY_ORDER[normalized] ?? 
SEVERITY_ORDER.low; +} + +/** Returns true if the finding's severity is at or above the minimum threshold. */ +export function meetsMinSeverity( + findingSeverity: string | undefined, + minSeverity: SeverityLevel +): boolean { + return severityLevel(findingSeverity) >= SEVERITY_ORDER[minSeverity]; +} diff --git a/src/usecase/steps/commit/bugbot/types.ts b/src/usecase/steps/commit/bugbot/types.ts new file mode 100644 index 00000000..44a5cd60 --- /dev/null +++ b/src/usecase/steps/commit/bugbot/types.ts @@ -0,0 +1,33 @@ +/** Single finding from OpenCode (agent computes changes and returns these). */ +export interface BugbotFinding { + id: string; + title: string; + description: string; + file?: string; + line?: number; + severity?: string; + suggestion?: string; +} + +export interface ExistingFindingInfo { + issueCommentId?: number; + prCommentId?: number; + prNumber?: number; + resolved: boolean; +} + +export type ExistingByFindingId = Record; + +export interface BugbotPrContext { + prHeadSha: string; + prFiles: Array<{ filename: string; status: string }>; + pathToFirstDiffLine: Record; +} + +export interface BugbotContext { + existingByFindingId: ExistingByFindingId; + issueComments: Array<{ id: number; body: string | null }>; + openPrNumbers: number[]; + previousFindingsBlock: string; + prContext: BugbotPrContext | null; +} diff --git a/src/usecase/steps/commit/check_changes_issue_size_use_case.ts b/src/usecase/steps/commit/check_changes_issue_size_use_case.ts index 44fc94f7..f96062d1 100644 --- a/src/usecase/steps/commit/check_changes_issue_size_use_case.ts +++ b/src/usecase/steps/commit/check_changes_issue_size_use_case.ts @@ -20,13 +20,16 @@ export class CheckChangesIssueSizeUseCase implements ParamUseCase { + taskId: string = 'DetectPotentialProblemsUseCase'; + + private aiRepository = new AiRepository(); + + async invoke(param: Execution): Promise { + logInfo(`Executing ${this.taskId}.`); + + const results: Result[] = []; + try { + if 
(!param.ai?.getOpencodeModel() || !param.ai?.getOpencodeServerUrl()) { + logDebugInfo('OpenCode not configured; skipping potential problems detection.'); + return results; + } + + if (param.issueNumber === -1) { + logDebugInfo('No issue number for this branch; skipping.'); + return results; + } + + const context = await loadBugbotContext(param); + const prompt = buildBugbotPrompt(param, context); + logInfo('Detecting potential problems via OpenCode (agent computes changes and checks resolved)...'); + const response = await this.aiRepository.askAgent(param.ai, OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: BUGBOT_RESPONSE_SCHEMA as unknown as Record, + schemaName: 'bugbot_findings', + }); + + if (response == null || typeof response !== 'object') { + logDebugInfo('No response from OpenCode.'); + return results; + } + + const payload = response as { findings?: BugbotFinding[]; resolved_finding_ids?: string[] }; + let findings = Array.isArray(payload.findings) ? payload.findings : []; + const resolvedFindingIdsRaw = Array.isArray(payload.resolved_finding_ids) ? payload.resolved_finding_ids : []; + const resolvedFindingIds = new Set(resolvedFindingIdsRaw); + const normalizedResolvedIds = new Set(resolvedFindingIdsRaw.map(sanitizeFindingIdForMarker)); + + const ignorePatterns = param.ai?.getAiIgnoreFiles?.() ?? []; + const minSeverity = normalizeMinSeverity(param.ai?.getBugbotMinSeverity?.()); + findings = findings.filter( + (f) => f.file == null || String(f.file).trim() === '' || isSafeFindingFilePath(f.file) + ); + findings = findings.filter((f) => !fileMatchesIgnorePatterns(f.file, ignorePatterns)); + findings = findings.filter((f) => meetsMinSeverity(f.severity, minSeverity)); + findings = deduplicateFindings(findings); + + const maxComments = param.ai?.getBugbotCommentLimit?.() ?? 
BUGBOT_MAX_COMMENTS; + const { toPublish, overflowCount, overflowTitles } = applyCommentLimit(findings, maxComments); + + if (toPublish.length === 0 && resolvedFindingIds.size === 0) { + logDebugInfo('OpenCode returned no new findings (after filters) and no resolved ids.'); + results.push( + new Result({ + id: this.taskId, + success: true, + executed: true, + steps: ['Potential problems detection completed (no new findings, no resolved).'], + }) + ); + return results; + } + + await markFindingsResolved({ + execution: param, + context, + resolvedFindingIds, + normalizedResolvedIds, + }); + + await publishFindings({ + execution: param, + context, + findings: toPublish, + overflowCount: overflowCount > 0 ? overflowCount : undefined, + overflowTitles: overflowCount > 0 ? overflowTitles : undefined, + }); + + const stepParts = [`${toPublish.length} new/current finding(s) from OpenCode`]; + if (overflowCount > 0) { + stepParts.push(`${overflowCount} more not published (see summary comment)`); + } + if (resolvedFindingIds.size > 0) { + stepParts.push(`${resolvedFindingIds.size} marked as resolved by OpenCode`); + } + results.push( + new Result({ + id: this.taskId, + success: true, + executed: true, + steps: [`Potential problems detection completed. 
${stepParts.join('; ')}.`], + }) + ); + } catch (error) { + logError(`Error in ${this.taskId}: ${error}`); + results.push( + new Result({ + id: this.taskId, + success: false, + executed: true, + errors: [`Error in ${this.taskId}: ${error}`], + }) + ); + } + return results; + } +} diff --git a/src/usecase/steps/common/__tests__/publish_resume_use_case.test.ts b/src/usecase/steps/common/__tests__/publish_resume_use_case.test.ts new file mode 100644 index 00000000..d9461190 --- /dev/null +++ b/src/usecase/steps/common/__tests__/publish_resume_use_case.test.ts @@ -0,0 +1,115 @@ +import { Result } from '../../../../data/model/result'; +import { PublishResultUseCase } from '../publish_resume_use_case'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logError: jest.fn(), +})); + +jest.mock('../../../../utils/list_utils', () => ({ + getRandomElement: jest.fn(() => undefined), +})); + +const mockAddComment = jest.fn(); +jest.mock('../../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + addComment: mockAddComment, + })), +})); + +function baseParam(overrides: Record = {}) { + const defaultConfig = { results: [new Result({ id: 'x', success: true, executed: true, steps: ['Step 1'] })] }; + return { + owner: 'o', + repo: 'r', + issueNumber: 42, + issue: { number: 42 }, + pullRequest: { number: 99 }, + isIssue: false, + isPullRequest: false, + isPush: false, + isSingleAction: false, + currentConfiguration: defaultConfig, + tokens: { token: 't' }, + images: { + imagesOnIssue: true, + imagesOnPullRequest: true, + issueAutomaticActions: [], + issueReleaseGifs: [], + issueHotfixGifs: [], + issueBugfixGifs: [], + issueFeatureGifs: [], + issueDocsGifs: [], + issueChoreGifs: [], + pullRequestReleaseGifs: [], + pullRequestHotfixGifs: [], + pullRequestBugfixGifs: [], + pullRequestFeatureGifs: [], + pullRequestDocsGifs: [], + pullRequestChoreGifs: [], + pullRequestAutomaticActions: [], + }, + 
singleAction: { issue: 123 }, + release: { active: false }, + hotfix: { active: false }, + issueNotBranched: false, + ...overrides, + } as unknown as Parameters[0]; +} + +describe('PublishResultUseCase', () => { + let useCase: PublishResultUseCase; + + beforeEach(() => { + useCase = new PublishResultUseCase(); + mockAddComment.mockReset(); + }); + + it('does not call addComment when content is empty (no steps in results)', async () => { + const param = baseParam({ + isIssue: true, + currentConfiguration: { results: [new Result({ id: 'x', success: true, executed: true, steps: [] })] }, + }); + + await useCase.invoke(param); + + expect(mockAddComment).not.toHaveBeenCalled(); + }); + + it('calls addComment on issue when isIssue and results have steps', async () => { + mockAddComment.mockResolvedValue(undefined); + const param = baseParam({ isIssue: true }); + const resultsWithSteps = [new Result({ id: 'a', success: true, executed: true, steps: ['Step 1'] })]; + param.currentConfiguration = { results: resultsWithSteps } as Parameters[0]['currentConfiguration']; + + await useCase.invoke(param); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment).toHaveBeenCalledWith('o', 'r', 42, expect.stringContaining('1. Step 1'), 't'); + }); + + it('calls addComment on pull request when isPullRequest and results have steps', async () => { + mockAddComment.mockResolvedValue(undefined); + const param = baseParam({ isPullRequest: true }); + const resultsWithSteps = [new Result({ id: 'a', success: true, executed: true, steps: ['Step 1'] })]; + param.currentConfiguration = { results: resultsWithSteps } as Parameters[0]['currentConfiguration']; + + await useCase.invoke(param); + + expect(mockAddComment).toHaveBeenCalledTimes(1); + expect(mockAddComment).toHaveBeenCalledWith('o', 'r', 99, expect.stringContaining('1. 
Step 1'), 't'); + }); + + it('pushes failure result to currentConfiguration.results when addComment throws', async () => { + mockAddComment.mockRejectedValue(new Error('API error')); + const param = baseParam({ isIssue: true }); + const initialLength = param.currentConfiguration.results.length; + + await useCase.invoke(param); + + expect(param.currentConfiguration.results.length).toBe(initialLength + 1); + const lastResult = param.currentConfiguration.results[param.currentConfiguration.results.length - 1]; + expect(lastResult.success).toBe(false); + expect(lastResult.steps).toContain('Tried to publish the resume, but there was a problem.'); + }); +}); diff --git a/src/usecase/steps/common/__tests__/think_use_case.test.ts b/src/usecase/steps/common/__tests__/think_use_case.test.ts index 19baa3e0..d28c9ca8 100644 --- a/src/usecase/steps/common/__tests__/think_use_case.test.ts +++ b/src/usecase/steps/common/__tests__/think_use_case.test.ts @@ -6,11 +6,11 @@ jest.mock('../../../../utils/logger', () => ({ logError: jest.fn(), })); -const mockAsk = jest.fn(); +const mockAskAgent = jest.fn(); const mockAddComment = jest.fn(); const mockGetDescription = jest.fn(); jest.mock('../../../../data/repository/ai_repository', () => ({ - AiRepository: jest.fn().mockImplementation(() => ({ ask: mockAsk })), + AiRepository: jest.fn().mockImplementation(() => ({ askAgent: mockAskAgent })), })); jest.mock('../../../../data/repository/issue_repository', () => ({ IssueRepository: jest.fn().mockImplementation(() => ({ @@ -26,7 +26,8 @@ function baseParam(overrides: Record = {}) { issueNumber: 1, tokenUser: 'bot', tokens: { token: 't' }, - ai: new Ai('https://opencode.example.com', 'model-x', false, false, [], false), + ai: new Ai('https://opencode.example.com', 'model-x', false, false, [], false, 'low', 20), + labels: { isQuestion: false, isHelp: false }, issue: { isIssueComment: true, isIssue: false, @@ -46,7 +47,7 @@ describe('ThinkUseCase', () => { beforeEach(() => { useCase = new 
ThinkUseCase(); - mockAsk.mockReset(); + mockAskAgent.mockReset(); mockAddComment.mockReset(); mockGetDescription.mockReset(); mockGetDescription.mockResolvedValue(undefined); @@ -60,7 +61,7 @@ describe('ThinkUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); expect(mockAddComment).not.toHaveBeenCalled(); }); @@ -74,7 +75,7 @@ describe('ThinkUseCase', () => { expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); it('returns success executed false when comment does not mention @user', async () => { @@ -87,13 +88,49 @@ describe('ThinkUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); expect(mockAddComment).not.toHaveBeenCalled(); }); + it('responds without mention when issue has question label', async () => { + mockGetDescription.mockResolvedValue(undefined); + mockAskAgent.mockResolvedValue({ answer: 'Here is the answer.' }); + mockAddComment.mockResolvedValue(undefined); + const param = baseParam({ + labels: { isQuestion: true, isHelp: false }, + issue: { ...baseParam().issue, commentBody: 'how do I configure the webhook?' 
}, + }); + + const results = await useCase.invoke(param); + + expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent.mock.calls[0][2]).toContain('how do I configure the webhook?'); + expect(mockAddComment).toHaveBeenCalledWith('o', 'r', 1, 'Here is the answer.', 't'); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + }); + + it('responds without mention when issue has help label', async () => { + mockGetDescription.mockResolvedValue(undefined); + mockAskAgent.mockResolvedValue({ answer: 'I can help with that.' }); + mockAddComment.mockResolvedValue(undefined); + const param = baseParam({ + labels: { isQuestion: false, isHelp: true }, + issue: { ...baseParam().issue, commentBody: 'I need help with deployment' }, + }); + + const results = await useCase.invoke(param); + + expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent.mock.calls[0][2]).toContain('I need help with deployment'); + expect(mockAddComment).toHaveBeenCalledWith('o', 'r', 1, 'I can help with that.', 't'); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + }); + it('returns error when OpenCode model is empty', async () => { const param = baseParam({ - ai: new Ai('https://server', '', false, false, [], false), + ai: new Ai('https://server', '', false, false, [], false, 'low', 20), issue: { ...baseParam().issue, commentBody: '@bot hi' }, }); @@ -102,12 +139,12 @@ describe('ThinkUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(false); expect(results[0].errors).toContain('OpenCode server URL or model not found.'); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); it('returns error when OpenCode server URL is empty', async () => { const param = baseParam({ - ai: new Ai('', 'model', false, false, [], false), + ai: new Ai('', 'model', false, false, [], false, 'low', 20), issue: { ...baseParam().issue, commentBody: '@bot hi' }, }); @@ 
-126,12 +163,12 @@ describe('ThinkUseCase', () => { expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); - it('calls getDescription then ask and addComment when comment mentions bot', async () => { + it('calls getDescription then askAgent and addComment when comment mentions bot', async () => { mockGetDescription.mockResolvedValue(undefined); - mockAsk.mockResolvedValue('4'); + mockAskAgent.mockResolvedValue({ answer: '4' }); mockAddComment.mockResolvedValue(undefined); const param = baseParam({ issue: { ...baseParam().issue, commentBody: '@bot what is 2+2?' }, @@ -140,7 +177,7 @@ describe('ThinkUseCase', () => { const results = await useCase.invoke(param); expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 1, 't'); - expect(mockAsk).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(1); expect(mockAddComment).toHaveBeenCalledWith('o', 'r', 1, '4', 't'); expect(results).toHaveLength(1); expect(results[0].success).toBe(true); @@ -149,7 +186,7 @@ describe('ThinkUseCase', () => { it('includes issue description in prompt when getDescription returns content', async () => { mockGetDescription.mockResolvedValue('Implement login feature for the app.'); - mockAsk.mockResolvedValue('Sure, here is how...'); + mockAskAgent.mockResolvedValue({ answer: 'Sure, here is how...' 
}); mockAddComment.mockResolvedValue(undefined); const param = baseParam({ issue: { ...baseParam().issue, commentBody: '@bot how should I start?', number: 42 }, @@ -158,7 +195,7 @@ describe('ThinkUseCase', () => { await useCase.invoke(param); expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 42, 't'); - const prompt = mockAsk.mock.calls[0][1]; + const prompt = mockAskAgent.mock.calls[0][2]; expect(prompt).toContain('Context (issue #42 description):'); expect(prompt).toContain('Implement login feature for the app.'); expect(prompt).toContain('Question: how should I start?'); @@ -166,7 +203,7 @@ describe('ThinkUseCase', () => { it('for PR review comment uses issueNumber to fetch issue description', async () => { mockGetDescription.mockResolvedValue('Original issue description.'); - mockAsk.mockResolvedValue('Reply'); + mockAskAgent.mockResolvedValue({ answer: 'Reply' }); mockAddComment.mockResolvedValue(undefined); const param = baseParam({ issue: { ...baseParam().issue, isIssueComment: false, commentBody: '', number: 0 }, @@ -181,28 +218,42 @@ describe('ThinkUseCase', () => { await useCase.invoke(param); expect(mockGetDescription).toHaveBeenCalledWith('o', 'r', 123, 't'); - const prompt = mockAsk.mock.calls[0][1]; + const prompt = mockAskAgent.mock.calls[0][2]; expect(prompt).toContain('Context (issue #123 description):'); expect(prompt).toContain('Original issue description.'); }); it('returns error when OpenCode returns no answer', async () => { - mockAsk.mockResolvedValue(undefined); + mockAskAgent.mockResolvedValue(undefined); const param = baseParam({ issue: { ...baseParam().issue, commentBody: '@bot hello' }, }); const results = await useCase.invoke(param); - expect(mockAsk).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(1); expect(mockAddComment).not.toHaveBeenCalled(); expect(results[0].success).toBe(false); expect(results[0].executed).toBe(true); expect(results[0].errors).toContain('OpenCode returned no answer.'); }); + 
it('returns error when OpenCode returns empty answer', async () => { + mockAskAgent.mockResolvedValue({ answer: '' }); + const param = baseParam({ + issue: { ...baseParam().issue, commentBody: '@bot hello' }, + }); + + const results = await useCase.invoke(param); + + expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAddComment).not.toHaveBeenCalled(); + expect(results[0].success).toBe(false); + expect(results[0].errors).toContain('OpenCode returned no answer.'); + }); + it('posts comment to PR number when pull_request_review_comment', async () => { - mockAsk.mockResolvedValue('Reply'); + mockAskAgent.mockResolvedValue({ answer: 'Reply' }); mockAddComment.mockResolvedValue(undefined); const param = baseParam({ issue: { ...baseParam().issue, isIssueComment: false, commentBody: '' }, @@ -221,7 +272,7 @@ describe('ThinkUseCase', () => { }); it('returns error result when addComment throws', async () => { - mockAsk.mockResolvedValue('ok'); + mockAskAgent.mockResolvedValue({ answer: 'ok' }); mockAddComment.mockRejectedValue(new Error('API error')); const param = baseParam({ issue: { ...baseParam().issue, commentBody: '@bot hi' }, diff --git a/src/usecase/steps/common/think_use_case.ts b/src/usecase/steps/common/think_use_case.ts index 1177750b..ce8e7fb9 100644 --- a/src/usecase/steps/common/think_use_case.ts +++ b/src/usecase/steps/common/think_use_case.ts @@ -1,6 +1,6 @@ import { Execution } from '../../../data/model/execution'; import { Result } from '../../../data/model/result'; -import { AiRepository } from '../../../data/repository/ai_repository'; +import { AiRepository, OPENCODE_AGENT_PLAN, THINK_RESPONSE_SCHEMA } from '../../../data/repository/ai_repository'; import { IssueRepository } from '../../../data/repository/issue_repository'; import { logError, logInfo } from '../../../utils/logger'; import { ParamUseCase } from '../../base/param_usecase'; @@ -32,28 +32,33 @@ export class ThinkUseCase implements ParamUseCase { return results; } - if 
(!param.tokenUser?.trim()) { - logInfo('Bot username (tokenUser) not set; skipping Think response.'); - results.push( - new Result({ - id: this.taskId, - success: true, - executed: false, - }) - ); - return results; - } + const isHelpOrQuestionIssue = + param.labels.isQuestion || param.labels.isHelp; - if (!commentBody.includes(`@${param.tokenUser}`)) { - logInfo(`Comment does not mention @${param.tokenUser}; skipping.`); - results.push( - new Result({ - id: this.taskId, - success: true, - executed: false, - }) - ); - return results; + if (!isHelpOrQuestionIssue) { + if (!param.tokenUser?.trim()) { + logInfo('Bot username (tokenUser) not set; skipping Think response.'); + results.push( + new Result({ + id: this.taskId, + success: true, + executed: false, + }) + ); + return results; + } + + if (!commentBody.includes(`@${param.tokenUser}`)) { + logInfo(`Comment does not mention @${param.tokenUser}; skipping.`); + results.push( + new Result({ + id: this.taskId, + success: true, + executed: false, + }) + ); + return results; + } } if (!param.ai.getOpencodeModel()?.trim() || !param.ai.getOpencodeServerUrl()?.trim()) { @@ -68,7 +73,9 @@ export class ThinkUseCase implements ParamUseCase { return results; } - const question = commentBody.replace(new RegExp(`@${param.tokenUser}`, 'gi'), '').trim(); + const question = isHelpOrQuestionIssue + ? commentBody.trim() + : commentBody.replace(new RegExp(`@${param.tokenUser}`, 'gi'), '').trim(); if (!question) { results.push( new Result({ @@ -99,9 +106,19 @@ export class ThinkUseCase implements ParamUseCase { ? `\n\nContext (issue #${issueNumberForContext} description):\n${issueDescription}\n\n` : '\n\n'; const prompt = `You are a helpful assistant. Answer the following question concisely, using the context below when relevant. 
Do not include the question in your response.${contextBlock}Question: ${question}`; - const answer = await this.aiRepository.ask(param.ai, prompt); + const response = await this.aiRepository.askAgent(param.ai, OPENCODE_AGENT_PLAN, prompt, { + expectJson: true, + schema: THINK_RESPONSE_SCHEMA as unknown as Record, + schemaName: 'think_response', + }); + const answer = + response != null && + typeof response === 'object' && + typeof (response as Record).answer === 'string' + ? ((response as Record).answer as string).trim() + : ''; - if (answer === undefined || !answer.trim()) { + if (!answer) { logError('OpenCode returned no answer for Think.'); results.push( new Result({ diff --git a/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.ts b/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.ts new file mode 100644 index 00000000..7460422e --- /dev/null +++ b/src/usecase/steps/issue/__tests__/assign_reviewers_to_issue_use_case.test.ts @@ -0,0 +1,141 @@ +import { AssignReviewersToIssueUseCase } from '../assign_reviewers_to_issue_use_case'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logDebugInfo: jest.fn(), + logError: jest.fn(), +})); + +const mockGetCurrentReviewers = jest.fn(); +const mockGetCurrentAssignees = jest.fn(); +const mockGetRandomMembers = jest.fn(); +const mockAddReviewersToPullRequest = jest.fn(); + +jest.mock('../../../../data/repository/pull_request_repository', () => ({ + PullRequestRepository: jest.fn().mockImplementation(() => ({ + getCurrentReviewers: mockGetCurrentReviewers, + addReviewersToPullRequest: mockAddReviewersToPullRequest, + })), +})); +jest.mock('../../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + getCurrentAssignees: mockGetCurrentAssignees, + })), +})); +jest.mock('../../../../data/repository/project_repository', () => ({ + ProjectRepository: jest.fn().mockImplementation(() => ({ + 
getRandomMembers: mockGetRandomMembers, + })), +})); + +function baseParam(overrides: Record = {}) { + return { + owner: 'o', + repo: 'r', + tokens: { token: 't' }, + pullRequest: { + number: 42, + desiredReviewersCount: 1, + creator: 'author', + }, + ...overrides, + } as unknown as Parameters[0]; +} + +describe('AssignReviewersToIssueUseCase', () => { + let useCase: AssignReviewersToIssueUseCase; + + beforeEach(() => { + useCase = new AssignReviewersToIssueUseCase(); + mockGetCurrentReviewers.mockReset(); + mockGetCurrentAssignees.mockReset(); + mockGetRandomMembers.mockReset(); + mockAddReviewersToPullRequest.mockReset(); + mockGetCurrentAssignees.mockResolvedValue([]); + }); + + it('returns success with no steps when current reviewers already meet desired count', async () => { + mockGetCurrentReviewers.mockResolvedValue(['elisalopez']); + const param = baseParam({ pullRequest: { number: 42, desiredReviewersCount: 1, creator: 'author' } }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toEqual([]); + expect(mockAddReviewersToPullRequest).not.toHaveBeenCalled(); + }); + + it('returns success with no steps when reviewer already submitted (counted in currentReviewers)', async () => { + mockGetCurrentReviewers.mockResolvedValue(['elisalopez']); + const param = baseParam({ pullRequest: { number: 42, desiredReviewersCount: 1, creator: 'author' } }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].steps).toEqual([]); + expect(mockGetRandomMembers).not.toHaveBeenCalled(); + expect(mockAddReviewersToPullRequest).not.toHaveBeenCalled(); + }); + + it('requests new reviewers and adds step only for newly added when under desired count', async () => { + mockGetCurrentReviewers.mockResolvedValue([]); + mockGetRandomMembers.mockResolvedValue(['newreviewer']); + 
mockAddReviewersToPullRequest.mockResolvedValue(['newreviewer']); + const param = baseParam({ pullRequest: { number: 42, desiredReviewersCount: 1, creator: 'author' } }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toContain('@newreviewer was requested to review the pull request.'); + expect(mockAddReviewersToPullRequest).toHaveBeenCalledWith('o', 'r', 42, ['newreviewer'], 't'); + }); + + it('excludes creator and current reviewers/assignees when requesting members', async () => { + mockGetCurrentReviewers.mockResolvedValue(['reviewer1']); + mockGetRandomMembers.mockResolvedValue(['reviewer2']); + mockAddReviewersToPullRequest.mockResolvedValue(['reviewer2']); + const param = baseParam({ + pullRequest: { number: 42, desiredReviewersCount: 2, creator: 'author' }, + }); + mockGetCurrentAssignees.mockResolvedValue(['assignee1']); + + await useCase.invoke(param); + + expect(mockGetRandomMembers).toHaveBeenCalledWith( + 'o', + 1, + expect.arrayContaining(['author', 'reviewer1', 'assignee1']), + 't' + ); + }); + + it('returns failure when no members available to assign as reviewers', async () => { + mockGetCurrentReviewers.mockResolvedValue([]); + mockGetRandomMembers.mockResolvedValue([]); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].steps).toContain( + 'Tried to assign members as reviewers to pull request, but no one was found.' 
+ ); + expect(mockAddReviewersToPullRequest).not.toHaveBeenCalled(); + }); + + it('returns failure when getCurrentReviewers throws', async () => { + mockGetCurrentReviewers.mockRejectedValue(new Error('API error')); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].steps).toContain('Tried to assign members to issue.'); + }); +}); diff --git a/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.ts b/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.ts new file mode 100644 index 00000000..b2ebfecf --- /dev/null +++ b/src/usecase/steps/issue/__tests__/label_deployed_added_use_case.test.ts @@ -0,0 +1,76 @@ +import { DeployedAddedUseCase } from '../label_deployed_added_use_case'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logDebugInfo: jest.fn(), + logError: jest.fn(), +})); + +function baseParam(overrides: Record = {}) { + return { + owner: 'o', + repo: 'r', + issue: { labeled: false, labelAdded: '' }, + labels: { deployed: 'deployed' }, + release: { active: false, branch: undefined as string | undefined }, + hotfix: { active: false, branch: undefined as string | undefined }, + ...overrides, + } as unknown as Parameters[0]; +} + +describe('DeployedAddedUseCase (label_deployed_added)', () => { + let useCase: DeployedAddedUseCase; + + beforeEach(() => { + useCase = new DeployedAddedUseCase(); + }); + + it('returns success executed false when issue not labeled or label is not deployed', async () => { + const param = baseParam({ issue: { labeled: false, labelAdded: '' } }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(false); + }); + + it('returns success executed false when labeled but labelAdded is not deployed', async () => { + const param = baseParam({ issue: { 
labeled: true, labelAdded: 'other' }, labels: { deployed: 'deployed' } }); + + const results = await useCase.invoke(param); + + expect(results[0].executed).toBe(false); + }); + + it('returns success executed true with release step when labeled with deployed and release active', async () => { + const param = baseParam({ + issue: { labeled: true, labelAdded: 'deployed' }, + labels: { deployed: 'deployed' }, + release: { active: true, branch: 'release/1.0.0' }, + }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps?.some((s) => s.includes('release/1.0.0') && s.includes('Deploy complete'))).toBe(true); + }); + + it('returns success executed true with hotfix step when labeled with deployed and hotfix active', async () => { + const param = baseParam({ + issue: { labeled: true, labelAdded: 'deployed' }, + labels: { deployed: 'deployed' }, + hotfix: { active: true, branch: 'hotfix/1.0.1' }, + }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps?.some((s) => s.includes('hotfix/1.0.1') && s.includes('Deploy complete'))).toBe(true); + }); + +}); diff --git a/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.ts b/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.ts new file mode 100644 index 00000000..fe952ccb --- /dev/null +++ b/src/usecase/steps/issue/__tests__/update_issue_type_use_case.test.ts @@ -0,0 +1,65 @@ +import { UpdateIssueTypeUseCase } from '../update_issue_type_use_case'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logError: jest.fn(), +})); + +const mockSetIssueType = jest.fn(); +jest.mock('../../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + 
setIssueType: mockSetIssueType, + })), +})); + +function baseParam() { + return { + owner: 'o', + repo: 'r', + issueNumber: 42, + labels: {} as Parameters[0]['labels'], + issueTypes: {} as Parameters[0]['issueTypes'], + tokens: { token: 't' }, + } as unknown as Parameters[0]; +} + +describe('UpdateIssueTypeUseCase', () => { + let useCase: UpdateIssueTypeUseCase; + + beforeEach(() => { + useCase = new UpdateIssueTypeUseCase(); + mockSetIssueType.mockReset(); + }); + + it('returns empty result when setIssueType succeeds', async () => { + mockSetIssueType.mockResolvedValue(undefined); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(0); + expect(mockSetIssueType).toHaveBeenCalledWith( + 'o', + 'r', + 42, + param.labels, + param.issueTypes, + 't' + ); + }); + + it('returns failure result when setIssueType throws', async () => { + mockSetIssueType.mockRejectedValue(new Error('API error')); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toContain( + 'Tried to update issue type, but there was a problem.' 
+ ); + expect(results[0].errors).toBeDefined(); + }); +}); diff --git a/src/usecase/steps/issue/label_deploy_added_use_case.ts b/src/usecase/steps/issue/label_deploy_added_use_case.ts index bf5f706d..4bf8ee1a 100644 --- a/src/usecase/steps/issue/label_deploy_added_use_case.ts +++ b/src/usecase/steps/issue/label_deploy_added_use_case.ts @@ -1,9 +1,10 @@ import { Execution } from "../../../data/model/execution"; import { Result } from "../../../data/model/result"; import { BranchRepository } from "../../../data/repository/branch_repository"; -import { injectJsonAsMarkdownBlock } from "../../../utils/content_utils"; +import { extractChangelogUpToAdditionalContext, injectJsonAsMarkdownBlock } from "../../../utils/content_utils"; import { logDebugInfo, logError, logInfo } from "../../../utils/logger"; import { ParamUseCase } from "../../base/param_usecase"; +import { MoveIssueToInProgressUseCase } from "./move_issue_to_in_progress"; export class DeployAddedUseCase implements ParamUseCase { taskId: string = 'DeployAddedUseCase'; @@ -18,6 +19,8 @@ export class DeployAddedUseCase implements ParamUseCase { if (param.issue.labeled && param.issue.labelAdded === param.labels.deploy) { logDebugInfo(`Deploying requested.`) if (param.release.active && param.release.branch !== undefined) { + result.push(...await new MoveIssueToInProgressUseCase().invoke(param)); + const sanitizedTitle = param.issue.title .replace(/\b\d+(\.\d+){2,}\b/g, '') .replace(/[^\p{L}\p{N}\p{P}\p{Z}^$\n]/gu, '') @@ -29,14 +32,13 @@ export class DeployAddedUseCase implements ParamUseCase { .replace(/-+/g, '-') .trim(); - const description = param.issue.body?.match(/### Changelog\n\n([\s\S]*?)(?=\n\n|$)/)?.[1]?.trim() ?? 
'No changelog provided'; - const escapedDescription = description.replace(/\n/g, '\\n'); + const changelogBody = extractChangelogUpToAdditionalContext(param.issue.body, 'Changelog'); const releaseUrl = `https://github.com/${param.owner}/${param.repo}/tree/${param.release.branch}`; const parameters = { version: param.release.version, title: sanitizedTitle, - changelog: escapedDescription, + changelog: changelogBody, issue: `${param.issue.number}`, } await this.branchRepository.executeWorkflow( @@ -60,6 +62,8 @@ ${injectJsonAsMarkdownBlock('Workflow Parameters', parameters)}` }) ) } else if (param.hotfix.active && param.hotfix.branch !== undefined) { + result.push(...await new MoveIssueToInProgressUseCase().invoke(param)); + const sanitizedTitle = param.issue.title .replace(/\b\d+(\.\d+){2,}\b/g, '') .replace(/[^\p{L}\p{N}\p{P}\p{Z}^$\n]/gu, '') @@ -71,21 +75,20 @@ ${injectJsonAsMarkdownBlock('Workflow Parameters', parameters)}` .replace(/-+/g, '-') .trim(); - const description = param.issue.body?.match(/### Hotfix Solution\n\n([\s\S]*?)(?=\n\n|$)/)?.[1]?.trim() ?? 
'No changelog provided'; - const escapedDescription = description.replace(/\n/g, '\\n'); + const changelogBody = extractChangelogUpToAdditionalContext(param.issue.body, 'Hotfix Solution'); const hotfixUrl = `https://github.com/${param.owner}/${param.repo}/tree/${param.hotfix.branch}`; const parameters = { version: param.hotfix.version, title: sanitizedTitle, - changelog: escapedDescription, + changelog: changelogBody, issue: param.issue.number, } await this.branchRepository.executeWorkflow( param.owner, param.repo, param.hotfix.branch, - param.workflows.release, + param.workflows.hotfix, parameters, param.tokens.token, ) diff --git a/src/usecase/steps/issue_comment/__tests__/check_issue_comment_language_use_case.test.ts b/src/usecase/steps/issue_comment/__tests__/check_issue_comment_language_use_case.test.ts index 73b615d6..e9c25a4a 100644 --- a/src/usecase/steps/issue_comment/__tests__/check_issue_comment_language_use_case.test.ts +++ b/src/usecase/steps/issue_comment/__tests__/check_issue_comment_language_use_case.test.ts @@ -8,11 +8,10 @@ const translatedKey = ``; -const mockAsk = jest.fn(); const mockAskAgent = jest.fn(); const mockUpdateComment = jest.fn(); jest.mock('../../../../data/repository/ai_repository', () => ({ - AiRepository: jest.fn().mockImplementation(() => ({ ask: mockAsk, askAgent: mockAskAgent })), + AiRepository: jest.fn().mockImplementation(() => ({ askAgent: mockAskAgent })), })); jest.mock('../../../../data/repository/issue_repository', () => ({ IssueRepository: jest.fn().mockImplementation(() => ({ updateComment: mockUpdateComment })), @@ -35,7 +34,6 @@ describe('CheckIssueCommentLanguageUseCase', () => { beforeEach(() => { useCase = new CheckIssueCommentLanguageUseCase(); - mockAsk.mockReset(); mockAskAgent.mockReset(); mockUpdateComment.mockReset(); }); @@ -48,7 +46,7 @@ describe('CheckIssueCommentLanguageUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - 
expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); it('returns success executed false when commentBody already contains translatedKey', async () => { @@ -60,11 +58,11 @@ describe('CheckIssueCommentLanguageUseCase', () => { expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); it('returns success executed true when AI responds done (already in locale)', async () => { - mockAsk.mockResolvedValue('done'); + mockAskAgent.mockResolvedValue({ status: 'done' }); const param = baseParam(); const results = await useCase.invoke(param); @@ -72,20 +70,20 @@ describe('CheckIssueCommentLanguageUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(true); expect(results[0].executed).toBe(true); - expect(mockAsk).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(1); expect(mockUpdateComment).not.toHaveBeenCalled(); }); it('calls updateComment when AI responds must_translate and askAgent returns schema with translatedText', async () => { - mockAsk.mockResolvedValueOnce('must_translate'); - mockAskAgent.mockResolvedValueOnce({ translatedText: 'Texto traducido' }); + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce({ translatedText: 'Texto traducido' }); mockUpdateComment.mockResolvedValue(undefined); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAsk).toHaveBeenCalledTimes(1); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); expect(mockUpdateComment).toHaveBeenCalledWith( 'o', 'r', @@ -97,15 +95,15 @@ describe('CheckIssueCommentLanguageUseCase', () => { expect(results.length).toBeGreaterThanOrEqual(0); }); - it('does not update comment when askAgent returns undefined', async () => { - mockAsk.mockResolvedValueOnce('must_translate'); - 
mockAskAgent.mockResolvedValueOnce(undefined); + it('does not update comment when askAgent returns undefined for translation', async () => { + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce(undefined); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAsk).toHaveBeenCalledTimes(1); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); expect(mockUpdateComment).not.toHaveBeenCalled(); expect(results).toHaveLength(1); expect(results[0].success).toBe(true); @@ -113,14 +111,14 @@ describe('CheckIssueCommentLanguageUseCase', () => { }); it('does not update comment when askAgent returns empty translatedText', async () => { - mockAsk.mockResolvedValueOnce('must_translate'); - mockAskAgent.mockResolvedValueOnce({ translatedText: ' ' }); + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce({ translatedText: ' ' }); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAsk).toHaveBeenCalledTimes(1); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); expect(mockUpdateComment).not.toHaveBeenCalled(); expect(results).toHaveLength(1); expect(results[0].success).toBe(true); @@ -128,13 +126,14 @@ describe('CheckIssueCommentLanguageUseCase', () => { }); it('does not update comment when askAgent returns translatedText missing', async () => { - mockAsk.mockResolvedValueOnce('must_translate'); - mockAskAgent.mockResolvedValueOnce({ reason: 'Ambiguous input' }); + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce({ reason: 'Ambiguous input' }); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); expect(mockUpdateComment).not.toHaveBeenCalled(); expect(results).toHaveLength(1); 
expect(results[0].success).toBe(true); diff --git a/src/usecase/steps/issue_comment/check_issue_comment_language_use_case.ts b/src/usecase/steps/issue_comment/check_issue_comment_language_use_case.ts index 1f2785cf..fff19a5b 100644 --- a/src/usecase/steps/issue_comment/check_issue_comment_language_use_case.ts +++ b/src/usecase/steps/issue_comment/check_issue_comment_language_use_case.ts @@ -2,6 +2,7 @@ import { Execution } from "../../../data/model/execution"; import { Result } from "../../../data/model/result"; import { AiRepository, + LANGUAGE_CHECK_RESPONSE_SCHEMA, OPENCODE_AGENT_PLAN, TRANSLATION_RESPONSE_SCHEMA, } from "../../../data/repository/ai_repository"; @@ -48,13 +49,23 @@ If you'd like this comment to be translated again, please delete the entire comm The text is: ${commentBody} `; - - let result = await this.aiRepository.ask( + const checkResponse = await this.aiRepository.askAgent( param.ai, + OPENCODE_AGENT_PLAN, prompt, + { + expectJson: true, + schema: LANGUAGE_CHECK_RESPONSE_SCHEMA as unknown as Record, + schemaName: 'language_check_response', + }, ); - - if (result === "done") { + const status = + checkResponse != null && + typeof checkResponse === 'object' && + typeof (checkResponse as Record).status === 'string' + ? 
((checkResponse as Record).status as string) + : ''; + if (status === 'done') { results.push( new Result({ id: this.taskId, diff --git a/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.ts b/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.ts new file mode 100644 index 00000000..4606afbf --- /dev/null +++ b/src/usecase/steps/pull_request/__tests__/sync_size_and_progress_labels_from_issue_to_pr_use_case.test.ts @@ -0,0 +1,147 @@ +import { SyncSizeAndProgressLabelsFromIssueToPrUseCase } from '../sync_size_and_progress_labels_from_issue_to_pr_use_case'; + +jest.mock('../../../../utils/logger', () => ({ + logInfo: jest.fn(), + logDebugInfo: jest.fn(), + logError: jest.fn(), +})); + +const mockGetLabels = jest.fn(); +const mockSetLabels = jest.fn(); + +jest.mock('../../../../data/repository/issue_repository', () => ({ + IssueRepository: jest.fn().mockImplementation(() => ({ + getLabels: mockGetLabels, + setLabels: mockSetLabels, + })), + PROGRESS_LABEL_PATTERN: /^\d+%$/, +})); + +const defaultSizeLabels = ['size: XS', 'size: S', 'size: M', 'size: L', 'size: XL', 'size: XXL']; + +function baseParam(overrides: Record = {}) { + return { + owner: 'o', + repo: 'r', + tokens: { token: 't' }, + issueNumber: 287, + pullRequest: { number: 100 }, + labels: { sizeLabels: defaultSizeLabels }, + ...overrides, + } as unknown as Parameters[0]; +} + +describe('SyncSizeAndProgressLabelsFromIssueToPrUseCase', () => { + let useCase: SyncSizeAndProgressLabelsFromIssueToPrUseCase; + + beforeEach(() => { + useCase = new SyncSizeAndProgressLabelsFromIssueToPrUseCase(); + mockGetLabels.mockReset(); + mockSetLabels.mockReset(); + }); + + it('returns executed false when no issue linked', async () => { + const param = baseParam({ issueNumber: -1 }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + 
expect(results[0].executed).toBe(false); + expect(results[0].steps).toContain('No issue linked; size/progress labels not synced.'); + expect(mockGetLabels).not.toHaveBeenCalled(); + }); + + it('returns executed true with no sync when issue has no size or progress labels', async () => { + mockGetLabels.mockResolvedValue(['bug', 'feature']); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toContain('Issue has no size/progress labels to sync.'); + expect(mockSetLabels).not.toHaveBeenCalled(); + }); + + it('copies only progress label to PR', async () => { + mockGetLabels + .mockResolvedValueOnce(['bug', '50%']) + .mockResolvedValueOnce(['bug']); + mockSetLabels.mockResolvedValue(undefined); + const param = baseParam({ issueNumber: 287, pullRequest: { number: 100 } }); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toEqual([]); + expect(mockSetLabels).toHaveBeenCalledWith( + 'o', + 'r', + 100, + expect.arrayContaining(['bug', '50%']), + 't' + ); + }); + + it('copies only size label to PR', async () => { + mockGetLabels + .mockResolvedValueOnce(['bug', 'size: M']) + .mockResolvedValueOnce(['bug']); + mockSetLabels.mockResolvedValue(undefined); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toEqual([]); + expect(mockSetLabels).toHaveBeenCalledWith( + 'o', + 'r', + 100, + expect.arrayContaining(['bug', 'size: M']), + 't' + ); + }); + + it('copies both size and progress labels to PR', async () => { + mockGetLabels + .mockResolvedValueOnce(['bug', 'size: M', '50%']) + 
.mockResolvedValueOnce(['bug']); + mockSetLabels.mockResolvedValue(undefined); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(true); + expect(results[0].executed).toBe(true); + expect(results[0].steps).toEqual([]); + expect(mockSetLabels).toHaveBeenCalledWith( + 'o', + 'r', + 100, + expect.arrayContaining(['bug', 'size: M', '50%']), + 't' + ); + }); + + it('returns failure when setLabels throws', async () => { + mockGetLabels + .mockResolvedValueOnce(['50%']) + .mockResolvedValueOnce([]); + mockSetLabels.mockRejectedValue(new Error('API error')); + const param = baseParam(); + + const results = await useCase.invoke(param); + + expect(results).toHaveLength(1); + expect(results[0].success).toBe(false); + expect(results[0].steps).toContain('Failed to sync size/progress labels from issue to PR.'); + }); +}); diff --git a/src/usecase/steps/pull_request/sync_size_and_progress_labels_from_issue_to_pr_use_case.ts b/src/usecase/steps/pull_request/sync_size_and_progress_labels_from_issue_to_pr_use_case.ts index 0036f951..a5e9943e 100644 --- a/src/usecase/steps/pull_request/sync_size_and_progress_labels_from_issue_to_pr_use_case.ts +++ b/src/usecase/steps/pull_request/sync_size_and_progress_labels_from_issue_to_pr_use_case.ts @@ -86,9 +86,7 @@ export class SyncSizeAndProgressLabelsFromIssueToPrUseCase implements ParamUseCa id: this.taskId, success: true, executed: true, - steps: [ - `Size and progress labels copied from issue #${param.issueNumber} to this PR.`, - ], + steps: [], }), ); } catch (error) { diff --git a/src/usecase/steps/pull_request/update_pull_request_description_use_case.ts b/src/usecase/steps/pull_request/update_pull_request_description_use_case.ts index 5b762c3f..d527bd77 100644 --- a/src/usecase/steps/pull_request/update_pull_request_description_use_case.ts +++ b/src/usecase/steps/pull_request/update_pull_request_description_use_case.ts @@ -129,7 +129,7 @@ 
export class UpdatePullRequestDescriptionUseCase implements ParamUseCase`; -const mockAsk = jest.fn(); const mockAskAgent = jest.fn(); const mockUpdateComment = jest.fn(); jest.mock('../../../../data/repository/ai_repository', () => ({ - AiRepository: jest.fn().mockImplementation(() => ({ ask: mockAsk, askAgent: mockAskAgent })), + AiRepository: jest.fn().mockImplementation(() => ({ askAgent: mockAskAgent })), })); jest.mock('../../../../data/repository/issue_repository', () => ({ IssueRepository: jest.fn().mockImplementation(() => ({ updateComment: mockUpdateComment })), @@ -35,7 +34,6 @@ describe('CheckPullRequestCommentLanguageUseCase', () => { beforeEach(() => { useCase = new CheckPullRequestCommentLanguageUseCase(); - mockAsk.mockReset(); mockAskAgent.mockReset(); mockUpdateComment.mockReset(); }); @@ -48,7 +46,7 @@ describe('CheckPullRequestCommentLanguageUseCase', () => { expect(results).toHaveLength(1); expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); it('returns success executed false when commentBody contains translatedKey', async () => { @@ -60,30 +58,30 @@ describe('CheckPullRequestCommentLanguageUseCase', () => { expect(results[0].success).toBe(true); expect(results[0].executed).toBe(false); - expect(mockAsk).not.toHaveBeenCalled(); + expect(mockAskAgent).not.toHaveBeenCalled(); }); it('returns success executed true when AI responds done', async () => { - mockAsk.mockResolvedValue('done'); + mockAskAgent.mockResolvedValue({ status: 'done' }); const param = baseParam(); const results = await useCase.invoke(param); expect(results[0].success).toBe(true); expect(results[0].executed).toBe(true); - expect(mockAsk).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(1); }); it('calls updateComment when must_translate and askAgent returns schema with translatedText', async () => { - 
mockAsk.mockResolvedValueOnce('must_translate'); - mockAskAgent.mockResolvedValueOnce({ translatedText: 'Hola traducido' }); + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce({ translatedText: 'Hola traducido' }); mockUpdateComment.mockResolvedValue(undefined); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAsk).toHaveBeenCalledTimes(1); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); expect(mockUpdateComment).toHaveBeenCalledWith( 'o', 'r', @@ -94,15 +92,15 @@ describe('CheckPullRequestCommentLanguageUseCase', () => { ); }); - it('does not update comment when askAgent returns undefined', async () => { - mockAsk.mockResolvedValueOnce('must_translate'); - mockAskAgent.mockResolvedValueOnce(undefined); + it('does not update comment when askAgent returns undefined for translation', async () => { + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce(undefined); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAsk).toHaveBeenCalledTimes(1); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); expect(mockUpdateComment).not.toHaveBeenCalled(); expect(results).toHaveLength(1); expect(results[0].success).toBe(true); @@ -110,13 +108,14 @@ describe('CheckPullRequestCommentLanguageUseCase', () => { }); it('does not update comment when askAgent returns empty translatedText', async () => { - mockAsk.mockResolvedValueOnce('must_translate'); - mockAskAgent.mockResolvedValueOnce({ translatedText: '' }); + mockAskAgent + .mockResolvedValueOnce({ status: 'must_translate' }) + .mockResolvedValueOnce({ translatedText: '' }); const param = baseParam(); const results = await useCase.invoke(param); - expect(mockAskAgent).toHaveBeenCalledTimes(1); + expect(mockAskAgent).toHaveBeenCalledTimes(2); 
expect(mockUpdateComment).not.toHaveBeenCalled(); expect(results).toHaveLength(1); expect(results[0].success).toBe(true); diff --git a/src/usecase/steps/pull_request_review_comment/check_pull_request_comment_language_use_case.ts b/src/usecase/steps/pull_request_review_comment/check_pull_request_comment_language_use_case.ts index a957c378..bc7e4434 100644 --- a/src/usecase/steps/pull_request_review_comment/check_pull_request_comment_language_use_case.ts +++ b/src/usecase/steps/pull_request_review_comment/check_pull_request_comment_language_use_case.ts @@ -2,6 +2,7 @@ import { Execution } from "../../../data/model/execution"; import { Result } from "../../../data/model/result"; import { AiRepository, + LANGUAGE_CHECK_RESPONSE_SCHEMA, OPENCODE_AGENT_PLAN, TRANSLATION_RESPONSE_SCHEMA, } from "../../../data/repository/ai_repository"; @@ -48,13 +49,23 @@ If you'd like this comment to be translated again, please delete the entire comm The text is: ${commentBody} `; - - let result = await this.aiRepository.ask( + const checkResponse = await this.aiRepository.askAgent( param.ai, + OPENCODE_AGENT_PLAN, prompt, + { + expectJson: true, + schema: LANGUAGE_CHECK_RESPONSE_SCHEMA as unknown as Record, + schemaName: 'language_check_response', + }, ); - - if (result === "done") { + const status = + checkResponse != null && + typeof checkResponse === 'object' && + typeof (checkResponse as Record).status === 'string' + ? 
((checkResponse as Record).status as string) + : ''; + if (status === 'done') { results.push( new Result({ id: this.taskId, diff --git a/src/utils/__tests__/content_utils.test.ts b/src/utils/__tests__/content_utils.test.ts index fe69f6dc..78c46c81 100644 --- a/src/utils/__tests__/content_utils.test.ts +++ b/src/utils/__tests__/content_utils.test.ts @@ -1,6 +1,7 @@ import { extractVersion, extractReleaseType, + extractChangelogUpToAdditionalContext, injectJsonAsMarkdownBlock, } from '../content_utils'; @@ -45,6 +46,61 @@ describe('content_utils', () => { }); }); + describe('extractChangelogUpToAdditionalContext', () => { + it('extracts content from Changelog section up to Additional Context', () => { + const body = [ + '### Changelog', + '', + '## OpenCode as AI backend', + '', + '- **All AI features** now use **OpenCode**.', + '', + '### Additional Context', + '', + 'Anything else to note?', + ].join('\n'); + expect(extractChangelogUpToAdditionalContext(body, 'Changelog')).toBe( + '## OpenCode as AI backend\n\n- **All AI features** now use **OpenCode**.', + ); + }); + + it('extracts content from Hotfix Solution section up to Additional Context', () => { + const body = [ + '### Hotfix Solution', + '', + 'Describe the solution.', + 'Multiple lines.', + '', + '### Additional Context', + '', + 'Extra notes.', + ].join('\n'); + expect(extractChangelogUpToAdditionalContext(body, 'Hotfix Solution')).toBe( + 'Describe the solution.\nMultiple lines.', + ); + }); + + it('returns full content when Additional Context is absent', () => { + const body = '### Changelog\n\nOnly section here.'; + expect(extractChangelogUpToAdditionalContext(body, 'Changelog')).toBe('Only section here.'); + }); + + it('handles ## style headings', () => { + const body = '## Changelog\n\nContent here.\n\n## Additional Context\n\nOther.'; + expect(extractChangelogUpToAdditionalContext(body, 'Changelog')).toBe('Content here.'); + }); + + it('returns default when body is null or empty', () => { + 
expect(extractChangelogUpToAdditionalContext(null, 'Changelog')).toBe('No changelog provided'); + expect(extractChangelogUpToAdditionalContext(undefined, 'Changelog')).toBe('No changelog provided'); + expect(extractChangelogUpToAdditionalContext('', 'Changelog')).toBe('No changelog provided'); + }); + + it('returns default when section is not found', () => { + expect(extractChangelogUpToAdditionalContext('### Other\n\nText.', 'Changelog')).toBe('No changelog provided'); + }); + }); + describe('injectJsonAsMarkdownBlock', () => { it('wraps JSON with title and blockquote-style markdown', () => { const result = injectJsonAsMarkdownBlock('Config', { foo: 'bar' }); diff --git a/src/utils/constants.ts b/src/utils/constants.ts index 42ac8a93..c0ffe7dc 100644 --- a/src/utils/constants.ts +++ b/src/utils/constants.ts @@ -5,8 +5,14 @@ export const REPO_URL = 'https://github.com/landamessenger/git-board-flow' /** Default OpenCode model: provider/modelID (e.g. opencode/kimi-k2.5-free). Reuse for CLI, action and Ai fallbacks. */ export const OPENCODE_DEFAULT_MODEL = 'opencode/kimi-k2.5-free' -/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow with many files. */ -export const OPENCODE_REQUEST_TIMEOUT_MS = 600_000 +/** Timeout in ms for OpenCode HTTP requests (session create, message, diff). Agent calls can be slow (e.g. plan analyzing repo). */ +export const OPENCODE_REQUEST_TIMEOUT_MS = 900_000 + +/** Max attempts for OpenCode requests (retries on failure). Applied transparently in AiRepository. */ +export const OPENCODE_MAX_RETRIES = 5 + +/** Delay in ms between OpenCode retry attempts. 
*/ +export const OPENCODE_RETRY_DELAY_MS = 2000 export const DEFAULT_IMAGE_CONFIG = { issue: { @@ -221,6 +227,8 @@ export const INPUT_KEYS = { AI_MEMBERS_ONLY: 'ai-members-only', AI_IGNORE_FILES: 'ai-ignore-files', AI_INCLUDE_REASONING: 'ai-include-reasoning', + BUGBOT_SEVERITY: 'bugbot-severity', + BUGBOT_COMMENT_LIMIT: 'bugbot-comment-limit', // Projects PROJECT_IDS: 'project-ids', @@ -388,9 +396,18 @@ export const ACTIONS = { THINK: 'think_action', INITIAL_SETUP: 'initial_setup', CHECK_PROGRESS: 'check_progress_action', - DETECT_ERRORS: 'detect_errors_action', + DETECT_POTENTIAL_PROBLEMS: 'detect_potential_problems_action', RECOMMEND_STEPS: 'recommend_steps_action', } as const; +/** Hidden HTML comment prefix for bugbot findings (issue/PR comments). Format: */ +export const BUGBOT_MARKER_PREFIX = 'gbf-bugbot'; + +/** Max number of individual bugbot comments to create per issue/PR. Excess findings get one summary comment suggesting to review locally. */ +export const BUGBOT_MAX_COMMENTS = 20; + +/** Minimum severity to publish (findings below this are dropped). Order: high > medium > low > info. */ +export const BUGBOT_MIN_SEVERITY: 'info' | 'low' | 'medium' | 'high' = 'low'; + export const PROMPTS = { } as const; diff --git a/src/utils/content_utils.ts b/src/utils/content_utils.ts index 41aaf767..f3604ff0 100644 --- a/src/utils/content_utils.ts +++ b/src/utils/content_utils.ts @@ -10,6 +10,28 @@ export const extractReleaseType = (pattern: string, text: string): string | unde return match ? match[1] : undefined; }; +/** + * Extracts changelog content from an issue body: from the given section heading (e.g. "Changelog" or "Hotfix Solution") + * up to but not including the "Additional Context" section. Used for release/hotfix deployment bodies. 
+ */ +export const extractChangelogUpToAdditionalContext = ( + body: string | null | undefined, + sectionTitle: string, +): string => { + if (body == null || body === '') { + return 'No changelog provided'; + } + const escaped = sectionTitle.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const pattern = new RegExp( + `(?:###|##)\\s*${escaped}\\s*\\n\\n([\\s\\S]*?)` + + `(?=\\n(?:###|##)\\s*Additional Context\\s*|$)`, + 'i', + ); + const match = body.match(pattern); + const content = match?.[1]?.trim(); + return content ?? 'No changelog provided'; +}; + export const injectJsonAsMarkdownBlock = (title: string, json: object): string => { const formattedJson = JSON.stringify(json, null, 4) // Pretty-print the JSON with 4 spaces. .split('\n') // Split into lines. diff --git a/src/utils/opencode_server.ts b/src/utils/opencode_server.ts index b1a97f8c..ce9c4cd2 100644 --- a/src/utils/opencode_server.ts +++ b/src/utils/opencode_server.ts @@ -1,21 +1,74 @@ /** * Managed OpenCode server lifecycle for GitHub Actions. * Starts "npx opencode-ai serve" and stops it when the action finishes. + * If no opencode.json exists in cwd, creates one with provider timeout 10 min and removes it on stop. */ import { spawn, ChildProcess } from 'child_process'; +import { access, writeFile, unlink } from 'fs/promises'; +import path from 'path'; import { logInfo, logError, logDebugInfo } from './logger'; const DEFAULT_PORT = 4096; const HEALTH_PATH = '/global/health'; const POLL_INTERVAL_MS = 500; const STARTUP_TIMEOUT_MS = 120000; // 2 min (first npx download can be slow) +const OPENCODE_CONFIG_FILENAME = 'opencode.json'; +/** Provider request timeout in ms (10 min). OpenCode default is 5 min; we need longer for plan agent. */ +const OPENCODE_PROVIDER_TIMEOUT_MS = 600_000; export interface ManagedOpencodeServer { url: string; stop: () => Promise; } +/** Result of ensuring opencode config exists. If created, caller must remove it on teardown. 
*/ +interface OpencodeConfigResult { + created: boolean; + configPath: string; +} + +/** + * If opencode.json does not exist in cwd, create it with provider timeout (10 min). + * OpenCode merges configs; this file will set provider.opencode.options.timeout so long requests don't get cut at 5 min. + */ +async function ensureOpencodeConfig(cwd: string): Promise { + const configPath = path.join(cwd, OPENCODE_CONFIG_FILENAME); + try { + await access(configPath); + return { created: false, configPath }; + } catch { + // File does not exist; create minimal config for provider timeout + } + const config = { + $schema: 'https://opencode.ai/config.json', + provider: { + opencode: { + options: { + timeout: OPENCODE_PROVIDER_TIMEOUT_MS, + }, + }, + }, + }; + await writeFile(configPath, JSON.stringify(config, null, 2), 'utf8'); + logInfo(`Created ${OPENCODE_CONFIG_FILENAME} with provider timeout ${OPENCODE_PROVIDER_TIMEOUT_MS / 60_000} min (will remove on server stop).`); + return { created: true, configPath }; +} + +/** + * Remove opencode.json if we created it (so we don't leave a temporary file in the repo). + */ +async function removeOpencodeConfigIfCreated(result: OpencodeConfigResult): Promise { + if (!result.created) return; + try { + await unlink(result.configPath); + logInfo(`Removed temporary ${OPENCODE_CONFIG_FILENAME}.`); + } catch (e) { + const msg = e instanceof Error ? e.message : String(e); + logError(`Failed to remove temporary ${OPENCODE_CONFIG_FILENAME}: ${msg}`); + } +} + /** * Wait until OpenCode server responds to /global/health or timeout. */ @@ -56,6 +109,8 @@ export async function startOpencodeServer(options?: { const cwd = options?.cwd ?? 
process.cwd(); const baseUrl = `http://${hostname}:${port}`; + const configResult = await ensureOpencodeConfig(cwd); + logInfo(`Starting OpenCode server at ${baseUrl} (this may take a moment on first run)...`); const child = spawn( @@ -69,7 +124,10 @@ export async function startOpencodeServer(options?: { } ); - const stop = (): Promise => stopOpencodeServer(child); + const stop = async (): Promise => { + await stopOpencodeServer(child); + await removeOpencodeConfigIfCreated(configResult); + }; // Ensure we don't leave the process running if our process exits const onExit = () => {