From af6bda0bd93309439f2a4a251807578d56b81e0e Mon Sep 17 00:00:00 2001 From: Error Date: Fri, 14 Nov 2025 11:08:11 -0600 Subject: [PATCH 1/5] automate CI and review workflows --- .coderabbit.yaml | 42 ++++ .github/workflows/ci.yml | 175 ++++++++++++++-- .github/workflows/review-response.yml | 103 ++++++++++ biome.json | 16 ++ docs/development/ci.md | 94 +++++++++ docs/index.md | 1 + lib/commands/codex-metrics.ts | 93 ++++++--- package.json | 3 + pnpm-lock.yaml | 91 +++++++++ scripts/detect-release-type.mjs | 275 ++++++++++++++++++++++++++ scripts/review-response-context.mjs | 113 +++++++++++ scripts/sync-github-secrets.mjs | 138 +++++++++++++ spec/branch-protection.md | 26 +++ spec/ci-release-automation.md | 39 ++++ spec/codex-metrics-sse-fix.md | 42 ++++ spec/github-secret-sync.md | 21 ++ spec/review-response-automation.md | 55 ++++++ test/codex-metrics-command.test.ts | 72 ++++--- 18 files changed, 1331 insertions(+), 68 deletions(-) create mode 100644 .coderabbit.yaml create mode 100644 .github/workflows/review-response.yml create mode 100644 biome.json create mode 100644 docs/development/ci.md create mode 100644 scripts/detect-release-type.mjs create mode 100644 scripts/review-response-context.mjs create mode 100644 scripts/sync-github-secrets.mjs create mode 100644 spec/branch-protection.md create mode 100644 spec/ci-release-automation.md create mode 100644 spec/codex-metrics-sse-fix.md create mode 100644 spec/github-secret-sync.md create mode 100644 spec/review-response-automation.md diff --git a/.coderabbit.yaml b/.coderabbit.yaml new file mode 100644 index 0000000..6e4a66d --- /dev/null +++ b/.coderabbit.yaml @@ -0,0 +1,42 @@ +# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json +# Reference: https://docs.coderabbit.ai/reference/configuration +language: en-US +tone_instructions: "Review with a calm, practical tone and highlight blockers first." 
+reviews: + profile: assertive + high_level_summary: true + high_level_summary_in_walkthrough: true + path_filters: + - "!dist/**" + - "!node_modules/**" + - "!coverage/**" + - "src/**" + - "lib/**" + - "test/**" + auto_review: + enabled: true + drafts: false + base_branches: + - main + tools: + eslint: + enabled: true + biome: + enabled: true + gitleaks: + enabled: true + actionlint: + enabled: true +knowledge_base: + code_guidelines: + enabled: true + filePatterns: + - "docs/**" + - "AGENTS.md" + - "spec/**/*.md" + learnings: + scope: auto + issues: + scope: auto + pull_requests: + scope: auto diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 828385e..e2eec82 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,57 +2,190 @@ name: CI on: push: - branches: [main] + branches: + - '**' pull_request: - branches: [main] jobs: - test: - name: Test on Node.js ${{ matrix.node-version }} + lint: + name: Lint & Typecheck runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.15.0 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22.x + cache: pnpm + + - name: Install dependencies + run: pnpm install --frozen-lockfile + - name: Run lint + run: pnpm lint + + - name: Run typecheck + run: pnpm typecheck + + test: + name: Test + runs-on: ubuntu-latest strategy: + fail-fast: false matrix: node-version: [20.x, 22.x] - steps: - - name: Checkout code + - name: Checkout uses: actions/checkout@v4 + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.15.0 + - name: Setup Node.js ${{ matrix.node-version }} uses: actions/setup-node@v4 with: node-version: ${{ matrix.node-version }} - cache: 'npm' + cache: pnpm - name: Install dependencies - run: npm ci - - - name: Run type check - run: npm run typecheck + run: pnpm install --frozen-lockfile - name: Run tests - run: npm test + run: pnpm test - name: 
Build - run: npm run build + run: pnpm run build - lint: - name: Lint and Format Check + mutation: + name: Mutation Tests runs-on: ubuntu-latest + if: github.event_name == 'pull_request' && github.event.pull_request.base.ref == 'main' + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.15.0 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22.x + cache: pnpm + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Run mutation tests + run: pnpm test:mutation + + - name: Upload Stryker reports + if: always() + uses: actions/upload-artifact@v4 + with: + name: stryker-reports + path: | + coverage/stryker.html + coverage/stryker.json + if-no-files-found: ignore + release: + name: Release + runs-on: ubuntu-latest + needs: + - lint + - test + if: github.event_name == 'push' && github.ref == 'refs/heads/main' + permissions: + contents: write + env: + OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} + OPENCODE_API_URL: ${{ secrets.OPENCODE_API_URL }} + NPM_TOKEN: ${{ secrets.NPM_TOKEN }} steps: - - name: Checkout code + - name: Checkout uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.15.0 - name: Setup Node.js uses: actions/setup-node@v4 with: - node-version: 20.x - cache: 'npm' + node-version: 22.x + cache: pnpm - name: Install dependencies - run: npm ci + run: pnpm install --frozen-lockfile + + - name: Validate release secrets + run: | + if [ -z "$NPM_TOKEN" ]; then + echo "NPM_TOKEN secret is required to publish" >&2 + exit 1 + fi + if [ -z "$OPENCODE_API_KEY" ]; then + echo "OPENCODE_API_KEY secret is required to classify releases" >&2 + exit 1 + fi + + - name: Analyze repository for release + id: analyze + run: | + node scripts/detect-release-type.mjs --output release-analysis.json + echo "release_type=$(jq -r '.releaseType' release-analysis.json)" >> 
"$GITHUB_OUTPUT" + echo "next_version=$(jq -r '.nextVersion' release-analysis.json)" >> "$GITHUB_OUTPUT" + { + echo "notes<<'EOF'" + jq -r '.releaseNotes' release-analysis.json + echo "EOF" + } >> "$GITHUB_OUTPUT" + + - name: Remove analyzer scratch file + run: rm -f release-analysis.json + + - name: Configure git user + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" - - name: Run type check - run: npm run typecheck + - name: Configure npm auth + run: | + echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc + + - name: Bump version and tag + run: pnpm version ${{ steps.analyze.outputs.next_version }} + + - name: Build package + run: pnpm run build + + - name: Publish to npm + env: + NODE_AUTH_TOKEN: ${{ env.NPM_TOKEN }} + run: pnpm publish --access public + + - name: Push changes + run: | + git push origin HEAD:main --follow-tags + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + tag_name: v${{ steps.analyze.outputs.next_version }} + name: Release ${{ steps.analyze.outputs.next_version }} + body: ${{ steps.analyze.outputs.notes }} diff --git a/.github/workflows/review-response.yml b/.github/workflows/review-response.yml new file mode 100644 index 0000000..477244a --- /dev/null +++ b/.github/workflows/review-response.yml @@ -0,0 +1,103 @@ +name: review-response + +on: + pull_request_review_comment: + types: [created] + +jobs: + auto-review-response: + if: github.event.comment.user.type != 'Bot' + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + env: + OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} + steps: + - name: Checkout PR head + uses: actions/checkout@v4 + with: + fetch-depth: 0 + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + + - name: Verify OpenCode secret + run: | + if [ -z "$OPENCODE_API_KEY" ]; then + echo "OPENCODE_API_KEY secret is 
required" >&2 + exit 1 + fi + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: Install OpenCode CLI + run: | + curl -fsSL https://opencode.ai/install.sh | sh + echo "$HOME/.local/bin" >> "$GITHUB_PATH" + + - name: Prepare review context + id: context + run: node scripts/review-response-context.mjs + + - name: Start fix branch + id: branch + run: | + branch="${{ steps.context.outputs.branch_name }}-${{ github.run_id }}" + git checkout -b "$branch" + echo "name=$branch" >> "$GITHUB_OUTPUT" + + - name: Run review-response agent + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + opencode run \ + --agent review-response \ + --model opencode/big-pickle \ + --file review-context.md \ + "Follow the instructions inside review-context.md and resolve the review comment precisely." + + - name: Detect changes + id: diff + run: | + if git diff --quiet; then + echo "has_changes=false" >> "$GITHUB_OUTPUT" + else + echo "has_changes=true" >> "$GITHUB_OUTPUT" + fi + + - name: Commit and push + if: steps.diff.outputs.has_changes == 'true' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add -A + git commit -m "Auto-resolve review comment #${{ steps.context.outputs.comment_id }}" || exit 0 + git push origin "${{ steps.branch.outputs.name }}" + + - name: Open pull request + if: steps.diff.outputs.has_changes == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + comment_url="${{ steps.context.outputs.comment_url }}" + reviewer="${{ steps.context.outputs.reviewer }}" + pr_number="${{ steps.context.outputs.pr_number }}" + cat < pr-body.txt +Automated follow-up for ${comment_url} by @${reviewer}. +This branch contains a single commit generated by the review-response agent. +Original PR: #${pr_number}. 
+EOF + gh pr create \ + --base "${{ steps.context.outputs.base_ref }}" \ + --head "${{ steps.branch.outputs.name }}" \ + --title "Resolve review comment #${{ steps.context.outputs.comment_id }}" \ + --body-file pr-body.txt + + - name: No-op notification + if: steps.diff.outputs.has_changes != 'true' + run: echo "Review-response agent produced no changes; skipping PR creation." diff --git a/biome.json b/biome.json new file mode 100644 index 0000000..4bef06a --- /dev/null +++ b/biome.json @@ -0,0 +1,16 @@ +{ + "$schema": "https://biomejs.dev/schemas/2.3.5/schema.json", + "files": { + "includes": ["scripts/**/*.mjs"] + }, + "formatter": { + "enabled": true, + "lineWidth": 110 + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true + } + } +} diff --git a/docs/development/ci.md b/docs/development/ci.md new file mode 100644 index 0000000..ab5d94a --- /dev/null +++ b/docs/development/ci.md @@ -0,0 +1,94 @@ +# CI, Mutation Testing, and Release Automation + +Our single workflow (`.github/workflows/ci.yml`) now owns every automated quality and release check. This file describes what runs, how to configure required secrets, and how to troubleshoot the new tooling. + +## Pipeline Overview + +| Job | When it runs | What it does | +| --- | --- | --- | +| `lint` | every push + PR | Installs dependencies with pnpm, runs `pnpm lint` (Biome) and `pnpm typecheck` (TS strict mode) | +| `test` | every push + PR (Node 20.x + 22.x) | Executes `pnpm test` and `pnpm run build` to catch regressions on both supported runtimes | +| `mutation` | pull requests targeting `main` | Runs `pnpm test:mutation` (Stryker). HTML/JSON reports are uploaded even on failure for review | +| `release` | push events on `main` (after lint+test pass) | Calls the Opencode-powered analyzer, bumps the semver via `pnpm version `, publishes to npm, and creates a GitHub Release | + +### Release Flow Recap +1. A merge into `main` triggers the workflow. +2. 
The analyzer (`scripts/detect-release-type.mjs`) gathers commits since the last tag, calls `opencode/gpt-5-nano`, and emits structured JSON (release type, reasoning, highlights, breaking changes, Markdown notes). +3. `pnpm version ` bumps `package.json` / `pnpm-lock.yaml` and creates the git tag expected by npm + GitHub Releases. +4. The job pushes the commit + tag, runs `pnpm run build`, publishes to npm, and then uses `softprops/action-gh-release` so the GitHub release body matches the analyzer output. + +## Required Secrets + +Add these repository secrets before enabling the workflow: + +### `NPM_TOKEN` +Automation token used for publishing. Create one from the npm web UI: +1. Visit /tokens>. +2. Choose **Generate New Token → Automation**. +3. Copy the token value and add it as the `NPM_TOKEN` repository secret. + +GitHub Actions writes the token into `~/.npmrc` before running `pnpm publish --access public`. + +### `OPENCODE_API_KEY` +API key for calling Opencode's Responses endpoint with the `opencode/gpt-5-nano` model. Generate a key with the OpenCode CLI: +```bash +# Create a long-lived key for CI usage +opencode auth token create --label "ci-release" --scopes responses.create +# Copy the token and store it as the OPENCODE_API_KEY secret +``` +If you run a self-hosted Opencode endpoint, also add `OPENCODE_API_URL` (optional) to override the default `https://api.openai.com/v1/responses` base URL. + +### Optional overrides +- `RELEASE_BASE_REF`: force the analyzer to diff from a specific tag/commit (useful when backporting release branches). + +## Branch protection +- `main` requires pull requests for all changes; direct pushes and force pushes are disabled. +- Required status checks: `lint`, `test (node-version: 20.x)`, and `test (node-version: 22.x)` must pass before the merge button unlocks. (Type checking runs inside the `lint` job.) +- No human approvals are required right now—the PR gate exists for automated reviewers and CI visibility. 
+- Branches must be up to date with `main` before merging because strict status checks are enabled. + +## Review comment automation +- Workflow: `.github/workflows/review-response.yml` +- Trigger: every `pull_request_review_comment` with action `created` (non-bot authors only). +- Flow: check out the PR head, generate `review-context.md`, run the `review-response` agent (`.opencode/agent/review-response.md`) with `opencode/big-pickle`, then create a branch named `review/comment--` containing a single commit. The workflow pushes the branch and opens a PR back to the review’s base branch referencing the original comment. +- Requirements: `OPENCODE_API_KEY` secret (shared with release automation) and default `GITHUB_TOKEN` permissions (`contents: write`, `pull-requests: write`). +- To test locally: `act pull_request_review_comment --eventpath event.json -j auto-review-response` after installing [act](https://github.com/nektos/act) and exporting the required secrets. + +### Syncing secrets with `gh` +Instead of copying values into the GitHub UI, you can push local environment variables straight to repository secrets with the helper script: +```bash +# export whichever secrets you want to sync +export NPM_TOKEN=... # npm automation token +export OPENCODE_API_KEY=... # CLI token from `opencode auth token create` + +# dry-run first (recommended) +pnpm sync:secrets -- --dry-run + +# actually sync to this repo +pnpm sync:secrets + +# sync to another repo, sending a custom set of secrets +pnpm sync:secrets -- --repo my-org/another-repo NPM_TOKEN OPENCODE_API_KEY RELEASE_BASE_REF +``` +Requirements: +1. `gh` CLI installed and authenticated (`gh auth login`). +2. Local env vars exported for every secret you plan to send (optional ones are skipped automatically when unset). +3. Appropriate permissions for the destination repository; otherwise `gh secret set` will fail. 
+ +## Local Analyzer Usage +You can run the same analyzer locally to preview the next release type: +```bash +OPENCODE_API_KEY=... node scripts/detect-release-type.mjs --output ./release-analysis.json +cat release-analysis.json +``` +Environment variables (e.g., `RELEASE_BASE_REF`) behave exactly like they do in CI. + +## Troubleshooting +- **Analyzer falls back to patch:** the script logs the precise reason to stderr. Check that `OPENCODE_API_KEY` is valid and the model endpoint is reachable. +- **npm publish fails (403):** confirm the `NPM_TOKEN` secret exists, has automation scope, and the account owns the `@openhax/codex` package. +- **Mutation job is slow:** it intentionally runs only for PRs targeting `main`. Local developers can reproduce with `pnpm test:mutation` before pushing. + +## References +- Workflow: `.github/workflows/ci.yml` +- Analyzer: `scripts/detect-release-type.mjs` +- Lint config: `biome.json` diff --git a/docs/index.md b/docs/index.md index c1f03de..3ddba01 100644 --- a/docs/index.md +++ b/docs/index.md @@ -32,6 +32,7 @@ Explore the engineering depth behind this plugin: - [Config System](development/CONFIG_FLOW.md) - How configuration loading and merging works - [Config Fields](development/CONFIG_FIELDS.md) - Understanding config keys, `id`, and `name` fields - [Testing Guide](development/TESTING.md) - Test scenarios, integration testing, verification matrix +- [CI & Release Automation](development/ci.md) - Secrets, mutation runs, and automatic publishing --- diff --git a/lib/commands/codex-metrics.ts b/lib/commands/codex-metrics.ts index 5a4a8e1..a2d6235 100644 --- a/lib/commands/codex-metrics.ts +++ b/lib/commands/codex-metrics.ts @@ -134,11 +134,31 @@ function createStaticResponse( ): Response { const outputTokens = estimateTokenCount(text); const commandName = metadata.command; + const responseId = `resp_cmd_${randomUUID()}`; + const messageId = `msg_cmd_${randomUUID()}`; + const created = Math.floor(Date.now() / 1000); + const 
resolvedModel = model || "gpt-5"; + + const assistantMessage = { + id: messageId, + type: "message", + role: "assistant", + content: [ + { + type: "output_text", + text, + }, + ], + metadata: { + source: commandName, + }, + }; + const responsePayload = { - id: `resp_cmd_${randomUUID()}`, + id: responseId, object: "response", - created: Math.floor(Date.now() / 1000), - model: model || "gpt-5", + created, + model: resolvedModel, status: "completed", usage: { input_tokens: 0, @@ -146,26 +166,53 @@ function createStaticResponse( reasoning_tokens: 0, total_tokens: outputTokens, }, - output: [ - { - id: `msg_cmd_${randomUUID()}`, - type: "message", - role: "assistant", - content: [ - { - type: "output_text", - text, - }, - ], - metadata: { - source: commandName, - }, - }, - ], + output: [assistantMessage], metadata, }; - const stream = createSsePayload(responsePayload); + // Emit the same SSE event sequence that OpenAI's Responses API uses so CLI validators pass. + const events: Record[] = [ + { + id: responseId, + type: "response.created", + response: { + id: responseId, + object: "response", + created, + model: resolvedModel, + status: "in_progress", + }, + }, + { + id: responseId, + type: "response.output_text.delta", + response_id: responseId, + output_index: 0, + item_id: messageId, + delta: text, + }, + { + id: responseId, + type: "response.output_item.added", + response_id: responseId, + output_index: 0, + item: assistantMessage, + }, + { + id: responseId, + type: "response.output_item.done", + response_id: responseId, + output_index: 0, + item: assistantMessage, + }, + { + id: responseId, + type: "response.completed", + response: responsePayload, + }, + ]; + + const stream = createSsePayload(events); return new Response(stream, { status: 200, headers: { @@ -176,10 +223,10 @@ function createStaticResponse( }); } -function createSsePayload(payload: Record): string { - const dataLine = `data: ${JSON.stringify(payload)}\n\n`; +function createSsePayload(events: 
Array>): string { + const chunks = events.map((event) => `data: ${JSON.stringify(event)}\n\n`).join(""); const doneLine = `data: [DONE]\n\n`; - return dataLine + doneLine; + return chunks + doneLine; } function extractLatestUserText(body: RequestBody): string | null { diff --git a/package.json b/package.json index 24b0a42..6cee2de 100644 --- a/package.json +++ b/package.json @@ -30,6 +30,8 @@ "scripts": { "build": "tsc && cp lib/oauth-success.html dist/lib/", "typecheck": "tsc --noEmit", + "lint": "biome check .", + "sync:secrets": "node scripts/sync-github-secrets.mjs", "test": "vitest run", "test:watch": "vitest", "test:ui": "vitest --ui", @@ -48,6 +50,7 @@ "@opencode-ai/plugin": "^0.13.7" }, "devDependencies": { + "@biomejs/biome": "^2.3.5", "@opencode-ai/plugin": "^0.13.7", "@opencode-ai/sdk": "^0.13.9", "@stryker-mutator/core": "^8.2.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a749eb5..f5e4d8e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -15,6 +15,9 @@ importers: specifier: ^4.10.4 version: 4.10.5 devDependencies: + '@biomejs/biome': + specifier: ^2.3.5 + version: 2.3.5 '@opencode-ai/plugin': specifier: ^0.13.7 version: 0.13.9(magicast@0.3.5)(typescript@5.9.3) @@ -216,6 +219,59 @@ packages: resolution: {integrity: sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} engines: {node: '>=18'} + '@biomejs/biome@2.3.5': + resolution: {integrity: sha512-HvLhNlIlBIbAV77VysRIBEwp55oM/QAjQEin74QQX9Xb259/XP/D5AGGnZMOyF1el4zcvlNYYR3AyTMUV3ILhg==} + engines: {node: '>=14.21.3'} + hasBin: true + + '@biomejs/cli-darwin-arm64@2.3.5': + resolution: {integrity: sha512-fLdTur8cJU33HxHUUsii3GLx/TR0BsfQx8FkeqIiW33cGMtUD56fAtrh+2Fx1uhiCsVZlFh6iLKUU3pniZREQw==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [darwin] + + '@biomejs/cli-darwin-x64@2.3.5': + resolution: {integrity: sha512-qpT8XDqeUlzrOW8zb4k3tjhT7rmvVRumhi2657I2aGcY4B+Ft5fNwDdZGACzn8zj7/K1fdWjgwYE3i2mSZ+vOA==} + engines: {node: '>=14.21.3'} + cpu: [x64] 
+ os: [darwin] + + '@biomejs/cli-linux-arm64-musl@2.3.5': + resolution: {integrity: sha512-eGUG7+hcLgGnMNl1KHVZUYxahYAhC462jF/wQolqu4qso2MSk32Q+QrpN7eN4jAHAg7FUMIo897muIhK4hXhqg==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [linux] + + '@biomejs/cli-linux-arm64@2.3.5': + resolution: {integrity: sha512-u/pybjTBPGBHB66ku4pK1gj+Dxgx7/+Z0jAriZISPX1ocTO8aHh8x8e7Kb1rB4Ms0nA/SzjtNOVJ4exVavQBCw==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [linux] + + '@biomejs/cli-linux-x64-musl@2.3.5': + resolution: {integrity: sha512-awVuycTPpVTH/+WDVnEEYSf6nbCBHf/4wB3lquwT7puhNg8R4XvonWNZzUsfHZrCkjkLhFH/vCZK5jHatD9FEg==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [linux] + + '@biomejs/cli-linux-x64@2.3.5': + resolution: {integrity: sha512-XrIVi9YAW6ye0CGQ+yax0gLfx+BFOtKaNX74n+xHWla6Cl6huUmcKNO7HPx7BiKnJUzrxXY1qYlm7xMvi08X4g==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [linux] + + '@biomejs/cli-win32-arm64@2.3.5': + resolution: {integrity: sha512-DlBiMlBZZ9eIq4H7RimDSGsYcOtfOIfZOaI5CqsWiSlbTfqbPVfWtCf92wNzx8GNMbu1s7/g3ZZESr6+GwM/SA==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [win32] + + '@biomejs/cli-win32-x64@2.3.5': + resolution: {integrity: sha512-nUmR8gb6yvrKhtRgzwo/gDimPwnO5a4sCydf8ZS2kHIJhEmSmk+STsusr1LHTuM//wXppBawvSQi2xFXJCdgKQ==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [win32] + '@esbuild/aix-ppc64@0.25.12': resolution: {integrity: sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==} engines: {node: '>=18'} @@ -1910,6 +1966,41 @@ snapshots: '@bcoe/v8-coverage@1.0.2': {} + '@biomejs/biome@2.3.5': + optionalDependencies: + '@biomejs/cli-darwin-arm64': 2.3.5 + '@biomejs/cli-darwin-x64': 2.3.5 + '@biomejs/cli-linux-arm64': 2.3.5 + '@biomejs/cli-linux-arm64-musl': 2.3.5 + '@biomejs/cli-linux-x64': 2.3.5 + '@biomejs/cli-linux-x64-musl': 2.3.5 + '@biomejs/cli-win32-arm64': 2.3.5 + '@biomejs/cli-win32-x64': 2.3.5 + + '@biomejs/cli-darwin-arm64@2.3.5': + optional: true + + 
'@biomejs/cli-darwin-x64@2.3.5': + optional: true + + '@biomejs/cli-linux-arm64-musl@2.3.5': + optional: true + + '@biomejs/cli-linux-arm64@2.3.5': + optional: true + + '@biomejs/cli-linux-x64-musl@2.3.5': + optional: true + + '@biomejs/cli-linux-x64@2.3.5': + optional: true + + '@biomejs/cli-win32-arm64@2.3.5': + optional: true + + '@biomejs/cli-win32-x64@2.3.5': + optional: true + '@esbuild/aix-ppc64@0.25.12': optional: true diff --git a/scripts/detect-release-type.mjs b/scripts/detect-release-type.mjs new file mode 100644 index 0000000..edc3b6e --- /dev/null +++ b/scripts/detect-release-type.mjs @@ -0,0 +1,275 @@ +#!/usr/bin/env node +import { execSync } from "node:child_process"; +import { readFileSync, writeFileSync } from "node:fs"; +import path from "node:path"; + +function run(command) { + return execSync(command, { encoding: "utf8" }).trim(); +} + +function tryRun(command) { + try { + return run(command); + } catch (_error) { + return ""; + } +} + +function getBaseRef() { + if (process.env.RELEASE_BASE_REF) { + return process.env.RELEASE_BASE_REF; + } + const described = tryRun("git describe --tags --abbrev=0"); + if (described) { + return described; + } + return ""; +} + +function getHeadRef() { + return process.env.GITHUB_SHA || tryRun("git rev-parse HEAD") || "HEAD"; +} + +function getInitialCommit() { + return tryRun("git rev-list --max-parents=0 HEAD"); +} + +function collectCommits(range) { + const rangeArg = range ? `${range} ` : ""; + const raw = tryRun(`git log ${rangeArg}--no-merges --pretty=format:%h%x09%s --max-count=50`); + if (!raw) { + return []; + } + return raw + .split("\n") + .map((line) => line.trim()) + .filter(Boolean) + .map((line) => { + const [hash, subject] = line.split("\t"); + return `- ${hash ?? ""} ${subject ?? 
""}`.trim(); + }); +} + +function collectDiffStats(range) { + if (!range) { + const root = getInitialCommit(); + if (!root) { + return ""; + } + range = `${root} ${getHeadRef()}`; + } + const parts = range.split(" "); + if (parts.length === 1) { + return tryRun(`git diff --stat ${parts[0]}`); + } + return tryRun(`git diff --stat ${parts[0]} ${parts[1]}`); +} + +function readPackageVersion() { + const pkg = JSON.parse(readFileSync(path.join(process.cwd(), "package.json"), "utf8")); + return pkg.version ?? "0.0.0"; +} + +function bumpVersion(current, releaseType) { + const [major = 0, minor = 0, patch = 0] = current + .split(".") + .map((value) => Number.parseInt(value, 10)) + .map((value) => (Number.isFinite(value) ? value : 0)); + if (releaseType === "major") { + return `${major + 1}.0.0`; + } + if (releaseType === "minor") { + return `${major}.${minor + 1}.0`; + } + return `${major}.${minor}.${patch + 1}`; +} + +function parseArgs(argv) { + const result = {}; + for (let index = 0; index < argv.length; index += 1) { + const arg = argv[index]; + if (arg === "--output" && argv[index + 1]) { + result.output = argv[index + 1]; + index += 1; + } + } + return result; +} + +function extractAssistantText(response) { + const output = Array.isArray(response.output) ? response.output : []; + for (const item of output) { + if (!item || item.type !== "message" || item.role !== "assistant") { + continue; + } + const parts = Array.isArray(item.content) ? 
item.content : []; + return parts + .map((part) => { + if (!part || typeof part !== "object") return ""; + if (part.type === "output_text" && typeof part.text === "string") { + return part.text; + } + if (part.type === "input_text" && typeof part.text === "string") { + return part.text; + } + if ("text" in part && typeof part.text === "string") { + return part.text; + } + return ""; + }) + .filter(Boolean) + .join("\n") + .trim(); + } + return ""; +} + +async function callOpencodeModel(systemPrompt, userPrompt) { + const apiKey = process.env.OPENCODE_API_KEY; + if (!apiKey) { + throw new Error("OPENCODE_API_KEY is not configured"); + } + const url = process.env.OPENCODE_API_URL || "https://api.openai.com/v1/responses"; + const schema = { + name: "release_version", + schema: { + type: "object", + additionalProperties: false, + properties: { + releaseType: { type: "string", enum: ["major", "minor", "patch"] }, + reasoning: { type: "string" }, + highlights: { + type: "array", + items: { type: "string" }, + default: [], + }, + breakingChanges: { + type: "array", + items: { type: "string" }, + default: [], + }, + }, + required: ["releaseType", "reasoning"], + }, + }; + const body = { + model: "opencode/gpt-5-nano", + response_format: { type: "json_schema", json_schema: schema }, + input: [ + { + role: "system", + content: [{ type: "input_text", text: systemPrompt }], + }, + { + role: "user", + content: [{ type: "input_text", text: userPrompt }], + }, + ], + }; + const response = await fetch(url, { + method: "POST", + headers: { + Authorization: `Bearer ${apiKey}`, + "Content-Type": "application/json", + }, + body: JSON.stringify(body), + }); + if (!response.ok) { + const errorBody = await response.text().catch(() => ""); + throw new Error(`Opencode request failed: ${response.status} ${response.statusText} ${errorBody}`); + } + const data = await response.json(); + const text = extractAssistantText(data); + if (!text) { + throw new Error("Empty assistant response from 
opencode analyzer"); + } + return JSON.parse(text); +} + +function formatReleaseNotes(result) { + const lines = ["## Summary", result.summary || result.reasoning || "Automated release", ""]; + lines.push("### Release Type"); + lines.push(`- ${result.releaseType.toUpperCase()} (auto-detected)`); + if (result.highlights?.length) { + lines.push("", "### Highlights"); + for (const note of result.highlights) { + lines.push(`- ${note}`); + } + } + if (result.breakingChanges?.length) { + lines.push("", "### Breaking Changes"); + for (const note of result.breakingChanges) { + lines.push(`- ${note}`); + } + } + return lines.join("\n").trim(); +} + +async function main() { + const args = parseArgs(process.argv.slice(2)); + const baseRef = getBaseRef(); + const headRef = getHeadRef(); + const range = baseRef ? `${baseRef}..${headRef}` : ""; + const commits = collectCommits(range); + const diffSummary = collectDiffStats(range || headRef); + const hasChanges = commits.length > 0 || Boolean(diffSummary); + const systemPrompt = "You are a release manager that classifies semantic version bumps."; + const lines = [ + `Base ref: ${baseRef || ""}`, + `Head ref: ${headRef}`, + `Commit count (max 50 shown): ${commits.length}`, + "", + "## Commits", + commits.length ? commits.join("\n") : "(No commits detected)", + "", + "## Diff Summary", + diffSummary || "(Diff summary unavailable)", + ]; + const userPrompt = lines.join("\n"); + const fallback = { + releaseType: "patch", + reasoning: hasChanges + ? 
"Fallback to patch because analyzer input could not be classified" + : "No relevant commits detected, defaulting to patch", + highlights: commits.slice(0, 5).map((line) => line.replace(/^-\s*/, "")), + breakingChanges: [], + }; + let analysis; + try { + analysis = await callOpencodeModel(systemPrompt, userPrompt); + } catch (error) { + console.error(`[release] Falling back to patch bump: ${error.message}`); + analysis = fallback; + } + const currentVersion = readPackageVersion(); + const releaseType = analysis.releaseType ?? "patch"; + const nextVersion = bumpVersion(currentVersion, releaseType); + const summary = analysis.reasoning ?? fallback.reasoning; + const highlights = Array.isArray(analysis.highlights) ? analysis.highlights : fallback.highlights; + const breakingChanges = Array.isArray(analysis.breakingChanges) + ? analysis.breakingChanges + : fallback.breakingChanges; + const releaseNotes = formatReleaseNotes({ + summary, + reasoning: summary, + releaseType, + highlights, + breakingChanges, + }); + const result = { + baseRef: baseRef || null, + headRef, + releaseType, + nextVersion, + summary, + highlights, + breakingChanges, + releaseNotes, + }; + if (args.output) { + writeFileSync(args.output, JSON.stringify(result, null, 2)); + } + console.log(JSON.stringify(result, null, 2)); +} + +await main(); diff --git a/scripts/review-response-context.mjs b/scripts/review-response-context.mjs new file mode 100644 index 0000000..c1e7212 --- /dev/null +++ b/scripts/review-response-context.mjs @@ -0,0 +1,113 @@ +#!/usr/bin/env node +import { execSync } from "node:child_process"; +import { mkdirSync, readFileSync, writeFileSync } from "node:fs"; +import path from "node:path"; + +function readEvent() { + const eventPath = process.env.GITHUB_EVENT_PATH; + if (!eventPath) { + throw new Error("GITHUB_EVENT_PATH is not defined"); + } + const raw = readFileSync(eventPath, "utf8"); + return JSON.parse(raw); +} + +function execOrEmpty(command) { + try { + return 
execSync(command, { encoding: "utf8", stdio: ["ignore", "pipe", "pipe"] }).trim(); + } catch (_error) { + return ""; + } +} + +function clamp(text, max = 12000) { + if (!text) return ""; + if (text.length <= max) return text; + return `${text.slice(0, max)}\n... (truncated, original length ${text.length})`; +} + +function main() { + const event = readEvent(); + const pr = event.pull_request; + const comment = event.comment; + if (!pr || !comment) { + throw new Error("This script expects a pull_request_review_comment event"); + } + + const filePath = comment.path; + const reviewer = comment.user?.login ?? "unknown"; + const branchSlug = `review/comment-${comment.id}`; + const prNumber = pr.number; + const prTitle = pr.title ?? ""; + const baseRef = pr.base?.ref ?? "main"; + const baseSha = pr.base?.sha ?? ""; + const headRef = pr.head?.ref ?? ""; + const headSha = pr.head?.sha ?? ""; + const commentUrl = comment.html_url ?? ""; + + const fileDiff = clamp(execOrEmpty(`git diff ${baseSha}...${headSha} -- ${filePath}`)); + const fileContent = clamp(execOrEmpty(`git show ${headSha}:${filePath}`), 8000); + const diffHunk = clamp(comment.diff_hunk ?? "", 6000); + + const lines = [ + `# Review Comment Context`, + `- PR: #${prNumber} — ${prTitle}`, + `- Base Branch: ${baseRef}`, + `- Head Branch: ${headRef}`, + `- File: ${filePath}`, + `- Reviewer: ${reviewer}`, + `- Comment URL: ${commentUrl}`, + `- Comment ID: ${comment.id}`, + `- Generated: ${new Date().toISOString()}`, + "", + "## Comment Body", + comment.body?.trim() || "(empty comment)", + "", + "## Diff Hunk", + "```diff", + diffHunk || "(diff unavailable)", + "```", + "", + "## Full File Diff", + "```diff", + fileDiff || "(file diff unavailable)", + "```", + "", + "## Latest File Snapshot", + "```", + fileContent || "(file content unavailable)", + "```", + "", + "## Required Actions", + "1. Resolve the review comment precisely; avoid unrelated edits.", + "2. 
Keep changes minimal and follow repository conventions.", + "3. Leave the working tree ready for a single commit.", + `4. Target branch: ${baseRef}.`, + ]; + + mkdirSync(path.dirname("review-context.md"), { recursive: true }); + writeFileSync("review-context.md", lines.join("\n")); + + const outputEntries = { + branch_name: branchSlug, + base_ref: baseRef, + base_sha: baseSha, + head_ref: headRef, + head_sha: headSha, + comment_id: comment.id, + comment_url: commentUrl, + pr_number: prNumber, + reviewer, + file_path: filePath, + }; + + const outputPath = process.env.GITHUB_OUTPUT; + if (outputPath) { + const buffer = Object.entries(outputEntries) + .map(([key, value]) => `${key}=${value}`) + .join("\n"); + execSync(`cat >> ${outputPath}`, { input: `${buffer}\n`, encoding: "utf8" }); + } +} + +main(); diff --git a/scripts/sync-github-secrets.mjs b/scripts/sync-github-secrets.mjs new file mode 100644 index 0000000..7d17ad5 --- /dev/null +++ b/scripts/sync-github-secrets.mjs @@ -0,0 +1,138 @@ +#!/usr/bin/env node +import { execSync, spawnSync } from "node:child_process"; +import process from "node:process"; + +const defaultSecrets = [ + { name: "NPM_TOKEN", optional: false }, + { name: "OPENCODE_API_KEY", optional: false }, + { name: "OPENCODE_API_URL", optional: true }, + { name: "RELEASE_BASE_REF", optional: true }, +]; + +function printUsage() { + console.log(`Usage: pnpm sync:secrets [--repo owner/repo] [--dry-run] [SECRET ...] + +Sync selected environment variables to GitHub repository secrets via the gh CLI. + +Options: + --repo Override repository target (default: infer from GITHUB_REPOSITORY or git remote origin) + --dry-run Show which secrets would be synced without calling gh + --help Show this message + +Arguments: + SECRET Optional list of env var names to sync. When omitted, the default set (${defaultSecrets + .map((item) => item.name) + .join(", ")}) is used. 
+`);
+}
+
+function parseArgs(argv) {
+  const result = { repo: undefined, dryRun: false, secrets: [] };
+  const args = [...argv];
+  while (args.length) {
+    const arg = args.shift();
+    if (arg === "--repo" && args.length) {
+      result.repo = args.shift();
+    } else if (arg === "--dry-run") {
+      result.dryRun = true;
+    } else if (arg === "--help") {
+      printUsage();
+      process.exit(0);
+    } else if (arg?.startsWith("--")) {
+      console.error(`Unknown option: ${arg}`);
+      printUsage();
+      process.exit(1);
+    } else if (arg) {
+      result.secrets.push(arg);
+    }
+  }
+  return result;
+}
+
+function ensureGhAvailable() {
+  const result = spawnSync("gh", ["--version"], { encoding: "utf8" });
+  if (result.error || result.status !== 0) {
+    throw new Error(
+      "GitHub CLI (gh) is not available. Install it and authenticate before running this script.",
+    );
+  }
+}
+
+function detectRepo(explicitRepo) {
+  if (explicitRepo) return explicitRepo;
+  if (process.env.GITHUB_REPOSITORY) {
+    return process.env.GITHUB_REPOSITORY;
+  }
+  try {
+    const remoteUrl = execSync("git config --get remote.origin.url", { encoding: "utf8" }).trim();
+    const match = /github\.com[:/](?<owner>[\w.-]+)\/(?<repo>[\w.-]+?)(?:\.git)?$/i.exec(remoteUrl);
+    if (match?.groups?.owner && match?.groups?.repo) {
+      const cleanRepo = match.groups.repo.replace(/\.git$/i, "");
+      return `${match.groups.owner}/${cleanRepo}`;
+    }
+  } catch (error) {
+    throw new Error(`Unable to infer repository: ${error.message}`);
+  }
+  throw new Error(
+    "Could not determine GitHub repository. 
Pass --repo owner/repo or set the GITHUB_REPOSITORY env variable.", + ); +} + +function gatherSecrets(customNames) { + if (customNames.length > 0) { + return customNames.map((name) => ({ name, optional: false })); + } + return defaultSecrets; +} + +function syncSecret({ name, value, repo, dryRun }) { + if (dryRun) { + console.log(`[dry-run] gh secret set ${name} --repo ${repo}`); + return; + } + const command = spawnSync("gh", ["secret", "set", name, "--repo", repo, "--body", value], { + encoding: "utf8", + stdio: ["inherit", "inherit", "inherit"], + }); + if (command.status !== 0) { + throw new Error(`gh secret set ${name} failed with exit code ${command.status}`); + } + console.log(`✔ Synced ${name}`); +} + +function validateSecrets(requestedSecrets) { + const prepared = []; + for (const secret of requestedSecrets) { + const value = process.env[secret.name]; + if (!value) { + if (secret.optional) { + console.warn(`Skipping optional secret ${secret.name} (env var not set)`); + continue; + } + throw new Error(`Environment variable ${secret.name} is not set.`); + } + prepared.push({ name: secret.name, value }); + } + if (prepared.length === 0) { + throw new Error("No secrets to sync. 
Set the required environment variables or provide explicit names."); + } + return prepared; +} + +async function main() { + const parsed = parseArgs(process.argv.slice(2)); + ensureGhAvailable(); + const repo = detectRepo(parsed.repo); + const requestedSecrets = gatherSecrets(parsed.secrets); + const resolvedSecrets = validateSecrets(requestedSecrets); + console.log(`Syncing ${resolvedSecrets.length} secret(s) to ${repo}...`); + for (const secret of resolvedSecrets) { + syncSecret({ name: secret.name, value: secret.value, repo, dryRun: parsed.dryRun }); + } + console.log("Done."); +} + +main().catch((error) => { + console.error(`[sync-github-secrets] ${error.message}`); + process.exit(1); +}); diff --git a/spec/branch-protection.md b/spec/branch-protection.md new file mode 100644 index 0000000..b895a1a --- /dev/null +++ b/spec/branch-protection.md @@ -0,0 +1,26 @@ +# main Branch Protection + +## Summary +- Configure branch protection for `main` so merges require pull requests, include successful `lint` + `test` workflows (typecheck runs inside `lint` job), and prevent direct pushes. +- Currently no branch protection exists (`gh api repos/open-hax/codex/branches/main/protection` returns 404 on 2025-11-14 15:30 UTC). + +## Requirements / Definition of Done +1. Enable protection rules via GitHub REST API (or `gh api`) targeting `main` branch. +2. Require pull request reviews before merging (enforce at least 1 approval, disallow bypass via force push/direct push). +3. Require status checks for: + - `lint` job (covers `pnpm lint` + `pnpm typecheck`). + - `test (node-version: 20.x)` job. + - `test (node-version: 22.x)` job. +4. Allow admins to bypass? (Default: include administrators so even admins must follow rules.) +5. Document the rule in `docs/development/ci.md` or similar so contributors know PRs + green checks are mandatory. + +## Implementation Plan +### Phase 1 – Prepare data +- Identify workflow/job names (from `.github/workflows/ci.yml`). 
+- Confirm API payload structure for branch protection (use `required_status_checks` block with contexts). + +### Phase 2 – Apply protection +- Use `gh api --method PUT repos/open-hax/codex/branches/main/protection ...` to set rules: require PRs, require code owners? (N/A) but enforce approvals=1, status checks contexts as above. + +### Phase 3 – Documentation +- Update `docs/development/ci.md` (or README) with short section describing required checks + PR requirement. diff --git a/spec/ci-release-automation.md b/spec/ci-release-automation.md new file mode 100644 index 0000000..f1d3c6c --- /dev/null +++ b/spec/ci-release-automation.md @@ -0,0 +1,39 @@ +# CI + Release Automation Plan + +## Summary +- Expand `.github/workflows/ci.yml:1-59` so testing and linting jobs run on every push (any branch) and every PR, add a dedicated mutation-testing job for PRs to `main`, and gate a release job so it only executes after successful pushes to `main`. +- Introduce a lint workflow powered by Biome (add `@biomejs/biome` + `"lint": "biome check ."` in `package.json:30-38` and a project-level `biome.json` config) so the GitHub Action can run `pnpm lint` deterministically. +- Create an `opencode`-powered release analysis tool (`scripts/detect-release-type.mjs`) that summarizes commits since the last tag, calls `https://api.openai.com/v1/responses` with `model: "opencode/gpt-5-nano"`, and emits structured JSON describing breaking changes + release type so the workflow can pick `major|minor|patch` intelligently. +- Build a release job that (1) runs the analyzer, (2) bumps the version via `pnpm version ` (letting Git create a tag), (3) publishes to npm using `NPM_TOKEN`, and (4) creates a GitHub Release whose notes embed the analyzer’s output. +- Document CI secrets and npm token setup in a new `docs/development/ci.md`, covering how to set `NPM_TOKEN`, `OPENCODE_API_KEY`, and any optional overrides for the analyzer. + +## Requirements / Definition of Done +1. 
CI workflow runs `pnpm install`, `pnpm lint`, `pnpm typecheck`, `pnpm test`, and `pnpm build` on every push/PR; mutation testing (Stryker) runs for PRs into `main` (and can be skipped otherwise). +2. Pushes to `main` trigger an automated release job that depends on test+lint success, determines release type via the analyzer, bumps the semver using `pnpm version x.y.z`, pushes the commit/tag, publishes to npm, and opens a GitHub Release summarizing the changes. +3. Analyzer script must use `opencode/gpt-5-nano`, accept `OPENCODE_API_KEY`, gracefully fall back to `patch` when the LLM call fails, and write machine-readable output (JSON) for subsequent steps. +4. Documentation clearly explains how to configure `NPM_TOKEN`, `OPENCODE_API_KEY`, and other required secrets/variables in CI. +5. Mutations job should surface HTML/JSON artifacts (at least uploaded via `actions/upload-artifact`) for manual review when it fails. + +## Phases +### Phase 1 – Tooling + Package Updates +- Add `@biomejs/biome` dev dependency + `lint` script in `package.json:30-38`. +- Create `biome.json` with project conventions for lint + formatting. +- Author `scripts/detect-release-type.mjs` that: + - Discovers the previous tag (fallback: root commit) and collects `git log --no-merges` plus `git diff --stat` summaries. + - Builds a structured prompt and calls `https://api.openai.com/v1/responses` with `model: "opencode/gpt-5-nano"` using `OPENCODE_API_KEY`. + - Parses the assistant message (JSON block), falls back to `patch` if parsing fails, computes the next semver, and writes `{ releaseType, nextVersion, summary, breakingChanges }` to stdout/file. + +### Phase 2 – Workflow Updates +- Replace `.github/workflows/ci.yml:1-59` triggers with `push` on all branches and `pull_request` on all targets; switch jobs to pnpm; extend with: + - `lint` job calling `pnpm lint` + `pnpm typecheck`. + - `test` job running matrix Node versions with `pnpm test` + `pnpm build`. 
+ - `mutation` job (`if: github.event_name == 'pull_request' && github.base_ref == 'main'`) running `pnpm test:mutation` and uploading Stryker reports. + - `release` job (`if: github.event_name == 'push' && github.ref == 'refs/heads/main'`) that depends on `lint` + `test`, configures git, runs analyzer, bump+tag via `pnpm version`, pushes changes, publishes to npm with `NPM_TOKEN`, and creates GitHub release; feed analyzer output into release body. + +### Phase 3 – Documentation & Instructions +- Add `docs/development/ci.md` describing: + - Required secrets (`NPM_TOKEN`, `OPENCODE_API_KEY`, optional `RELEASE_BASE_REF`). + - How to generate an npm automation token and store it as `NPM_TOKEN`. + - How to supply an Opencode API key for the analyzer, plus troubleshooting tips. + - Overview of workflow behavior (push vs PR vs release) so contributors know when mutation tests run and how automated releases behave. +- Update README or docs index (if needed) to link to the new CI guide. diff --git a/spec/codex-metrics-sse-fix.md b/spec/codex-metrics-sse-fix.md new file mode 100644 index 0000000..bedf4df --- /dev/null +++ b/spec/codex-metrics-sse-fix.md @@ -0,0 +1,42 @@ +# /codex-metrics SSE Response Compliance + +## Summary +- `/codex-metrics` currently emits a single SSE chunk that embeds a full Responses JSON object (`lib/commands/codex-metrics.ts:33-185`). +- OpenAI's Responses streaming contract expects typed SSE events such as `response.created`, `response.output_text.delta`, and `response.completed`. The plugin's chunk lacks a `type` field and therefore fails schema validation inside the CLI (see AI_TypeValidationError in user report). +- We must emit a minimal-but-valid SSE sequence that mirrors the Responses API so that `codex-metrics` can run without hitting the network while still satisfying downstream validators. + +## Existing Issues / PRs +- Related: `gh issue list` entry #6 ("Feature: richer Codex metrics and request inspection commands", opened 2025-11-14). 
This bug fix unblocks the metrics command introduced there. +- No open PRs touching this area (`gh pr list --limit 5` returned none). + +## Key Files / References +| File | Notes | +| --- | --- | +| `lib/commands/codex-metrics.ts:33-185` | Builds `/codex-metrics` response and currently serializes ad-hoc SSE payload via `createSsePayload`. Needs rewrite to emit typed events. | +| `lib/types.ts:158-163` | Defines `SSEEventData` used by `response-handler`. Final event must satisfy this parser (`type` + `response`). | +| `lib/request/response-handler.ts:1-88` | Consumes SSE stream by searching for `response.done`/`response.completed`. Command response must include such an event so `convertSseToJson` keeps working if invoked downstream. | +| `test/codex-metrics-command.test.ts:33-335` | Assumes the SSE chunk is the final JSON payload. Tests must be updated to reflect typed events and to assert on `response.completed.response`. | + +## Requirements / Definition of Done +1. `createStaticResponse()` emits SSE events that conform to the Responses API schema: + - `response.created` event with initial metadata. + - `response.output_text.delta` event carrying the metrics text (single delta is fine). + - `response.completed` event containing the same `response` payload currently produced, including metadata and usage totals. + - Trailing `[DONE]` chunk remains for compatibility. +2. All emitted events include the required fields (`type`, `response_id`, `item_id`, `output_index`, `delta`, etc.) so validators no longer complain. +3. Update tests to parse SSE streams by selecting the `response.completed` event and verifying the embedded `response` object as before. Add new assertions covering the intermediate events (created + delta) so the structure stays correct. +4. Ensure the cached token + metadata calculations remain untouched. +5. Document the new SSE behavior inline (brief comment) for future contributors. 
+ +## Implementation Plan +**Phase 1 – Reshape SSE serialization** +- Build helper that returns an array of SSE event objects for created, delta, completed events. Each event should reuse the existing response payload (completed event) to avoid duplication. +- Update `createStaticResponse()` to stringify each event as its own `data: {...}\n\n` chunk followed by `[DONE]`. + +**Phase 2 – Test updates & validation** +- Update `test/codex-metrics-command.test.ts` helpers to capture the final response via `type === "response.completed"` and to assert presence of preceding `response.created` & `response.output_text.delta` events. +- Add regression test ensuring each event contains required fields (`type`, `response_id`, etc.) to guard against future schema regressions. + +**Phase 3 – Verification** +- Run targeted Vitest suite for the command + any affected modules to ensure green tests. +- Manually inspect SSE payload sample (maybe via new helper) to confirm textual output still matches previous human-readable metrics summary. diff --git a/spec/github-secret-sync.md b/spec/github-secret-sync.md new file mode 100644 index 0000000..75f926e --- /dev/null +++ b/spec/github-secret-sync.md @@ -0,0 +1,21 @@ +# GitHub Secret Sync Script + +## Summary +- Add a CLI helper (Node script) invoked via `pnpm sync:secrets` that pushes locally defined environment variables to GitHub repository secrets using `gh secret set`. +- The script should default to syncing the secrets required by `.github/workflows/ci.yml:3-191` (currently `NPM_TOKEN`, `OPENCODE_API_KEY`, optional `OPENCODE_API_URL`, `RELEASE_BASE_REF`) but allow overriding the list via CLI arguments. +- Documentation (`docs/development/ci.md:22-80`) already explains which secrets are needed; enhance it with instructions for the new helper. + +## References +- `.github/workflows/ci.yml:3-191` – defines required CI secrets for release job. 
+- `docs/development/ci.md:3-80` – describes manual setup steps for `NPM_TOKEN`, `OPENCODE_API_KEY`, and optional overrides.
+- `package.json:31-70` – scripts section; add a new `sync:secrets` entry.
+
+## Requirements / Definition of Done
+1. `scripts/sync-github-secrets.mjs` (or similar) reads env var names (default list above unless CLI args provided), validates each exists locally, and executes `gh secret set <NAME> --repo <owner/repo> --body-stdin` piping the value. Detect repo from `GITHUB_REPOSITORY` env or `git config --get remote.origin.url` fallback; allow overriding via `--repo my-org/my-repo` flag.
+2. Script should fail fast with descriptive errors when:
+   - `gh` CLI is missing.
+   - `gh secret set` exits non-zero.
+   - Required env var is undefined.
+3. Add pnpm script alias `sync:secrets: node scripts/sync-github-secrets.mjs` (with pass-through arguments) for easy invocation.
+4. Update `docs/development/ci.md` with a short “Syncing Secrets with gh” section showing how to run `pnpm sync:secrets -- --repo open-hax/codex NPM_TOKEN OPENCODE_API_KEY ...` and clarifying prerequisites (logged into GitHub CLI, env vars exported locally).
+5. Provide inline comments or usage description inside the script (`--help` output or README snippet) so contributors understand supported flags.
diff --git a/spec/review-response-automation.md b/spec/review-response-automation.md
new file mode 100644
index 0000000..35d26bf
--- /dev/null
+++ b/spec/review-response-automation.md
@@ -0,0 +1,55 @@
+# Review Comment Automation
+
+## Summary
+- Implement an automated workflow triggered by `pull_request_review_comment` creation to run the OpenCode `review-response` agent and produce a branch + PR resolving the feedback.
+- Define the `review-response` agent in `.opencode/agent/review-response.md` using Markdown frontmatter (see OpenCode docs `agents`: https://opencode.ai/docs/agents/ ) with model `opencode/big-pickle` and permissions to edit files + run git commands. 
+- Generate contextual prompt data (review comment, diff hunk, file diff) via a Node helper placed in `scripts/review-response-context.mjs` and hand it to the agent via `opencode run --agent review-response`. +- After the agent applies changes, the workflow must create a branch, commit the results once, push it, and open a PR against the comment’s base branch referencing the original review. +- Provide a `.coderabbit.yaml` tuned to this repository so CodeRabbit reviews align with the automation (docs reference: https://docs.coderabbit.ai/reference/configuration ). + +## Current State +- No `.opencode/agent` directory exists (see repo root listing) and no review-specific agent is defined. +- There is no workflow responding to review comments (only `.github/workflows/ci.yml`). +- No `.coderabbit.yaml` is present in the repository root. + +## Requirements / Definition of Done +1. `.github/workflows/review-response.yml` (or similar) triggers on `pull_request_review_comment` with `types: [created]`, ignores bot comments, checks out the PR head, installs OpenCode CLI, generates context, runs `opencode run --agent review-response --model opencode/big-pickle`, and if changes exist, creates a new branch + commit + PR targeting the base branch. Workflow must grant `contents: write` and `pull-requests: write` plus supply `OPENCODE_API_KEY` from secrets. +2. `scripts/review-response-context.mjs` reads `$GITHUB_EVENT_PATH`, computes file diff (`git diff base...head -- path`), clamps overly long files/diffs, writes `review-context.md`, and exports metadata (e.g., branch slug) via `$GITHUB_OUTPUT` for downstream steps. +3. 
`.opencode/agent/review-response.md` contains concise frontmatter describing a subagent with low temperature, editing/bash access, and instructions telling it to: + - Understand a review comment + diff hunk + - Modify only the touched file(s) + - Run targeted tests if specified + - Produce ready-to-commit changes, leaving branch/commit creation to automation if needed + - Keep prompts short & deterministic +4. `.coderabbit.yaml` exists in repo root with tailored settings (language EN, assertive profile, auto-review on, path filters to skip generated assets, prefer Biome + Vitest tools, knowledge_base referencing docs). Reference official docs as justification. +5. Documentation updated (e.g., `docs/index.md` or new doc) to mention the new workflow, agent, and CodeRabbit config so contributors know how review comments trigger automations and how to configure required secrets. + +## Implementation Plan + +### Phase 1 – Agent & Config Assets +- Create `.opencode/agent/review-response.md` with frontmatter fields: description, mode=subagent, model=opencode/big-pickle, temperature ~0.1, tools enabling bash/write/edit, permission for git/binaries. Content: concise checklist for responding to review comments, referencing branch naming pattern `review/comment-` and requiring single commit. +- Add `.coderabbit.yaml` with: `language: en-US`, `reviews.profile: assertive`, `reviews.auto_review.enabled: true`, enable lint tools `eslint`, `biome`, `gitleaks`, set `knowledge_base.code_guidelines.filePatterns` to scan `docs/**`, and include comment referencing docs. + +### Phase 2 – Workflow + Helper Scripts +- Author `scripts/review-response-context.mjs` to parse event, compute diffs (`git diff base..head -- path` + `git show head:path`), truncate to manageable byte length, and write `review-context.md`. Set outputs for `branch_name`, `base_ref`, `pr_number`, etc. 
+- Add GH workflow `.github/workflows/review-response.yml` with steps: + - Trigger: `pull_request_review_comment` created (skip bots). + - Checkout PR head (fetch-depth 0). + - Setup Node 22. + - Install pnpm + dependencies if needed? (only Node + script). + - Install OpenCode CLI via official install script. + - Run context script; capture outputs. + - Execute `opencode run --agent review-response --model opencode/big-pickle --file review-context.md "Follow the instructions in review-context.md"` with env `OPENCODE_API_KEY` and `GITHUB_TOKEN`. + - If git diff exists, create branch `review/comment-${{ steps.context.outputs.comment_id }}` (append timestamp if collision), commit with message referencing comment + PR, push, and `gh pr create --base base_ref --head branch --title ... --body ...` (GH_TOKEN env). Ensure job gracefully exits if no changes. + +### Phase 3 – Docs & Guidance +- Update `docs/development/ci.md` (or new doc) with a section describing the review-comment automation, required secrets (`OPENCODE_API_KEY`), and branch naming convention. +- Optionally update `README.md` badges/sections to mention `.coderabbit.yaml` and the auto-fix workflow. + +## Definition of Done Checklist +- [ ] Agent file created + linted (Biome) + instructions verified. +- [ ] `.coderabbit.yaml` committed with doc references in comments. +- [ ] Workflow green in `act` or at least `actionlint` passes (include gating?). +- [ ] Scripts added and covered by `pnpm lint` (Biome). +- [ ] Doc updates included. +- [ ] Secrets + env documented for users. 
diff --git a/test/codex-metrics-command.test.ts b/test/codex-metrics-command.test.ts index e53fbea..1499884 100644 --- a/test/codex-metrics-command.test.ts +++ b/test/codex-metrics-command.test.ts @@ -30,20 +30,22 @@ function buildBody(message: string): RequestBody { }; } -async function readCommandPayload(response: Response) { +async function readSseEvents(response: Response) { const raw = await response.text(); - const chunks = raw + return raw .split("\n\n") .map((chunk) => chunk.trim()) - .filter(Boolean); - const dataChunk = chunks.find( - (chunk) => chunk.startsWith("data: ") && !chunk.includes("[DONE]"), - ); - if (!dataChunk) { - throw new Error("No data chunk found in SSE payload"); + .filter((chunk) => chunk.startsWith("data: ") && chunk !== "data: [DONE]") + .map((chunk) => JSON.parse(chunk.replace(/^data: /, ""))); +} + +async function readCommandPayload(response: Response) { + const events = await readSseEvents(response); + const completedEvent = events.find((event) => event.type === "response.completed"); + if (!completedEvent || typeof completedEvent.response !== "object") { + throw new Error("No response.completed event found in SSE payload"); } - const json = dataChunk.replace(/^data: /, ""); - return JSON.parse(json); + return { events, payload: completedEvent.response } as const; } describe("maybeHandleCodexCommand", () => { @@ -61,12 +63,34 @@ describe("maybeHandleCodexCommand", () => { const body = buildBody("/codex-metrics"); const response = maybeHandleCodexCommand(body); expect(response).toBeInstanceOf(Response); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.command).toBe("codex-metrics"); const firstOutput = payload.output?.[0]?.content?.[0]?.text ?? 
""; expect(firstOutput).toContain("Codex Metrics"); }); + it("emits typed SSE events with required metadata", async () => { + const body = buildBody("/codex-metrics"); + const response = maybeHandleCodexCommand(body); + const { events, payload } = await readCommandPayload(response!); + + const created = events.find((event) => event.type === "response.created"); + expect(created?.response?.id).toBe(payload.id); + + const delta = events.find((event) => event.type === "response.output_text.delta"); + expect(delta?.item_id).toBe(payload.output?.[0]?.id); + expect(delta?.delta).toContain("Codex Metrics"); + + const itemAdded = events.find((event) => event.type === "response.output_item.added"); + expect(itemAdded?.item?.id).toBe(payload.output?.[0]?.id); + + const itemDone = events.find((event) => event.type === "response.output_item.done"); + expect(itemDone?.item?.id).toBe(payload.output?.[0]?.id); + + const completed = events.find((event) => event.type === "response.completed"); + expect(completed?.response?.status).toBe("completed"); + }); + it("embeds prompt cache stats when session data exists", async () => { const manager = new SessionManager({ enabled: true }); const conversationBody: RequestBody = { @@ -82,7 +106,7 @@ describe("maybeHandleCodexCommand", () => { } const response = maybeHandleCodexCommand(buildBody("/codex-metrics"), { sessionManager: manager }); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.promptCache.totalSessions).toBeGreaterThanOrEqual(1); expect(payload.metadata.promptCache.recentSessions[0].id).toBe("metrics-session"); }); @@ -91,7 +115,7 @@ describe("maybeHandleCodexCommand", () => { const body = buildBody("/codex-metrics detailed"); const response = maybeHandleCodexCommand(body); expect(response).toBeInstanceOf(Response); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); 
expect(payload.metadata.command).toBe("codex-metrics"); const firstOutput = payload.output?.[0]?.content?.[0]?.text ?? ""; expect(firstOutput).toContain("Codex Metrics"); @@ -101,7 +125,7 @@ describe("maybeHandleCodexCommand", () => { const body = buildBody("/CODEX-METRICS"); const response = maybeHandleCodexCommand(body); expect(response).toBeInstanceOf(Response); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.command).toBe("codex-metrics"); }); @@ -109,7 +133,7 @@ describe("maybeHandleCodexCommand", () => { const body = buildBody(" /codex-metrics "); const response = maybeHandleCodexCommand(body); expect(response).toBeInstanceOf(Response); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.command).toBe("codex-metrics"); }); @@ -231,7 +255,7 @@ describe("maybeHandleCodexCommand", () => { const response = maybeHandleCodexCommand(body); expect(response).toBeInstanceOf(Response); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload).toHaveProperty('id'); expect(payload).toHaveProperty('object', 'response'); expect(payload).toHaveProperty('created'); @@ -257,7 +281,7 @@ describe("maybeHandleCodexCommand", () => { it("estimates tokens correctly for short text", async () => { const body = buildBody("/codex-metrics"); const response = maybeHandleCodexCommand(body); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); // Short text should still have at least 1 token expect(payload.usage.output_tokens).toBeGreaterThanOrEqual(1); }); @@ -266,7 +290,7 @@ describe("maybeHandleCodexCommand", () => { // Mock a longer response by manipulating the format function const body = buildBody("/codex-metrics"); const response = maybeHandleCodexCommand(body); - const payload = 
await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); // Longer text should have more tokens expect(payload.usage.output_tokens).toBeGreaterThan(10); }); @@ -274,7 +298,7 @@ describe("maybeHandleCodexCommand", () => { it("handles missing session manager", async () => { const body = buildBody("/codex-metrics"); const response = maybeHandleCodexCommand(body, {}); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.promptCache.enabled).toBe(false); expect(payload.metadata.promptCache.totalSessions).toBe(0); expect(payload.metadata.promptCache.recentSessions).toEqual([]); @@ -287,14 +311,14 @@ describe("maybeHandleCodexCommand", () => { const body = buildBody("/codex-metrics"); const response = maybeHandleCodexCommand(body, { sessionManager: managerWithoutMetrics }); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.promptCache.enabled).toBe(false); }); it("includes cache warm status in response", async () => { const body = buildBody("/codex-metrics"); const response = maybeHandleCodexCommand(body); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.metadata.cacheWarmStatus).toHaveProperty('codexInstructions'); expect(payload.metadata.cacheWarmStatus).toHaveProperty('opencodePrompt'); }); @@ -304,8 +328,8 @@ describe("maybeHandleCodexCommand", () => { const response1 = maybeHandleCodexCommand(body); const response2 = maybeHandleCodexCommand(body); - const payload1 = await readCommandPayload(response1!); - const payload2 = await readCommandPayload(response2!); + const { payload: payload1 } = await readCommandPayload(response1!); + const { payload: payload2 } = await readCommandPayload(response2!); expect(payload1.id).not.toBe(payload2.id); 
expect(payload1.output[0].id).not.toBe(payload2.output[0].id); @@ -329,7 +353,7 @@ describe("maybeHandleCodexCommand", () => { ], }; const response = maybeHandleCodexCommand(body); - const payload = await readCommandPayload(response!); + const { payload } = await readCommandPayload(response!); expect(payload.model).toBe("gpt-5"); // fallback model }); }); From 350459f75405cb425a0acbee705b4aa35de1874b Mon Sep 17 00:00:00 2001 From: Error Date: Fri, 14 Nov 2025 17:42:58 -0600 Subject: [PATCH 2/5] fix workflow syntax --- .github/workflows/review-response.yml | 10 +++++----- spec/review-response-workflow-fix.md | 26 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 5 deletions(-) create mode 100644 spec/review-response-workflow-fix.md diff --git a/.github/workflows/review-response.yml b/.github/workflows/review-response.yml index 477244a..2ba2b43 100644 --- a/.github/workflows/review-response.yml +++ b/.github/workflows/review-response.yml @@ -87,11 +87,11 @@ jobs: comment_url="${{ steps.context.outputs.comment_url }}" reviewer="${{ steps.context.outputs.reviewer }}" pr_number="${{ steps.context.outputs.pr_number }}" - cat < pr-body.txt -Automated follow-up for ${comment_url} by @${reviewer}. -This branch contains a single commit generated by the review-response agent. -Original PR: #${pr_number}. -EOF + printf "%s\n%s\n%s\n" \ + "Automated follow-up for ${comment_url} by @${reviewer}." \ + "This branch contains a single commit generated by the review-response agent." \ + "Original PR: #${pr_number}." 
\ + > pr-body.txt gh pr create \ --base "${{ steps.context.outputs.base_ref }}" \ --head "${{ steps.branch.outputs.name }}" \ diff --git a/spec/review-response-workflow-fix.md b/spec/review-response-workflow-fix.md new file mode 100644 index 0000000..22e9dbf --- /dev/null +++ b/spec/review-response-workflow-fix.md @@ -0,0 +1,26 @@ +# Review Response Workflow Fix + +## Context +- `.github/workflows/review-response.yml:91` – GitHub Actions reports a YAML syntax error; lines inside the heredoc body are not indented under the `run: |` block, so the workflow parser treats them as YAML and fails. + +## Existing Issues / PRs +- None discovered; manual inspection only. + +## Requirements / Definition of Done +- Workflow parses cleanly (no syntax errors) with heredoc content correctly indented. +- Heredoc body remains unchanged aside from indentation; functionality stays identical. +- Workflow file remains compliant with GitHub Actions syntax and repository conventions. + +## Plan + +### Phase 1 – Confirm Failure Source +1. Inspect `.github/workflows/review-response.yml` around the failing lines to ensure indentation is the root cause (Completed via `read`). + +### Phase 2 – Implement Fix +1. Indent the heredoc content lines (Automated follow-up…, etc.) so they remain inside the shell block. +2. Keep surrounding commands untouched to avoid altering workflow behavior. + +### Phase 3 – Validate +1. Re-read the updated YAML block to ensure indentation is consistent (12 spaces for script body). +2. Optionally run `actionlint` locally if available (not required but recommended) or rely on visual inspection + YAML structure. +3. Summarize fix and advise user on re-running GitHub workflow. 
From f08ed7de4efcd231e85719b4e36d32b36ab6cda8 Mon Sep 17 00:00:00 2001 From: Error Date: Fri, 14 Nov 2025 18:32:12 -0600 Subject: [PATCH 3/5] updated names in documentation --- CONTRIBUTING.md | 2 +- README.md | 10 +++--- config/full-opencode.json | 2 +- config/minimal-opencode.json | 2 +- docs/README.md | 2 +- docs/configuration.md | 8 ++--- docs/development/CONFIG_FLOW.md | 6 ++-- docs/development/TESTING.md | 18 +++++------ docs/getting-started.md | 8 ++--- docs/index.md | 6 ++-- docs/troubleshooting.md | 2 +- index.ts | 2 +- lib/oauth-success.html | 2 +- scripts/test-all-models.sh | 2 +- spec/doc-package-name-scope.md | 55 +++++++++++++++++++++++++++++++++ 15 files changed, 91 insertions(+), 36 deletions(-) create mode 100644 spec/doc-package-name-scope.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9367fdb..bc34868 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing Guidelines -Thank you for your interest in contributing to openhax/codex! +Thank you for your interest in contributing to @openhax/codex! Before submitting contributions, please review these guidelines to ensure all changes maintain compliance with OpenAI's Terms of Service and the project's goals. 
diff --git a/README.md b/README.md index c0b08c5..63cbdc5 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # OpenHax Codex Plugin for Opencode -[![npm version](https://img.shields.io/npm/v/openhax%2Fcodex.svg)](https://www.npmjs.com/package/openhax/codex) +[![npm version](https://img.shields.io/npm/v/%40openhax%2Fcodex.svg)](https://www.npmjs.com/package/@openhax/codex) [![Tests](https://github.com/open-hax/codex/actions/workflows/ci.yml/badge.svg)](https://github.com/open-hax/codex/actions) -[![npm downloads](https://img.shields.io/npm/dm/openhax%2Fcodex.svg)](https://www.npmjs.com/package/openhax/codex) +[![npm downloads](https://img.shields.io/npm/dm/%40openhax%2Fcodex.svg)](https://www.npmjs.com/package/@openhax/codex) This plugin enables opencode to use OpenAI's Codex backend via ChatGPT Plus/Pro OAuth authentication, allowing you to use your ChatGPT subscription instead of OpenAI Platform API credits. @@ -76,7 +76,7 @@ For the complete experience with all reasoning variants matching the official Co { "$schema": "https://opencode.ai/config.json", "plugin": [ - "openhax/codex" + "@openhax/codex" ], "provider": { "openai": { @@ -539,7 +539,7 @@ Apply settings to all models: ```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "model": "openai/gpt-5-codex", "provider": { "openai": { @@ -559,7 +559,7 @@ Create your own named variants in the model selector: ```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "models": { diff --git a/config/full-opencode.json b/config/full-opencode.json index d7cc5c6..dd4ea69 100644 --- a/config/full-opencode.json +++ b/config/full-opencode.json @@ -1,7 +1,7 @@ { "$schema": "https://opencode.ai/config.json", "plugin": [ - "openhax/codex" + "@openhax/codex" ], "provider": { "openai": { diff --git a/config/minimal-opencode.json b/config/minimal-opencode.json index 
975a4a8..6c41e04 100644 --- a/config/minimal-opencode.json +++ b/config/minimal-opencode.json @@ -1,6 +1,6 @@ { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { diff --git a/docs/README.md b/docs/README.md index 5c72d5c..4e91055 100644 --- a/docs/README.md +++ b/docs/README.md @@ -32,4 +32,4 @@ This plugin bridges two different systems with careful engineering: --- -**Quick Links**: [GitHub](https://github.com/open-hax/codex) • [npm](https://www.npmjs.com/package/openhax/codex) • [Issues](https://github.com/open-hax/codex/issues) +**Quick Links**: [GitHub](https://github.com/open-hax/codex) • [npm](https://www.npmjs.com/package/@openhax/codex) • [Issues](https://github.com/open-hax/codex/issues) diff --git a/docs/configuration.md b/docs/configuration.md index 3abf694..74db180 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -7,7 +7,7 @@ Complete reference for configuring the OpenHax Codex Plugin. 
```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -159,7 +159,7 @@ Apply same settings to all models: ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -180,7 +180,7 @@ Different settings for different models: ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -325,7 +325,7 @@ Global config has defaults, project overrides for specific work: **~/.config/opencode/opencode.json** (global): ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { diff --git a/docs/development/CONFIG_FLOW.md b/docs/development/CONFIG_FLOW.md index 34e8d1e..013386b 100644 --- a/docs/development/CONFIG_FLOW.md +++ b/docs/development/CONFIG_FLOW.md @@ -194,7 +194,7 @@ For a given model, options are merged: ### Example 1: Global Options Only ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -212,7 +212,7 @@ For a given model, options are merged: ### Example 2: Per-Model Override ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -248,7 +248,7 @@ For a given model, options are merged: ```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "model": "openai/gpt-5-codex-medium", "provider": { "openai": { diff --git a/docs/development/TESTING.md b/docs/development/TESTING.md index 2cd3f8a..c94ce43 100644 --- a/docs/development/TESTING.md +++ b/docs/development/TESTING.md @@ -9,7 +9,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Config:** ```json { - "plugin": ["openhax/codex"] + "plugin": ["@openhax/codex"] } ``` @@ -40,7 +40,7 @@ Comprehensive testing matrix for all config 
scenarios and backwards compatibilit **Config:** ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -81,7 +81,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Config:** ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -116,7 +116,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Config:** ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "models": { @@ -209,7 +209,7 @@ API receives: "gpt-5-codex" ✅ **Config:** ```json { - "plugin": ["openhax/codex"] + "plugin": ["@openhax/codex"] } ``` @@ -346,7 +346,7 @@ Turn 4: > now delete it **Config:** ```json { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "model": "openai/gpt-5-codex" } ``` @@ -443,12 +443,12 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5-codex ```bash # 1. Clear cache -(cd ~ && rm -rf .cache/opencode/node_modules/openhax/codex) +(cd ~ && rm -rf .cache/opencode/node_modules/@openhax/codex) # 2. 
Use minimal config cat > ~/.config/opencode/opencode.json <<'EOF' { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "model": "openai/gpt-5-codex" } EOF @@ -472,7 +472,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "write hello world to test.txt" # Update config with custom models cat > ~/.config/opencode/opencode.json <<'EOF' { - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "models": { diff --git a/docs/getting-started.md b/docs/getting-started.md index 78fec4d..6a89ea6 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -35,7 +35,7 @@ Add this to `~/.config/opencode/opencode.json`: ```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "provider": { "openai": { "options": { @@ -226,7 +226,7 @@ Just want to get started quickly? ```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "model": "openai/gpt-5-codex" } ``` @@ -283,7 +283,7 @@ When a new version is released, you must manually update: ```bash # Step 1: Clear plugin cache -(cd ~ && sed -i.bak '/"openhax\/codex"/d' .cache/opencode/package.json && rm -rf .cache/opencode/node_modules/openhax/codex) +(cd ~ && sed -i.bak '/"@openhax\/codex"/d' .cache/opencode/package.json && rm -rf .cache/opencode/node_modules/@openhax/codex) # Step 2: Restart OpenCode - it will reinstall the latest version opencode @@ -306,7 +306,7 @@ For plugin development or testing unreleased changes: ```json { - "plugin": ["file:///absolute/path/to/your-fork/openhax/codex/dist"] + "plugin": ["file:///absolute/path/to/your-fork/@openhax/codex/dist"] } ``` diff --git a/docs/index.md b/docs/index.md index 3ddba01..0cc9e71 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,7 +2,7 @@ > Access GPT-5 Codex through your ChatGPT Plus/Pro subscription in OpenCode -[![npm 
version](https://badge.fury.io/js/openhax%2Fcodex.svg)](https://www.npmjs.com/package/openhax/codex) +[![npm version](https://badge.fury.io/js/%40openhax%2Fcodex.svg)](https://www.npmjs.com/package/@openhax/codex) [![Tests](https://github.com/open-hax/codex/actions/workflows/ci.yml/badge.svg)](https://github.com/open-hax/codex/actions) > **Maintained by the Open Hax team.** Follow the project at [github.com/open-hax/codex](https://github.com/open-hax/codex) for updates and contributions. @@ -57,7 +57,7 @@ opencode auth login To get the latest version: ```bash -(cd ~ && sed -i.bak '/"openhax\/codex"/d' .cache/opencode/package.json && rm -rf .cache/opencode/node_modules/openhax/codex) +(cd ~ && sed -i.bak '/"@openhax\/codex"/d' .cache/opencode/package.json && rm -rf .cache/opencode/node_modules/@openhax/codex) opencode # Reinstalls latest ``` @@ -66,7 +66,7 @@ opencode # Reinstalls latest ```json { "$schema": "https://opencode.ai/config.json", - "plugin": ["openhax/codex"], + "plugin": ["@openhax/codex"], "model": "openai/gpt-5-codex" } ``` diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 63976e8..77248bd 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -148,7 +148,7 @@ Items are not persisted when `store` is set to false. 
**Solution:** ```bash # Update plugin -(cd ~ && sed -i.bak '/"openhax\/codex"/d' .cache/opencode/package.json && rm -rf .cache/opencode/node_modules/openhax/codex) +(cd ~ && sed -i.bak '/"@openhax\/codex"/d' .cache/opencode/package.json && rm -rf .cache/opencode/node_modules/@openhax/codex) # Restart OpenCode opencode diff --git a/index.ts b/index.ts index b93b47d..2a19f7e 100644 --- a/index.ts +++ b/index.ts @@ -59,7 +59,7 @@ import type { UserConfig } from "./lib/types.js"; * @example * ```json * { - * "plugin": ["openhax/codex"], + * "plugin": ["@openhax/codex"], * "model": "openai/gpt-5-codex" * } * ``` diff --git a/lib/oauth-success.html b/lib/oauth-success.html index bee9712..61c72a1 100644 --- a/lib/oauth-success.html +++ b/lib/oauth-success.html @@ -548,7 +548,7 @@
-
openhax/codex — OAuth Authentication
+
@openhax/codex — OAuth Authentication
diff --git a/scripts/test-all-models.sh b/scripts/test-all-models.sh index 83d1d49..3cc3c52 100755 --- a/scripts/test-all-models.sh +++ b/scripts/test-all-models.sh @@ -132,7 +132,7 @@ update_config() { cat > "${OPENCODE_JSON}" << 'EOCONFIG' { "$schema": "https://opencode.ai/config.json", - "plugin": ["file:///Users/you/Repos/openhax/codex/dist"], + "plugin": ["file:///Users/you/Repos/@openhax/codex/dist"], "provider": { "openai": { "options": { diff --git a/spec/doc-package-name-scope.md b/spec/doc-package-name-scope.md new file mode 100644 index 0000000..36e7a8d --- /dev/null +++ b/spec/doc-package-name-scope.md @@ -0,0 +1,55 @@ +# Spec: Align Documentation With `@openhax/codex` + +## Context & Problem +Multiple documentation sources (README, guides, config samples, troubleshooting scripts, and inline examples) still refer to the plugin as `openhax/codex`. The published package is scoped as `@openhax/codex` (see `package.json:2`). Users copying the outdated instructions cannot install the package via npm, and cleanup commands targeting `node_modules/openhax/codex` fail because scoped packages live under `node_modules/@openhax/codex`. + +## References (files & line numbers) +- README.md:3,5 (badges), 79 (plugin array), 542 & 562 (config examples) — use `@openhax/codex` in badges/JSON blocks. +- CONTRIBUTING.md:3 — opening sentence should match the scoped name. +- docs/configuration.md:10,162,183,328 — update sample plugin arrays; ensure surrounding text reflects scope. +- docs/getting-started.md:38,229,286,309 — plugin arrays + local `file:///` example should clarify scoped install path. +- docs/README.md:35 — npm link text/URL needs the scoped package. +- docs/index.md:5 (badge), 60 (cleanup script), 69 (plugin array) — keep consistent with scoped install instructions. +- docs/troubleshooting.md:151 — cleanup command must delete `.cache/opencode/node_modules/@openhax/codex` and sed for `"@openhax\/codex"`. 
+- docs/development/TESTING.md:12,43,84,119,212,349,451,475 — repeated plugin config blocks and cleanup snippets. +- docs/development/CONFIG_FLOW.md:197,215,251 — plugin array examples. +- docs/development/TESTING.md & docs/troubleshooting scripts share cleanup logic; coordinate updates to avoid divergent instructions. +- config/minimal-opencode.json:3 and config/full-opencode.json:4 — canonical config artifacts must reference the scoped package. +- config/full-opencode.json plus README configuration sections ensure consistent copying. +- index.ts:62 (JSDoc example) — should showcase scoped name for consistency. +- lib/oauth-success.html:551 — UI title referencing the plugin should mention `@openhax/codex` so OAuth screen matches npm identity. + +## Related Issues / PRs +- Issue #11 "Docs: Fix package name in test/README.md" overlaps with this task; ensure our updates cover README/test references so the issue can close. +- No open PRs currently address this specific rename (checked `gh pr list --search "openhax/codex"`). PR #8 is unrelated to docs; keep changes isolated from workflow automation work. + +## Requirements +1. Replace every documentation/config reference of `openhax/codex` with `@openhax/codex`, including badge URLs (`@` must be URL-encoded) and npm links. +2. Update shell commands manipulating the cache or node_modules path to the scoped structure (`node_modules/@openhax/codex`). +3. Ensure JSON snippets remain valid; remember to escape `@` paths appropriately in sed/regex examples. +4. Keep any existing references that already use the scoped name unchanged (e.g., docs/development/ci.md:88 already correct). +5. After edits, verify markdown and JSON formatting remain intact (no trailing commas, quoting preserved). + +## Definition of Done +- All repo-wide occurrences of `openhax/codex` referring to the package name (docs, configs, inline examples) are updated to `@openhax/codex`. 
+- Badge URLs and npm hyperlinks resolve to `https://www.npmjs.com/package/@openhax/codex`. +- Cleanup scripts and troubleshooting steps correctly reference the scoped module path and sed pattern. +- Inline code examples (README, index.ts, docs) compile/copy without manual fixes. +- Tests/build unaffected (docs-only change), but run `rg "openhax/codex"` to confirm no lingering documentation references remain. + +## Plan (Phases) +**Phase 1 – Audit & Scope Confirmation** +- Re-run `rg "openhax/codex"` after each change chunk to ensure only intentional references remain. +- Validate badge/link formats for scoped package syntax (url-encode `@`). + +**Phase 2 – Update Documentation & Samples** +- Touch README, docs/README, docs/index, docs/getting-started, docs/configuration, docs/development/* guides, troubleshooting instructions, and config JSON artifacts. +- Adjust shell snippets (sed/rm) for `.cache/opencode/node_modules/@openhax/codex` path. +- Update JSDoc example in `index.ts` and OAuth HTML title for consistency. + +**Phase 3 – Verification** +- Run `rg "openhax/codex"` to confirm only intended occurrences remain (e.g., part of strings with `@`). +- Review diff for formatting correctness; no build/test required but ensure JSON remains valid. + +## Change Log +- 2025-11-14: Updated README, CONTRIBUTING, docs (index, getting-started, configuration, troubleshooting, developer guides), config templates, scripts, OAuth HTML, and index.ts to reference `@openhax/codex`; cleanup commands now remove `.cache/opencode/node_modules/@openhax/codex` and local dev examples reference the scoped path. 
From 4ea62a548e9495509377f2b464fb94c6a1242277 Mon Sep 17 00:00:00 2001 From: Error Date: Fri, 14 Nov 2025 19:02:31 -0600 Subject: [PATCH 4/5] fix: default analyzer to Zen endpoint --- docs/development/ci.md | 4 ++-- scripts/detect-release-type.mjs | 2 +- spec/branch-protection.md | 20 +++++++++++++++++--- spec/ci-release-automation.md | 4 ++-- spec/opencode-zen-endpoint.md | 25 +++++++++++++++++++++++++ 5 files changed, 47 insertions(+), 8 deletions(-) create mode 100644 spec/opencode-zen-endpoint.md diff --git a/docs/development/ci.md b/docs/development/ci.md index ab5d94a..d10b27c 100644 --- a/docs/development/ci.md +++ b/docs/development/ci.md @@ -36,14 +36,14 @@ API key for calling Opencode's Responses endpoint with the `opencode/gpt-5-nano` opencode auth token create --label "ci-release" --scopes responses.create # Copy the token and store it as the OPENCODE_API_KEY secret ``` -If you run a self-hosted Opencode endpoint, also add `OPENCODE_API_URL` (optional) to override the default `https://api.openai.com/v1/responses` base URL. +If you run a self-hosted Opencode endpoint, also add `OPENCODE_API_URL` (optional) to override the default `https://opencode.ai/zen/v1/responses` base URL. ### Optional overrides - `RELEASE_BASE_REF`: force the analyzer to diff from a specific tag/commit (useful when backporting release branches). ## Branch protection - `main` requires pull requests for all changes; direct pushes and force pushes are disabled. -- Required status checks: `lint`, `test (node-version: 20.x)`, and `test (node-version: 22.x)` must pass before the merge button unlocks. (Type checking runs inside the `lint` job.) +- Required status checks: `Lint & Typecheck`, `Test (20.x)`, and `Test (22.x)` must pass before the merge button unlocks. These names mirror the workflow job `name` fields, so keep them in sync whenever CI definitions change. (Type checking runs inside the `Lint & Typecheck` job.) 
- No human approvals are required right now—the PR gate exists for automated reviewers and CI visibility. - Branches must be up to date with `main` before merging because strict status checks are enabled. diff --git a/scripts/detect-release-type.mjs b/scripts/detect-release-type.mjs index edc3b6e..653d709 100644 --- a/scripts/detect-release-type.mjs +++ b/scripts/detect-release-type.mjs @@ -129,7 +129,7 @@ async function callOpencodeModel(systemPrompt, userPrompt) { if (!apiKey) { throw new Error("OPENCODE_API_KEY is not configured"); } - const url = process.env.OPENCODE_API_URL || "https://api.openai.com/v1/responses"; + const url = process.env.OPENCODE_API_URL || "https://opencode.ai/zen/v1/responses"; const schema = { name: "release_version", schema: { diff --git a/spec/branch-protection.md b/spec/branch-protection.md index b895a1a..1a16efc 100644 --- a/spec/branch-protection.md +++ b/spec/branch-protection.md @@ -8,9 +8,9 @@ 1. Enable protection rules via GitHub REST API (or `gh api`) targeting `main` branch. 2. Require pull request reviews before merging (enforce at least 1 approval, disallow bypass via force push/direct push). 3. Require status checks for: - - `lint` job (covers `pnpm lint` + `pnpm typecheck`). - - `test (node-version: 20.x)` job. - - `test (node-version: 22.x)` job. + - `Lint & Typecheck` job (covers `pnpm lint` + `pnpm typecheck`). + - `Test (20.x)` job. + - `Test (22.x)` job. 4. Allow admins to bypass? (Default: include administrators so even admins must follow rules.) 5. Document the rule in `docs/development/ci.md` or similar so contributors know PRs + green checks are mandatory. @@ -24,3 +24,17 @@ ### Phase 3 – Documentation - Update `docs/development/ci.md` (or README) with short section describing required checks + PR requirement. + +## Follow-Up: Wrong Job Contexts (2025-11-15) +- Prior to this fix the protection settings required contexts `lint`, `test (node-version: 20.x)`, `test (node-version: 22.x)`. 
+- Actual GitHub check names (from `gh run view 19381469238 --json jobs`) are `Lint & Typecheck`, `Test (20.x)`, `Test (22.x)`. +- Result: branch protection never saw matching checks, so merges into `main` could proceed without real gating. + +### Remediation Steps +1. Update branch protection via `gh api` (PUT) so `required_status_checks.checks` includes: + - `{ context: "Lint & Typecheck" }` + - `{ context: "Test (20.x)" }` + - `{ context: "Test (22.x)" }` +2. Keep `strict: true` and `enforce_admins: true`. +3. Document the exact job names in `docs/development/ci.md` and CONTRIBUTING so maintainers know which checks must stay in sync with workflow `name` fields. +4. Optionally add a CI test (or script) that fails if branch protection contexts drift from workflow job names (e.g., script hitting REST API + parsing `.github/workflows/ci.yml`). diff --git a/spec/ci-release-automation.md b/spec/ci-release-automation.md index f1d3c6c..2d52d93 100644 --- a/spec/ci-release-automation.md +++ b/spec/ci-release-automation.md @@ -3,7 +3,7 @@ ## Summary - Expand `.github/workflows/ci.yml:1-59` so testing and linting jobs run on every push (any branch) and every PR, add a dedicated mutation-testing job for PRs to `main`, and gate a release job so it only executes after successful pushes to `main`. - Introduce a lint workflow powered by Biome (add `@biomejs/biome` + `"lint": "biome check ."` in `package.json:30-38` and a project-level `biome.json` config) so the GitHub Action can run `pnpm lint` deterministically. -- Create an `opencode`-powered release analysis tool (`scripts/detect-release-type.mjs`) that summarizes commits since the last tag, calls `https://api.openai.com/v1/responses` with `model: "opencode/gpt-5-nano"`, and emits structured JSON describing breaking changes + release type so the workflow can pick `major|minor|patch` intelligently. 
+- Create an `opencode`-powered release analysis tool (`scripts/detect-release-type.mjs`) that summarizes commits since the last tag, calls `https://opencode.ai/zen/v1/responses` with `model: "opencode/gpt-5-nano"`, and emits structured JSON describing breaking changes + release type so the workflow can pick `major|minor|patch` intelligently. - Build a release job that (1) runs the analyzer, (2) bumps the version via `pnpm version ` (letting Git create a tag), (3) publishes to npm using `NPM_TOKEN`, and (4) creates a GitHub Release whose notes embed the analyzer’s output. - Document CI secrets and npm token setup in a new `docs/development/ci.md`, covering how to set `NPM_TOKEN`, `OPENCODE_API_KEY`, and any optional overrides for the analyzer. @@ -20,7 +20,7 @@ - Create `biome.json` with project conventions for lint + formatting. - Author `scripts/detect-release-type.mjs` that: - Discovers the previous tag (fallback: root commit) and collects `git log --no-merges` plus `git diff --stat` summaries. - - Builds a structured prompt and calls `https://api.openai.com/v1/responses` with `model: "opencode/gpt-5-nano"` using `OPENCODE_API_KEY`. + - Builds a structured prompt and calls `https://opencode.ai/zen/v1/responses` with `model: "opencode/gpt-5-nano"` using `OPENCODE_API_KEY`. - Parses the assistant message (JSON block), falls back to `patch` if parsing fails, computes the next semver, and writes `{ releaseType, nextVersion, summary, breakingChanges }` to stdout/file. ### Phase 2 – Workflow Updates diff --git a/spec/opencode-zen-endpoint.md b/spec/opencode-zen-endpoint.md new file mode 100644 index 0000000..fcb4b56 --- /dev/null +++ b/spec/opencode-zen-endpoint.md @@ -0,0 +1,25 @@ +# Spec: Default OpenCode Release Analyzer Endpoint + +## Context +Issue #9 reports that `scripts/detect-release-type.mjs` incorrectly defaults to the OpenAI responses endpoint. 
The release analyzer must target the Zen API (`https://opencode.ai/zen/v1/responses`) so that authenticated CI calls reach the managed Opencode service. Current docs (`docs/development/ci.md:33-40`) also describe the wrong default, leading contributors to configure the release workflow incorrectly. + +## References +- Issue: [#9](https://github.com/open-hax/codex/issues/9) +- Workflow docs: `docs/development/ci.md:33-44` +- Analyzer: `scripts/detect-release-type.mjs:127-187` +- Related spec: `spec/ci-release-automation.md` + +## Requirements / Definition of Done +1. `scripts/detect-release-type.mjs` must default `OPENCODE_API_URL` to `https://opencode.ai/zen/v1/responses` when the env var is unset. +2. `docs/development/ci.md` needs updated prose indicating the Zen endpoint is the automatic default, noting that `OPENCODE_API_URL` is optional for overriding the base URL. +3. Confirm no other files reference the old `https://api.openai.com/v1/responses` default; update if discovered (grep before/after). +4. Document the change in this spec (change log) and summarize in the final response. + +## Plan +1. Inspect analyzer script to confirm only the `url` constant needs adjusting (line ~132). Update string and retain env override support. +2. Update CI documentation to describe the Zen default and clarify overriding instructions. +3. Run `rg "api.openai.com/v1/responses"` to ensure no stray references remain. +4. Update this spec with a change log entry. + +## Change Log +- 2025-11-15: Switched analyzer default endpoint to `https://opencode.ai/zen/v1/responses` and updated CI docs to describe the Zen base URL. 
From e91b7a234fc839b65ffe03f8f23e55ca4cd9c154 Mon Sep 17 00:00:00 2001 From: Error Date: Fri, 14 Nov 2025 19:28:02 -0600 Subject: [PATCH 5/5] Fix review-response CLI install --- .github/workflows/review-response.yml | 6 ++++-- spec/review-response-automation.md | 2 +- spec/review-response-cli-fix.md | 13 +++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 spec/review-response-cli-fix.md diff --git a/.github/workflows/review-response.yml b/.github/workflows/review-response.yml index 2ba2b43..c66d531 100644 --- a/.github/workflows/review-response.yml +++ b/.github/workflows/review-response.yml @@ -35,8 +35,10 @@ jobs: - name: Install OpenCode CLI run: | - curl -fsSL https://opencode.ai/install.sh | sh - echo "$HOME/.local/bin" >> "$GITHUB_PATH" + npm install -g opencode-ai + NPM_PREFIX="$(npm config get prefix)" + echo "${NPM_PREFIX}/bin" >> "$GITHUB_PATH" + opencode --version - name: Prepare review context id: context diff --git a/spec/review-response-automation.md b/spec/review-response-automation.md index 35d26bf..adeaece 100644 --- a/spec/review-response-automation.md +++ b/spec/review-response-automation.md @@ -37,7 +37,7 @@ - Checkout PR head (fetch-depth 0). - Setup Node 22. - Install pnpm + dependencies if needed? (only Node + script). - - Install OpenCode CLI via official install script. + - Install OpenCode CLI via `npm install -g opencode-ai` (the CLI binary is `opencode`; add the npm global bin dir to `PATH`). - Run context script; capture outputs. - Execute `opencode run --agent review-response --model opencode/big-pickle --file review-context.md "Follow the instructions in review-context.md"` with env `OPENCODE_API_KEY` and `GITHUB_TOKEN`. - If git diff exists, create branch `review/comment-${{ steps.context.outputs.comment_id }}` (append timestamp if collision), commit with message referencing comment + PR, push, and `gh pr create --base base_ref --head branch --title ... --body ...` (GH_TOKEN env). Ensure job gracefully exits if no changes. 
diff --git a/spec/review-response-cli-fix.md b/spec/review-response-cli-fix.md new file mode 100644 index 0000000..fbcec30 --- /dev/null +++ b/spec/review-response-cli-fix.md @@ -0,0 +1,13 @@ +# Review Response CLI Installation Fix + +## Context +- The review-response workflow (`.github/workflows/review-response.yml:36-40`) installs the OpenCode CLI via `curl -fsSL https://opencode.ai/install.sh | sh`. That install script now returns HTTP 404, leaving the runner without the `opencode` binary and causing downstream steps to fail (`opencode run ...` in lines 52-60 cannot execute). +- User request: "do a global node module install" so that the automation can rely on npm to fetch the CLI instead of a missing shell script. +- Affected documentation: `spec/review-response-automation.md:35-44` still states "Install OpenCode CLI via official install script". + +## Definition of Done +1. Update `.github/workflows/review-response.yml` so the "Install OpenCode CLI" step installs the CLI via a global Node module (`npm install -g opencode-ai`, which provides the `opencode` binary) and guarantees the binary path is added to `$PATH` (`$GITHUB_PATH`). +2. Ensure the workflow still sets up Node 22 first, then installs the CLI, and that the rest of the job uses the same binary. +3. Update `spec/review-response-automation.md` (and any other docs referencing the old install script) to mention the npm global install method. +4. Optionally add a quick sanity check (e.g., `opencode --version`) in the workflow step to surface install issues early. +5. Confirm no other files still reference the defunct install script URL.