diff --git a/.github/rulesets/README.md b/.github/rulesets/README.md new file mode 100644 index 0000000..2a7fc39 --- /dev/null +++ b/.github/rulesets/README.md @@ -0,0 +1,19 @@ +# GitHub Ruleset Snapshots + +These files snapshot the current GitHub repository rulesets as exported via `gh api`. GitHub does not read rules from this directory; updates still need to be applied through the Ruleset UI or API. Keep these files in sync to audit drift. + +## Refresh commands + +``` +GH_TOKEN= gh api repos/open-hax/codex/rulesets/10200441 > .github/rulesets/release.json +GH_TOKEN= gh api repos/open-hax/codex/rulesets/10223971 > .github/rulesets/main.json +``` + +After refreshing, commit changes. CI (`.github/workflows/ruleset-drift.yml`) fetches live rulesets, strips volatile fields (`created_at`, `updated_at`, `bypass_actors`, `current_user_can_bypass`), normalizes with `jq -S .`, and diffs against these snapshots; it fails on structural drift only. + +## Notes + +- `release` ruleset applies to the default branch (currently `dev`); strict required checks enforced. +- `main` ruleset applies to `refs/heads/main`; strict_required_status_checks_policy is false. +- If we rename `dev` again, update the default branch and the `release` ruleset target accordingly, then refresh these snapshots. +- CI drift check already fetches live rulesets and diffs against snapshots. 
diff --git a/.github/rulesets/main.json b/.github/rulesets/main.json new file mode 100644 index 0000000..f0b55f7 --- /dev/null +++ b/.github/rulesets/main.json @@ -0,0 +1,71 @@ +{ + "id": 10223971, + "name": "main", + "target": "branch", + "source_type": "Repository", + "source": "open-hax/codex", + "enforcement": "active", + "conditions": { "ref_name": { "exclude": [], "include": ["refs/heads/main"] } }, + "rules": [ + { "type": "deletion" }, + { "type": "non_fast_forward" }, + { + "type": "required_status_checks", + "parameters": { + "strict_required_status_checks_policy": false, + "do_not_enforce_on_create": false, + "required_status_checks": [ + { "context": "Test (20.x)", "integration_id": 15368 }, + { "context": "Test (22.x)", "integration_id": 15368 }, + { "context": "Lint & Typecheck", "integration_id": 15368 }, + { "context": "CodeRabbit", "integration_id": 347564 } + ] + } + }, + { + "type": "copilot_code_review", + "parameters": { "review_on_push": true, "review_draft_pull_requests": true } + }, + { + "type": "pull_request", + "parameters": { + "required_approving_review_count": 0, + "dismiss_stale_reviews_on_push": false, + "required_reviewers": [], + "require_code_owner_review": false, + "require_last_push_approval": false, + "required_review_thread_resolution": true, + "automatic_copilot_code_review_enabled": true, + "allowed_merge_methods": ["merge", "squash", "rebase"] + } + }, + { "type": "code_quality", "parameters": { "severity": "errors" } }, + { + "type": "code_scanning", + "parameters": { + "code_scanning_tools": [ + { + "tool": "CodeQL", + "security_alerts_threshold": "high_or_higher", + "alerts_threshold": "errors" + } + ] + } + }, + { + "type": "copilot_code_review_analysis_tools", + "parameters": { "tools": [{ "name": "CodeQL" }, { "name": "ESLint" }] } + } + ], + "node_id": "RRS_lACqUmVwb3NpdG9yec5AmajgzgCcAWM", + "created_at": "2025-11-20T12:56:31.538-06:00", + "updated_at": "2025-11-20T13:34:49.695-06:00", + "bypass_actors": [ + { 
"actor_id": null, "actor_type": "OrganizationAdmin", "bypass_mode": "always" } + ], + "current_user_can_bypass": "always", + "_links": { + "self": { "href": "https://api.github.com/repos/open-hax/codex/rulesets/10223971" }, + "html": { "href": "https://github.com/open-hax/codex/rules/10223971" } + } +} diff --git a/.github/rulesets/release.json b/.github/rulesets/release.json new file mode 100644 index 0000000..7135045 --- /dev/null +++ b/.github/rulesets/release.json @@ -0,0 +1,55 @@ +{ + "id": 10200441, + "name": "release", + "target": "branch", + "source_type": "Repository", + "source": "open-hax/codex", + "enforcement": "active", + "conditions": { "ref_name": { "exclude": [], "include": ["~DEFAULT_BRANCH"] } }, + "rules": [ + { "type": "deletion" }, + { "type": "non_fast_forward" }, + { + "type": "copilot_code_review", + "parameters": { "review_on_push": true, "review_draft_pull_requests": false } + }, + { + "type": "code_scanning", + "parameters": { + "code_scanning_tools": [ + { + "tool": "CodeQL", + "security_alerts_threshold": "high_or_higher", + "alerts_threshold": "errors" + } + ] + } + }, + { "type": "code_quality", "parameters": { "severity": "errors" } }, + { + "type": "required_status_checks", + "parameters": { + "strict_required_status_checks_policy": true, + "do_not_enforce_on_create": false, + "required_status_checks": [ + { "context": "Lint & Typecheck", "integration_id": 15368 }, + { "context": "CodeRabbit", "integration_id": 347564 }, + { "context": "Test (20.x)", "integration_id": 15368 }, + { "context": "Test (22.x)", "integration_id": 15368 } + ] + } + } + ], + "node_id": "RRS_lACqUmVwb3NpdG9yec5AmajgzgCbpXk", + "created_at": "2025-11-20T02:14:51.195-06:00", + "updated_at": "2025-11-20T12:58:18.002-06:00", + "bypass_actors": [ + { "actor_id": null, "actor_type": "OrganizationAdmin", "bypass_mode": "always" }, + { "actor_id": 5, "actor_type": "RepositoryRole", "bypass_mode": "always" } + ], + "current_user_can_bypass": "always", + "_links": { 
+ "self": { "href": "https://api.github.com/repos/open-hax/codex/rulesets/10200441" }, + "html": { "href": "https://github.com/open-hax/codex/rules/10200441" } + } +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 28b876c..6d48600 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -107,7 +107,7 @@ jobs: - lint - test if: >- - ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && startsWith(github.event.head_commit.message, 'chore: release v') }} + ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && (startsWith(github.event.head_commit.message, 'chore: release v') || startsWith(github.event.head_commit.message, 'hotfix: release v')) }} permissions: contents: write env: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..664a907 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,32 @@ +name: CodeQL + +on: + push: + branches: [dev, main] + pull_request: + branches: [dev, main] + schedule: + - cron: "24 21 * * 5" + +permissions: + contents: read + security-events: write + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: javascript + + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/dev-release-prep.yml b/.github/workflows/dev-release-prep.yml new file mode 100644 index 0000000..ce08411 --- /dev/null +++ b/.github/workflows/dev-release-prep.yml @@ -0,0 +1,246 @@ +name: Dev Release Prep + +on: + pull_request: + types: + - closed + branches: + - dev + paths-ignore: + - "release/**" + +permissions: + contents: write + pull-requests: write + +concurrency: + group: dev-release + +jobs: + prepare: + if: github.event.pull_request.merged == true && 
!startsWith(github.event.pull_request.head.ref, 'release/') && !startsWith(github.event.pull_request.head.ref, 'review/') + runs-on: ubuntu-latest + env: + OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} + OPENCODE_API_URL: ${{ secrets.OPENCODE_API_URL }} + steps: + - name: Checkout dev + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.base.ref }} + fetch-depth: 0 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.15.0 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: 22.x + cache: pnpm + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Install OpenCode CLI + run: | + npm install -g opencode-ai@latest + NPM_PREFIX="$(npm config get prefix)" + echo "${NPM_PREFIX}/bin" >> "$GITHUB_PATH" + opencode --version + + - name: Validate analyzer secret + run: | + if [ -z "$OPENCODE_API_KEY" ]; then + echo "OPENCODE_API_KEY secret is required to classify releases" >&2 + exit 1 + fi + + - name: Analyze repository for release + id: analyze + run: | + node scripts/detect-release-type.mjs --output release-analysis.json + echo "release_type=$(jq -r '.releaseType' release-analysis.json)" >> "$GITHUB_OUTPUT" + echo "next_version=$(jq -r '.nextVersion' release-analysis.json)" >> "$GITHUB_OUTPUT" + NOTES_DELIM="NOTES_$(date +%s)_$RANDOM" + { + echo "notes<<${NOTES_DELIM}" + jq -r '.releaseNotes' release-analysis.json + echo "${NOTES_DELIM}" + } >> "$GITHUB_OUTPUT" + + - name: Detect hotfix label + id: labels + run: | + HOTFIX=$(jq -r '.pull_request.labels[]?.name' "$GITHUB_EVENT_PATH" | grep -i '^hotfix$' || true) + if [ -n "$HOTFIX" ]; then + echo "hotfix=true" >> "$GITHUB_OUTPUT" + else + echo "hotfix=false" >> "$GITHUB_OUTPUT" + fi + + - name: Prepare release branch name + id: branch + env: + NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} + run: | + if [ -z "$NEXT_VERSION" ]; then + echo "Next version is missing from analyzer output" >&2 + exit 1 + fi + 
BRANCH="release/v$NEXT_VERSION" + if git ls-remote --exit-code --heads origin "$BRANCH" >/dev/null 2>&1; then + SUFFIX=$(date +%Y%m%d%H%M%S) + BRANCH="${BRANCH}-${SUFFIX}" + fi + echo "name=$BRANCH" >> "$GITHUB_OUTPUT" + + - name: Configure git user + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + - name: Bump version on release branch + env: + NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} + PR_NUMBER: ${{ github.event.pull_request.number }} + IS_HOTFIX: ${{ steps.labels.outputs.hotfix }} + RELEASE_BRANCH: ${{ steps.branch.outputs.name }} + run: | + if [ -z "$NEXT_VERSION" ] || [ -z "$RELEASE_BRANCH" ]; then + echo "Next version or release branch is missing from analyzer output" >&2 + exit 1 + fi + git checkout -b "$RELEASE_BRANCH" + pnpm version "$NEXT_VERSION" --no-git-tag-version + git add package.json pnpm-lock.yaml + if [ -n "$PR_NUMBER" ]; then + COMMIT_MSG="chore: release v$NEXT_VERSION (PR #$PR_NUMBER)" + else + COMMIT_MSG="chore: release v$NEXT_VERSION" + fi + if [ "$IS_HOTFIX" = "true" ]; then + git commit -m "$COMMIT_MSG" -m "Labels: hotfix" + else + git commit -m "$COMMIT_MSG" + fi + + - name: Push release branch + env: + RELEASE_BRANCH: ${{ steps.branch.outputs.name }} + run: | + if [ -z "$RELEASE_BRANCH" ]; then + echo "Missing release branch" >&2 + exit 1 + fi + git push origin "$RELEASE_BRANCH" + + - name: Validate PR token + env: + PR_TOKEN: ${{ secrets.PR_AUTOMATION_TOKEN }} + run: | + if [ -z "$PR_TOKEN" ]; then + echo "PR_AUTOMATION_TOKEN is required to open PRs; org policy blocks GITHUB_TOKEN" >&2 + exit 1 + fi + + - name: Open release PR to dev + env: + GH_TOKEN: ${{ secrets.PR_AUTOMATION_TOKEN }} + REPO: ${{ github.repository }} + RELEASE_BRANCH: ${{ steps.branch.outputs.name }} + NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} + HOTFIX: ${{ steps.labels.outputs.hotfix }} + BASE_BRANCH: ${{ github.event.pull_request.base.ref }} + SOURCE_PR: ${{ 
github.event.pull_request.number }} + RELEASE_NOTES: ${{ steps.analyze.outputs.notes }} + run: | + if [ -z "$NEXT_VERSION" ] || [ -z "$RELEASE_BRANCH" ]; then + echo "Missing data to open PR" >&2 + exit 1 + fi + PR_TITLE="chore: release v$NEXT_VERSION" + BODY=$(printf '## Release v%s\n\n- Source PR: #%s\n- Hotfix: %s\n\n### Release notes\n%s\n' "$NEXT_VERSION" "$SOURCE_PR" "$HOTFIX" "$RELEASE_NOTES") + jq -n \ + --arg title "$PR_TITLE" \ + --arg head "$RELEASE_BRANCH" \ + --arg base "$BASE_BRANCH" \ + --arg body "$BODY" \ + '{title:$title, head:$head, base:$base, body:$body}' > /tmp/pr.json + curl -s -X POST \ + -H "Authorization: Bearer $GH_TOKEN" \ + -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/$REPO/pulls" \ + -d @/tmp/pr.json | tee /tmp/pr-response.json + PR_URL=$(jq -r '.html_url' /tmp/pr-response.json) + PR_NUMBER=$(jq -r '.number' /tmp/pr-response.json) + PR_NODE_ID=$(jq -r '.node_id' /tmp/pr-response.json) + if [ -z "$PR_URL" ] || [ "$PR_URL" = "null" ] || [ -z "$PR_NUMBER" ] || [ "$PR_NUMBER" = "null" ] || [ -z "$PR_NODE_ID" ] || [ "$PR_NODE_ID" = "null" ]; then + echo "Failed to create PR" >&2 + cat /tmp/pr-response.json >&2 + exit 1 + fi + echo "pr_url=$PR_URL" >> "$GITHUB_OUTPUT" + gh api graphql -f query='mutation($prId:ID!,$method:PullRequestMergeMethod!){enablePullRequestAutoMerge(input:{pullRequestId:$prId,mergeMethod:$method}){pullRequest{number autoMergeRequest{enabledBy{login}}}}}' -f prId="$PR_NODE_ID" -f method=SQUASH + + - name: Create release tag + env: + NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} + RELEASE_NOTES: ${{ steps.analyze.outputs.notes }} + run: | + if [ -z "$NEXT_VERSION" ]; then + echo "Next version is missing from analyzer output" >&2 + exit 1 + fi + TAG="v$NEXT_VERSION" + printf "%s\n" "$RELEASE_NOTES" > release-notes.md + git tag -a "$TAG" -F release-notes.md + rm -f release-notes.md + + - name: Push release tag + env: + NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} + run: | 
+ if [ -z "$NEXT_VERSION" ]; then + echo "Missing next version for tag push" >&2 + exit 1 + fi + git push origin "v$NEXT_VERSION" + + - name: Open hotfix PR to main + if: steps.labels.outputs.hotfix == 'true' + env: + GH_TOKEN: ${{ secrets.PR_AUTOMATION_TOKEN }} + REPO: ${{ github.repository }} + RELEASE_BRANCH: ${{ steps.branch.outputs.name }} + NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} + RELEASE_NOTES: ${{ steps.analyze.outputs.notes }} + run: | + if [ -z "$RELEASE_BRANCH" ]; then + echo "Release branch name is missing" >&2 + exit 1 + fi + PR_TITLE="hotfix: release v$NEXT_VERSION to main" + BODY=$(printf '## Hotfix release v%s\n\nThis PR promotes the hotfix release branch to main.\n\n### Release notes\n%s\n' "$NEXT_VERSION" "$RELEASE_NOTES") + jq -n \ + --arg title "$PR_TITLE" \ + --arg head "$RELEASE_BRANCH" \ + --arg base "main" \ + --arg body "$BODY" \ + '{title:$title, head:$head, base:$base, body:$body}' > /tmp/hotfix.json + curl -s -X POST \ + -H "Authorization: Bearer $GH_TOKEN" \ + -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/$REPO/pulls" \ + -d @/tmp/hotfix.json | tee /tmp/hotfix-response.json + HOTFIX_NUMBER=$(jq -r '.number' /tmp/hotfix-response.json) + HOTFIX_NODE=$(jq -r '.node_id' /tmp/hotfix-response.json) + if [ -z "$HOTFIX_NUMBER" ] || [ "$HOTFIX_NUMBER" = "null" ] || [ -z "$HOTFIX_NODE" ] || [ "$HOTFIX_NODE" = "null" ]; then + echo "Failed to create hotfix PR" >&2 + cat /tmp/hotfix-response.json >&2 + exit 1 + fi + gh api repos/$REPO/pulls/$HOTFIX_NUMBER/requested_reviewers -f reviewers[]="coderabbitai" + gh api graphql -f query='mutation($prId:ID!,$method:PullRequestMergeMethod!){enablePullRequestAutoMerge(input:{pullRequestId:$prId,mergeMethod:$method}){pullRequest{number autoMergeRequest{enabledBy{login}}}}}' -f prId="$HOTFIX_NODE" -f method=SQUASH diff --git a/.github/workflows/dev-to-main-pr.yml b/.github/workflows/dev-to-main-pr.yml new file mode 100644 index 0000000..17ccb36 --- /dev/null +++ 
b/.github/workflows/dev-to-main-pr.yml @@ -0,0 +1,41 @@ +name: Dev to Main Promotion PR + +on: + push: + branches: + - dev + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +concurrency: + group: dev-to-main-pr + +jobs: + open-promotion-pr: + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ secrets.PR_AUTOMATION_TOKEN }} + steps: + - name: Validate PR automation token + run: | + if [ -z "$GH_TOKEN" ]; then + echo "PR_AUTOMATION_TOKEN is required to open promotion PRs" >&2 + exit 1 + fi + - name: Check for existing dev->main PR + run: | + set -euo pipefail + EXISTING=$(gh pr list --head dev --base main --state open --json number --jq '.[0].number' || true) + if [ -n "$EXISTING" ]; then + echo "Existing dev->main PR: #$EXISTING" + exit 0 + fi + - name: Open dev->main PR + run: | + set -euo pipefail + TITLE="Promote dev to main" + BODY="Automated promotion PR from dev to main after latest release merge." + gh pr create --base main --head dev --title "$TITLE" --body "$BODY" diff --git a/.github/workflows/formatting.yml b/.github/workflows/formatting.yml index f9a081c..bca9b85 100644 --- a/.github/workflows/formatting.yml +++ b/.github/workflows/formatting.yml @@ -3,7 +3,9 @@ name: Auto Formatting on: push: branches: - - "**" + - dev + - main + workflow_dispatch: jobs: format: @@ -11,7 +13,6 @@ jobs: runs-on: ubuntu-latest permissions: contents: write - workflows: write steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/main-merge-guard.yml b/.github/workflows/main-merge-guard.yml index c30734f..273ea9c 100644 --- a/.github/workflows/main-merge-guard.yml +++ b/.github/workflows/main-merge-guard.yml @@ -10,13 +10,13 @@ permissions: contents: read jobs: - enforce-staging-source: + enforce-dev-source: runs-on: ubuntu-latest steps: - - name: Ensure only staging targets main + - name: Ensure only dev targets main run: | - if [ "${GITHUB_HEAD_REF}" != "staging" ]; then - echo "Only staging can merge into main. 
Update your PR to target staging." >&2 + if [ "${GITHUB_HEAD_REF}" != "dev" ]; then + echo "Only dev can merge into main. Update your PR to target dev." >&2 exit 1 fi echo "Branch check passed: ${GITHUB_HEAD_REF} -> ${GITHUB_BASE_REF}" diff --git a/.github/workflows/pr-auto-base.yml b/.github/workflows/pr-auto-base.yml index d4350b2..ab94ae8 100644 --- a/.github/workflows/pr-auto-base.yml +++ b/.github/workflows/pr-auto-base.yml @@ -2,9 +2,10 @@ name: PR Auto Base on: pull_request: - types: [opened] + types: [opened, reopened, synchronize] permissions: + contents: read pull-requests: write jobs: @@ -12,15 +13,27 @@ jobs: runs-on: ubuntu-latest env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} steps: - - name: Retarget PR to staging unless from staging + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Retarget PR to dev unless from dev run: | - if [ "${GITHUB_BASE_REF}" = "staging" ]; then - echo "PR already targets staging."; exit 0 + if [ -z "$PR_NUMBER" ]; then + echo "PR number missing; cannot retarget" >&2 + exit 1 + fi + if [ "${GITHUB_BASE_REF}" = "dev" ]; then + echo "PR already targets dev." + exit 0 fi BRANCH="${GITHUB_HEAD_REF}" - if [ "$BRANCH" = "staging" ]; then - echo "Staging PRs can target main."; exit 0 + if [ "$BRANCH" = "dev" ]; then + echo "Dev PRs can target main." 
+ exit 0 fi - echo "Retargeting PR #${GITHUB_EVENT_PULL_REQUEST_NUMBER} to staging" - gh pr edit "$GITHUB_EVENT_PULL_REQUEST_NUMBER" --base staging + echo "Retargeting PR #$PR_NUMBER to dev" + gh pr edit "$PR_NUMBER" --base dev --repo "$GITHUB_REPOSITORY" diff --git a/.github/workflows/review-response.yml b/.github/workflows/review-response.yml index 6d96570..3b01f2b 100644 --- a/.github/workflows/review-response.yml +++ b/.github/workflows/review-response.yml @@ -1,22 +1,19 @@ name: review-response -env: - REVIEW_RESPONDER_WHITELIST: coderabbitai,riatzukiza - on: pull_request_review_comment: types: [created] - + jobs: auto-review-response: - env: - OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} - if: contains(env.REVIEW_RESPONDER_WHITELIST, github.event.comment.user.login) || github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'COLLABORATOR' - + if: contains(fromJson('["coderabbitai","riatzukiza"]'), github.event.comment.user.login) || github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'COLLABORATOR' runs-on: ubuntu-latest permissions: contents: write pull-requests: write + env: + OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} + GH_TOKEN: ${{ secrets.PR_AUTOMATION_TOKEN }} steps: - name: Checkout PR head uses: actions/checkout@v4 @@ -24,6 +21,7 @@ jobs: fetch-depth: 0 repository: ${{ github.event.pull_request.head.repo.full_name }} ref: ${{ github.event.pull_request.head.ref }} + token: ${{ env.GH_TOKEN }} - name: Verify OpenCode secret run: | @@ -32,6 +30,13 @@ jobs: exit 1 fi + - name: Validate PR automation token + run: | + if [ -z "$GH_TOKEN" ]; then + echo "PR_AUTOMATION_TOKEN is required to open PRs; org policy blocks GITHUB_TOKEN" >&2 + exit 1 + fi + - name: Setup Node.js uses: actions/setup-node@v4 with: @@ -57,7 +62,7 @@ jobs: - name: Run 
review-response agent env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ env.GH_TOKEN }} run: | opencode run \ --agent review-response \ @@ -77,7 +82,7 @@ jobs: - name: Commit and push if: steps.diff.outputs.has_changes == 'true' env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ env.GH_TOKEN }} run: | git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" @@ -88,7 +93,7 @@ jobs: - name: Open pull request if: steps.diff.outputs.has_changes == 'true' env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_TOKEN: ${{ env.GH_TOKEN }} run: | comment_url="${{ steps.context.outputs.comment_url }}" reviewer="${{ steps.context.outputs.reviewer }}" diff --git a/.github/workflows/ruleset-drift.yml b/.github/workflows/ruleset-drift.yml new file mode 100644 index 0000000..f45f871 --- /dev/null +++ b/.github/workflows/ruleset-drift.yml @@ -0,0 +1,44 @@ +name: Ruleset Drift Check + +on: + pull_request: + branches: + - dev + - main + push: + branches: + - dev + - main + workflow_dispatch: + +permissions: + contents: read + +jobs: + ruleset-drift: + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Fetch live rulesets + run: | + mkdir -p /tmp/rulesets + gh api repos/${{ github.repository }}/rulesets/10200441 > /tmp/rulesets/release.json + gh api repos/${{ github.repository }}/rulesets/10223971 > /tmp/rulesets/main.json + + - name: Normalize JSON (strip volatile fields) + run: | + mkdir -p /tmp/rulesets/normalized + strip='del(.created_at, .updated_at, .bypass_actors, .current_user_can_bypass)' + jq -S "$strip" .github/rulesets/release.json > /tmp/rulesets/normalized/release.json + jq -S "$strip" /tmp/rulesets/release.json > /tmp/rulesets/normalized/release.live.json + jq -S "$strip" .github/rulesets/main.json > /tmp/rulesets/normalized/main.json + jq -S "$strip" /tmp/rulesets/main.json > 
/tmp/rulesets/normalized/main.live.json + + - name: Compare snapshots + run: | + diff -u /tmp/rulesets/normalized/release.json /tmp/rulesets/normalized/release.live.json + diff -u /tmp/rulesets/normalized/main.json /tmp/rulesets/normalized/main.live.json diff --git a/.github/workflows/staging-release-prep.yml b/.github/workflows/staging-release-prep.yml deleted file mode 100644 index 4ff41ab..0000000 --- a/.github/workflows/staging-release-prep.yml +++ /dev/null @@ -1,130 +0,0 @@ -name: Staging Release Prep - -on: - pull_request: - types: - - closed - branches: - - staging - -permissions: - contents: write - pull-requests: read - -concurrency: - group: staging-release - -jobs: - prepare: - if: github.event.pull_request.merged == true - runs-on: ubuntu-latest - env: - OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} - OPENCODE_API_URL: ${{ secrets.OPENCODE_API_URL }} - steps: - - name: Checkout staging - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.base.ref }} - fetch-depth: 0 - - - name: Setup pnpm - uses: pnpm/action-setup@v4 - with: - version: 10.15.0 - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 22.x - cache: pnpm - - - name: Install dependencies - run: pnpm install --frozen-lockfile - - - name: Install OpenCode CLI - run: | - npm install -g opencode-ai@latest - NPM_PREFIX="$(npm config get prefix)" - echo "${NPM_PREFIX}/bin" >> "$GITHUB_PATH" - opencode --version - - - name: Validate analyzer secret - run: | - if [ -z "$OPENCODE_API_KEY" ]; then - echo "OPENCODE_API_KEY secret is required to classify releases" >&2 - exit 1 - fi - - - name: Analyze repository for release - id: analyze - run: | - node scripts/detect-release-type.mjs --output release-analysis.json - echo "release_type=$(jq -r '.releaseType' release-analysis.json)" >> "$GITHUB_OUTPUT" - echo "next_version=$(jq -r '.nextVersion' release-analysis.json)" >> "$GITHUB_OUTPUT" - NOTES_DELIM="NOTES_$(date +%s)_$RANDOM" - { - echo 
"notes<<${NOTES_DELIM}" - jq -r '.releaseNotes' release-analysis.json - echo "${NOTES_DELIM}" - } >> "$GITHUB_OUTPUT" - - - name: Detect hotfix label - id: labels - run: | - HOTFIX=$(jq -r '.pull_request.labels[]?.name' "$GITHUB_EVENT_PATH" | grep -i '^hotfix$' || true) - if [ -n "$HOTFIX" ]; then - echo "hotfix=true" >> "$GITHUB_OUTPUT" - else - echo "hotfix=false" >> "$GITHUB_OUTPUT" - fi - - - name: Configure git user - run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - - - name: Bump version on staging - env: - NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} - PR_NUMBER: ${{ github.event.pull_request.number }} - IS_HOTFIX: ${{ steps.labels.outputs.hotfix }} - run: | - if [ -z "$NEXT_VERSION" ]; then - echo "Next version is missing from analyzer output" >&2 - exit 1 - fi - pnpm version "$NEXT_VERSION" --no-git-tag-version - git add package.json pnpm-lock.yaml - if [ -n "$PR_NUMBER" ]; then - COMMIT_MSG="chore: release v$NEXT_VERSION (PR #$PR_NUMBER)" - else - COMMIT_MSG="chore: release v$NEXT_VERSION" - fi - if [ "$IS_HOTFIX" = "true" ]; then - git commit -m "$COMMIT_MSG" -m "Labels: hotfix" - else - git commit -m "$COMMIT_MSG" - fi - printf "%s\n" "${{ steps.analyze.outputs.notes }}" > release-notes.md - git tag -a "v$NEXT_VERSION" -F release-notes.md - - - name: Push staging release commit and tag - env: - NEXT_VERSION: ${{ steps.analyze.outputs.next_version }} - run: | - git push origin HEAD:staging - git push origin "v$NEXT_VERSION" - - - name: Promote hotfix to main - if: steps.labels.outputs.hotfix == 'true' - run: | - git fetch origin main - if git rev-parse --verify main >/dev/null 2>&1; then - git checkout main - else - git checkout -b main origin/main - fi - git merge --ff-only staging - git push origin main - git checkout staging diff --git a/.gitignore b/.gitignore index 3d331cd..6dbc763 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,10 @@ opencode.json tmp 
.codex-cache .nx/ +.obsidian/ .stryker-tmp/ .worktrees/ +.envrc +.env + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bc34868..803b203 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -9,24 +9,28 @@ Before submitting contributions, please review these guidelines to ensure all ch All contributions MUST: ✅ **Maintain TOS Compliance** + - Use only official OAuth authentication methods - Not facilitate violations of OpenAI's Terms of Service - Focus on legitimate personal productivity use cases - Include appropriate user warnings and disclaimers ✅ **Respect OpenAI's Systems** + - No session token scraping or cookie extraction - No bypassing of rate limits or authentication controls - No reverse-engineering of undocumented APIs - Use only officially supported authentication flows ✅ **Proper Use Cases** + - Personal development and coding assistance - Individual productivity enhancements - Terminal-based workflows - Educational purposes ❌ **Prohibited Features** + - Commercial resale or multi-user authentication - Rate limit circumvention techniques - Session token scraping or extraction @@ -43,13 +47,27 @@ All contributions MUST: ## Pull Request Process -1. **Fork the repository** and create a feature branch +1. **Fork the repository** and create a feature branch from `dev` 2. **Write clear commit messages** explaining the "why" not just "what" 3. **Include tests** for new functionality 4. **Update documentation** (README.md, config examples, etc.) 5. **Ensure compliance** with guidelines above 6. **Test thoroughly** with actual ChatGPT Plus/Pro account -7. **Submit PR** with clear description of changes +7. **Submit PR** to `dev` with clear description of changes +8. **Address review comments** - AI will automatically help generate fixes + +### Release Process + +All feature work targets the `dev` branch. 
When your PR merges to `dev`, it automatically: + +- Analyzes changes to determine release type (patch/minor/major) +- Bumps the version in `package.json` +- Creates an annotated git tag with release notes +- Prepares for deployment to `main` + +For hotfixes, add the `hotfix` label to your PR before merging to trigger immediate release. + +See [Release Process Guide](docs/development/RELEASE_PROCESS.md) for complete details. ## Reporting Issues @@ -64,6 +82,7 @@ When reporting issues, please: ### Issue Template Please include: + ``` **Issue Description:** [Clear description of the problem] @@ -94,12 +113,14 @@ Please include: ## Feature Requests We welcome feature requests that: + - Enhance personal productivity - Improve developer experience - Maintain compliance with OpenAI's terms - Align with the project's scope We will decline features that: + - Violate or circumvent OpenAI's Terms of Service - Enable commercial resale or multi-user access - Bypass authentication or rate limiting @@ -110,12 +131,14 @@ We will decline features that: ### Our Standards ✅ **Encouraged:** + - Respectful and constructive communication - Focus on legitimate use cases - Transparency about limitations and compliance - Helping other users with proper usage ❌ **Not Acceptable:** + - Requesting help with TOS violations - Promoting commercial misuse - Hostile or disrespectful behavior @@ -124,6 +147,7 @@ We will decline features that: ## Questions? 
For questions about: + - **Plugin usage:** Open a GitHub issue - **OpenAI's terms:** Contact OpenAI support - **Contributing:** Open a discussion thread diff --git a/eslint.config.mjs b/eslint.config.mjs index 4ad2f9e..aa9a2be 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -91,5 +91,9 @@ export default [ vi: "readonly", }, }, + rules: { + "max-lines-per-function": "off", + "max-lines": "off", + }, }, ]; diff --git a/index.ts b/index.ts index 5e58ce7..99759d3 100644 --- a/index.ts +++ b/index.ts @@ -25,21 +25,21 @@ import type { Plugin, PluginInput } from "@opencode-ai/plugin"; import type { Auth } from "@opencode-ai/sdk"; import { - createAuthorizationFlow, - decodeJWT, - exchangeAuthorizationCode, - REDIRECT_URI, + createAuthorizationFlow, + decodeJWT, + exchangeAuthorizationCode, + REDIRECT_URI, } from "./lib/auth/auth.js"; import { openBrowserUrl } from "./lib/auth/browser.js"; import { startLocalOAuthServer } from "./lib/auth/server.js"; import { getCodexMode, loadPluginConfig } from "./lib/config.js"; import { - AUTH_LABELS, - CODEX_BASE_URL, - DUMMY_API_KEY, - ERROR_MESSAGES, - JWT_CLAIM_PATH, - PROVIDER_ID, + AUTH_LABELS, + CODEX_BASE_URL, + DUMMY_API_KEY, + ERROR_MESSAGES, + JWT_CLAIM_PATH, + PROVIDER_ID, } from "./lib/constants.js"; import { configureLogger, logWarn, logError } from "./lib/logger.js"; import { getCodexInstructions } from "./lib/prompts/codex.js"; @@ -48,7 +48,6 @@ import { createCodexFetcher } from "./lib/request/codex-fetcher.js"; import { SessionManager } from "./lib/session/session-manager.js"; import type { UserConfig } from "./lib/types.js"; - /** * OpenAI Codex OAuth authentication plugin for opencode * @@ -65,108 +64,112 @@ import type { UserConfig } from "./lib/types.js"; * ``` */ export const OpenAIAuthPlugin: Plugin = async ({ client, directory }: PluginInput) => { - configureLogger({ client, directory }); - return { - auth: { - provider: PROVIDER_ID, - /** - * Loader function that configures OAuth authentication and 
request handling - */ - async loader(getAuth: () => Promise, provider: unknown) { - const auth = await getAuth(); - if (auth.type !== "oauth") return {}; - - // Extract ChatGPT account ID from JWT access token - const decoded = decodeJWT(auth.access); - const accountId = decoded?.[JWT_CLAIM_PATH]?.chatgpt_account_id; - if (!accountId) { - logError(ERROR_MESSAGES.NO_ACCOUNT_ID); - return {}; - } + configureLogger({ client, directory }); + setTimeout(() => { + logWarn( + "The OpenAI Codex plugin is intended for personal use with your own ChatGPT Plus/Pro subscription. Ensure your usage complies with OpenAI's Terms of Service.", + ); + }, 5000); + return { + auth: { + provider: PROVIDER_ID, + /** + * Loader function that configures OAuth authentication and request handling + */ + async loader(getAuth: () => Promise, provider: unknown) { + const auth = await getAuth(); + if (auth.type !== "oauth") return {}; - // Extract user configuration (global + per-model options) - const providerConfig = provider as - | { options?: Record; models?: UserConfig["models"] } - | undefined; - const userConfig: UserConfig = { - global: providerConfig?.options || {}, - models: providerConfig?.models || {}, - }; + // Extract ChatGPT account ID from JWT access token + const decoded = decodeJWT(auth.access); + const accountId = decoded?.[JWT_CLAIM_PATH]?.chatgpt_account_id; + if (!accountId) { + logError(ERROR_MESSAGES.NO_ACCOUNT_ID); + return {}; + } - // Load plugin configuration and determine CODEX_MODE - const pluginConfig = loadPluginConfig(); - const codexMode = getCodexMode(pluginConfig); - const promptCachingEnabled = pluginConfig.enablePromptCaching ?? 
true; - if (!promptCachingEnabled) { - logWarn( - "Prompt caching disabled via config; Codex may use more tokens and cache hit diagnostics will be limited.", - ); - } - const sessionManager = new SessionManager({ enabled: promptCachingEnabled }); + // Extract user configuration (global + per-model options) + const providerConfig = provider as + | { options?: Record; models?: UserConfig["models"] } + | undefined; + const userConfig: UserConfig = { + global: providerConfig?.options || {}, + models: providerConfig?.models || {}, + }; - // Warm caches on startup for better first-request performance (non-blocking) - const cachesAlreadyWarm = await areCachesWarm(); - if (!cachesAlreadyWarm) { - try { - await warmCachesOnStartup(); - } catch (error) { - logWarn("Cache warming failed, continuing", { - error: error instanceof Error ? error.message : String(error), - }); - } - } + // Load plugin configuration and determine CODEX_MODE + const pluginConfig = loadPluginConfig(); + const codexMode = getCodexMode(pluginConfig); + const promptCachingEnabled = pluginConfig.enablePromptCaching ?? true; + if (!promptCachingEnabled) { + logWarn( + "Prompt caching disabled via config; Codex may use more tokens and cache hit diagnostics will be limited.", + ); + } + const sessionManager = new SessionManager({ enabled: promptCachingEnabled }); - // Fetch Codex system instructions (cached with ETag for efficiency) - const CODEX_INSTRUCTIONS = await getCodexInstructions(); + // Warm caches on startup for better first-request performance (non-blocking) + const cachesAlreadyWarm = await areCachesWarm(); + if (!cachesAlreadyWarm) { + try { + await warmCachesOnStartup(); + } catch (error) { + logWarn("Cache warming failed, continuing", { + error: error instanceof Error ? 
error.message : String(error), + }); + } + } - const codexFetch = createCodexFetcher({ - getAuth, - client, - accountId, - userConfig, - codexMode, - sessionManager, - codexInstructions: CODEX_INSTRUCTIONS, - pluginConfig, - }); + // Fetch Codex system instructions (cached with ETag for efficiency) + const CODEX_INSTRUCTIONS = await getCodexInstructions(); - return { - apiKey: DUMMY_API_KEY, - baseURL: CODEX_BASE_URL, - fetch: codexFetch, - }; + const codexFetch = createCodexFetcher({ + getAuth, + client, + accountId, + userConfig, + codexMode, + sessionManager, + codexInstructions: CODEX_INSTRUCTIONS, + pluginConfig, + }); - }, - methods: [ - { - label: AUTH_LABELS.OAUTH, - type: "oauth" as const, - authorize: async () => { - const { pkce, state, url } = await createAuthorizationFlow(); - const serverInfo = await startLocalOAuthServer({ state }); - openBrowserUrl(url); - return { - url, - method: "auto" as const, - instructions: AUTH_LABELS.INSTRUCTIONS, - callback: async () => { - const result = await serverInfo.waitForCode(state); - serverInfo.close(); - if (!result) return { type: "failed" as const }; - const tokens = await exchangeAuthorizationCode( - result.code, - pkce.verifier, - REDIRECT_URI, - ); - return tokens?.type === "success" ? 
tokens : ({ type: "failed" } as const); - }, - }; - }, - }, - { label: AUTH_LABELS.API_KEY, type: "api" as const }, - ], - }, - }; + return { + apiKey: DUMMY_API_KEY, + baseURL: CODEX_BASE_URL, + fetch: codexFetch, + }; + }, + methods: [ + { + label: AUTH_LABELS.OAUTH, + type: "oauth" as const, + authorize: async () => { + const { pkce, state, url } = await createAuthorizationFlow(); + const serverInfo = await startLocalOAuthServer({ state }); + openBrowserUrl(url); + return { + url, + method: "auto" as const, + instructions: AUTH_LABELS.INSTRUCTIONS, + callback: async () => { + const result = await serverInfo.waitForCode(state); + serverInfo.close(); + if (!result) return { type: "failed" as const }; + const tokens = await exchangeAuthorizationCode( + result.code, + pkce.verifier, + REDIRECT_URI, + ); + return tokens?.type === "success" ? tokens : ({ type: "failed" } as const); + }, + }; + }, + }, + { label: AUTH_LABELS.API_KEY, type: "api" as const }, + ], + }, + }; }; export default OpenAIAuthPlugin; diff --git a/lib/compaction/codex-compaction.ts b/lib/compaction/codex-compaction.ts index 9a0c884..21682e7 100644 --- a/lib/compaction/codex-compaction.ts +++ b/lib/compaction/codex-compaction.ts @@ -85,15 +85,18 @@ export function serializeConversation( } export function buildCompactionPromptItems(transcript: string): InputItem[] { + const compactionMetadata = { source: "opencode-compaction", opencodeCompaction: true }; const developer: InputItem = { type: "message", role: "developer", content: CODEX_COMPACTION_PROMPT, + metadata: compactionMetadata, }; const user: InputItem = { type: "message", role: "user", content: transcript || "(conversation is empty)", + metadata: compactionMetadata, }; return [developer, user]; } diff --git a/lib/logger.ts b/lib/logger.ts index fa3a111..23d525a 100644 --- a/lib/logger.ts +++ b/lib/logger.ts @@ -126,6 +126,7 @@ export async function flushRollingLogsForTest(): Promise { function emit(level: LogLevel, message: string, extra?: 
Record): void { const sanitizedExtra = sanitizeExtra(extra); + const supportsToast = loggerClient ? hasTuiShowToast(loggerClient) : false; const entry: RollingLogEntry = { timestamp: new Date().toISOString(), service: PLUGIN_NAME, @@ -138,7 +139,9 @@ function emit(level: LogLevel, message: string, extra?: Record) appendRollingLog(entry); } - if (loggerClient?.app?.log) { + const shouldForwardToAppLog = level !== "warn" || !supportsToast; + + if (shouldForwardToAppLog && loggerClient?.app?.log) { void loggerClient.app .log({ body: entry, @@ -151,11 +154,14 @@ function emit(level: LogLevel, message: string, extra?: Record) ); } - if (level === "error") { + if (level === "error" || (level === "warn" && supportsToast)) { notifyToast(level, message, sanitizedExtra); } - logToConsole(level, message, sanitizedExtra); + const shouldLogToConsole = level !== "warn" || !supportsToast; + if (shouldLogToConsole) { + logToConsole(level, message, sanitizedExtra); + } } /** @@ -170,16 +176,17 @@ function emit(level: LogLevel, message: string, extra?: Record) * @param message - The primary text to show in the notification body. * @param extra - Optional metadata to include with the notification payload. */ -function notifyToast(level: LogLevel, message: string, extra?: Record): void { +function notifyToast(level: LogLevel, message: string, _extra?: Record): void { if (!loggerClient?.tui?.showToast) return; const variant = level === "error" ? "error" : "warning"; + const wrappedMessage = wrapToastMessage(`${PLUGIN_NAME}: ${message}`); try { void loggerClient.tui.showToast({ body: { title: level === "error" ? 
`${PLUGIN_NAME} error` : `${PLUGIN_NAME} warning`, - message: `${PLUGIN_NAME}: ${message}`, + message: wrappedMessage, variant, }, }); @@ -188,6 +195,46 @@ function notifyToast(level: LogLevel, message: string, extra?: Record { + if (word.length <= maxWidth) return word; + + const chunks: string[] = []; + for (let index = 0; index < word.length; index += maxWidth) { + chunks.push(word.slice(index, index + maxWidth)); + } + return chunks; + }); + + const lines: string[] = []; + let current = ""; + + for (const word of expandedWords) { + if (current.length === 0) { + current = word; + continue; + } + const nextLength = current.length + 1 + word.length; + if (nextLength <= maxWidth) { + current = `${current} ${word}`; + continue; + } + lines.push(current); + current = word; + } + + if (current) { + lines.push(current); + } + + return lines.join("\n"); +} + /** * Writes a plugin-prefixed log message to the console when the log level is applicable. * diff --git a/lib/prompts/codex.ts b/lib/prompts/codex.ts index 4e6deb4..fe77ca1 100644 --- a/lib/prompts/codex.ts +++ b/lib/prompts/codex.ts @@ -38,6 +38,163 @@ async function getLatestReleaseTag(): Promise { return data.tag_name; } +function readCacheMetadata(cacheMetaPath: string): CacheMetadata | null { + const cachedMetaContent = safeReadFile(cacheMetaPath); + if (!cachedMetaContent) return null; + + try { + return JSON.parse(cachedMetaContent) as CacheMetadata; + } catch { + return null; + } +} + +function loadSessionFromMetadata(metadata: CacheMetadata | null): string | null { + if (!metadata) return null; + const cacheKeyFromMetadata = getCodexCacheKey(metadata.etag ?? undefined, metadata.tag ?? 
undefined); + const sessionFromMetadata = codexInstructionsCache.get(cacheKeyFromMetadata); + if (!sessionFromMetadata) return null; + + cacheSessionEntry(sessionFromMetadata.data, sessionFromMetadata.etag, sessionFromMetadata.tag); + return sessionFromMetadata.data; +} + +function cacheIsFresh(cachedTimestamp: number | null, cacheFileExists: boolean): boolean { + return Boolean(cachedTimestamp && Date.now() - cachedTimestamp < CACHE_TTL_MS && cacheFileExists); +} + +function writeCacheMetadata(cacheMetaPath: string, metadata: CacheMetadata): void { + safeWriteFile(cacheMetaPath, JSON.stringify(metadata)); +} + +function readCachedInstructions( + cacheFilePath: string, + etag?: string | undefined, + tag?: string | undefined, +): string | null { + const fileContent = safeReadFile(cacheFilePath); + if (!fileContent) { + logWarn("Cached Codex instructions missing or empty; skipping session cache"); + return null; + } + + cacheSessionEntry(fileContent, etag, tag); + return fileContent; +} + +function loadBundledInstructions(): string { + let bundledContent: string; + try { + bundledContent = readFileSync(join(__dirname, "codex-instructions.md"), "utf8"); + } catch (error) { + logError("Failed to load bundled instructions", { error }); + throw new Error("Cannot load bundled Codex instructions; installation may be corrupted"); + } + cacheSessionEntry(bundledContent, undefined, undefined); + return bundledContent; +} + +async function fetchInstructionsFromGithub( + url: string, + cacheFilePath: string, + cacheMetaPath: string, + cachedETag: string | null, + latestTag: string, + cacheFileExists: boolean, +): Promise { + const headers: Record = {}; + if (cachedETag) { + headers["If-None-Match"] = cachedETag; + } + + const response = await fetch(url, { headers }); + + if (response.status === 304 && cacheFileExists) { + const cachedContent = readCachedInstructions(cacheFilePath, cachedETag || undefined, latestTag); + if (cachedContent) { + writeCacheMetadata(cacheMetaPath, { 
+ etag: cachedETag || undefined, + tag: latestTag, + lastChecked: Date.now(), + url, + }); + return cachedContent; + } + throw new Error("Cached Codex instructions were unavailable after 304 response"); + } + + if (!response.ok) { + throw new Error(`HTTP ${response.status} fetching ${url}`); + } + + const instructions = await response.text(); + const newETag = response.headers.get("etag"); + + safeWriteFile(cacheFilePath, instructions); + writeCacheMetadata(cacheMetaPath, { + etag: newETag || undefined, + tag: latestTag, + lastChecked: Date.now(), + url, + }); + + cacheSessionEntry(instructions, newETag || undefined, latestTag); + return instructions; +} + +async function fetchInstructionsWithFallback( + url: string, + options: { + cacheFilePath: string; + cacheMetaPath: string; + cacheFileExists: boolean; + effectiveEtag: string | null; + latestTag: string; + cachedETag: string | null; + cachedTag: string | null; + }, +): Promise { + try { + return await fetchInstructionsFromGithub( + url, + options.cacheFilePath, + options.cacheMetaPath, + options.effectiveEtag, + options.latestTag, + options.cacheFileExists, + ); + } catch (error) { + const err = error as Error; + logError("Failed to fetch instructions from GitHub", { error: err.message }); + + const fallbackMetadata: CacheMetadata = { + etag: options.effectiveEtag || options.cachedETag || undefined, + tag: options.cachedTag || options.latestTag, + lastChecked: Date.now(), + url, + }; + + if (options.cacheFileExists) { + logWarn("Using cached instructions due to fetch failure"); + const cachedContent = readCachedInstructions( + options.cacheFilePath, + options.effectiveEtag || options.cachedETag || undefined, + options.cachedTag || undefined, + ); + if (cachedContent) { + writeCacheMetadata(options.cacheMetaPath, fallbackMetadata); + return cachedContent; + } + logWarn("Cached instructions unavailable; falling back to bundled instructions"); + } + + logWarn("Falling back to bundled instructions"); + const 
bundledInstructions = loadBundledInstructions(); + writeCacheMetadata(options.cacheMetaPath, fallbackMetadata); + return bundledInstructions; + } +} + /** * Fetch Codex instructions from GitHub with ETag-based caching * Uses HTTP conditional requests to efficiently check for updates @@ -54,115 +211,76 @@ export async function getCodexInstructions(): Promise { } recordCacheMiss("codexInstructions"); - let cachedETag: string | null = null; - let cachedTag: string | null = null; - let cachedTimestamp: number | null = null; - const cacheMetaPath = getOpenCodePath("cache", CACHE_FILES.CODEX_INSTRUCTIONS_META); const cacheFilePath = getOpenCodePath("cache", CACHE_FILES.CODEX_INSTRUCTIONS); - const cachedMetaContent = safeReadFile(cacheMetaPath); - if (cachedMetaContent) { - const metadata = JSON.parse(cachedMetaContent) as CacheMetadata; - cachedETag = metadata.etag || null; - cachedTag = metadata.tag; - cachedTimestamp = metadata.lastChecked; - } + const metadata = readCacheMetadata(cacheMetaPath); + const cachedETag = metadata?.etag || null; + const cachedTag = metadata?.tag || null; + const cachedTimestamp = metadata?.lastChecked || null; - const cacheKeyFromMetadata = getCodexCacheKey(cachedETag ?? undefined, cachedTag ?? 
undefined); - const sessionFromMetadata = codexInstructionsCache.get(cacheKeyFromMetadata); + const sessionFromMetadata = loadSessionFromMetadata(metadata); if (sessionFromMetadata) { - cacheSessionEntry(sessionFromMetadata.data, sessionFromMetadata.etag, sessionFromMetadata.tag); - return sessionFromMetadata.data; + return sessionFromMetadata; } const cacheFileExists = fileExistsAndNotEmpty(cacheFilePath); - const isCacheFresh = Boolean( - cachedTimestamp && Date.now() - cachedTimestamp < CACHE_TTL_MS && cacheFileExists, - ); - - if (isCacheFresh) { - const fileContent = safeReadFile(cacheFilePath) || ""; - cacheSessionEntry(fileContent, cachedETag || undefined, cachedTag || undefined); - return fileContent; + if (cacheIsFresh(cachedTimestamp, cacheFileExists)) { + const cachedContent = readCachedInstructions( + cacheFilePath, + cachedETag || undefined, + cachedTag || undefined, + ); + if (cachedContent) { + return cachedContent; + } + logWarn("Cached Codex instructions were empty; attempting to refetch"); } let latestTag: string | undefined; try { latestTag = await getLatestReleaseTag(); } catch (error) { - // If we can't get the latest tag, fall back to cache or bundled version - logWarn("Failed to get latest release tag, falling back to cache/bundled", { error }); - // Fall back to bundled instructions - const bundledContent = readFileSync(join(__dirname, "codex-instructions.md"), "utf8"); - cacheSessionEntry(bundledContent, undefined, undefined); - return bundledContent; + logWarn("Failed to get latest release tag; falling back to existing cache or bundled copy", { + error, + }); + if (cacheFileExists) { + const cachedContent = readCachedInstructions( + cacheFilePath, + cachedETag || undefined, + cachedTag || undefined, + ); + if (cachedContent) { + return cachedContent; + } + logWarn("Cached instructions unavailable; falling back to bundled copy"); + } + return loadBundledInstructions(); + } + + if (!latestTag) { + return loadBundledInstructions(); } - const 
cacheKeyForLatest = getCodexCacheKey(cachedETag ?? undefined, latestTag); - const sessionForLatest = codexInstructionsCache.get(cacheKeyForLatest); + const resolvedTag = latestTag as string; + const sessionForLatest = codexInstructionsCache.get(getCodexCacheKey(cachedETag ?? undefined, resolvedTag)); if (sessionForLatest) { cacheSessionEntry(sessionForLatest.data, sessionForLatest.etag, sessionForLatest.tag); return sessionForLatest.data; } - if (cachedTag !== latestTag) { - cachedETag = null; // Force re-fetch when tag changes - } - - const CODEX_INSTRUCTIONS_URL = `https://raw.githubusercontent.com/openai/codex/${latestTag}/codex-rs/core/gpt_5_codex_prompt.md`; - - const headers: Record = {}; - if (cachedETag) { - headers["If-None-Match"] = cachedETag; - } - - try { - const response = await fetch(CODEX_INSTRUCTIONS_URL, { headers }); - - if (response.status === 304 && cacheFileExists) { - const fileContent = safeReadFile(cacheFilePath) || ""; - cacheSessionEntry(fileContent, cachedETag || undefined, latestTag || undefined); - return fileContent; - } - - if (response.ok) { - const instructions = await response.text(); - const newETag = response.headers.get("etag"); - - // Save to file cache - safeWriteFile(cacheFilePath, instructions); - safeWriteFile( - cacheMetaPath, - JSON.stringify({ - etag: newETag || undefined, - tag: latestTag, - lastChecked: Date.now(), - url: CODEX_INSTRUCTIONS_URL, - } satisfies CacheMetadata), - ); - - cacheSessionEntry(instructions, newETag || undefined, latestTag); - return instructions; - } - - throw new Error(`HTTP ${response.status}`); - } catch (error) { - const err = error as Error; - logError("Failed to fetch instructions from GitHub", { error: err.message }); - - if (cacheFileExists) { - logError("Using cached instructions due to fetch failure"); - const fileContent = safeReadFile(cacheFilePath) || ""; - cacheSessionEntry(fileContent, cachedETag || undefined, cachedTag || undefined); - return fileContent; - } - - 
logError("Falling back to bundled instructions"); - const bundledContent = readFileSync(join(__dirname, "codex-instructions.md"), "utf8"); - cacheSessionEntry(bundledContent, undefined, undefined); - return bundledContent; - } + const effectiveEtag = cachedTag === resolvedTag ? cachedETag : null; + const CODEX_INSTRUCTIONS_URL = `https://raw.githubusercontent.com/openai/codex/${resolvedTag}/codex-rs/core/gpt_5_codex_prompt.md`; + + return fetchInstructionsWithFallback(CODEX_INSTRUCTIONS_URL, { + cacheFilePath, + cacheMetaPath, + cacheFileExists, + effectiveEtag, + latestTag: resolvedTag, + cachedETag, + cachedTag, + }); } /** diff --git a/lib/prompts/opencode-codex.ts b/lib/prompts/opencode-codex.ts index 9812a34..26f7183 100644 --- a/lib/prompts/opencode-codex.ts +++ b/lib/prompts/opencode-codex.ts @@ -84,6 +84,102 @@ function validateCacheFormat(cachedMeta: OpenCodeCacheMeta | null): boolean { return hasValidStructure; } +async function readCachedPrompt( + cacheFilePath: string, + cacheMetaPath: string, +): Promise<{ content: string | null; meta: OpenCodeCacheMeta | null }> { + let cachedContent: string | null = null; + let cachedMeta: OpenCodeCacheMeta | null = null; + + try { + cachedContent = await readFile(cacheFilePath, "utf-8"); + const metaContent = await readFile(cacheMetaPath, "utf-8"); + cachedMeta = JSON.parse(metaContent); + } catch (error) { + const err = error as Error & { code?: string }; + if (err.code !== "ENOENT") { + logError("Failed to read OpenCode prompt cache", { error: err.message }); + } + } + + return { content: cachedContent, meta: cachedMeta }; +} + +function cacheIsFresh(cachedMeta: OpenCodeCacheMeta | null, cachedContent: string | null): boolean { + return Boolean( + cachedMeta?.lastChecked && Date.now() - cachedMeta.lastChecked < CACHE_TTL_MS && cachedContent, + ); +} + +function updateSessionCache(content: string, etag?: string | null): void { + openCodePromptCache.set("main", { data: content, etag: etag || undefined }); +} + 
+async function writeMeta(cacheMetaPath: string, meta: OpenCodeCacheMeta): Promise { + await writeFile(cacheMetaPath, JSON.stringify(meta, null, 2), "utf-8"); +} + +async function writeCacheFiles( + cacheFilePath: string, + cacheMetaPath: string, + content: string, + meta: OpenCodeCacheMeta, +): Promise { + await writeFile(cacheFilePath, content, "utf-8"); + await writeMeta(cacheMetaPath, meta); +} + +async function fetchPromptFromUrl( + url: string, + cacheFilePath: string, + cacheMetaPath: string, + cachedContent: string | null, + cachedMeta: OpenCodeCacheMeta | null, +): Promise<{ content: string } | { error: Error }> { + const headers: Record = {}; + if (cachedMeta?.etag && (!cachedMeta.sourceUrl || cachedMeta.sourceUrl === url)) { + headers["If-None-Match"] = cachedMeta.etag; + } + + try { + const response = await fetch(url, { headers }); + + if (response.status === 304 && cachedContent) { + const updatedMeta: OpenCodeCacheMeta = { + etag: cachedMeta?.etag || "", + sourceUrl: cachedMeta?.sourceUrl || url, + lastFetch: cachedMeta?.lastFetch, + lastChecked: Date.now(), + url: cachedMeta?.url, + }; + await writeMeta(cacheMetaPath, updatedMeta); + updateSessionCache(cachedContent, updatedMeta.etag); + return { content: cachedContent }; + } + + if (response.ok) { + const content = await response.text(); + const etag = response.headers.get("etag") || ""; + + const meta: OpenCodeCacheMeta = { + etag, + sourceUrl: url, + lastFetch: new Date().toISOString(), // Keep for backwards compat + lastChecked: Date.now(), + }; + await writeCacheFiles(cacheFilePath, cacheMetaPath, content, meta); + updateSessionCache(content, etag); + + return { content }; + } + + return { error: new Error(`HTTP ${response.status} from ${url}`) }; + } catch (error) { + const err = error as Error; + return { error: new Error(`Failed to fetch ${url}: ${err.message}`) }; + } +} + /** * Fetch OpenCode's codex.txt prompt with ETag-based caching and conflict resolution * Uses HTTP conditional requests 
to efficiently check for updates @@ -110,23 +206,10 @@ export async function getOpenCodeCodexPrompt(): Promise { // Check for and migrate legacy cache files only when session cache misses await migrateLegacyCache(); - // Try to load cached content and metadata - let cachedContent: string | null = null; - let cachedMeta: OpenCodeCacheMeta | null = null; - - try { - cachedContent = await readFile(cacheFilePath, "utf-8"); - const metaContent = await readFile(cacheMetaPath, "utf-8"); - cachedMeta = JSON.parse(metaContent); - } catch (error) { - // Cache doesn't exist or is invalid, will fetch fresh - const err = error as Error & { code?: string }; - if (err.code !== "ENOENT") { - logError("Failed to read OpenCode prompt cache", { error: err.message }); - } - } + const { content: cachedContent, meta: cachedMeta } = await readCachedPrompt(cacheFilePath, cacheMetaPath); + let usableContent = cachedContent; + let usableMeta = cachedMeta; - // Validate cache format and handle conflicts if (cachedMeta && !validateCacheFormat(cachedMeta)) { logWarn("Detected incompatible cache format. 
Creating fresh cache for @openhax/codex...", { cacheSource: cachedMeta.url || "unknown", @@ -134,95 +217,42 @@ export async function getOpenCodeCodexPrompt(): Promise { }); // Reset cache variables to force fresh fetch - cachedContent = null; - cachedMeta = null; + usableContent = null; + usableMeta = null; } // Rate limit protection: If cache is less than 15 minutes old and valid, use it - if (cachedMeta?.lastChecked && Date.now() - cachedMeta.lastChecked < CACHE_TTL_MS && cachedContent) { - // Store in session cache for faster subsequent access - openCodePromptCache.set("main", { data: cachedContent, etag: cachedMeta?.etag || undefined }); - return cachedContent; + if (cacheIsFresh(usableMeta, usableContent)) { + updateSessionCache(usableContent as string, usableMeta?.etag); + return usableContent as string; } // Fetch from GitHub with conditional requests and fallbacks let lastError: Error | undefined; for (const url of OPENCODE_CODEX_URLS) { - const headers: Record = {}; - if (cachedMeta?.etag && (!cachedMeta.sourceUrl || cachedMeta.sourceUrl === url)) { - headers["If-None-Match"] = cachedMeta.etag; - } - - try { - const response = await fetch(url, { headers }); - - // 304 Not Modified - cache is still valid - if (response.status === 304 && cachedContent) { - const updatedMeta: OpenCodeCacheMeta = { - etag: cachedMeta?.etag || "", - sourceUrl: cachedMeta?.sourceUrl || url, - lastFetch: cachedMeta?.lastFetch, - lastChecked: Date.now(), - url: cachedMeta?.url, - }; - await writeFile(cacheMetaPath, JSON.stringify(updatedMeta, null, 2), "utf-8"); - - openCodePromptCache.set("main", { - data: cachedContent, - etag: updatedMeta.etag || undefined, - }); - return cachedContent; - } - - // 200 OK - new content available - if (response.ok) { - const content = await response.text(); - const etag = response.headers.get("etag") || ""; - - await writeFile(cacheFilePath, content, "utf-8"); - await writeFile( - cacheMetaPath, - JSON.stringify( - { - etag, - sourceUrl: url, - 
lastFetch: new Date().toISOString(), // Keep for backwards compat - lastChecked: Date.now(), - } satisfies OpenCodeCacheMeta, - null, - 2, - ), - "utf-8", - ); - - openCodePromptCache.set("main", { data: content, etag }); - - return content; - } - - lastError = new Error(`HTTP ${response.status} from ${url}`); - } catch (error) { - const err = error as Error; - lastError = new Error(`Failed to fetch ${url}: ${err.message}`); + const result = await fetchPromptFromUrl(url, cacheFilePath, cacheMetaPath, usableContent, usableMeta); + if ("content" in result) { + return result.content; } + lastError = result.error; } if (lastError) { logError("Failed to fetch OpenCode codex.txt from GitHub", { error: lastError.message }); } - if (cachedContent) { + if (usableContent) { const updatedMeta: OpenCodeCacheMeta = { - etag: cachedMeta?.etag || "", - sourceUrl: cachedMeta?.sourceUrl, - lastFetch: cachedMeta?.lastFetch, + etag: usableMeta?.etag || "", + sourceUrl: usableMeta?.sourceUrl, + lastFetch: usableMeta?.lastFetch, lastChecked: Date.now(), - url: cachedMeta?.url, + url: usableMeta?.url, }; - await writeFile(cacheMetaPath, JSON.stringify(updatedMeta, null, 2), "utf-8"); + await writeMeta(cacheMetaPath, updatedMeta); - openCodePromptCache.set("main", { data: cachedContent, etag: updatedMeta.etag || undefined }); - return cachedContent; + updateSessionCache(usableContent, updatedMeta.etag); + return usableContent; } throw new Error( diff --git a/lib/request/compaction-helpers.ts b/lib/request/compaction-helpers.ts new file mode 100644 index 0000000..f1c9810 --- /dev/null +++ b/lib/request/compaction-helpers.ts @@ -0,0 +1,107 @@ +/* eslint-disable no-param-reassign */ +import { + approximateTokenCount, + buildCompactionPromptItems, + collectSystemMessages, + serializeConversation, +} from "../compaction/codex-compaction.js"; +import type { CompactionDecision } from "../compaction/compaction-executor.js"; +import { filterInput } from "./input-filters.js"; +import type { 
InputItem, RequestBody } from "../types.js"; +import { countConversationTurns } from "../utils/input-item-utils.js"; + +export interface CompactionSettings { + enabled: boolean; + autoLimitTokens?: number; + autoMinMessages?: number; +} + +export interface CompactionOptions { + settings: CompactionSettings; + commandText: string | null; + originalInput: InputItem[]; + preserveIds?: boolean; +} + +/** + * Drop only the latest user message (e.g., a compaction command) while preserving any later assistant/tool items. + */ +function removeLastUserMessage(items: InputItem[]): InputItem[] { + for (let index = items.length - 1; index >= 0; index -= 1) { + if (items[index]?.role === "user") { + return [...items.slice(0, index), ...items.slice(index + 1)]; + } + } + return items; +} + +function maybeBuildCompactionPrompt( + originalInput: InputItem[], + commandText: string | null, + settings: CompactionSettings, +): { items: InputItem[]; decision: CompactionDecision } | null { + if (!settings.enabled) { + return null; + } + const conversationSource = commandText ? removeLastUserMessage(originalInput) : originalInput; + const turnCount = countConversationTurns(conversationSource); + let trigger: "command" | "auto" | null = null; + let reason: string | undefined; + let approxTokens: number | undefined; + + if (commandText) { + trigger = "command"; + } else if (settings.autoLimitTokens && settings.autoLimitTokens > 0) { + approxTokens = approximateTokenCount(conversationSource); + const minMessages = settings.autoMinMessages ?? 
8; + if (approxTokens >= settings.autoLimitTokens && turnCount >= minMessages) { + trigger = "auto"; + reason = `~${approxTokens} tokens >= limit ${settings.autoLimitTokens}`; + } + } + + if (!trigger) { + return null; + } + + const serialization = serializeConversation(conversationSource); + const promptItems = buildCompactionPromptItems(serialization.transcript); + + return { + items: promptItems, + decision: { + mode: trigger, + reason, + approxTokens, + preservedSystem: collectSystemMessages(originalInput), + serialization, + }, + }; +} + +export function applyCompactionIfNeeded( + body: RequestBody, + compactionOptions?: CompactionOptions, +): CompactionDecision | undefined { + if (!compactionOptions?.settings.enabled) { + return undefined; + } + + const compactionBuild = maybeBuildCompactionPrompt( + compactionOptions.originalInput, + compactionOptions.commandText, + compactionOptions.settings, + ); + + if (!compactionBuild) { + return undefined; + } + + const preserveIds = compactionOptions.preserveIds ?? false; + body.input = filterInput(compactionBuild.items, { preserveIds, preserveMetadata: true }); + delete (body as any).tools; + delete (body as any).tool_choice; + delete (body as any).parallel_tool_calls; + + return compactionBuild.decision; +} diff --git a/lib/request/fetch-helpers.ts b/lib/request/fetch-helpers.ts index f49650c..aa1839f 100644 --- a/lib/request/fetch-helpers.ts +++ b/lib/request/fetch-helpers.ts @@ -99,6 +99,37 @@ export function rewriteUrlForCodex(url: string): string { return url.replace(URL_PATHS.RESPONSES, URL_PATHS.CODEX_RESPONSES); } +function buildCompactionSettings(pluginConfig?: PluginConfig) { + return { + enabled: pluginConfig?.enableCodexCompaction !== false, + autoLimitTokens: pluginConfig?.autoCompactTokenLimit, + autoMinMessages: pluginConfig?.autoCompactMinMessages ?? 
8, + }; +} + +function applyPromptCacheKey(body: RequestBody, sessionContext?: SessionContext): RequestBody { + const promptCacheKey = sessionContext?.state?.promptCacheKey; + if (!promptCacheKey) return body; + + const hostProvided = (body as any).prompt_cache_key || (body as any).promptCacheKey; + if (hostProvided) { + return body; + } + + return { ...(body as any), prompt_cache_key: promptCacheKey } as RequestBody; +} + +function applyCompactionHistory( + body: RequestBody, + sessionManager: SessionManager | undefined, + sessionContext: SessionContext | undefined, + settings: { enabled: boolean }, + manualCommand: string | null, +): void { + if (!settings.enabled || manualCommand) return; + sessionManager?.applyCompactedHistory?.(body, sessionContext); +} + /** * Transforms request body and logs the transformation * @param init - Request init options @@ -131,40 +162,32 @@ export async function transformRequestForCodex( const body = JSON.parse(init.body as string) as RequestBody; const originalModel = body.model; const originalInput = cloneInputItems(body.input ?? []); - const compactionEnabled = pluginConfig?.enableCodexCompaction !== false; - const compactionSettings = { - enabled: compactionEnabled, - autoLimitTokens: pluginConfig?.autoCompactTokenLimit, - autoMinMessages: pluginConfig?.autoCompactMinMessages ?? 8, - }; - const manualCommand = compactionEnabled ? detectCompactionCommand(originalInput) : null; - + const compactionSettings = buildCompactionSettings(pluginConfig); + const manualCommand = compactionSettings.enabled ? 
detectCompactionCommand(originalInput) : null; const sessionContext = sessionManager?.getContext(body); - if (sessionContext?.state?.promptCacheKey) { - const hostProvided = (body as any).prompt_cache_key || (body as any).promptCacheKey; - if (!hostProvided) { - (body as any).prompt_cache_key = sessionContext.state.promptCacheKey; - } - } - if (compactionEnabled && !manualCommand) { - sessionManager?.applyCompactedHistory?.(body, sessionContext); - } - - // Log original request + + const bodyWithCacheKey = applyPromptCacheKey(body, sessionContext); + applyCompactionHistory( + bodyWithCacheKey, + sessionManager, + sessionContext, + compactionSettings, + manualCommand, + ); + logRequest(LOG_STAGES.BEFORE_TRANSFORM, { url, originalModel, - model: body.model, - hasTools: !!body.tools, - hasInput: !!body.input, - inputLength: body.input?.length, + model: bodyWithCacheKey.model, + hasTools: !!bodyWithCacheKey.tools, + hasInput: !!bodyWithCacheKey.input, + inputLength: bodyWithCacheKey.input?.length, codexMode, - body: body as unknown as Record, + body: bodyWithCacheKey as unknown as Record, }); - // Transform request body const transformResult = await transformRequestBody( - body, + bodyWithCacheKey, codexInstructions, userConfig, codexMode, @@ -181,7 +204,6 @@ export async function transformRequestForCodex( const appliedContext = sessionManager?.applyRequest(transformResult.body, sessionContext) ?? 
sessionContext; - // Log transformed request logRequest(LOG_STAGES.AFTER_TRANSFORM, { url, originalModel, @@ -195,7 +217,6 @@ export async function transformRequestForCodex( body: transformResult.body as unknown as Record, }); - // Serialize body once - callers must re-serialize if they mutate transformResult.body after this function returns const updatedInit: RequestInit = { ...init, body: JSON.stringify(transformResult.body), @@ -247,6 +268,91 @@ export function createCodexHeaders( return headers; } +function safeParseErrorJson(raw: string): any | null { + try { + return JSON.parse(raw) as any; + } catch { + return null; + } +} + +type RateLimitBuckets = { + primary: { used_percent?: number; window_minutes?: number; resets_at?: number }; + secondary: { used_percent?: number; window_minutes?: number; resets_at?: number }; +}; + +function parseRateLimits(headers: Headers): RateLimitBuckets | undefined { + const primary = { + used_percent: toNumber(headers.get("x-codex-primary-used-percent")), + window_minutes: toInt(headers.get("x-codex-primary-window-minutes")), + resets_at: toInt(headers.get("x-codex-primary-reset-at")), + }; + const secondary = { + used_percent: toNumber(headers.get("x-codex-secondary-used-percent")), + window_minutes: toInt(headers.get("x-codex-secondary-window-minutes")), + resets_at: toInt(headers.get("x-codex-secondary-reset-at")), + }; + const hasRateLimits = primary.used_percent !== undefined || secondary.used_percent !== undefined; + + return hasRateLimits ? { primary, secondary } : undefined; +} + +function isUsageLimitError(code: unknown): boolean { + return /usage_limit_reached|usage_not_included|rate_limit_exceeded/i.test(String(code ?? "")); +} + +function buildUsageFriendlyMessage( + err: Record, + rateLimits: RateLimitBuckets | undefined, +): string | undefined { + const parsedReset = + typeof err.resets_at === "number" + ? err.resets_at + : err.resets_at != null + ? 
Number(err.resets_at) + : undefined; + const resetSource = Number.isFinite(parsedReset) + ? (parsedReset as number) + : (rateLimits?.primary.resets_at ?? rateLimits?.secondary.resets_at); + const mins = + typeof resetSource === "number" + ? Math.max(0, Math.round((resetSource * 1000 - Date.now()) / 60000)) + : undefined; + const plan = err.plan_type ? ` (${String(err.plan_type).toLowerCase()} plan)` : ""; + const when = mins !== undefined ? ` Try again in ~${mins} min.` : ""; + return `You have hit your ChatGPT usage limit${plan}.${when}`.trim(); +} + +function enrichErrorBody(raw: string, response: Response): { body: string; isJson: boolean } { + const parsed = safeParseErrorJson(raw); + if (!parsed) { + return { body: raw, isJson: false }; + } + + const err = (parsed as any)?.error ?? {}; + const rate_limits = parseRateLimits(response.headers); + const usageLimit = isUsageLimitError(err.code ?? err.type); + const friendly_message = usageLimit ? buildUsageFriendlyMessage(err, rate_limits) : undefined; + const message = usageLimit + ? (err.message ?? friendly_message) + : (err.message ?? + (parsed as any)?.error?.message ?? + (typeof parsed === "string" ? parsed : undefined) ?? + `Request failed with status ${response.status}.`); + + const enhanced = { + error: { + ...err, + message, + friendly_message, + rate_limits, + status: response.status, + }, + }; + + return { body: JSON.stringify(enhanced), isJson: true }; +} + /** * Enriches a Codex API error Response with structured error details and rate-limit metadata. * @@ -255,66 +361,7 @@ export function createCodexHeaders( */ export async function handleErrorResponse(response: Response): Promise { const raw = await response.text(); - - let enriched = raw; - try { - const parsed = JSON.parse(raw) as any; - const err = parsed?.error ?? 
{}; - - // Parse Codex rate-limit headers if present - const h = response.headers; - const primary = { - used_percent: toNumber(h.get("x-codex-primary-used-percent")), - window_minutes: toInt(h.get("x-codex-primary-window-minutes")), - resets_at: toInt(h.get("x-codex-primary-reset-at")), - }; - const secondary = { - used_percent: toNumber(h.get("x-codex-secondary-used-percent")), - window_minutes: toInt(h.get("x-codex-secondary-window-minutes")), - resets_at: toInt(h.get("x-codex-secondary-reset-at")), - }; - const rate_limits = - primary.used_percent !== undefined || secondary.used_percent !== undefined - ? { primary, secondary } - : undefined; - - // Determine if this is a genuine usage limit error - const code = (err.code ?? err.type ?? "").toString(); - const isUsageLimitError = /usage_limit_reached|usage_not_included|rate_limit_exceeded/i.test(code); - - let friendly_message: string | undefined; - let message: string; - - if (isUsageLimitError) { - const resetsAt = err.resets_at ?? primary.resets_at ?? secondary.resets_at; - const mins = resetsAt ? Math.max(0, Math.round((resetsAt * 1000 - Date.now()) / 60000)) : undefined; - const plan = err.plan_type ? ` (${String(err.plan_type).toLowerCase()} plan)` : ""; - const when = mins !== undefined ? ` Try again in ~${mins} min.` : ""; - friendly_message = `You have hit your ChatGPT usage limit${plan}.${when}`.trim(); - message = err.message ?? friendly_message; - } else { - // Preserve original error message for non-usage-limit errors - message = - err.message ?? - parsed?.error?.message ?? - (typeof parsed === "string" ? parsed : undefined) ?? 
- `Request failed with status ${response.status}.`; - } - - const enhanced = { - error: { - ...err, - message, - friendly_message, - rate_limits, - status: response.status, - }, - }; - enriched = JSON.stringify(enhanced); - } catch { - // Raw body not JSON; leave unchanged - enriched = raw; - } + const { body: enriched, isJson } = enrichErrorBody(raw, response); logRequest(LOG_STAGES.ERROR_RESPONSE, { status: response.status, @@ -324,9 +371,7 @@ export async function handleErrorResponse(response: Response): Promise logError(`${response.status} error`, { body: enriched }); const headers = new Headers(response.headers); - // Only set JSON content-type if we successfully enriched the response - // Otherwise preserve the original content-type for non-JSON responses - if (enriched !== raw) { + if (isJson) { headers.set("content-type", "application/json; charset=utf-8"); } return new Response(enriched, { @@ -369,4 +414,4 @@ function toInt(v: string | null): number | undefined { if (v == null) return undefined; const n = parseInt(v, 10); return Number.isFinite(n) ? 
n : undefined; -} \ No newline at end of file +} diff --git a/lib/request/input-filters.ts b/lib/request/input-filters.ts new file mode 100644 index 0000000..9ad36a3 --- /dev/null +++ b/lib/request/input-filters.ts @@ -0,0 +1,303 @@ +/* eslint-disable no-param-reassign */ +import { + cacheBridgeDecision, + generateContentHash, + generateInputHash, + getCachedBridgeDecision, + hasBridgePromptInConversation, +} from "../cache/prompt-fingerprinting.js"; +import { CODEX_OPENCODE_BRIDGE } from "../prompts/codex-opencode-bridge.js"; +import { TOOL_REMAP_MESSAGE } from "../prompts/codex.js"; +import { getOpenCodeCodexPrompt } from "../prompts/opencode-codex.js"; +import type { InputItem, SessionContext } from "../types.js"; +import { extractTextFromItem } from "../utils/input-item-utils.js"; +import { logDebug } from "../logger.js"; + +const TOOL_REMAP_MESSAGE_HASH = generateContentHash(TOOL_REMAP_MESSAGE); + +export function filterInput( + input: InputItem[] | undefined, + options: { preserveIds?: boolean; preserveMetadata?: boolean } = {}, +): InputItem[] | undefined { + if (!Array.isArray(input)) return input; + + const { preserveIds = false, preserveMetadata = false } = options; + + return input + .filter((item) => { + if (item.type === "item_reference") { + return false; + } + return true; + }) + .map((item) => { + let sanitized = item as InputItem; + + if (item.id && !preserveIds) { + const { id: _id, ...itemWithoutId } = item as Record & InputItem; + sanitized = itemWithoutId as InputItem; + } + + if (!preserveIds && !preserveMetadata && "metadata" in (sanitized as Record)) { + const { metadata: _metadata, ...rest } = sanitized as Record; + sanitized = rest as InputItem; + } + + return sanitized; + }); +} + +export function isOpenCodeSystemPrompt(item: InputItem, cachedPrompt: string | null): boolean { + const isSystemRole = item.role === "developer" || item.role === "system"; + if (!isSystemRole) return false; + + const contentText = extractTextFromItem(item); + 
if (!contentText) return false; + + if (cachedPrompt) { + if (contentText.trim() === cachedPrompt.trim()) { + return true; + } + + const contentPrefix = contentText.trim().substring(0, 200); + const cachedPrefix = cachedPrompt.trim().substring(0, 200); + if (contentPrefix === cachedPrefix) { + return true; + } + } + + return contentText.startsWith("You are a coding agent running in"); +} + +export async function filterOpenCodeSystemPrompts( + input: InputItem[] | undefined, +): Promise { + if (!Array.isArray(input)) return input; + + let cachedPrompt: string | null = null; + try { + cachedPrompt = await getOpenCodeCodexPrompt(); + } catch { + // Fallback to text-based detection only + } + + const compactionInstructionPatterns: RegExp[] = [ + /(summary[ _-]?file)/i, + /(summary[ _-]?path)/i, + /summary\s+(?:has\s+been\s+)?saved\s+(?:to|at)/i, + /summary\s+(?:is\s+)?stored\s+(?:in|at|to)/i, + /summary\s+(?:is\s+)?available\s+(?:at|in)/i, + /write\s+(?:the\s+)?summary\s+(?:to|into)/i, + /save\s+(?:the\s+)?summary\s+(?:to|into)/i, + /open\s+(?:the\s+)?summary/i, + /read\s+(?:the\s+)?summary/i, + /cat\s+(?:the\s+)?summary/i, + /view\s+(?:the\s+)?summary/i, + /~\/\.opencode/i, + /\.opencode\/.*summary/i, + ]; + + const hasCompactionMetadataFlag = (item: InputItem): boolean => { + const rawMeta = (item as Record)?.metadata ?? 
(item as Record)?.meta; + if (!rawMeta || typeof rawMeta !== "object") return false; + const meta = rawMeta as Record; + const metaAny = meta as Record; + const source = metaAny.source as unknown; + if (typeof source === "string" && source.toLowerCase() === "opencode-compaction") { + return true; + } + if (metaAny.opencodeCompaction === true || metaAny.opencode_compaction === true) { + return true; + } + return false; + }; + + const matchesCompactionInstruction = (value: string): boolean => + compactionInstructionPatterns.some((pattern) => pattern.test(value)); + + const sanitizeOpenCodeCompactionPrompt = (item: InputItem): InputItem | null => { + const text = extractTextFromItem(item); + if (!text) return null; + const sanitizedText = text + .split(/\r?\n/) + .map((line) => line.trimEnd()) + .filter((line) => { + const trimmed = line.trim(); + if (!trimmed) { + return true; + } + return !matchesCompactionInstruction(trimmed); + }) + .join("\n") + .replace(/\n{3,}/g, "\n\n") + .trim(); + if (!sanitizedText) { + return null; + } + const originalMentionedCompaction = /\bauto[-\s]?compaction\b/i.test(text); + let finalText = sanitizedText; + if (originalMentionedCompaction && !/\bauto[-\s]?compaction\b/i.test(finalText)) { + finalText = `Auto-compaction summary\n\n${finalText}`; + } + return { + ...item, + content: finalText, + }; + }; + + const isOpenCodeCompactionPrompt = (item: InputItem): boolean => { + const isSystemRole = item.role === "developer" || item.role === "system"; + if (!isSystemRole) return false; + const text = extractTextFromItem(item); + if (!text) return false; + const hasCompaction = /\b(auto[-\s]?compaction|compaction|compact)\b/i.test(text); + const hasSummary = /\b(summary|summarize|summarise)\b/i.test(text); + return hasCompaction && hasSummary && matchesCompactionInstruction(text); + }; + + const filteredInput: InputItem[] = []; + for (const item of input) { + if (item.role === "user") { + filteredInput.push(item); + continue; + } + + if 
(isOpenCodeSystemPrompt(item, cachedPrompt)) { + continue; + } + + const compactionMetadataFlagged = hasCompactionMetadataFlag(item); + if (compactionMetadataFlagged || isOpenCodeCompactionPrompt(item)) { + const sanitized = sanitizeOpenCodeCompactionPrompt(item); + if (sanitized) { + filteredInput.push(sanitized); + } + continue; + } + + filteredInput.push(item); + } + + return filteredInput; +} + +function analyzeBridgeRequirement( + input: InputItem[] | undefined, + hasTools: boolean, +): { needsBridge: boolean; reason: string; toolCount: number } { + if (!hasTools || !Array.isArray(input)) { + return { needsBridge: false, reason: "no_tools_or_input", toolCount: 0 }; + } + + const toolCount = 1; + + return { + needsBridge: true, + reason: "tools_present", + toolCount, + }; +} + +function buildBridgeMessage(): InputItem { + return { + type: "message", + role: "developer", + content: [{ type: "input_text", text: CODEX_OPENCODE_BRIDGE }], + }; +} + +export function addCodexBridgeMessage( + input: InputItem[] | undefined, + hasTools: boolean, + sessionContext?: SessionContext, +): InputItem[] | undefined { + if (!Array.isArray(input)) return input; + + const bridgeMessage = buildBridgeMessage(); + const sessionBridgeInjected = sessionContext?.state.bridgeInjected ?? 
false; + const inputHash = generateInputHash(input); + const analysis = analyzeBridgeRequirement(input, hasTools); + + if (sessionBridgeInjected) { + const alreadyPresent = hasBridgePromptInConversation(input, CODEX_OPENCODE_BRIDGE); + if (alreadyPresent) { + logDebug("Bridge prompt already present; preserving session continuity"); + if (sessionContext) { + sessionContext.state.bridgeInjected = true; + } + return input; + } + + logDebug("Bridge prompt previously injected in session; reapplying for continuity"); + if (sessionContext) { + sessionContext.state.bridgeInjected = true; + } + return [bridgeMessage, ...input]; + } + + if (hasBridgePromptInConversation(input, CODEX_OPENCODE_BRIDGE)) { + logDebug("Bridge prompt already present in conversation, skipping injection"); + cacheBridgeDecision(inputHash, analysis.toolCount, false); + return input; + } + + const cachedDecision = getCachedBridgeDecision(inputHash, analysis.toolCount); + if (cachedDecision) { + const shouldAdd = cachedDecision.hash === generateContentHash("add"); + logDebug(`Using cached bridge decision: ${shouldAdd ? 
"add" : "skip"}`); + if (shouldAdd) { + if (sessionContext) { + sessionContext.state.bridgeInjected = true; + } + + return [bridgeMessage, ...input]; + } + return input; + } + + if (!analysis.needsBridge) { + logDebug(`Skipping bridge prompt: ${analysis.reason} (tools: ${analysis.toolCount})`); + cacheBridgeDecision(inputHash, analysis.toolCount, false); + return input; + } + + logDebug(`Adding bridge prompt: ${analysis.reason} (tools: ${analysis.toolCount})`); + cacheBridgeDecision(inputHash, analysis.toolCount, true); + + if (sessionContext) { + sessionContext.state.bridgeInjected = true; + } + + return [bridgeMessage, ...input]; +} + +export function addToolRemapMessage( + input: InputItem[] | undefined, + hasTools: boolean, +): InputItem[] | undefined { + if (!hasTools || !Array.isArray(input)) return input; + + const hasExistingToolRemap = input.some((item) => { + if (item?.type !== "message" || item?.role !== "developer") return false; + const contentText = extractTextFromItem(item); + if (!contentText) return false; + return generateContentHash(contentText) === TOOL_REMAP_MESSAGE_HASH; + }); + + if (hasExistingToolRemap) { + return input; + } + + const toolRemapMessage: InputItem = { + type: "message", + role: "developer", + content: [ + { + type: "input_text", + text: TOOL_REMAP_MESSAGE, + }, + ], + }; + + return [toolRemapMessage, ...input]; +} diff --git a/lib/request/model-config.ts b/lib/request/model-config.ts new file mode 100644 index 0000000..df5c832 --- /dev/null +++ b/lib/request/model-config.ts @@ -0,0 +1,170 @@ +import type { ConfigOptions, ReasoningConfig, UserConfig } from "../types.js"; + +export function normalizeModel(model: string | undefined): string { + const fallback = "gpt-5.1"; + if (!model) return fallback; + + const trimmed = model.trim(); + if (!trimmed) return fallback; + + const lowered = trimmed.toLowerCase(); + const sanitized = lowered.replace(/\./g, "-").replace(/[\s_/]+/g, "-"); + + const contains = (needle: string) => 
sanitized.includes(needle); + const hasGpt51 = contains("gpt-5-1") || sanitized.includes("gpt51"); + const hasCodexMax = contains("codex-max") || contains("codexmax"); + + if (contains("gpt-5-1-codex-mini") || (hasGpt51 && contains("codex-mini"))) { + return "gpt-5.1-codex-mini"; + } + if (contains("codex-mini")) { + return "gpt-5.1-codex-mini"; + } + if (hasCodexMax) { + return "gpt-5.1-codex-max"; + } + if (contains("gpt-5-1-codex") || (hasGpt51 && contains("codex"))) { + return "gpt-5.1-codex"; + } + if (hasGpt51) { + return "gpt-5.1"; + } + if (contains("gpt-5-codex-mini") || contains("codex-mini-latest")) { + return "gpt-5.1-codex-mini"; + } + if (contains("gpt-5-codex") || (contains("codex") && !contains("mini"))) { + return "gpt-5-codex"; + } + if (contains("gpt-5")) { + return "gpt-5"; + } + + return sanitized; +} + +export function getModelConfig( + modelName: string, + userConfig: UserConfig = { global: {}, models: {} }, +): ConfigOptions { + const globalOptions = userConfig.global || {}; + const modelOptions = userConfig.models?.[modelName]?.options || {}; + + return { ...globalOptions, ...modelOptions }; +} + +type ModelFlags = { + normalized: string; + normalizedOriginal: string; + isGpt51: boolean; + isCodexMini: boolean; + isCodexMax: boolean; + isCodexFamily: boolean; + isLightweight: boolean; +}; + +function classifyModel(originalModel: string | undefined): ModelFlags { + const normalized = normalizeModel(originalModel); + const normalizedOriginal = originalModel?.toLowerCase() ?? 
normalized; + const isGpt51 = normalized.startsWith("gpt-5.1"); + const isCodexMiniSlug = normalized === "gpt-5.1-codex-mini" || normalized === "codex-mini-latest"; + const isLegacyCodexMini = normalizedOriginal.includes("codex-mini-latest"); + const isCodexMini = + isCodexMiniSlug || + isLegacyCodexMini || + normalizedOriginal.includes("codex-mini") || + normalizedOriginal.includes("codex mini") || + normalizedOriginal.includes("codex_mini"); + const isCodexMax = normalized === "gpt-5.1-codex-max"; + const isCodexFamily = + normalized.startsWith("gpt-5-codex") || + normalized.startsWith("gpt-5.1-codex") || + (normalizedOriginal.includes("codex") && !isCodexMini); + const isLightweight = + !isCodexMini && + !isCodexFamily && + (normalizedOriginal.includes("nano") || normalizedOriginal.includes("mini")); + + return { + normalized, + normalizedOriginal, + isGpt51, + isCodexMini, + isCodexMax, + isCodexFamily, + isLightweight, + }; +} + +function defaultEffortFor(flags: ModelFlags): ReasoningConfig["effort"] { + if (flags.isGpt51 && !flags.isCodexFamily && !flags.isCodexMini) { + return "none"; + } + if (flags.isCodexMini) { + return "medium"; + } + if (flags.isLightweight) { + return "minimal"; + } + return "medium"; +} + +function applyRequestedEffort( + requested: ReasoningConfig["effort"], + flags: ModelFlags, +): ReasoningConfig["effort"] { + if (requested === "xhigh" && !flags.isCodexMax) { + return "high"; + } + return requested; +} + +function normalizeEffortForModel( + effort: ReasoningConfig["effort"], + flags: ModelFlags, +): ReasoningConfig["effort"] { + if (flags.isCodexMini) { + if (effort === "minimal" || effort === "low" || effort === "none") { + return "medium"; + } + return effort === "high" ? 
effort : "medium"; + } + + if (flags.isCodexMax) { + if (effort === "minimal" || effort === "none") { + return "low"; + } + return effort; + } + + if (flags.isCodexFamily) { + if (effort === "minimal" || effort === "none") { + return "low"; + } + return effort; + } + + if (flags.isGpt51 && effort === "minimal") { + return "none"; + } + + if (!flags.isGpt51 && effort === "none") { + return "minimal"; + } + + return effort; +} + +export function getReasoningConfig( + originalModel: string | undefined, + userConfig: ConfigOptions = {}, +): ReasoningConfig { + const flags = classifyModel(originalModel); + const requestedEffort = userConfig.reasoningEffort ?? defaultEffortFor(flags); + const effortAfterRequest = applyRequestedEffort(requestedEffort, flags); + const effort = normalizeEffortForModel(effortAfterRequest, flags); + + return { + effort, + summary: userConfig.reasoningSummary || "auto", + }; +} diff --git a/lib/request/prompt-cache.ts b/lib/request/prompt-cache.ts new file mode 100644 index 0000000..c70eccc --- /dev/null +++ b/lib/request/prompt-cache.ts @@ -0,0 +1,240 @@ +/* eslint-disable no-param-reassign */ +import { createHash, randomUUID } from "node:crypto"; +import { logDebug, logInfo, logWarn } from "../logger.js"; +import type { RequestBody } from "../types.js"; + +function stableStringify(value: unknown): string { + if (value === null || typeof value !== "object") { + return JSON.stringify(value); + } + + if (Array.isArray(value)) { + return `[${value.map((item) => stableStringify(item)).join(",")}]`; + } + + const entries = Object.keys(value as Record) + .sort() + .map((key) => `${JSON.stringify(key)}:${stableStringify((value as Record)[key])}`); + + return `{${entries.join(",")}}`; +} + +type PromptCacheKeySource = "existing" | "metadata" | "generated"; + +export interface PromptCacheKeyResult { + key: string; + source: PromptCacheKeySource; + sourceKey?: string; + forkSourceKey?: string; + hintKeys?: string[]; + unusableKeys?: string[]; + 
forkHintKeys?: string[]; + forkUnusableKeys?: string[]; + fallbackHash?: string; +} + +function extractString(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed.length > 0 ? trimmed : undefined; +} + +function normalizeCacheKeyBase(base: string): string { + const trimmed = base.trim(); + if (!trimmed) { + return `cache_${randomUUID()}`; + } + const sanitized = trimmed.replace(/\s+/g, "-"); + return sanitized.startsWith("cache_") ? sanitized : `cache_${sanitized}`; +} + +function normalizeForkSuffix(forkId: string): string { + const trimmed = forkId.trim(); + if (!trimmed) return "fork"; + return trimmed.replace(/\s+/g, "-"); +} + +export const PROMPT_CACHE_METADATA_KEYS = [ + "conversation_id", + "conversationId", + "thread_id", + "threadId", + "session_id", + "sessionId", + "chat_id", + "chatId", +]; + +export const PROMPT_CACHE_FORK_KEYS = [ + "forkId", + "fork_id", + "branchId", + "branch_id", + "parentConversationId", + "parent_conversation_id", +]; + +function derivePromptCacheKeyFromBody(body: RequestBody): { + base?: string; + sourceKey?: string; + hintKeys: string[]; + unusableKeys: string[]; + forkId?: string; + forkSourceKey?: string; + forkHintKeys: string[]; + forkUnusableKeys: string[]; +} { + const metadata = body.metadata as Record | undefined; + const root = body as Record; + + const hintKeys: string[] = []; + const unusableKeys: string[] = []; + let base: string | undefined; + let sourceKey: string | undefined; + + for (const key of PROMPT_CACHE_METADATA_KEYS) { + const raw = metadata?.[key] ?? 
root[key]; + if (raw !== undefined) { + hintKeys.push(key); + } + const value = extractString(raw); + if (value) { + base = value; + sourceKey = key; + break; + } + if (raw !== undefined) { + unusableKeys.push(key); + } + } + + const forkHintKeys: string[] = []; + const forkUnusableKeys: string[] = []; + let forkId: string | undefined; + let forkSourceKey: string | undefined; + + for (const key of PROMPT_CACHE_FORK_KEYS) { + const raw = metadata?.[key] ?? root[key]; + if (raw !== undefined) { + forkHintKeys.push(key); + } + const value = extractString(raw); + if (value) { + forkId = value; + forkSourceKey = key; + break; + } + if (raw !== undefined) { + forkUnusableKeys.push(key); + } + } + + return { + base, + sourceKey, + hintKeys, + unusableKeys, + forkId, + forkSourceKey, + forkHintKeys, + forkUnusableKeys, + }; +} + +function computeFallbackHashForBody(body: RequestBody): string { + try { + const inputSlice = Array.isArray(body.input) ? body.input.slice(0, 3) : undefined; + const seed = stableStringify({ + model: typeof body.model === "string" ? body.model : undefined, + metadata: body.metadata, + input: inputSlice, + }); + return createHash("sha1").update(seed).digest("hex").slice(0, 12); + } catch { + const model = typeof body.model === "string" ? 
body.model : "unknown"; + return createHash("sha1").update(model).digest("hex").slice(0, 12); + } +} + +export function ensurePromptCacheKey(body: RequestBody): PromptCacheKeyResult { + const hostBody = body as Record; + const existingSnake = extractString(hostBody.prompt_cache_key); + const existingCamel = extractString(hostBody.promptCacheKey); + const existing = existingSnake || existingCamel; + + if (existing) { + body.prompt_cache_key = existing; + if (existingCamel) { + hostBody.promptCacheKey = existingCamel; + } + return { key: existing, source: "existing" }; + } + + const derived = derivePromptCacheKeyFromBody(body); + if (derived.base) { + const baseKey = normalizeCacheKeyBase(derived.base); + const suffix = derived.forkId ? `-fork-${normalizeForkSuffix(derived.forkId)}` : ""; + const finalKey = `${baseKey}${suffix}`; + body.prompt_cache_key = finalKey; + return { + key: finalKey, + source: "metadata", + sourceKey: derived.sourceKey, + forkSourceKey: derived.forkSourceKey, + hintKeys: derived.hintKeys, + forkHintKeys: derived.forkHintKeys, + }; + } + + const fallbackHash = computeFallbackHashForBody(body); + const generated = `cache_${fallbackHash}`; + body.prompt_cache_key = generated; + return { + key: generated, + source: "generated", + hintKeys: derived.hintKeys, + unusableKeys: derived.unusableKeys, + forkHintKeys: derived.forkHintKeys, + forkUnusableKeys: derived.forkUnusableKeys, + fallbackHash, + }; +} + +export function logCacheKeyDecision(cacheKeyResult: PromptCacheKeyResult, isNewSession: boolean): void { + if (cacheKeyResult.source === "existing") { + return; + } + + if (cacheKeyResult.source === "metadata") { + logDebug("Prompt cache key missing; derived from metadata", { + promptCacheKey: cacheKeyResult.key, + sourceKey: cacheKeyResult.sourceKey, + forkSourceKey: cacheKeyResult.forkSourceKey, + forkHintKeys: cacheKeyResult.forkHintKeys, + }); + return; + } + + const hasHints = Boolean( + (cacheKeyResult.hintKeys && 
cacheKeyResult.hintKeys.length > 0) || + (cacheKeyResult.forkHintKeys && cacheKeyResult.forkHintKeys.length > 0), + ); + const message = hasHints + ? "Prompt cache key hints detected but unusable; generated fallback cache key" + : "Prompt cache key missing; generated fallback cache key"; + const logPayload = { + promptCacheKey: cacheKeyResult.key, + fallbackHash: cacheKeyResult.fallbackHash, + hintKeys: cacheKeyResult.hintKeys, + unusableKeys: cacheKeyResult.unusableKeys, + forkHintKeys: cacheKeyResult.forkHintKeys, + forkUnusableKeys: cacheKeyResult.forkUnusableKeys, + }; + if (!hasHints && isNewSession) { + logInfo(message, logPayload); + } else { + logWarn(message, logPayload); + } +} diff --git a/lib/request/request-transformer.ts b/lib/request/request-transformer.ts index 7d837a0..b75e01f 100644 --- a/lib/request/request-transformer.ts +++ b/lib/request/request-transformer.ts @@ -1,1034 +1,38 @@ /* eslint-disable no-param-reassign */ -import { createHash, randomUUID } from "node:crypto"; -import { - cacheBridgeDecision, - generateContentHash, - generateInputHash, - getCachedBridgeDecision, - hasBridgePromptInConversation, -} from "../cache/prompt-fingerprinting.js"; -import { - approximateTokenCount, - buildCompactionPromptItems, - collectSystemMessages, - serializeConversation, -} from "../compaction/codex-compaction.js"; import type { CompactionDecision } from "../compaction/compaction-executor.js"; -import { logDebug, logInfo, logWarn } from "../logger.js"; -import { TOOL_REMAP_MESSAGE } from "../prompts/codex.js"; -import { CODEX_OPENCODE_BRIDGE } from "../prompts/codex-opencode-bridge.js"; -import { getOpenCodeCodexPrompt } from "../prompts/opencode-codex.js"; -import type { - ConfigOptions, - InputItem, - ReasoningConfig, - RequestBody, - SessionContext, - UserConfig, -} from "../types.js"; -import { cloneInputItems } from "../utils/clone.js"; -import { countConversationTurns, extractTextFromItem } from "../utils/input-item-utils.js"; - -// Clone 
utilities now imported from ../utils/clone.ts - -function stableStringify(value: unknown): string { - if (value === null || typeof value !== "object") { - return JSON.stringify(value); - } - - if (Array.isArray(value)) { - return `[${value.map((item) => stableStringify(item)).join(",")}]`; - } - - const entries = Object.keys(value as Record) - .sort() - .map((key) => `${JSON.stringify(key)}:${stableStringify((value as Record)[key])}`); - - return `{${entries.join(",")}}`; -} - -function _computePayloadHash(item: InputItem): string { - const canonical = stableStringify(item); - return createHash("sha1").update(canonical).digest("hex"); -} - -export interface ConversationCacheEntry { - hash: string; - callId?: string; - lastUsed: number; -} - -export interface ConversationMemory { - entries: Map; - payloads: Map; - usage: Map; -} - -// CONVERSATION_ENTRY_TTL_MS and CONVERSATION_MAX_ENTRIES now imported from ../constants.ts as CONVERSATION_CONFIG - -function _decrementUsage(memory: ConversationMemory, hash: string): void { - const current = memory.usage.get(hash) ?? 0; - if (current <= 1) { - memory.usage.delete(hash); - memory.payloads.delete(hash); - } else { - memory.usage.set(hash, current - 1); - } -} - -function _incrementUsage(memory: ConversationMemory, hash: string, payload: InputItem): void { - const current = memory.usage.get(hash) ?? 0; - if (current === 0) { - memory.payloads.set(hash, payload); - } - memory.usage.set(hash, current + 1); -} - -// Removed unused conversation memory functions - dead code eliminated -/** - * Normalize incoming tools into the exact JSON shape the Codex CLI emits. - * Handles strings, CLI-style objects, AI SDK nested objects, and boolean maps. 
- */ -function normalizeToolsForResponses(tools: unknown): any[] | undefined { - if (!tools) return undefined; - - const defaultFunctionParameters = { - type: "object", - properties: {}, - additionalProperties: true, - }; - - const defaultFreeformFormat = { - type: "json_schema/v1", - syntax: "json", - definition: "{}", - }; - - const isNativeCodexTool = (value: unknown): value is "shell" | "apply_patch" => { - return typeof value === "string" && (value === "shell" || value === "apply_patch"); - }; - - const makeFunctionTool = (name: unknown, description?: unknown, parameters?: unknown, strict?: unknown) => { - if (typeof name !== "string" || !name.trim()) return undefined; - const tool: Record = { - type: "function", - name, - strict: typeof strict === "boolean" ? strict : false, - parameters: parameters && typeof parameters === "object" ? parameters : defaultFunctionParameters, - }; - if (typeof description === "string" && description.trim()) { - tool.description = description; - } - return tool; - }; - - const makeFreeformTool = (name: unknown, description?: unknown, format?: unknown) => { - if (typeof name !== "string" || !name.trim()) return undefined; - const tool: Record = { - type: "custom", - name, - format: format && typeof format === "object" ? format : defaultFreeformFormat, - }; - if (typeof description === "string" && description.trim()) { - tool.description = description; - } - return tool; - }; - - const convertTool = (candidate: unknown): any | undefined => { - if (!candidate) return undefined; - if (typeof candidate === "string") { - const trimmed = candidate.trim(); - if (isNativeCodexTool(trimmed)) { - return { type: trimmed }; - } - return makeFunctionTool(trimmed); - } - if (typeof candidate !== "object") { - return undefined; - } - const obj = candidate as Record; - const nestedFn = - obj.function && typeof obj.function === "object" - ? (obj.function as Record) - : undefined; - const type = typeof obj.type === "string" ? 
obj.type : undefined; - if (type && isNativeCodexTool(type)) { - return { type }; - } - if (type === "function") { - return makeFunctionTool( - nestedFn?.name ?? obj.name, - nestedFn?.description ?? obj.description, - nestedFn?.parameters ?? obj.parameters, - nestedFn?.strict ?? obj.strict, - ); - } - if (type === "custom") { - return makeFreeformTool( - nestedFn?.name ?? obj.name, - nestedFn?.description ?? obj.description, - nestedFn?.format ?? obj.format, - ); - } - if (type === "local_shell" || type === "web_search") { - // These variants do not require additional fields. - return { type }; - } - if (typeof obj.name === "string") { - if (isNativeCodexTool(obj.name)) { - return { type: obj.name }; - } - return makeFunctionTool(obj.name, obj.description, obj.parameters, obj.strict); - } - if (nestedFn?.name) { - return makeFunctionTool(nestedFn.name, nestedFn.description, nestedFn.parameters, nestedFn.strict); - } - return undefined; - }; - - if (Array.isArray(tools)) { - return tools.map(convertTool).filter(Boolean) as any[]; - } - - if (typeof tools === "object") { - return Object.entries(tools as Record) - .map(([name, value]) => { - if (value && typeof value === "object") { - const record = value as Record; - const enabled = record.enabled ?? record.use ?? record.allow ?? 
true; - if (!enabled) return undefined; - if (record.type === "custom") { - return makeFreeformTool(name, record.description, record.format); - } - return makeFunctionTool(name, record.description, record.parameters, record.strict); - } - if (value === true) { - return makeFunctionTool(name); - } - return undefined; - }) - .filter(Boolean) as any[]; - } - - return undefined; -} - -/** - * Normalize model name to Codex-supported variants - * @param model - Original model name - * @returns Normalized model name - */ -export function normalizeModel(model: string | undefined): string { - const fallback = "gpt-5.1"; - if (!model) return fallback; - - const lowered = model.toLowerCase(); - const sanitized = lowered.replace(/\./g, "-").replace(/[\s_/]+/g, "-"); - - const contains = (needle: string) => sanitized.includes(needle); - const hasGpt51 = contains("gpt-5-1") || sanitized.includes("gpt51"); - const hasCodexMax = contains("codex-max") || contains("codexmax"); - - if (contains("gpt-5-1-codex-mini") || (hasGpt51 && contains("codex-mini"))) { - return "gpt-5.1-codex-mini"; - } - if (contains("codex-mini")) { - return "gpt-5.1-codex-mini"; - } - if (hasCodexMax) { - return "gpt-5.1-codex-max"; - } - if (contains("gpt-5-1-codex") || (hasGpt51 && contains("codex"))) { - return "gpt-5.1-codex"; - } - if (hasGpt51) { - return "gpt-5.1"; - } - if (contains("gpt-5-codex-mini") || contains("codex-mini-latest")) { - return "gpt-5.1-codex-mini"; - } - if (contains("gpt-5-codex") || (contains("codex") && !contains("mini"))) { - return "gpt-5-codex"; - } - if (contains("gpt-5")) { - return "gpt-5"; - } - - return fallback; -} - -/** - * Extract configuration for a specific model - * Merges global options with model-specific options (model-specific takes precedence) - * @param modelName - Model name (e.g., "gpt-5-codex") - * @param userConfig - Full user configuration object - * @returns Merged configuration for this model - */ -export function getModelConfig( - modelName: string, 
- userConfig: UserConfig = { global: {}, models: {} }, -): ConfigOptions { - const globalOptions = userConfig.global || {}; - const modelOptions = userConfig.models?.[modelName]?.options || {}; - - // Model-specific options override global options - return { ...globalOptions, ...modelOptions }; -} - -/** - * Configure reasoning parameters based on model variant and user config - * - * NOTE: This plugin follows Codex CLI defaults instead of opencode defaults because: - * - We're accessing the ChatGPT backend API (not OpenAI Platform API) - * - opencode explicitly excludes gpt-5-codex from automatic reasoning configuration - * - Codex CLI has been thoroughly tested against this backend - * - * @param originalModel - Original model name before normalization - * @param userConfig - User configuration object - * @returns Reasoning configuration - */ -export function getReasoningConfig( - originalModel: string | undefined, - userConfig: ConfigOptions = {}, -): ReasoningConfig { - const normalized = normalizeModel(originalModel); - const normalizedOriginal = originalModel?.toLowerCase() ?? 
normalized; - const isGpt51 = normalized.startsWith("gpt-5.1"); - const isCodexMiniSlug = normalized === "gpt-5.1-codex-mini" || normalized === "codex-mini-latest"; - const isLegacyCodexMini = normalizedOriginal.includes("codex-mini-latest"); - const isCodexMini = - isCodexMiniSlug || - isLegacyCodexMini || - normalizedOriginal.includes("codex-mini") || - normalizedOriginal.includes("codex mini") || - normalizedOriginal.includes("codex_mini"); - const isCodexMax = normalized === "gpt-5.1-codex-max"; - const isCodexFamily = - normalized.startsWith("gpt-5-codex") || - normalized.startsWith("gpt-5.1-codex") || - (normalizedOriginal.includes("codex") && !isCodexMini); - const isLightweight = - !isCodexMini && - !isCodexFamily && - (normalizedOriginal.includes("nano") || normalizedOriginal.includes("mini")); - - let defaultEffort: ReasoningConfig["effort"]; - if (isGpt51 && !isCodexFamily && !isCodexMini) { - defaultEffort = "none"; - } else if (isCodexMini) { - defaultEffort = "medium"; - } else if (isLightweight) { - defaultEffort = "minimal"; - } else { - defaultEffort = "medium"; - } - - let effort = userConfig.reasoningEffort || defaultEffort; - const requestedXHigh = effort === "xhigh"; - - if (requestedXHigh && !isCodexMax) { - effort = "high"; - } - - if (isCodexMini) { - if (effort === "minimal" || effort === "low" || effort === "none") { - effort = "medium"; - } - if (effort !== "high") { - effort = "medium"; - } - } else if (isCodexMax) { - if (effort === "minimal" || effort === "none") { - effort = "low"; - } - } else if (isCodexFamily) { - if (effort === "minimal" || effort === "none") { - effort = "low"; - } - } else if (isGpt51 && effort === "minimal") { - effort = "none"; - } else if (!isGpt51 && effort === "none") { - effort = "minimal"; - } - - return { - effort, - summary: userConfig.reasoningSummary || "auto", - }; -} - -/** - * Filter input array for stateless Codex API (store: false) - * - * Two transformations needed: - * 1. 
Remove AI SDK-specific items (not supported by Codex API) - * 2. Strip IDs from all remaining items (stateless mode) - * - * AI SDK constructs to REMOVE (not in OpenAI Responses API spec): - * - type: "item_reference" - AI SDK uses this for server-side state lookup - * - * Items to KEEP (strip IDs): - * - type: "message" - Conversation messages (provides context to LLM) - * - type: "function_call" - Tool calls from conversation - * - type: "function_call_output" - Tool results from conversation - * - * Context is maintained through: - * - Full message history (without IDs) - * - reasoning.encrypted_content (for reasoning continuity) - * - * @param input - Original input array from OpenCode/AI SDK - * @returns Filtered input array compatible with Codex API - */ -export function filterInput( - input: InputItem[] | undefined, - options: { preserveIds?: boolean } = {}, -): InputItem[] | undefined { - if (!Array.isArray(input)) return input; - - const { preserveIds = false } = options; - - return input - .filter((item) => { - // Remove AI SDK constructs not supported by Codex API - if (item.type === "item_reference") { - return false; // AI SDK only - references server state - } - return true; // Keep all other items - }) - .map((item) => { - let sanitized = item as InputItem; - - // Strip IDs from all items (Codex API stateless mode) - if (item.id && !preserveIds) { - const { id: _id, ...itemWithoutId } = item as Record & InputItem; - sanitized = itemWithoutId as InputItem; - } - - // Remove metadata to keep prefixes stable across environments - if (!preserveIds && "metadata" in (sanitized as Record)) { - const { metadata: _metadata, ...rest } = sanitized as Record; - sanitized = rest as InputItem; - } - - return sanitized; - }); -} - -/** - * Check if an input item is the OpenCode system prompt - * Uses cached OpenCode codex.txt for verification with fallback to text matching - * @param item - Input item to check - * @param cachedPrompt - Cached OpenCode codex.txt 
content - * @returns True if this is the OpenCode system prompt - */ -export function isOpenCodeSystemPrompt(item: InputItem, cachedPrompt: string | null): boolean { - const isSystemRole = item.role === "developer" || item.role === "system"; - if (!isSystemRole) return false; - - // extractTextFromItem now imported from ../utils/input-item-utils.ts - - const contentText = extractTextFromItem(item); - if (!contentText) return false; - - // Primary check: Compare against cached OpenCode prompt - if (cachedPrompt) { - // Exact match (trim whitespace for comparison) - if (contentText.trim() === cachedPrompt.trim()) { - return true; - } - - // Partial match: Check if first 200 chars match (handles minor variations) - const contentPrefix = contentText.trim().substring(0, 200); - const cachedPrefix = cachedPrompt.trim().substring(0, 200); - if (contentPrefix === cachedPrefix) { - return true; - } - } - - // Fallback check: Known OpenCode prompt signature (for safety) - // This catches the prompt even if cache fails - return contentText.startsWith("You are a coding agent running in"); -} - -/** - * Filter out OpenCode system prompts from input - * Used in CODEX_MODE to replace OpenCode prompts with Codex-OpenCode bridge - * Also strips OpenCode's auto-compaction summary instructions that reference - * a non-existent "summary file" path in stateless mode. 
- * @param input - Input array - * @returns Input array without OpenCode system or compaction prompts - */ -export async function filterOpenCodeSystemPrompts( - input: InputItem[] | undefined, -): Promise { - if (!Array.isArray(input)) return input; - - // Fetch cached OpenCode prompt for verification - let cachedPrompt: string | null = null; - try { - cachedPrompt = await getOpenCodeCodexPrompt(); - } catch { - // If fetch fails, fallback to text-based detection only - // This is safe because we still have the "starts with" check - } - - // Heuristic detector for OpenCode auto-compaction prompts that instruct - // saving/reading a conversation summary from a file path. - const compactionInstructionPatterns: RegExp[] = [ - /(summary[ _-]?file)/i, - /(summary[ _-]?path)/i, - /summary\s+(?:has\s+been\s+)?saved\s+(?:to|at)/i, - /summary\s+(?:is\s+)?stored\s+(?:in|at|to)/i, - /summary\s+(?:is\s+)?available\s+(?:at|in)/i, - /write\s+(?:the\s+)?summary\s+(?:to|into)/i, - /save\s+(?:the\s+)?summary\s+(?:to|into)/i, - /open\s+(?:the\s+)?summary/i, - /read\s+(?:the\s+)?summary/i, - /cat\s+(?:the\s+)?summary/i, - /view\s+(?:the\s+)?summary/i, - /~\/\.opencode/i, - /\.opencode\/.*summary/i, - ]; - - // getCompactionText now uses extractTextFromItem from ../utils/input-item-utils.ts - - const matchesCompactionInstruction = (value: string): boolean => - compactionInstructionPatterns.some((pattern) => pattern.test(value)); - - const sanitizeOpenCodeCompactionPrompt = (item: InputItem): InputItem | null => { - const text = extractTextFromItem(item); - if (!text) return null; - const sanitizedText = text - .split(/\r?\n/) - .map((line) => line.trimEnd()) - .filter((line) => { - const trimmed = line.trim(); - if (!trimmed) { - return true; - } - return !matchesCompactionInstruction(trimmed); - }) - .join("\n") - .replace(/\n{3,}/g, "\n\n") - .trim(); - if (!sanitizedText) { - return null; - } - const originalMentionedCompaction = /\bauto[-\s]?compaction\b/i.test(text); - let 
finalText = sanitizedText; - if (originalMentionedCompaction && !/\bauto[-\s]?compaction\b/i.test(finalText)) { - finalText = `Auto-compaction summary\n\n${finalText}`; - } - return { - ...item, - content: finalText, - }; - }; - - const isOpenCodeCompactionPrompt = (item: InputItem): boolean => { - const isSystemRole = item.role === "developer" || item.role === "system"; - if (!isSystemRole) return false; - const text = extractTextFromItem(item); - if (!text) return false; - const hasCompaction = /\b(auto[-\s]?compaction|compaction|compact)\b/i.test(text); - const hasSummary = /\b(summary|summarize|summarise)\b/i.test(text); - return hasCompaction && hasSummary && matchesCompactionInstruction(text); - }; - - const filteredInput: InputItem[] = []; - for (const item of input) { - // Keep user messages - if (item.role === "user") { - filteredInput.push(item); - continue; - } - - // Filter out OpenCode system prompts entirely - if (isOpenCodeSystemPrompt(item, cachedPrompt)) { - continue; - } - - if (isOpenCodeCompactionPrompt(item)) { - const sanitized = sanitizeOpenCodeCompactionPrompt(item); - if (sanitized) { - filteredInput.push(sanitized); - } - continue; - } - - filteredInput.push(item); - } - - return filteredInput; -} - -/** - * Analyze if bridge prompt is needed based on tools and conversation context - * @param input - Input array - * @param hasTools - Whether tools are present in request - * @returns Object with analysis results - */ -function analyzeBridgeRequirement( - input: InputItem[] | undefined, - hasTools: boolean, -): { needsBridge: boolean; reason: string; toolCount: number } { - if (!hasTools || !Array.isArray(input)) { - return { needsBridge: false, reason: "no_tools_or_input", toolCount: 0 }; - } - - // For now, be more permissive - if tools are present, assume bridge is needed - // This maintains backward compatibility with existing tests - // Future optimization can make this more sophisticated - const toolCount = 1; // Simple heuristic - - 
return { - needsBridge: true, - reason: "tools_present", - toolCount, - }; -} - -/** - * Add Codex-OpenCode bridge message to input if tools are present - * Uses session-scoped tracking to ensure bridge is only injected once per session - * @param input - Input array - * @param hasTools - Whether tools are present in request - * @param sessionContext - Optional session context for tracking bridge injection - * @returns Input array with bridge message prepended if needed - */ -function buildBridgeMessage(): InputItem { - return { - type: "message", - role: "developer", - content: [{ type: "input_text", text: CODEX_OPENCODE_BRIDGE }], - }; -} - -export function addCodexBridgeMessage( - input: InputItem[] | undefined, - hasTools: boolean, - sessionContext?: SessionContext, -): InputItem[] | undefined { - if (!Array.isArray(input)) return input; - - const bridgeMessage = buildBridgeMessage(); - const sessionBridgeInjected = sessionContext?.state.bridgeInjected ?? false; - - // Generate input hash for caching - const inputHash = generateInputHash(input); - - // Analyze bridge requirement - const analysis = analyzeBridgeRequirement(input, hasTools); - - // Keep bridge in every turn once injected to avoid cache prefix drift - if (sessionBridgeInjected) { - logDebug("Bridge prompt previously injected in session; reapplying for continuity"); - return [bridgeMessage, ...input]; - } - - // Check if bridge prompt is already in conversation (fallback) - if (hasBridgePromptInConversation(input, CODEX_OPENCODE_BRIDGE)) { - logDebug("Bridge prompt already present in conversation, skipping injection"); - cacheBridgeDecision(inputHash, analysis.toolCount, false); - return input; - } - - // Check cache first - const cachedDecision = getCachedBridgeDecision(inputHash, analysis.toolCount); - if (cachedDecision) { - const shouldAdd = cachedDecision.hash === generateContentHash("add"); - logDebug(`Using cached bridge decision: ${shouldAdd ? 
"add" : "skip"}`); - if (shouldAdd) { - if (sessionContext) { - sessionContext.state.bridgeInjected = true; - } - - return [bridgeMessage, ...input]; - } - return input; - } - - // Apply conditional logic - if (!analysis.needsBridge) { - logDebug(`Skipping bridge prompt: ${analysis.reason} (tools: ${analysis.toolCount})`); - cacheBridgeDecision(inputHash, analysis.toolCount, false); - return input; - } - - logDebug(`Adding bridge prompt: ${analysis.reason} (tools: ${analysis.toolCount})`); - cacheBridgeDecision(inputHash, analysis.toolCount, true); - - // Mark bridge as injected in session state - if (sessionContext) { - sessionContext.state.bridgeInjected = true; - } - - return [bridgeMessage, ...input]; -} - -/** - * Add tool remapping message to input if tools are present - * @param input - Input array - * @param hasTools - Whether tools are present in request - * @returns Input array with tool remap message prepended if needed - */ -export function addToolRemapMessage( - input: InputItem[] | undefined, - hasTools: boolean, -): InputItem[] | undefined { - if (!hasTools || !Array.isArray(input)) return input; - - const toolRemapMessage: InputItem = { - type: "message", - role: "developer", - content: [ - { - type: "input_text", - text: TOOL_REMAP_MESSAGE, - }, - ], - }; - - return [toolRemapMessage, ...input]; -} - -function maybeBuildCompactionPrompt( - originalInput: InputItem[], - commandText: string | null, - settings: { enabled: boolean; autoLimitTokens?: number; autoMinMessages?: number }, -): { items: InputItem[]; decision: CompactionDecision } | null { - if (!settings.enabled) { - return null; - } - const conversationSource = commandText - ? 
removeLastUserMessage(originalInput) - : cloneInputItems(originalInput); - const turnCount = countConversationTurns(conversationSource); - let trigger: "command" | "auto" | null = null; - let reason: string | undefined; - let approxTokens: number | undefined; - - if (commandText) { - trigger = "command"; - } else if (settings.autoLimitTokens && settings.autoLimitTokens > 0) { - approxTokens = approximateTokenCount(conversationSource); - const minMessages = settings.autoMinMessages ?? 8; - if (approxTokens >= settings.autoLimitTokens && turnCount >= minMessages) { - trigger = "auto"; - reason = `~${approxTokens} tokens >= limit ${settings.autoLimitTokens}`; - } - } - - if (!trigger) { - return null; - } - - const serialization = serializeConversation(conversationSource); - const promptItems = buildCompactionPromptItems(serialization.transcript); - - return { - items: promptItems, - decision: { - mode: trigger, - reason, - approxTokens, - preservedSystem: collectSystemMessages(originalInput), - serialization, - }, - }; -} - -// cloneConversationItems now imported from ../utils/clone.ts as cloneInputItems - -function removeLastUserMessage(items: InputItem[]): InputItem[] { - const cloned = cloneInputItems(items); - for (let index = cloned.length - 1; index >= 0; index -= 1) { - if (cloned[index]?.role === "user") { - cloned.splice(index, 1); - break; - } - } - return cloned; -} - -const PROMPT_CACHE_METADATA_KEYS = [ - "conversation_id", - "conversationId", - "thread_id", - "threadId", - "session_id", - "sessionId", - "chat_id", - "chatId", -]; - -const PROMPT_CACHE_FORK_KEYS = [ - "forkId", - "fork_id", - "branchId", - "branch_id", - "parentConversationId", - "parent_conversation_id", -]; - -type PromptCacheKeySource = "existing" | "metadata" | "generated"; - -interface PromptCacheKeyResult { - key: string; - source: PromptCacheKeySource; - sourceKey?: string; - forkSourceKey?: string; - hintKeys?: string[]; - unusableKeys?: string[]; - forkHintKeys?: string[]; - 
forkUnusableKeys?: string[]; - fallbackHash?: string; -} - -function extractString(value: unknown): string | undefined { - if (typeof value !== "string") { - return undefined; - } - const trimmed = value.trim(); - return trimmed.length > 0 ? trimmed : undefined; -} - -function normalizeCacheKeyBase(base: string): string { - const trimmed = base.trim(); - if (!trimmed) { - return `cache_${randomUUID()}`; - } - const sanitized = trimmed.replace(/\s+/g, "-"); - return sanitized.startsWith("cache_") ? sanitized : `cache_${sanitized}`; -} - -function normalizeForkSuffix(forkId: string): string { - const trimmed = forkId.trim(); - if (!trimmed) return "fork"; - return trimmed.replace(/\s+/g, "-"); -} - -function derivePromptCacheKeyFromBody(body: RequestBody): { - base?: string; - sourceKey?: string; - hintKeys: string[]; - unusableKeys: string[]; - forkId?: string; - forkSourceKey?: string; - forkHintKeys: string[]; - forkUnusableKeys: string[]; -} { - const metadata = body.metadata as Record | undefined; - const root = body as Record; - - const hintKeys: string[] = []; - const unusableKeys: string[] = []; - let base: string | undefined; - let sourceKey: string | undefined; - - for (const key of PROMPT_CACHE_METADATA_KEYS) { - const raw = metadata?.[key] ?? root[key]; - if (raw !== undefined) { - hintKeys.push(key); - } - const value = extractString(raw); - if (value) { - base = value; - sourceKey = key; - break; - } - if (raw !== undefined) { - unusableKeys.push(key); - } - } - - const forkHintKeys: string[] = []; - const forkUnusableKeys: string[] = []; - let forkId: string | undefined; - let forkSourceKey: string | undefined; - - for (const key of PROMPT_CACHE_FORK_KEYS) { - const raw = metadata?.[key] ?? 
root[key]; - if (raw !== undefined) { - forkHintKeys.push(key); - } - const value = extractString(raw); - if (value) { - forkId = value; - forkSourceKey = key; - break; - } - if (raw !== undefined) { - forkUnusableKeys.push(key); - } - } - - return { - base, - sourceKey, - hintKeys, - unusableKeys, - forkId, - forkSourceKey, - forkHintKeys, - forkUnusableKeys, - }; -} - -function computeFallbackHashForBody(body: RequestBody): string { - try { - const inputSlice = Array.isArray(body.input) ? body.input.slice(0, 3) : undefined; - const seed = stableStringify({ - model: typeof body.model === "string" ? body.model : undefined, - metadata: body.metadata, - input: inputSlice, - }); - return createHash("sha1").update(seed).digest("hex").slice(0, 12); - } catch { - const model = typeof body.model === "string" ? body.model : "unknown"; - return createHash("sha1").update(model).digest("hex").slice(0, 12); - } -} - -function ensurePromptCacheKey(body: RequestBody): PromptCacheKeyResult { - const hostBody = body as Record; - const existingSnake = extractString(hostBody.prompt_cache_key); - const existingCamel = extractString(hostBody.promptCacheKey); - const existing = existingSnake || existingCamel; - - if (existing) { - // Codex backend expects snake_case, so always set prompt_cache_key - // Preserve the camelCase field for OpenCode if it was provided - body.prompt_cache_key = existing; - if (existingCamel) { - hostBody.promptCacheKey = existingCamel; // preserve OpenCode's field - } - return { key: existing, source: "existing" }; - } - - const derived = derivePromptCacheKeyFromBody(body); - if (derived.base) { - const baseKey = normalizeCacheKeyBase(derived.base); - const suffix = derived.forkId ? 
`-fork-${normalizeForkSuffix(derived.forkId)}` : ""; - const finalKey = `${baseKey}${suffix}`; - body.prompt_cache_key = finalKey; - // Don't set camelCase field for derived keys - only snake_case for Codex - return { - key: finalKey, - source: "metadata", - sourceKey: derived.sourceKey, - forkSourceKey: derived.forkSourceKey, - hintKeys: derived.hintKeys, - forkHintKeys: derived.forkHintKeys, - }; - } - - const fallbackHash = computeFallbackHashForBody(body); - const generated = `cache_${fallbackHash}`; - body.prompt_cache_key = generated; - // Don't set camelCase field for generated keys - only snake_case for Codex - return { - key: generated, - source: "generated", - hintKeys: derived.hintKeys, - unusableKeys: derived.unusableKeys, - forkHintKeys: derived.forkHintKeys, - forkUnusableKeys: derived.forkUnusableKeys, - fallbackHash, - }; -} - -function applyCompactionIfNeeded( - body: RequestBody, - options: TransformRequestOptions, -): CompactionDecision | undefined { - const compactionOptions = options.compaction; - if (!compactionOptions?.settings.enabled) { - return undefined; - } - - const compactionBuild = maybeBuildCompactionPrompt( - compactionOptions.originalInput, - compactionOptions.commandText, - compactionOptions.settings, - ); - - if (!compactionBuild) { - return undefined; - } - - body.input = compactionBuild.items; - delete (body as any).tools; - delete (body as any).tool_choice; - delete (body as any).parallel_tool_calls; - - return compactionBuild.decision; -} - -function logCacheKeyDecision(cacheKeyResult: PromptCacheKeyResult, isNewSession: boolean): void { - if (cacheKeyResult.source === "existing") { - return; - } - - if (cacheKeyResult.source === "metadata") { - logDebug("Prompt cache key missing; derived from metadata", { - promptCacheKey: cacheKeyResult.key, - sourceKey: cacheKeyResult.sourceKey, - forkSourceKey: cacheKeyResult.forkSourceKey, - forkHintKeys: cacheKeyResult.forkHintKeys, - }); - return; - } +import { logDebug, logWarn } from 
"../logger.js"; +import type { RequestBody, SessionContext, UserConfig } from "../types.js"; +import { + addCodexBridgeMessage, + addToolRemapMessage, + filterInput, + filterOpenCodeSystemPrompts, +} from "./input-filters.js"; +import { applyCompactionIfNeeded, type CompactionOptions } from "./compaction-helpers.js"; +import { getModelConfig, getReasoningConfig, normalizeModel } from "./model-config.js"; +import { ensurePromptCacheKey, logCacheKeyDecision } from "./prompt-cache.js"; +import { normalizeToolsForCodexBody } from "./tooling.js"; + +export { + addCodexBridgeMessage, + addToolRemapMessage, + filterInput, + filterOpenCodeSystemPrompts, + isOpenCodeSystemPrompt, +} from "./input-filters.js"; +export { getModelConfig, getReasoningConfig, normalizeModel } from "./model-config.js"; - const hasHints = Boolean( - (cacheKeyResult.hintKeys && cacheKeyResult.hintKeys.length > 0) || - (cacheKeyResult.forkHintKeys && cacheKeyResult.forkHintKeys.length > 0), - ); - const message = hasHints - ? "Prompt cache key hints detected but unusable; generated fallback cache key" - : "Prompt cache key missing; generated fallback cache key"; - const logPayload = { - promptCacheKey: cacheKeyResult.key, - fallbackHash: cacheKeyResult.fallbackHash, - hintKeys: cacheKeyResult.hintKeys, - unusableKeys: cacheKeyResult.unusableKeys, - forkHintKeys: cacheKeyResult.forkHintKeys, - forkUnusableKeys: cacheKeyResult.forkUnusableKeys, - }; - if (!hasHints && isNewSession) { - logInfo(message, logPayload); - } else { - logWarn(message, logPayload); - } +export interface TransformRequestOptions { + /** Preserve IDs only when conversation transforms run; may be a no-op when compaction skips them. */ + preserveIds?: boolean; + /** Compaction settings and original input context used when building compaction prompts. 
*/ + compaction?: CompactionOptions; } -function normalizeToolsForCodexBody(body: RequestBody, skipConversationTransforms: boolean): boolean { - if (skipConversationTransforms) { - delete (body as any).tools; - delete (body as any).tool_choice; - delete (body as any).parallel_tool_calls; - return false; - } - - if (!body.tools) { - return false; - } - - const normalizedTools = normalizeToolsForResponses(body.tools); - if (normalizedTools && normalizedTools.length > 0) { - (body as any).tools = normalizedTools; - (body as any).tool_choice = "auto"; - const modelName = (body.model || "").toLowerCase(); - const codexParallelDisabled = modelName.includes("gpt-5-codex") || modelName.includes("gpt-5.1-codex"); - (body as any).parallel_tool_calls = !codexParallelDisabled; - return true; - } - - delete (body as any).tools; - delete (body as any).tool_choice; - delete (body as any).parallel_tool_calls; - return false; +export interface TransformResult { + /** Mutated request body (same instance passed into transformRequestBody). 
*/ + body: RequestBody; + compactionDecision?: CompactionDecision; } async function transformInputForCodex( @@ -1048,10 +52,10 @@ async function transformInputForCodex( logDebug(`Filtering ${originalIds.length} message IDs from input:`, originalIds); } - body.input = filterInput(body.input, { preserveIds }); + let workingInput = filterInput(body.input, { preserveIds, preserveMetadata: true }); if (!preserveIds) { - const remainingIds = (body.input || []).filter((item) => item.id).map((item) => item.id); + const remainingIds = (workingInput || []).filter((item) => item.id).map((item) => item.id); if (remainingIds.length > 0) { logWarn(`WARNING: ${remainingIds.length} IDs still present after filtering:`, remainingIds); } else if (originalIds.length > 0) { @@ -1062,33 +66,20 @@ async function transformInputForCodex( } if (codexMode) { - body.input = await filterOpenCodeSystemPrompts(body.input); - body.input = addCodexBridgeMessage(body.input, hasNormalizedTools, sessionContext); + workingInput = await filterOpenCodeSystemPrompts(workingInput); + if (!preserveIds) { + workingInput = filterInput(workingInput, { preserveIds }); + } + workingInput = addCodexBridgeMessage(workingInput, hasNormalizedTools, sessionContext); + body.input = workingInput; return; } - body.input = addToolRemapMessage(body.input, hasNormalizedTools); -} - -/** - * Transform request body for Codex API - */ -export interface TransformRequestOptions { - preserveIds?: boolean; - compaction?: { - settings: { - enabled: boolean; - autoLimitTokens?: number; - autoMinMessages?: number; - }; - commandText: string | null; - originalInput: InputItem[]; - }; -} + if (!preserveIds) { + workingInput = filterInput(workingInput, { preserveIds }); + } -export interface TransformResult { - body: RequestBody; - compactionDecision?: CompactionDecision; + body.input = addToolRemapMessage(workingInput, hasNormalizedTools); } export async function transformRequestBody( @@ -1103,43 +94,31 @@ export async function 
transformRequestBody( const normalizedModel = normalizeModel(body.model); const preserveIds = options.preserveIds ?? false; - const compactionDecision = applyCompactionIfNeeded(body, options); + const compactionDecision = applyCompactionIfNeeded( + body, + options.compaction && { ...options.compaction, preserveIds }, + ); const skipConversationTransforms = Boolean(compactionDecision); - // Get model-specific configuration using ORIGINAL model name (config key) - // This allows per-model options like "gpt-5-codex-low" to work correctly const lookupModel = originalModel || normalizedModel; const modelConfig = getModelConfig(lookupModel, userConfig); - // Debug: Log which config was resolved logDebug(`Model config lookup: "${lookupModel}" → normalized to "${normalizedModel}" for API`, { hasModelSpecificConfig: !!userConfig.models?.[lookupModel], resolvedConfig: modelConfig, }); - // Normalize model name for API call body.model = normalizedModel; - - // Codex required fields - // ChatGPT backend REQUIRES store=false (confirmed via testing) body.store = false; body.stream = true; body.instructions = codexInstructions; - // Prompt caching relies on the host or SessionManager providing a stable - // prompt_cache_key. We accept both camelCase (promptCacheKey) and - // snake_case (prompt_cache_key) inputs from the host/runtime. - - // Ensure prompt_cache_key is set using our robust logic const cacheKeyResult = ensurePromptCacheKey(body); - // Default to treating missing session context as a new session to avoid noisy startup warnings const isNewSession = sessionContext?.isNew ?? 
true; logCacheKeyDecision(cacheKeyResult, isNewSession); - // Tool behavior parity with Codex CLI (normalize shapes) const hasNormalizedTools = normalizeToolsForCodexBody(body, skipConversationTransforms); - // Filter and transform input await transformInputForCodex( body, codexMode, @@ -1149,26 +128,19 @@ export async function transformRequestBody( skipConversationTransforms, ); - // Configure reasoning (use model-specific config) const reasoningConfig = getReasoningConfig(originalModel, modelConfig); body.reasoning = { ...body.reasoning, ...reasoningConfig, }; - // Configure text verbosity (support user config) - // Default: "medium" (matches Codex CLI default for all GPT-5 models) body.text = { ...body.text, verbosity: modelConfig.textVerbosity || "medium", }; - // Add include for encrypted reasoning content - // Default: ["reasoning.encrypted_content"] (required for stateless operation with store=false) - // This allows reasoning context to persist across turns without server-side storage body.include = modelConfig.include || ["reasoning.encrypted_content"]; - // Remove unsupported parameters body.max_output_tokens = undefined; body.max_completion_tokens = undefined; diff --git a/lib/request/tool-normalizer.ts b/lib/request/tool-normalizer.ts new file mode 100644 index 0000000..763bab4 --- /dev/null +++ b/lib/request/tool-normalizer.ts @@ -0,0 +1,157 @@ +import type { InputItem } from "../types.js"; + +const defaultFunctionParameters = { + type: "object", + properties: {}, + additionalProperties: true, +}; + +const defaultFreeformFormat = { + type: "json_schema/v1", + syntax: "json", + definition: "{}", +}; + +function isNativeCodexTool(value: unknown): value is "shell" | "apply_patch" { + return typeof value === "string" && (value === "shell" || value === "apply_patch"); +} + +function makeFunctionTool( + name: unknown, + description?: unknown, + parameters?: unknown, + strict?: unknown, +): Record | undefined { + if (typeof name !== "string" || !name.trim()) 
return undefined; + const tool: Record = { + type: "function", + name, + strict: typeof strict === "boolean" ? strict : false, + parameters: parameters && typeof parameters === "object" ? parameters : defaultFunctionParameters, + }; + if (typeof description === "string" && description.trim()) { + tool.description = description; + } + return tool; +} + +function makeFreeformTool( + name: unknown, + description?: unknown, + format?: unknown, +): Record | undefined { + if (typeof name !== "string" || !name.trim()) return undefined; + const tool: Record = { + type: "custom", + name, + format: format && typeof format === "object" ? format : defaultFreeformFormat, + }; + if (typeof description === "string" && description.trim()) { + tool.description = description; + } + return tool; +} + +function convertStringTool(value: string): any | undefined { + const trimmed = value.trim(); + if (!trimmed) return undefined; + if (isNativeCodexTool(trimmed)) return { type: trimmed }; + return makeFunctionTool(trimmed); +} + +function getNestedFunction(obj: Record): Record | undefined { + const fn = obj.function; + return fn && typeof fn === "object" ? (fn as Record) : undefined; +} + +function convertTypedTool( + type: string, + obj: Record, + nestedFn: Record | undefined, +): any | undefined { + if (isNativeCodexTool(type)) return { type }; + if (type === "function") { + return makeFunctionTool( + nestedFn?.name ?? obj.name, + nestedFn?.description ?? obj.description, + nestedFn?.parameters ?? obj.parameters, + nestedFn?.strict ?? obj.strict, + ); + } + if (type === "custom") { + return makeFreeformTool( + nestedFn?.name ?? obj.name, + nestedFn?.description ?? obj.description, + nestedFn?.format ?? 
obj.format, + ); + } + if (type === "local_shell" || type === "web_search") { + return { type }; + } + return undefined; +} + +function convertNamedTool(name: string, obj: Record): any | undefined { + if (isNativeCodexTool(name)) return { type: name }; + return makeFunctionTool(name, obj.description, obj.parameters, obj.strict); +} + +function convertObjectTool(obj: Record): any | undefined { + const nestedFn = getNestedFunction(obj); + const type = typeof obj.type === "string" ? obj.type : undefined; + + if (type) { + const typed = convertTypedTool(type, obj, nestedFn); + if (typed) return typed; + } + + if (typeof obj.name === "string") { + return convertNamedTool(obj.name, obj); + } + + if (nestedFn?.name) { + return makeFunctionTool(nestedFn.name, nestedFn.description, nestedFn.parameters, nestedFn.strict); + } + + return undefined; +} + +function normalizeToolMap(map: Record): any[] | undefined { + return Object.entries(map) + .map(([name, value]) => { + if (value && typeof value === "object") { + const record = value as Record; + const enabled = record.enabled ?? record.use ?? record.allow ?? 
true; + if (!enabled) return undefined; + if (record.type === "custom") { + return makeFreeformTool(name, record.description, record.format); + } + return makeFunctionTool(name, record.description, record.parameters, record.strict); + } + if (value === true) { + return makeFunctionTool(name); + } + return undefined; + }) + .filter(Boolean) as any[]; +} + +export function normalizeToolsForResponses(tools: unknown): any[] | undefined { + if (!tools) return undefined; + if (Array.isArray(tools)) { + return tools.map(convertTool).filter(Boolean) as any[]; + } + if (typeof tools === "object") { + return normalizeToolMap(tools as Record); + } + return undefined; +} + +function convertTool(candidate: unknown): any | undefined { + if (!candidate) return undefined; + if (typeof candidate === "string") return convertStringTool(candidate); + if (typeof candidate !== "object") return undefined; + return convertObjectTool(candidate as Record); +} + +export type { InputItem }; diff --git a/lib/request/tooling.ts b/lib/request/tooling.ts new file mode 100644 index 0000000..6a7013e --- /dev/null +++ b/lib/request/tooling.ts @@ -0,0 +1,31 @@ +/* eslint-disable no-param-reassign */ +import type { RequestBody } from "../types.js"; +import { normalizeToolsForResponses } from "./tool-normalizer.js"; + +export function normalizeToolsForCodexBody(body: RequestBody, skipConversationTransforms: boolean): boolean { + if (skipConversationTransforms) { + delete (body as any).tools; + delete (body as any).tool_choice; + delete (body as any).parallel_tool_calls; + return false; + } + + if (!body.tools) { + return false; + } + + const normalizedTools = normalizeToolsForResponses(body.tools); + if (normalizedTools && normalizedTools.length > 0) { + (body as any).tools = normalizedTools; + (body as any).tool_choice = "auto"; + const modelName = (body.model || "").toLowerCase(); + const codexParallelDisabled = modelName.includes("gpt-5-codex") || modelName.includes("gpt-5.1-codex"); + (body as 
any).parallel_tool_calls = !codexParallelDisabled; + return true; + } + + delete (body as any).tools; + delete (body as any).tool_choice; + delete (body as any).parallel_tool_calls; + return false; +} diff --git a/lib/session/session-manager.ts b/lib/session/session-manager.ts index 3d2eaf1..69ace53 100644 --- a/lib/session/session-manager.ts +++ b/lib/session/session-manager.ts @@ -1,6 +1,7 @@ import { createHash, randomUUID } from "node:crypto"; import { SESSION_CONFIG } from "../constants.js"; import { logDebug, logWarn } from "../logger.js"; +import { PROMPT_CACHE_FORK_KEYS } from "../request/prompt-cache.js"; import type { CodexResponsePayload, InputItem, RequestBody, SessionContext, SessionState } from "../types.js"; import { cloneInputItems, deepClone } from "../utils/clone.js"; import { isAssistantMessage, isUserMessage } from "../utils/input-item-utils.js"; @@ -120,7 +121,6 @@ function extractConversationId(body: RequestBody): string | undefined { function extractForkIdentifier(body: RequestBody): string | undefined { const metadata = body.metadata as Record | undefined; const bodyAny = body as Record; - const forkKeys = ["forkId", "fork_id", "branchId", "branch_id"]; const normalize = (value: unknown): string | undefined => { if (typeof value !== "string") { return undefined; @@ -129,7 +129,7 @@ function extractForkIdentifier(body: RequestBody): string | undefined { return trimmed.length > 0 ? 
trimmed : undefined; }; - for (const key of forkKeys) { + for (const key of PROMPT_CACHE_FORK_KEYS) { const fromMetadata = normalize(metadata?.[key]); if (fromMetadata) { return fromMetadata; diff --git a/package-lock.json b/package-lock.json index 4989c8e..2c5dc17 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@openhax/codex", - "version": "0.2.0", + "version": "0.3.5", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@openhax/codex", - "version": "0.2.0", + "version": "0.3.5", "license": "GPL-3.0-only", "dependencies": { "@openauthjs/openauth": "^0.4.3", diff --git a/package.json b/package.json index b3ef02f..18f8338 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@openhax/codex", - "version": "0.2.0", + "version": "0.4.0", "description": "OpenHax Codex OAuth plugin for Opencode — bring your ChatGPT Plus/Pro subscription instead of API credits", "main": "./dist/index.js", "types": "./dist/index.d.ts", diff --git a/scripts/review-response-context.mjs b/scripts/review-response-context.mjs index c1e7212..ddad10e 100644 --- a/scripts/review-response-context.mjs +++ b/scripts/review-response-context.mjs @@ -36,10 +36,10 @@ function main() { const filePath = comment.path; const reviewer = comment.user?.login ?? "unknown"; - const branchSlug = `review/comment-${comment.id}`; + const baseRef = pr.base?.ref ?? "main"; + const branchSlug = `review/${baseRef}-${comment.id}`; const prNumber = pr.number; const prTitle = pr.title ?? ""; - const baseRef = pr.base?.ref ?? "main"; const baseSha = pr.base?.sha ?? ""; const headRef = pr.head?.ref ?? ""; const headSha = pr.head?.sha ?? 
""; diff --git a/spec/compaction-heuristics-22.md b/spec/compaction-heuristics-22.md new file mode 100644 index 0000000..638a46e --- /dev/null +++ b/spec/compaction-heuristics-22.md @@ -0,0 +1,51 @@ +# Issue 22 – Compaction heuristics metadata flag + +**Issue**: https://github.com/open-hax/codex/issues/22 (follow-up to PR #20 review comment r2532755818) + +## Context & Current Behavior + +- Compaction prompt sanitization lives in `lib/request/input-filters.ts:72-165` (`filterOpenCodeSystemPrompts`). It relies on regex heuristics over content to strip OpenCode auto-compaction summary-file instructions. +- Core filtering pipeline in `lib/request/request-transformer.ts:38-75` runs `filterInput` **before** `filterOpenCodeSystemPrompts`; `filterInput` currently strips `metadata` when `preserveIds` is false, so any upstream metadata markers are lost before heuristic detection. +- Compaction prompts produced by this plugin are built in `lib/compaction/codex-compaction.ts:88-99` via `buildCompactionPromptItems`, but no metadata flags are attached to identify them as OpenCode compaction artifacts. +- Tests for the filtering behavior live in `test/request-transformer.test.ts:539-618` and currently cover regex-only heuristics (no metadata awareness). + +## Problem + +Heuristic-only detection risks false positives/negatives. Review feedback requested an explicit metadata flag on OpenCode compaction prompts (e.g., `metadata.source === "opencode-compaction"`) and to prefer that flag over regex checks, falling back to heuristics when metadata is absent. + +## Solution Strategy + +### Phase 1: Metadata flag plumbing + +- Tag plugin-generated compaction prompt items (developer + user) with a clear metadata flag, e.g., `metadata: { source: "opencode-compaction" }` or boolean `opencodeCompaction`. Ensure the flag survives filtering. 
+- Adjust the filtering pipeline to preserve metadata long enough for detection (e.g., allow metadata passthrough pre-sanitization or re-order detection vs. stripping) while still removing other metadata before sending to Codex backend unless IDs are preserved. + +### Phase 2: Metadata-aware filtering + +- Update `filterOpenCodeSystemPrompts` to first check metadata flags for compaction/system prompts and sanitize/remove based on that before running regex heuristics. Heuristics remain as fallback when metadata is missing. +- Ensure system prompt detection (`isOpenCodeSystemPrompt`) remains unchanged. + +### Phase 3: Tests + +- Expand `test/request-transformer.test.ts` to cover: + - Metadata-tagged compaction prompts being sanitized/removed (preferred path). + - Fallback to heuristics when metadata flag is absent. + - Metadata preserved just long enough for detection but not leaked when `preserveIds` is false. + +## Definition of Done / Requirements + +- [x] Incoming OpenCode compaction prompts marked with metadata are detected and sanitized/removed without relying on text heuristics. +- [x] Heuristic detection remains functional when metadata is absent. +- [x] Metadata needed for detection is not stripped before filtering; final output still omits metadata unless explicitly preserved. +- [x] Tests updated/added to cover metadata flag path and fallback behavior. + +## Files to Modify + +- `lib/compaction/codex-compaction.ts` – attach metadata flag to compaction prompt items built by the plugin. +- `lib/request/input-filters.ts` – prefer metadata-aware detection and keep heuristics as fallback. +- `lib/request/request-transformer.ts` – ensure metadata survives into filter stage (ordering/options tweak) but is removed thereafter when appropriate. +- `test/request-transformer.test.ts` – add coverage for metadata-flagged compaction prompts and fallback behavior. 
+ +## Change Log + +- 2025-11-20: Implemented metadata flag detection/preservation pipeline, tagged compaction prompt builders, added metadata-focused tests, and ran `npm test -- request-transformer.test.ts`. diff --git a/spec/complexity-reduction.md b/spec/complexity-reduction.md new file mode 100644 index 0000000..0174bfd --- /dev/null +++ b/spec/complexity-reduction.md @@ -0,0 +1,32 @@ +# Complexity Reduction Plan + +## Scope + +- Reduce ESLint complexity warnings for: + - `lib/prompts/codex.ts`: `getCodexInstructions` (lines ~49-165) + - `lib/prompts/opencode-codex.ts`: `getOpenCodeCodexPrompt` (lines ~94-230) + - `lib/request/fetch-helpers.ts`: `transformRequestForCodex` (lines ~111-210), `handleErrorResponse` (lines ~256-336) + - `lib/request/request-transformer.ts`: tool normalization arrow (`convertTool`, lines ~138-188) and `getReasoningConfig` (lines ~291-359) + +## Existing Issues / PRs + +- None identified (no open issues/PRs reviewed for this specific task). + +## Requirements + +- Preserve current behavior, caching semantics, and logging side effects. +- Keep file paths and cache metadata formats stable. +- Maintain test expectations in existing spec files for prompts, fetch helpers, and request transformer. + +## Definition of Done + +- ESLint no longer reports complexity warnings for the listed functions. +- Relevant unit tests continue to pass (targeted suite for touched modules). +- No regressions in caching or request transformation behavior (based on tests/logical review). + +## Plan (Phases) + +1. **Prompt Fetchers**: Refactor `getCodexInstructions` and `getOpenCodeCodexPrompt` by extracting helper routines for cache reads/writes, freshness checks, and network fetch handling to reduce branching. +2. **Request Transformation**: Break down `transformRequestForCodex` and tool normalization into smaller helpers (e.g., compaction config, logging wrappers, tool converters) to simplify flow. +3. 
**Error/Reasoning Handling**: Simplify `handleErrorResponse` and `getReasoningConfig` with helper functions and clearer rule tables; ensure messaging and rate-limit parsing stay intact. +4. **Validation**: Run targeted lint/tests to confirm complexity warnings resolved and behavior intact. diff --git a/spec/issue-23-session-fork-alignment.md b/spec/issue-23-session-fork-alignment.md new file mode 100644 index 0000000..fc77945 --- /dev/null +++ b/spec/issue-23-session-fork-alignment.md @@ -0,0 +1,30 @@ +# Issue #23 – SessionManager fork alignment + +## Context + +- Issue: https://github.com/open-hax/codex/issues/23 +- Follow-up to PR #20 CodeRabbit review discussion on `extractForkIdentifier`. +- Problem: `lib/session/session-manager.ts` uses fork hints limited to `forkId|fork_id|branchId|branch_id` (~lines 120-143). Prompt cache fork derivation in `lib/request/prompt-cache.ts` also accepts `parentConversationId|parent_conversation_id` (~lines 70-132). Requests that set only parent conversation IDs diverge: prompt cache key suffix includes fork hint, session key does not. + +## Affected areas + +- `lib/session/session-manager.ts` (extract fork keys, session key construction) +- `lib/request/prompt-cache.ts` (source of fork hint keys) +- Tests: `test/session-manager.test.ts` (missing coverage for parent conversation fork hints) + +## Requirements / Definition of Done + +- Session key and prompt cache key derivation use the same set of fork hint keys (including parent conversation IDs) so forks stay consistent regardless of hint field used. +- Normalize/trim behavior remains consistent with existing fork handling; no regressions for current fork/branch keys. +- Add/adjust tests to cover parent conversation fork hint path. +- Build/tests pass. + +## Plan (phases) + +1. Analysis: Confirm current fork key sources in session manager vs prompt cache; note normalization differences and existing tests. +2. 
Design/Implementation: Share or mirror fork key list to include parent conversation IDs in session manager; keep trim behavior; ensure prompt cache alignment comments updated. Update session manager logic accordingly, adjusting helper if needed. +3. Verification: Add/extend session manager tests for parent conversation fork hints and run relevant test subset (session manager + prompt cache if needed). + +## Changelog + +- 2025-11-20: Exported prompt cache fork key list for reuse, aligned SessionManager fork extraction with parent conversation hints, and added session-manager tests covering parent conversation fork identifiers. diff --git a/spec/issue-triage-2025-11-20.md b/spec/issue-triage-2025-11-20.md new file mode 100644 index 0000000..333e4e9 --- /dev/null +++ b/spec/issue-triage-2025-11-20.md @@ -0,0 +1,23 @@ +# Issue Triage — 2025-11-20 + +Scope: Verify status of open issues #6, #23, #22, #21, #39, #24, #40 against current main branch. + +## Findings + +- #23 SessionManager fork sync — Not done. `lib/session/session-manager.ts` extractForkIdentifier only checks `forkId|fork_id|branchId|branch_id` (lines ~120-143); does not consider `parentConversationId|parent_conversation_id` used in prompt cache derivation. +- #22 Compaction metadata flag — Not done. `lib/request/input-filters.ts` uses regex heuristics only to detect OpenCode compaction prompts (lines ~82-139); no metadata flag preferred path. +- #21 Summary-aware tail extraction — Not done. `lib/compaction/codex-compaction.ts` `extractTailAfterSummary` returns slice from last `user` message (lines ~120-129); no summary marker awareness. +- #24 Tests clarify tail semantics — Not done. `test/codex-compaction.test.ts` still names test "extracts tail after the latest user summary message" and asserts last-user behavior (lines ~80-89). +- #39 README installation section missing — Not done. README links to `#installation` (e.g., line ~531) but no `## Installation` heading exists. 
+- #40 Model stats HTML dashboard server — Not started. No references to "dashboard"/"stats html" in repo. +- #6 Richer metrics/inspect commands — Still blocked by upstream; no new implementation detected. + +## Definition of Done (per issue) + +- #23: Session key fork detection matches prompt cache fork hints (`parentConversationId` variants) with tests. +- #22: Input filtering prefers explicit metadata flag for OpenCode compaction prompts, falling back to heuristics. +- #21: Tail extraction skips summary-marked items; tests updated. +- #24: Tests renamed/rewritten to reflect current semantics and cover summary-aware path once added. +- #39: README gains actual Installation section and linked anchor. +- #40: Dashboard server implemented or scoped; code/tests/docs added. +- #6: Upstream dependency resolved; enhanced metrics/inspect commands implemented and tested. diff --git a/spec/log-warning-toasts.md b/spec/log-warning-toasts.md new file mode 100644 index 0000000..788c13e --- /dev/null +++ b/spec/log-warning-toasts.md @@ -0,0 +1,33 @@ +# Log Warnings Toasts + +## Context + +User reports warning text appearing as inline log output in the UI (see screenshot). Expectation: warnings should surface as toasts (if shown at all) rather than cluttering the log stream. + +## Relevant Files & Pointers + +- `index.ts`:66-75 — schedules Codex personal-use warning via `logWarn` after plugin setup. +- `lib/logger.ts`:65-162 — logger configuration and log emission pipeline; warns currently forwarded to app log/console, toasts only for `error` level. +- `test/logger.test.ts`:121-133 — asserts warnings emit to console; will need update for new behavior. + +## Existing Issues / PRs + +- None identified yet in repo for warning display/toast handling. + +## Plan (Phases) + +- **Phase 1: Analysis** — Confirm logger pathways for warnings (app.log, console, toast availability) and identify minimal change to favor toasts over inline log output when TUI exists. 
+- **Phase 2: Implementation** — Update logger to route warnings to toasts (and avoid noisy UI logging when TUI is available) while retaining diagnostics elsewhere; adjust initial warning emission if needed. +- **Phase 3: Validation** — Update/add tests to cover new warn behavior; run targeted test suite for logger to ensure pass. + +## Definition of Done + +- Warning messages no longer show as inline log spam in the UI; they display as toasts when a TUI is available, or remain non-intrusive otherwise. +- Logging/diagnostics are preserved (file logging/app logging) without surfacing to end users unless toasting. +- Relevant tests updated/added and passing. + +## Requirements + +- Use toast notifications for warnings when surfaced to users; avoid duplicative inline log output that caused the reported issue. +- Keep logging functionality intact for debugging (disk or app logging), but ensure user-facing presentation is toast-first. +- Maintain compatibility with environments lacking `tui.showToast` (fallback to existing behavior without user-facing spam). diff --git a/spec/merge-conflict-resolution.md b/spec/merge-conflict-resolution.md new file mode 100644 index 0000000..55927e2 --- /dev/null +++ b/spec/merge-conflict-resolution.md @@ -0,0 +1,59 @@ +# Merge Conflict Resolution Plan (ops/release-workflow) + +## Context +- Branch: `ops/release-workflow` with merge state and unmerged paths.
+- Conflicted files (from `git diff --name-only --diff-filter=U`): + - `.github/workflows/pr-auto-base.yml` + - `.github/workflows/staging-release-prep.yml` + - `eslint.config.mjs` + - `index.ts` + - `lib/logger.ts` + - `lib/prompts/codex.ts` + - `lib/prompts/opencode-codex.ts` + - `lib/request/fetch-helpers.ts` + - `lib/request/request-transformer.ts` + - `package-lock.json` + - `package.json` + - `test/logger.test.ts` + - `test/session-manager.test.ts` + +## Notable conflict locations (line references from current workspace) +- `index.ts`: bridge fetch creation formatting and indentation around ~126-148. +- `lib/logger.ts`: toast/app log forwarding logic around ~142-178. +- `lib/prompts/codex.ts`: cache metadata handling and ETag logic around ~177-270. +- `lib/prompts/opencode-codex.ts`: cache migration/ETag fetch helpers around ~88-357. +- `lib/request/fetch-helpers.ts`: compaction settings and error enrichment around ~166-470. +- `lib/request/request-transformer.ts`: imports, compaction, prompt cache key, bridge/tool injection across file (multiple conflicts starting near top and ~620-1210). +- Workflows: `pr-auto-base.yml` trigger/permissions/checkout around ~5-53; `staging-release-prep.yml` release branch/tag creation and PR automation around ~25-296. +- Config/test files: `eslint.config.mjs` test overrides (~95-100); `test/logger.test.ts` toast/console expectations (~1-190); `test/session-manager.test.ts` metrics variable naming (~159-165); `package.json` & `package-lock.json` version bump (0.3.0 vs 0.2.0). + +## Definition of Done +- All merge conflicts resolved with cohesive logic that preserves newer behaviors (cache handling, logging/toast routing, compaction settings, workflow automation, version 0.3.0). +- TypeScript sources compile conceptually (no mixed indentation or stale references). +- Package metadata consistent across `package.json` and `package-lock.json`. +- Workflow YAML passes basic syntax review. 
+- Relevant tests updated to match behavior (logger toast routing, session metrics variable consistency). +- `git status` clean of conflict markers; ready for commit. + +## Plan (phased) +### Phase 1 – Workflows & Config +- Merge `.github/workflows/pr-auto-base.yml` to include checkout + sync/reopen triggers, correct permissions, GH repo usage. +- Merge `.github/workflows/staging-release-prep.yml` retaining branch/tag push and auto-merge reviewer steps. +- Restore `eslint.config.mjs` test overrides for max-lines. + +### Phase 2 – Core Source Merges +- Align `index.ts` fetch creator call with repository style (spaces, no tabs). +- Resolve `lib/logger.ts` to avoid duplicate warn logging when toast available while still forwarding error logging. +- Merge `lib/prompts/codex.ts` with unified cache metadata handling and fallback semantics. +- Merge `lib/prompts/opencode-codex.ts` using fresh cache/ETag helpers and migration checks. +- Merge `lib/request/fetch-helpers.ts` compaction settings builder and enriched error handling using helper functions. +- Merge `lib/request/request-transformer.ts` (imports, prompt cache handling, compaction options, bridge/tool injection) ensuring Codex-mode defaults and logging. + +### Phase 3 – Packages & Tests +- Set version to 0.3.0 in `package.json` and `package-lock.json`; keep dependency blocks aligned. +- Update `test/logger.test.ts` to match toast + logging behavior and `OpencodeClient` typing. +- Fix `test/session-manager.test.ts` minor variable naming conflict. + +### Phase 4 – Verification +- Run targeted tests if time allows (logger/session transformer) via `npm test -- logger` subset or full `npm test` if feasible. +- Final `git status` check for cleanliness. diff --git a/spec/pr57-review.md b/spec/pr57-review.md new file mode 100644 index 0000000..55c11b3 --- /dev/null +++ b/spec/pr57-review.md @@ -0,0 +1,24 @@ +# PR 57 review follow-up + +## Context +- PR #57 (branch `dev` → `main`) reorganizes request handling and caching. 
+- Unresolved review feedback targets Codex instruction caching metadata refresh logic in `lib/prompts/codex.ts`. + +## Review comments to address +1) `lib/prompts/codex.ts` (approx lines 90-200): metadata `lastChecked` is not updated when GitHub returns 304 or when cached/bundled fallbacks are used after a fetch failure, causing repeated GitHub calls beyond TTL. + +## Plan +- Inspect current `getCodexInstructions` flow and helpers to locate 304 + fallback paths. +- Update metadata writes so `lastChecked` refreshes on 304 responses with valid cache and when cached/bundled fallbacks are used. +- Ensure session cache is consistent and logging remains accurate. +- Add/adjust tests in `test/prompts-codex.test.ts` (or neighboring files) to cover refreshed metadata on 304 and fallback. +- Run targeted tests for prompts/codex logic. + +## Definition of done +- Code change updates metadata refresh logic per review without altering successful fetch semantics. +- Tests updated/added and passing locally for the touched area. +- Worktree clean aside from intentional changes; review comment marked resolved. + +## Notes +- Focus file: lib/prompts/codex.ts +- Test focus: test/prompts-codex.test.ts diff --git a/spec/release-automerge-and-tags.md b/spec/release-automerge-and-tags.md new file mode 100644 index 0000000..1768912 --- /dev/null +++ b/spec/release-automerge-and-tags.md @@ -0,0 +1,27 @@ +# Release workflow: tags + auto-merge + CodeRabbit + +## Context +- PR #36 refines the staging release workflow; prior flow failed to push tags and didn’t create PRs against staging/main. +- Release ruleset (ID 10200441) enforces status checks and CodeRabbit on `main` and `staging` (refs include default branch + `refs/heads/staging`). + +## Problem +- Need PR-based release flow but still publish annotated tags. +- Release PRs should auto-merge after required checks, CodeRabbit review, and resolved conversations. 
+- Warn-level logs must continue to reach persistent logs/console while also surfacing as toasts. +- Auto-base workflow must reliably retarget PRs to `staging`. + +## Changes +- `.github/workflows/staging-release-prep.yml`: build release branch, tag `v<version>`, push branch+tag, open PRs to staging (and main for hotfix), request `coderabbitai` review, and enable auto-merge (squash). Lines ~10-228. +- `.github/workflows/pr-auto-base.yml`: add `contents: read`, capture PR number, include reopened/synchronize triggers, and retarget via `gh pr edit ... --repo` to avoid git context failures. Lines ~3-38. +- `lib/logger.ts`: keep toast notifications but always forward warnings to app logs and console for persistence. Lines ~1-188. + +## Definition of Done +- Workflow creates annotated tag `v<version>` and pushes it alongside the release branch. +- Release PR to staging (and hotfix PR to main when labeled) is opened, requests CodeRabbit review, and auto-merge is enabled; PR auto-merges only after required checks/reviews/conversation resolution (per GitHub ruleset). +- Warn logs are both toasted and persisted (app log + console) for diagnostics. +- Auto-base workflow successfully retargets PRs to `staging` and does not fail on missing git context. + +## Requirements / Notes +- Permissions: `contents: write`, `pull-requests: write` needed for branch/tag pushes, review requests, and auto-merge GraphQL; auto-base uses `contents: read` + `pull-requests: write`. +- Uses `scripts/detect-release-type.mjs` outputs (`nextVersion`, `releaseNotes`). +- Release ruleset already requires CodeRabbit + CI contexts: Lint & Typecheck, Test (20.x/22.x), Test on Node.js 20.x/22.x, CodeRabbit.
diff --git a/spec/release-pr-flow.md b/spec/release-pr-flow.md new file mode 100644 index 0000000..0772248 --- /dev/null +++ b/spec/release-pr-flow.md @@ -0,0 +1,28 @@ +# Release PR Flow Adjustment + +## Context + +- Current workflow `.github/workflows/staging-release-prep.yml` triggers on PR merge into `staging` and directly pushes version bump + tag to `staging` (lines 3-130). +- Branch protection rejects direct pushes to `staging`. +- Hotfix label currently promotes `staging` to `main` when present. + +## Plan + +1. Update staging release workflow to create a release branch (e.g., `release/vX.Y.Z`) from `staging` and open a PR back to `staging` instead of pushing. +2. Include analyzer outputs (version, notes, hotfix flag) in the PR body and commit message; keep tag creation/publishing out of protected branch writes. +3. Preserve hotfix detection to surface in PR content and enable downstream promotion logic without forcing direct merges. + +## Definition of Done + +- Workflow no longer attempts `git push origin HEAD:staging` directly; creates a release branch and PR to `staging` instead. +- Version bump commit and release notes are part of the PR branch. +- Hotfix label metadata is preserved in PR content. +- Actions run without violating branch protection. + +## Notes + +- No related open issues/PRs identified. + +## Session Updates + +- Converted staging release workflow to use release branches + PRs (staging and hotfix-to-main) instead of direct pushes/tags; added branch name collision safeguard. diff --git a/spec/request-transformer-refactor.md b/spec/request-transformer-refactor.md new file mode 100644 index 0000000..4c5eefc --- /dev/null +++ b/spec/request-transformer-refactor.md @@ -0,0 +1,49 @@ +# Request Transformer Refactor + +## Context + +- `lib/request/request-transformer.ts` is 1,094 lines (eslint `max-lines` warning; target <500). 
+- Build failure: missing `normalizeToolsForResponses` reference (available in `lib/request/tool-normalizer.ts` line 139). +- Lint warning: `.eslintignore` deprecation message (out of scope for this task unless affected file changes). + +## Relevant Code References + +- `lib/request/request-transformer.ts` lines 1-1094: monolithic helpers for model normalization, reasoning config, input filtering, bridge/tool messages, compaction, prompt cache keys, and `transformRequestBody` entrypoint. +- `lib/request/tool-normalizer.ts` lines 1-158: provides `normalizeToolsForResponses` used by transformer but not imported. +- Tests mirror structure under `test/` (e.g., `test/request-transformer.test.ts`). + +## Definition of Done + +- `lib/request/request-transformer.ts` reduced below 500 lines while preserving behavior and exports. +- Missing `normalizeToolsForResponses` import resolved; TypeScript build passes. +- ESLint passes without new warnings/errors (existing `.eslintignore` warning acceptable if unchanged). +- Existing tests relevant to transformed logic updated if needed and passing locally (at least lint/build executed). + +## Plan (Phases) + +### Phase 1: Extraction Design + +- Identify logical groupings (model/reasoning config, input filtering/bridge, compaction helpers, prompt cache key utilities, tool normalization usage, main transform orchestration). +- Decide target helper modules under `lib/request/` to move into (e.g., `model-config.ts`, `input-filters.ts`, `prompt-cache.ts`, `compaction-helpers.ts`). + +### Phase 2: Implement Refactors + +- Create/adjust helper modules and move functions accordingly; export/import from transformer. +- Wire missing `normalizeToolsForResponses` import from `tool-normalizer.ts`. +- Keep `transformRequestBody` orchestrator lean by reusing helpers; ensure type/shared constants remain. + +### Phase 3: Validation + +- Run `pnpm lint` and `pnpm build` to confirm lint/TypeScript success. 
+- Update todos/spec with outcomes and note any follow-ups. + +## Notes + +- Preserve existing behavior (stateless filtering, bridge prompt caching, compaction decisions, prompt cache key derivation). +- Avoid altering public APIs consumed by tests unless necessary; adjust tests if import paths change. + +## Change Log + +- Split `lib/request/request-transformer.ts` into helper modules (`model-config.ts`, `input-filters.ts`, `prompt-cache.ts`, `compaction-helpers.ts`, `tooling.ts`) and re-exported APIs to keep the transformer under 500 lines. +- Added missing `normalizeToolsForResponses` import via `normalizeToolsForCodexBody` helper. +- Ran `pnpm build` and `pnpm lint` (lint only warning remains about legacy `.eslintignore`). diff --git a/spec/retarget-workflow-fix.md b/spec/retarget-workflow-fix.md new file mode 100644 index 0000000..b762d3d --- /dev/null +++ b/spec/retarget-workflow-fix.md @@ -0,0 +1,53 @@ +# PR Auto Base Retarget Fix + +## Problem + +The `pr-auto-base.yml` workflow was failing with the error: + +``` +failed to run git: fatal: not a git repository (or any of the parent directories): .git +``` + +This occurred because the workflow was trying to run `gh pr edit` without first checking out the repository, so there was no git context for the GitHub CLI to work with. + +## Solution + +Updated the workflow to: + +1. Add a checkout step using `actions/checkout@v4` with `fetch-depth: 0` to ensure full git history +2. 
Keep the retarget logic that moves PRs to `staging` unless they already target `staging` or originate from `staging` + +## Code References + +- `.github/workflows/pr-auto-base.yml`: Checkout step at lines 16-20 ensures a git repository is available for GitHub CLI commands +- `.github/workflows/pr-auto-base.yml`: Retarget logic at lines 22-32 switches PR base to `staging` unless the base is already `staging` or the head branch is `staging` +- `CONTRIBUTING.md`: Pull request process and release process sections clarify `staging` as the default base and describe release automation (lines 49-69) + +## Existing Issues / PRs + +- No linked GitHub issues or PRs referenced in this change; fix derived from observed workflow failure logs + +## Files Changed + +- `.github/workflows/pr-auto-base.yml`: Added checkout step before the retarget logic +- `CONTRIBUTING.md`: Clarified PR base branch guidance and documented release process that runs from `staging` + +## Definition of Done + +- Workflow runs without `fatal: not a git repository` errors +- PRs opened against `main` are automatically retargeted to `staging`, except when they already target `staging` or originate from `staging` +- Contributors are instructed to target `staging` and understand the release pipeline that runs post-merge + +## Requirements + +- GitHub-hosted runner with `gh` CLI available +- `pull-requests: write` permission granted to the workflow; `GH_TOKEN` sourced from `secrets.GITHUB_TOKEN` +- Repository must be checked out before invoking any `gh pr` commands + +## Testing + +The workflow should now successfully: + +- Check out the repository with full history +- Run the retarget logic in a proper git context +- Successfully retarget PRs from main to staging (except when PR is from staging branch) diff --git a/spec/review-response-env-whitelist.md b/spec/review-response-env-whitelist.md new file mode 100644 index 0000000..1549faa --- /dev/null +++ b/spec/review-response-env-whitelist.md @@ -0,0 +1,32 @@ 
+# Review Response Env Whitelist Fix + +## Context + +- `.github/workflows/review-response.yml:9` – workflow load failed with `Unrecognized named-value: 'env'` because the job-level condition referenced `env.REVIEW_RESPONDER_WHITELIST`. + +## Existing Issues / PRs + +- None known; reported by workflow syntax validation. + +## Requirements / Definition of Done + +- Job condition avoids unsupported `env` context and passes validation. +- Whitelist still restricts execution to trusted reviewers plus OWNER/MEMBER/COLLABORATOR associations. +- Workflow remains syntactically valid and behavior unchanged otherwise. + +## Plan + +### Phase 1 – Confirm Failure Source + +1. Inspect `.github/workflows/review-response.yml` around the job condition. (Completed) + +### Phase 2 – Implement Fix + +1. Replace the failing `env` reference with a literal allowlist using `fromJson` that the expression engine supports at job scope. +2. Keep the OWNER/MEMBER/COLLABORATOR checks intact. + +### Phase 3 – Validate + +1. Re-read the workflow to ensure expression correctness and YAML structure. +2. Optionally run `actionlint` if available. +3. Summarize changes and advise rerunning the workflow. diff --git a/spec/review-response-token.md b/spec/review-response-token.md new file mode 100644 index 0000000..eb5b823 --- /dev/null +++ b/spec/review-response-token.md @@ -0,0 +1,24 @@ +# Review-response workflow token alignment + +## Context + +- File: .github/workflows/review-response.yml (lines 1-106) +- Behavior: Responds to PR review comments; checks out PR head and pushes auto-fix branch using default GITHUB_TOKEN. +- Issue: Org policy blocks PR creation/push via default `GITHUB_TOKEN`; dev-release-prep uses `PR_AUTOMATION_TOKEN` validated explicitly. +- Existing related workflow: .github/workflows/dev-release-prep.yml uses `PR_AUTOMATION_TOKEN` for PR creation and validates secret. 
+ + ## Requirements / Definition of Done + + - review-response workflow uses `secrets.PR_AUTOMATION_TOKEN` for all GitHub write operations (push, PR creation, gh api/cli) instead of default GITHUB_TOKEN. + - Validate presence of PR_AUTOMATION_TOKEN early with a failure message mirroring dev-release-prep wording. + - Keep OPENCODE_API_KEY handling unchanged. + - Ensure checkout/push/pr steps reference the new token via env (GH_TOKEN and git auth) so branch push + PR creation succeed under org policy. + - Review-response branches should be named `review/<identifier>`. + - Release automation should treat `review/*` branches as non-release and avoid version bumps. + - Tests/build unaffected (workflow-only change). + + ## Open Questions / Notes + + - No other review-response workflows present. + - PAT must have repo permissions; assumes secret already configured in repo/org. + - Checkout of fork PRs may still require permission; pushing uses PAT. diff --git a/spec/review-v0.3.5-fixes.md b/spec/review-v0.3.5-fixes.md new file mode 100644 index 0000000..a2ba497 --- /dev/null +++ b/spec/review-v0.3.5-fixes.md @@ -0,0 +1,25 @@ +# Review v0.3.5 fixes + +## Scope + +- Handle null/empty cache reads in `lib/prompts/codex.ts` around readCachedInstructions caching logic +- Remove redundant cloning in `lib/request/compaction-helpers.ts` (removeLastUserMessage, maybeBuildCompactionPrompt) +- Prevent duplicate tool remap injection in `lib/request/input-filters.ts` addToolRemapMessage + +## Existing issues / PRs + +- None identified for this branch (review/v0.3.5).
+ +## Definition of done + +- safeReadFile null results do not get cached as empty content; fallback logic remains available for caller +- Compaction helpers avoid unnecessary clones while preserving immutability semantics (original input reused unless truncated) +- Tool remap message is only prepended once when tools are present; logic handles undefined/null safely +- All relevant tests updated or added if behavior changes; existing suite passes locally if run + +## Requirements / notes + +- Only cache instructions when actual non-empty content is read; on null either warn and return null or allow existing fallback paths +- removeLastUserMessage should find last user role index and slice; when commandText is falsy reuse originalInput directly in maybeBuildCompactionPrompt +- addToolRemapMessage should fingerprint or compare TOOL_REMAP_MESSAGE and skip if already present (matching role/type/text) +- Preserve existing function signatures and return types throughout diff --git a/spec/toast-word-wrap.md b/spec/toast-word-wrap.md new file mode 100644 index 0000000..4f1047d --- /dev/null +++ b/spec/toast-word-wrap.md @@ -0,0 +1,21 @@ +# Toast word wrap spec + +## Code references + +- lib/logger.ts:170-195 — `notifyToast` builds toast body and sends via `tui.showToast`, currently sends single-line message. + +## Existing issues/PRs + +- None found yet in this repository. + +## Definition of done + +- Toast messages no longer get truncated; long text wraps across lines in the TUI. +- Warning/error toasts still appear with existing title/variant semantics. +- Tests cover the wrapped formatting behavior. + +## Requirements + +- Introduce safe word-wrapping for toast message bodies before calling `showToast` (keep readable at typical terminal widths). +- Prefer whole-word wrapping; avoid mangling short messages. +- Preserve existing behavior for short messages and existing title/variant fields. 
diff --git a/spec/version-bump-workflow-review.md b/spec/version-bump-workflow-review.md new file mode 100644 index 0000000..20e7adc --- /dev/null +++ b/spec/version-bump-workflow-review.md @@ -0,0 +1,69 @@ +# Version-Bump Release Workflow Review + +## Code References + +- `.github/workflows/dev-release-prep.yml:59-229` – dev merge trigger builds release branch, bumps version, creates tag, and opens release/hotfix PRs. +- `.github/workflows/ci.yml:103-183` – release job gating on `main` push with `head_commit.message` starting `chore: release v`, publishes to npm and creates GitHub Releases. +- `scripts/detect-release-type.mjs:62-215` – analyzer output drives `nextVersion`/notes consumption in the staging release workflow. + +## Existing Issues / PRs + +- No open GitHub issues or PRs found for the current auto-bump release workflow. Prior specs: `release-pr-flow.md`, `release-automerge-and-tags.md`, `release-workflow-cli-failure.md` describe the current design but not the new breakages noted below. + +## Findings + +1. **Tags land before review/merge and stay off the released commit** + - `dev-release-prep.yml:128-153` tags `v` on the release branch before the PR is created/merged. Auto-merge uses squash, so the final commit on `dev`/`main` has a different SHA while the tag still points to the pre-squash branch commit. GitHub Release + npm publish (driven by that tag) no longer match the code that actually ships. Future `git describe` on `dev` also ignores that tag because it is not reachable, so release diffs are computed against an older tag. +2. **Release job often never runs (especially for hotfixes)** + - `ci.yml:103-110` only runs the release job when a push to `main` has `head_commit.message` starting with `chore: release v`. Hotfix PRs opened from `dev-release-prep` use the title `hotfix: release v…` and squash merges on `main` generate commit messages that do not match the gate. Merge commits from `dev` also fail the prefix check. 
Result: no npm publish / GitHub Release even though the tag already exists. +3. **Tag/branch push is irreversible when PR creation fails** + - `dev-release-prep.yml:142-153` pushes branch + tag before verifying PR creation succeeds (`curl` at :154-193). If the API call fails (quota/permissions), the repo is left with a published tag/version bump branch but no PR; auto-merge/review doesn’t run and manual cleanup is needed. + +## Definition of Done + +- Release automation tags the same commit that is merged into the protected branch (dev/main), avoiding off-SHA releases. +- Release publishing workflow triggers reliably for both normal and hotfix releases without relying on brittle commit-message prefixes. +- Failure to open release PR does not leave dangling tags/branches without review. + +## Requirements / Next Steps + +- Move tag creation/push after the release PR merges (or retag against the merge commit) so GitHub Releases and npm publish use the merged SHA. +- Rework release job trigger to react to `v*` tags (or version bumps) rather than `head_commit.message` prefixes; include hotfix commits. +- Gate pushing tags on successful PR creation (or implement cleanup on failure) to avoid dangling tags when GitHub API calls fail. + +## Change Log + +- 2025-11-20: Reordered dev release workflow to push the release branch and open the PR before tagging/pushing the tag; widened `ci.yml` release guard to allow `hotfix: release v` commits on `main`. +- 2025-11-20: Renamed default branch to `dev` in workflows/guards/auto-base; added ruleset drift check coverage for `dev`. + +## Ruleset Review (2025-11-20) + +- `release` ruleset (ID 10200441): applies to default branch (`refs/heads/dev`), enforces non-FF, deletion block, strict required checks (Lint & Typecheck, Test 20.x/22.x, CodeRabbit), Copilot review on push, CodeQL/code-quality gates. 
+- `main` ruleset (ID 10223971): applies to `refs/heads/main`, similar required checks but `strict_required_status_checks_policy` is false; allows merge/squash/rebase. +- Snapshots added to `.github/rulesets/{release.json,main.json}` with a README on how to refresh via `gh api`. + +### Implications + +- Dev release PRs must satisfy strict required checks; auto-merge should succeed once CodeRabbit + CI pass. +- Hotfix PRs into `main` rely on the `main` ruleset; strict status enforcement is off, so merging without all statuses passing remains possible unless GitHub auto-merge requires them. If we want parity, consider enabling strict required checks or folding `main` into the `release` ruleset targets. +- If we move to tag-triggered releases, ensure tags are created from the merged commit that satisfied the applicable ruleset (dev or main) to keep enforcement consistent with published artifacts. +- If we rename `dev` again, update the default branch and adjust workflows + `release` ruleset include target, then refresh snapshots. 
+ +## Flow Diagram + +```mermaid +flowchart TD + A[Feature PR -> dev] -->|merge| B(dev-release-prep.yml) + B --> C[Analyze commits: next_version / notes / hotfix] + C --> D[Bump version on release/vX.Y.Z branch] + D --> E[Push release branch] + E --> F[Open release PR -> dev; auto-merge + CodeRabbit] + C -->|hotfix label| G[Open hotfix PR -> main] + F -->|merge dev PR| H[Squash merge on dev] + G -->|merge hotfix PR| H2[Squash merge on main] + E --> J[Create tag vX.Y.Z on release branch] + J --> K[Push tag vX.Y.Z] + H --> I[ci.yml release job publishes npm + GitHub Release when main commit starts chore/hotfix: release v] + H2 --> I + K --> I +``` diff --git a/test/compaction-helpers.test.ts b/test/compaction-helpers.test.ts new file mode 100644 index 0000000..fdfc2da --- /dev/null +++ b/test/compaction-helpers.test.ts @@ -0,0 +1,57 @@ +import { applyCompactionIfNeeded } from "../lib/request/compaction-helpers.js"; +import type { InputItem, RequestBody } from "../lib/types.js"; + +describe("compaction helpers", () => { + it("drops only the last user command and keeps trailing items", () => { + const originalInput: InputItem[] = [ + { type: "message", role: "assistant", content: "previous response" }, + { type: "message", role: "user", content: "/codex-compact please" }, + { type: "message", role: "assistant", content: "trailing assistant" }, + ]; + const body: RequestBody = { model: "gpt-5", input: [...originalInput] }; + + const decision = applyCompactionIfNeeded(body, { + settings: { enabled: true }, + commandText: "codex-compact please", + originalInput, + }); + + expect(decision?.mode).toBe("command"); + expect(decision?.serialization.transcript).toContain("previous response"); + expect(decision?.serialization.transcript).toContain("trailing assistant"); + expect(decision?.serialization.transcript).not.toContain("codex-compact please"); + + // Verify RequestBody mutations + expect(body.input).not.toEqual(originalInput); + expect(body.input?.some((item) => 
item.content === "/codex-compact please")).toBe(false); + expect((body as any).tools).toBeUndefined(); + expect((body as any).tool_choice).toBeUndefined(); + expect((body as any).parallel_tool_calls).toBeUndefined(); + }); + + it("returns original items when no user message exists", () => { + const originalInput: InputItem[] = [ + { + type: "message", + role: "assistant", + content: "system-only follow-up", + }, + ]; + const body: RequestBody = { model: "gpt-5", input: [...originalInput] }; + + const decision = applyCompactionIfNeeded(body, { + settings: { enabled: true }, + commandText: null, // No command, so no compaction should occur + originalInput, + }); + + // No compaction should occur when there's no command text + expect(decision).toBeUndefined(); + // Verify RequestBody mutations - body should remain unchanged + expect(body.input).toBeDefined(); + expect(body.input).toEqual(originalInput); + expect((body as any).tools).toBeUndefined(); + expect((body as any).tool_choice).toBeUndefined(); + expect((body as any).parallel_tool_calls).toBeUndefined(); + }); +}); diff --git a/test/logger.test.ts b/test/logger.test.ts index 5189854..ff0c863 100644 --- a/test/logger.test.ts +++ b/test/logger.test.ts @@ -1,3 +1,4 @@ +import type { OpencodeClient } from "@opencode-ai/sdk"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; const fsMocks = { @@ -128,6 +129,59 @@ describe("logger", () => { expect(warnSpy).toHaveBeenCalledWith("[openhax/codex] warning"); }); + it("logWarn sends toast and avoids console/app log when tui available", async () => { + fsMocks.existsSync.mockReturnValue(true); + const showToast = vi.fn(); + const appLog = vi.fn().mockResolvedValue(undefined); + const { configureLogger, logWarn, flushRollingLogsForTest } = await import("../lib/logger.js"); + + const client = { + app: { log: appLog }, + tui: { showToast }, + } as unknown as OpencodeClient; + + configureLogger({ client }); + + logWarn("toast-warning"); + await 
flushRollingLogsForTest(); + + expect(showToast).toHaveBeenCalledWith({ + body: { + title: "openhax/codex warning", + message: "openhax/codex: toast-warning", + variant: "warning", + }, + }); + expect(appLog).not.toHaveBeenCalled(); + expect(warnSpy).not.toHaveBeenCalled(); + }); + + it("wraps long toast messages to avoid truncation", async () => { + fsMocks.existsSync.mockReturnValue(true); + const showToast = vi.fn(); + const appLog = vi.fn().mockResolvedValue(undefined); + const { configureLogger, logWarn, flushRollingLogsForTest } = await import("../lib/logger.js"); + + const client = { + app: { log: appLog }, + tui: { showToast }, + } as unknown as OpencodeClient; + + configureLogger({ client }); + + logWarn( + "prefix mismatch detected while warming the session cache; reconnecting with fallback account boundaries", + ); + await flushRollingLogsForTest(); + + expect(showToast).toHaveBeenCalledTimes(1); + const message = (showToast.mock.calls[0]?.[0] as { body: { message: string } }).body.message; + const lines = message.split("\n"); + expect(lines.length).toBeGreaterThan(1); + lines.forEach((line) => expect(line.length).toBeLessThanOrEqual(72)); + expect(appLog).not.toHaveBeenCalled(); + expect(warnSpy).not.toHaveBeenCalled(); + }); it("logInfo does not mirror to console in tests, even with debug flag", async () => { process.env.ENABLE_PLUGIN_REQUEST_LOGGING = "1"; fsMocks.existsSync.mockReturnValue(true); diff --git a/test/prompts-codex.test.ts b/test/prompts-codex.test.ts index 6f0b66a..fbfccef 100644 --- a/test/prompts-codex.test.ts +++ b/test/prompts-codex.test.ts @@ -112,13 +112,15 @@ describe("Codex Instructions Fetcher", () => { it("falls back to cached instructions when fetch fails", async () => { const consoleError = vi.spyOn(console, "error").mockImplementation(() => {}); + const consoleWarn = vi.spyOn(console, "warn").mockImplementation(() => {}); + const previousLastChecked = Date.now() - 20 * 60 * 1000; files.set(cacheFile, "still-good"); 
files.set( cacheMeta, JSON.stringify({ etag: '"old-etag"', tag: "v1", - lastChecked: Date.now() - 20 * 60 * 1000, + lastChecked: previousLastChecked, }), ); @@ -136,12 +138,19 @@ describe("Codex Instructions Fetcher", () => { expect(result).toBe("still-good"); expect(consoleError).toHaveBeenCalledWith( - '[openhax/codex] Failed to fetch instructions from GitHub {"error":"HTTP 500"}', + '[openhax/codex] Failed to fetch instructions from GitHub {"error":"HTTP 500 fetching https://raw.githubusercontent.com/openai/codex/v2/codex-rs/core/gpt_5_codex_prompt.md"}', ); - expect(consoleError).toHaveBeenCalledWith( + expect(consoleWarn).toHaveBeenCalledWith( "[openhax/codex] Using cached instructions due to fetch failure", ); + + const meta = JSON.parse(files.get(cacheMeta) ?? "{}"); + expect(meta.lastChecked).toBeGreaterThan(previousLastChecked); + expect(meta.tag).toBe("v1"); + expect(meta.url).toContain("codex-rs/core/gpt_5_codex_prompt.md"); + consoleError.mockRestore(); + consoleWarn.mockRestore(); }); it("serves in-memory session cache when latest entry exists", async () => { @@ -184,13 +193,14 @@ describe("Codex Instructions Fetcher", () => { }); it("uses file cache when GitHub responds 304 Not Modified", async () => { + const staleTimestamp = Date.now() - 20 * 60 * 1000; files.set(cacheFile, "from-file-304"); files.set( cacheMeta, JSON.stringify({ etag: '"etag-304"', tag: "v1", - lastChecked: Date.now() - 20 * 60 * 1000, + lastChecked: staleTimestamp, }), ); @@ -222,10 +232,17 @@ describe("Codex Instructions Fetcher", () => { const latestEntry = codexInstructionsCache.get("latest"); expect(latestEntry?.data).toBe("from-file-304"); + + const meta = JSON.parse(files.get(cacheMeta) ?? 
"{}"); + expect(meta.tag).toBe("v1"); + expect(meta.etag).toBe('"etag-304"'); + expect(meta.lastChecked).toBeGreaterThan(staleTimestamp); + expect(meta.url).toContain("codex-rs/core/gpt_5_codex_prompt.md"); }); it("falls back to bundled instructions when no cache is available", async () => { const consoleError = vi.spyOn(console, "error").mockImplementation(() => {}); + const consoleWarn = vi.spyOn(console, "warn").mockImplementation(() => {}); fetchMock .mockResolvedValueOnce( @@ -241,19 +258,16 @@ describe("Codex Instructions Fetcher", () => { expect(typeof result).toBe("string"); expect(consoleError).toHaveBeenCalledWith( - '[openhax/codex] Failed to fetch instructions from GitHub {"error":"HTTP 500"}', - ); - expect(consoleError).toHaveBeenCalledWith("[openhax/codex] Falling back to bundled instructions"); - - const readPaths = readFileSync.mock.calls.map((call) => call[0] as string); - const fallbackPath = readPaths.find( - (path) => path.endsWith("codex-instructions.md") && !path.startsWith(cacheDir), + '[openhax/codex] Failed to fetch instructions from GitHub {"error":"HTTP 500 fetching https://raw.githubusercontent.com/openai/codex/v1/codex-rs/core/gpt_5_codex_prompt.md"}', ); - expect(fallbackPath).toBeDefined(); + expect(consoleWarn).toHaveBeenCalledWith("[openhax/codex] Falling back to bundled instructions"); - const latestEntry = codexInstructionsCache.get("latest"); - expect(latestEntry).not.toBeNull(); + const meta = JSON.parse(files.get(cacheMeta) ?? 
"{}"); + expect(meta.tag).toBe("v1"); + expect(meta.lastChecked).toBeGreaterThan(0); + expect(meta.url).toContain("codex-rs/core/gpt_5_codex_prompt.md"); consoleError.mockRestore(); + consoleWarn.mockRestore(); }); }); diff --git a/test/request-transformer.test.ts b/test/request-transformer.test.ts index 3bafc87..bdc733d 100644 --- a/test/request-transformer.test.ts +++ b/test/request-transformer.test.ts @@ -10,6 +10,7 @@ import { addCodexBridgeMessage, transformRequestBody as transformRequestBodyInternal, } from "../lib/request/request-transformer.js"; +import { CODEX_OPENCODE_BRIDGE } from "../lib/prompts/codex-opencode-bridge.js"; import * as logger from "../lib/logger.js"; import { SessionManager } from "../lib/session/session-manager.js"; import type { RequestBody, SessionContext, UserConfig, InputItem } from "../lib/types.js"; @@ -34,9 +35,9 @@ describe("normalizeModel", () => { expect(normalizeModel("gpt-5-nano")).toBe("gpt-5"); }); - it("should return gpt-5.1 as default for unknown models", async () => { - expect(normalizeModel("unknown-model")).toBe("gpt-5.1"); - expect(normalizeModel("gpt-4")).toBe("gpt-5.1"); + it("should preserve unknown models without remapping", async () => { + expect(normalizeModel("unknown-model")).toBe("unknown-model"); + expect(normalizeModel("gpt-4")).toBe("gpt-4"); }); it("should return gpt-5.1 for undefined", async () => { @@ -238,6 +239,23 @@ describe("filterInput", () => { expect(result![0]).toHaveProperty("metadata"); }); + it("preserves metadata when explicitly requested without preserving IDs", async () => { + const input: InputItem[] = [ + { + id: "msg_456", + type: "message", + role: "developer", + content: "Summary saved to ~/.opencode/summary.md", + metadata: { source: "opencode-compaction" }, + }, + ]; + const result = filterInput(input, { preserveMetadata: true }); + + expect(result).toHaveLength(1); + expect(result![0]).not.toHaveProperty("id"); + expect(result![0]).toHaveProperty("metadata"); + }); + it("should 
handle mixed items with and without IDs", async () => { const input: InputItem[] = [ { type: "message", role: "user", content: "1" }, @@ -613,6 +631,21 @@ describe("filterOpenCodeSystemPrompts", () => { expect(result![1].role).toBe("user"); }); + it("should use metadata flag to detect compaction prompts", async () => { + const input: InputItem[] = [ + { + type: "message", + role: "developer", + content: "Summary saved to ~/.opencode/summary.md for inspection", + metadata: { source: "opencode-compaction" }, + }, + { type: "message", role: "user", content: "continue" }, + ]; + const result = await filterOpenCodeSystemPrompts(input); + expect(result).toHaveLength(1); + expect(result![0].role).toBe("user"); + }); + it("should return undefined for undefined input", async () => { expect(await filterOpenCodeSystemPrompts(undefined)).toBeUndefined(); }); @@ -654,6 +687,38 @@ describe("addCodexBridgeMessage", () => { expect(sessionContext.state.bridgeInjected).toBe(true); }); + it("avoids duplicating bridge when already present in session", async () => { + const input: InputItem[] = [ + { + type: "message", + role: "developer", + content: [{ type: "input_text", text: CODEX_OPENCODE_BRIDGE }], + }, + { type: "message", role: "user", content: "hello" }, + ]; + const sessionContext: SessionContext = { + sessionId: "ses_bridge", + enabled: true, + preserveIds: true, + isNew: false, + state: { + id: "ses_bridge", + promptCacheKey: "ses_bridge", + store: false, + lastInput: [], + lastPrefixHash: null, + lastUpdated: Date.now(), + bridgeInjected: true, + }, + }; + + const result = addCodexBridgeMessage(input, true, sessionContext); + + expect(result).toEqual(input); + expect(result?.[0]).toEqual(input[0]); + expect(sessionContext.state.bridgeInjected).toBe(true); + }); + it("should not modify input when tools not present", async () => { const input: InputItem[] = [{ type: "message", role: "user", content: "hello" }]; const result = addCodexBridgeMessage(input, false); @@ -743,6 
+808,29 @@ describe("transformRequestBody", () => { expect(result2.prompt_cache_key).toBe("cache_meta-conv-789-fork-fork-x"); }); + it("filters metadata-tagged compaction prompts and strips metadata when IDs are not preserved", async () => { + const body: RequestBody = { + model: "gpt-5", + input: [ + { + type: "message", + role: "developer", + content: "Summary saved to ~/.opencode/summary.md for inspection", + metadata: { source: "opencode-compaction" }, + }, + { type: "message", role: "user", content: "continue" }, + ], + }; + + const transformedBody = await transformRequestBody(body, codexInstructions); + expect(transformedBody).toBeDefined(); + const messages = transformedBody.input ?? []; + + expect(messages.some((item) => (item as any).metadata)).toBe(false); + expect(JSON.stringify(messages)).not.toContain(".opencode/summary"); + expect(messages.some((item) => item.role === "user" && (item as any).content === "continue")).toBe(true); + }); + it("keeps bridge prompt across turns so prompt_cache_key stays stable", async () => { const sessionManager = new SessionManager({ enabled: true }); const baseInput: InputItem[] = [ diff --git a/test/session-manager.test.ts b/test/session-manager.test.ts index 9474f2b..7d17701 100644 --- a/test/session-manager.test.ts +++ b/test/session-manager.test.ts @@ -6,6 +6,8 @@ import type { InputItem, RequestBody, SessionContext } from "../lib/types.js"; interface BodyOptions { forkId?: string; + parentConversationId?: string; + parent_conversation_id?: string; } function createBody(conversationId: string, inputCount = 1, options: BodyOptions = {}): RequestBody { @@ -15,6 +17,12 @@ function createBody(conversationId: string, inputCount = 1, options: BodyOptions if (options.forkId) { metadata.forkId = options.forkId; } + if (options.parentConversationId) { + metadata.parentConversationId = options.parentConversationId; + } + if (options.parent_conversation_id) { + metadata.parent_conversation_id = options.parent_conversation_id; + 
} return { model: "gpt-5", @@ -222,6 +230,23 @@ describe("SessionManager", () => { expect(betaContext.state.promptCacheKey).toBe("conv-fork::fork::beta"); }); + it("derives fork ids from parent conversation hints", () => { + const manager = new SessionManager({ enabled: true }); + const parentBody = createBody("conv-fork-parent", 1, { parentConversationId: "parent-conv" }); + let parentContext = manager.getContext(parentBody) as SessionContext; + expect(parentContext.isNew).toBe(true); + expect(parentContext.state.promptCacheKey).toBe("conv-fork-parent::fork::parent-conv"); + manager.applyRequest(parentBody, parentContext); + expect(parentBody.prompt_cache_key).toBe("conv-fork-parent::fork::parent-conv"); + + const snakeParentBody = createBody("conv-fork-parent", 1, { + parent_conversation_id: "parent-snake", + }); + const snakeParentContext = manager.getContext(snakeParentBody) as SessionContext; + expect(snakeParentContext.isNew).toBe(true); + expect(snakeParentContext.state.promptCacheKey).toBe("conv-fork-parent::fork::parent-snake"); + }); + it("scopes compaction summaries per fork session", () => { const manager = new SessionManager({ enabled: true }); const alphaBody = createBody("conv-fork-summary", 1, { forkId: "alpha" });