From 8f58c032e15f712b548268b729e7b911b4b022e4 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 24 Feb 2026 12:51:33 +0000
Subject: [PATCH 1/3] Initial plan

From a9d8598646e579c8dec2530cd9650537ff52aa60 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 24 Feb 2026 13:03:42 +0000
Subject: [PATCH 2/3] initial plan for fixing prompt-clustering-analysis workflow failure

Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>
---
 .github/workflows/smoke-copilot.lock.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/smoke-copilot.lock.yml b/.github/workflows/smoke-copilot.lock.yml
index c5af982b005..1e2ad1db81e 100644
--- a/.github/workflows/smoke-copilot.lock.yml
+++ b/.github/workflows/smoke-copilot.lock.yml
@@ -1861,6 +1861,12 @@ jobs:
             echo "run_detection=false" >> "$GITHUB_OUTPUT"
             echo "Detection skipped: no agent outputs or patches to analyze"
           fi
+      - name: Clear MCP configuration for detection
+        if: always() && steps.detection_guard.outputs.run_detection == 'true'
+        run: |
+          rm -f /tmp/gh-aw/mcp-config/mcp-servers.json
+          rm -f /home/runner/.copilot/mcp-config.json
+          rm -f "$GITHUB_WORKSPACE/.gemini/settings.json"
       - name: Prepare threat detection files
         if: always() && steps.detection_guard.outputs.run_detection == 'true'
         run: |

From 99b7cb617cc441f78565ece9cc2c15c9e691a2cf Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 24 Feb 2026 13:07:25 +0000
Subject: [PATCH 3/3] fix(workflow): add error handling to PR download loop in
 prompt-clustering-analysis

Step 16 "Download full PR data with comments and reviews" failed when
gh pr view returned a non-zero exit code for a specific PR (deleted,
inaccessible, or rate-limited). Added if/else error handling to skip
individual PR failures gracefully instead of failing the whole step.
Recompiled the lock.yml to reflect changes.
Co-authored-by: pelikhan <4175913+pelikhan@users.noreply.github.com>
---
 .../workflows/prompt-clustering-analysis.lock.yml |  4 ++--
 .github/workflows/prompt-clustering-analysis.md   | 12 ++++++++----
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/prompt-clustering-analysis.lock.yml b/.github/workflows/prompt-clustering-analysis.lock.yml
index 88a37663d34..b9f41ebb5c4 100644
--- a/.github/workflows/prompt-clustering-analysis.lock.yml
+++ b/.github/workflows/prompt-clustering-analysis.lock.yml
@@ -30,7 +30,7 @@
 # - shared/reporting.md
 # - shared/trending-charts-simple.md
 #
-# gh-aw-metadata: {"schema_version":"v1","frontmatter_hash":"a5bf79953e3ee4b73e9d1832d43db7b4c8a9ee3d70bae99da7d43dfeebf96069"}
+# gh-aw-metadata: {"schema_version":"v1","frontmatter_hash":"c4de13d6035c4be451ed44a8a1679b53fd3126eb44d3c20a52166258c3310a94"}
 
 name: "Copilot Agent Prompt Clustering Analysis"
 "on":
@@ -346,7 +346,7 @@ jobs:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         name: Download full PR data with comments and reviews
-        run: "# Create output directory for full PR data\nmkdir -p /tmp/gh-aw/prompt-cache/pr-full-data\n\n# Download full data for each PR including comments, reviews, commits, and files\necho \"Downloading full PR data for each PR...\"\n\nPR_COUNT=$(jq 'length' /tmp/gh-aw/pr-data/copilot-prs.json)\necho \"Processing $PR_COUNT PRs...\"\n\n# Extract PR numbers and download full data for each\njq -r '.[].number' /tmp/gh-aw/pr-data/copilot-prs.json | while read -r pr_number; do\n echo \"Downloading full data for PR #$pr_number...\"\n \n # Download full PR data with essential fields only\n gh pr view \"$pr_number\" \\\n --repo \"${{ github.repository }}\" \\\n --json number,title,body,state,createdAt,closedAt,mergedAt,url,comments,reviews,commits,changedFiles,additions,deletions,reviewDecision \\\n > \"/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.json\"\n \n echo \"Downloaded PR #$pr_number\"\ndone\n\n# Create an index file listing all downloaded PRs\nfind /tmp/gh-aw/prompt-cache/pr-full-data/ -maxdepth 1 -name 'pr-[0-9]*.json' -type f -printf '%f\\n' | \\\n sed 's/pr-\\([0-9]*\\)\\.json/\\1/' | sort -n > /tmp/gh-aw/prompt-cache/pr-full-data/index.txt\n\necho \"Full PR data cached in /tmp/gh-aw/prompt-cache/pr-full-data/\"\necho \"Total PRs with full data: $(wc -l < /tmp/gh-aw/prompt-cache/pr-full-data/index.txt)\"\n"
+        run: "# Create output directory for full PR data\nmkdir -p /tmp/gh-aw/prompt-cache/pr-full-data\n\n# Download full data for each PR including comments, reviews, commits, and files\necho \"Downloading full PR data for each PR...\"\n\nPR_COUNT=$(jq 'length' /tmp/gh-aw/pr-data/copilot-prs.json)\necho \"Processing $PR_COUNT PRs...\"\n\n# Extract PR numbers and download full data for each\njq -r '.[].number' /tmp/gh-aw/pr-data/copilot-prs.json | while read -r pr_number; do\n echo \"Downloading full data for PR #$pr_number...\"\n \n # Download full PR data with essential fields only\n # Use error handling to skip individual PR failures (e.g. deleted PRs, rate limits)\n if gh pr view \"$pr_number\" \\\n --repo \"${{ github.repository }}\" \\\n --json number,title,body,state,createdAt,closedAt,mergedAt,url,comments,reviews,commits,changedFiles,additions,deletions,reviewDecision \\\n > \"/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.json\" 2>/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.err; then\n echo \"Downloaded PR #$pr_number\"\n else\n echo \"Warning: Failed to download PR #$pr_number (skipping)\"\n rm -f \"/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.json\" \"/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.err\"\n fi\ndone\n\n# Create an index file listing all downloaded PRs\nfind /tmp/gh-aw/prompt-cache/pr-full-data/ -maxdepth 1 -name 'pr-[0-9]*.json' -type f -printf '%f\\n' | \\\n sed 's/pr-\\([0-9]*\\)\\.json/\\1/' | sort -n > /tmp/gh-aw/prompt-cache/pr-full-data/index.txt\n\necho \"Full PR data cached in /tmp/gh-aw/prompt-cache/pr-full-data/\"\necho \"Total PRs with full data: $(wc -l < /tmp/gh-aw/prompt-cache/pr-full-data/index.txt)\"\n"
       - env:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/prompt-clustering-analysis.md b/.github/workflows/prompt-clustering-analysis.md
index b8bf2da6c6b..8e03f0f79fe 100644
--- a/.github/workflows/prompt-clustering-analysis.md
+++ b/.github/workflows/prompt-clustering-analysis.md
@@ -71,12 +71,16 @@ steps:
         echo "Downloading full data for PR #$pr_number..."
 
         # Download full PR data with essential fields only
-        gh pr view "$pr_number" \
+        # Use error handling to skip individual PR failures (e.g. deleted PRs, rate limits)
+        if gh pr view "$pr_number" \
           --repo "${{ github.repository }}" \
           --json number,title,body,state,createdAt,closedAt,mergedAt,url,comments,reviews,commits,changedFiles,additions,deletions,reviewDecision \
-          > "/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.json"
-
-        echo "Downloaded PR #$pr_number"
+          > "/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.json" 2>/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.err; then
+          echo "Downloaded PR #$pr_number"
+        else
+          echo "Warning: Failed to download PR #$pr_number (skipping)"
+          rm -f "/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.json" "/tmp/gh-aw/prompt-cache/pr-full-data/pr-${pr_number}.err"
+        fi
       done
 
       # Create an index file listing all downloaded PRs
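
The per-PR error handling introduced in PATCH 3/3 can be exercised on its own. Below is a minimal standalone sketch of the same pattern, assuming a hypothetical pr-numbers.txt input file and /tmp/pr-data output directory in place of the workflow's copilot-prs.json and /tmp/gh-aw/prompt-cache/pr-full-data paths:

#!/usr/bin/env bash
# Sketch only: pr-numbers.txt and /tmp/pr-data are hypothetical stand-ins.
# Assumes gh is authenticated and run inside a checked-out repository
# (otherwise pass --repo OWNER/REPO to gh pr view).
set -euo pipefail

out_dir=/tmp/pr-data
mkdir -p "$out_dir"

while read -r pr_number; do
  # Running gh pr view as the if condition means a failure (deleted PR,
  # rate limit, no access) takes the else branch instead of killing the loop.
  if gh pr view "$pr_number" --json number,title,state \
       > "$out_dir/pr-${pr_number}.json" 2> "$out_dir/pr-${pr_number}.err"; then
    echo "Downloaded PR #$pr_number"
  else
    echo "Warning: failed to download PR #$pr_number (skipping)"
    rm -f "$out_dir/pr-${pr_number}.json" "$out_dir/pr-${pr_number}.err"
  fi
done < pr-numbers.txt

Because the gh pr view call sits in the if condition, a non-zero exit is handled by the else branch rather than aborting the whole step under bash's errexit, which is what made the original step fail on the first inaccessible PR.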