diff --git a/.github/workflows/firewall.lock.yml b/.github/workflows/firewall.lock.yml
index 1ab0ec73a08..d4f005f557c 100644
--- a/.github/workflows/firewall.lock.yml
+++ b/.github/workflows/firewall.lock.yml
@@ -49,7 +49,7 @@ jobs:
permissions:
contents: read
concurrency:
- group: "gh-aw-claude-${{ github.workflow }}"
+ group: "gh-aw-copilot-${{ github.workflow }}"
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -99,144 +99,51 @@ jobs:
main().catch(error => {
core.setFailed(error instanceof Error ? error.message : String(error));
});
- - name: Validate ANTHROPIC_API_KEY secret
+ - name: Validate COPILOT_CLI_TOKEN secret
run: |
- if [ -z "$ANTHROPIC_API_KEY" ]; then
- echo "Error: ANTHROPIC_API_KEY secret is not set"
- echo "The Claude Code engine requires the ANTHROPIC_API_KEY secret to be configured."
+ if [ -z "$COPILOT_CLI_TOKEN" ]; then
+ echo "Error: COPILOT_CLI_TOKEN secret is not set"
+ echo "The GitHub Copilot CLI engine requires the COPILOT_CLI_TOKEN secret to be configured."
echo "Please configure this secret in your repository settings."
- echo "Documentation: https://githubnext.github.io/gh-aw/reference/engines/#anthropic-claude-code"
+ echo "Documentation: https://githubnext.github.io/gh-aw/reference/engines/#github-copilot-default"
exit 1
fi
- echo "ANTHROPIC_API_KEY secret is configured"
+ echo "COPILOT_CLI_TOKEN secret is configured"
env:
- ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+ COPILOT_CLI_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020
with:
node-version: '24'
- - name: Install Claude Code CLI
- run: npm install -g @anthropic-ai/claude-code@2.0.26
- - name: Generate Claude Settings
+ - name: Install awf binary
run: |
- mkdir -p /tmp/gh-aw/.claude
- cat > /tmp/gh-aw/.claude/settings.json << 'EOF'
- {
- "hooks": {
- "PreToolUse": [
- {
- "matcher": "WebFetch|WebSearch",
- "hooks": [
- {
- "type": "command",
- "command": ".claude/hooks/network_permissions.py"
- }
- ]
- }
- ]
- }
- }
- EOF
- - name: Generate Network Permissions Hook
- run: |
- mkdir -p .claude/hooks
- cat > .claude/hooks/network_permissions.py << 'EOF'
- #!/usr/bin/env python3
- """
- Network permissions validator for Claude Code engine.
- Generated by gh-aw from engine network permissions configuration.
- """
-
- import json
- import sys
- import urllib.parse
- import re
-
- # Domain allow-list (populated during generation)
- # JSON array safely embedded as Python list literal
- ALLOWED_DOMAINS = ["crl3.digicert.com","crl4.digicert.com","ocsp.digicert.com","ts-crl.ws.symantec.com","ts-ocsp.ws.symantec.com","crl.geotrust.com","ocsp.geotrust.com","crl.thawte.com","ocsp.thawte.com","crl.verisign.com","ocsp.verisign.com","crl.globalsign.com","ocsp.globalsign.com","crls.ssl.com","ocsp.ssl.com","crl.identrust.com","ocsp.identrust.com","crl.sectigo.com","ocsp.sectigo.com","crl.usertrust.com","ocsp.usertrust.com","s.symcb.com","s.symcd.com","json-schema.org","json.schemastore.org","archive.ubuntu.com","security.ubuntu.com","ppa.launchpad.net","keyserver.ubuntu.com","azure.archive.ubuntu.com","api.snapcraft.io","packagecloud.io","packages.cloud.google.com","packages.microsoft.com"]
-
- def extract_domain(url_or_query):
- """Extract domain from URL or search query."""
- if not url_or_query:
- return None
-
- if url_or_query.startswith(('http://', 'https://')):
- return urllib.parse.urlparse(url_or_query).netloc.lower()
-
- # Check for domain patterns in search queries
- match = re.search(r'site:([a-zA-Z0-9.-]+\.[a-zA-Z]{2,})', url_or_query)
- if match:
- return match.group(1).lower()
-
- return None
-
- def is_domain_allowed(domain):
- """Check if domain is allowed."""
- if not domain:
- # If no domain detected, allow only if not under deny-all policy
- return bool(ALLOWED_DOMAINS) # False if empty list (deny-all), True if has domains
-
- # Empty allowed domains means deny all
- if not ALLOWED_DOMAINS:
- return False
-
- for pattern in ALLOWED_DOMAINS:
- regex = pattern.replace('.', r'\.').replace('*', '.*')
- if re.match(f'^{regex}$', domain):
- return True
- return False
-
- # Main logic
- try:
- data = json.load(sys.stdin)
- tool_name = data.get('tool_name', '')
- tool_input = data.get('tool_input', {})
-
- if tool_name not in ['WebFetch', 'WebSearch']:
- sys.exit(0) # Allow other tools
-
- target = tool_input.get('url') or tool_input.get('query', '')
- domain = extract_domain(target)
-
- # For WebSearch, apply domain restrictions consistently
- # If no domain detected in search query, check if restrictions are in place
- if tool_name == 'WebSearch' and not domain:
- # Since this hook is only generated when network permissions are configured,
- # empty ALLOWED_DOMAINS means deny-all policy
- if not ALLOWED_DOMAINS: # Empty list means deny all
- print(f"Network access blocked: deny-all policy in effect", file=sys.stderr)
- print(f"No domains are allowed for WebSearch", file=sys.stderr)
- sys.exit(2) # Block under deny-all policy
- else:
- print(f"Network access blocked for web-search: no specific domain detected", file=sys.stderr)
- print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr)
- sys.exit(2) # Block general searches when domain allowlist is configured
-
- if not is_domain_allowed(domain):
- print(f"Network access blocked for domain: {domain}", file=sys.stderr)
- print(f"Allowed domains: {', '.join(ALLOWED_DOMAINS)}", file=sys.stderr)
- sys.exit(2) # Block with feedback to Claude
-
- sys.exit(0) # Allow
-
- except Exception as e:
- print(f"Network validation error: {e}", file=sys.stderr)
- sys.exit(2) # Block on errors
-
- EOF
- chmod +x .claude/hooks/network_permissions.py
+ LATEST_TAG=$(gh release view --repo githubnext/gh-aw-firewall --json tagName --jq .tagName)
+ echo "Installing awf from release: $LATEST_TAG"
+ curl -L https://github.com/githubnext/gh-aw-firewall/releases/download/${LATEST_TAG}/awf-linux-x64 -o awf
+ chmod +x awf
+ sudo mv awf /usr/local/bin/
+ which awf
+ awf --version
+ env:
+ GH_TOKEN: ${{ github.token }}
+ - name: Cleanup any existing awf resources
+ run: ./scripts/ci/cleanup.sh || true
+ - name: Install GitHub Copilot CLI
+ run: npm install -g @github/copilot@0.0.350
- name: Downloading container images
run: |
set -e
docker pull ghcr.io/github/github-mcp-server:v0.19.1
+ docker pull mcp/fetch
- name: Setup MCPs
run: |
mkdir -p /tmp/gh-aw/mcp-config
- cat > /tmp/gh-aw/mcp-config/mcp-servers.json << EOF
+ mkdir -p /home/runner/.copilot
+ cat > /home/runner/.copilot/mcp-config.json << EOF
{
"mcpServers": {
"github": {
+ "type": "local",
"command": "docker",
"args": [
"run",
@@ -250,13 +157,31 @@ jobs:
"GITHUB_TOOLSETS=default",
"ghcr.io/github/github-mcp-server:v0.19.1"
],
+ "tools": ["*"],
"env": {
- "GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}"
+ "GITHUB_PERSONAL_ACCESS_TOKEN": "\${GITHUB_MCP_SERVER_TOKEN}"
}
+ },
+ "web-fetch": {
+ "command": "docker",
+ "args": [
+ "run",
+ "-i",
+ "--rm",
+ "mcp/fetch"
+ ],
+ "tools": ["*"]
}
}
}
EOF
+ echo "-------START MCP CONFIG-----------"
+ cat /home/runner/.copilot/mcp-config.json
+ echo "-------END MCP CONFIG-----------"
+ echo "-------/home/runner/.copilot-----------"
+ find /home/runner/.copilot
+ echo "HOME: $HOME"
+ echo "GITHUB_COPILOT_CLI_MODE: $GITHUB_COPILOT_CLI_MODE"
- name: Create prompt
env:
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
@@ -422,7 +347,7 @@ jobs:
if-no-files-found: warn
- name: Capture agent version
run: |
- VERSION_OUTPUT=$(claude --version 2>&1 || echo "unknown")
+ VERSION_OUTPUT=$(copilot --version 2>&1 || echo "unknown")
# Extract semantic version pattern (e.g., 1.2.3, v1.2.3-beta)
CLEAN_VERSION=$(echo "$VERSION_OUTPUT" | grep -oE 'v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9]+)?' | head -n1 || echo "unknown")
echo "AGENT_VERSION=$CLEAN_VERSION" >> $GITHUB_ENV
@@ -434,8 +359,8 @@ jobs:
const fs = require('fs');
const awInfo = {
- engine_id: "claude",
- engine_name: "Claude Code",
+ engine_id: "copilot",
+ engine_name: "GitHub Copilot CLI",
model: "",
version: "",
agent_version: process.env.AGENT_VERSION || "",
@@ -467,94 +392,36 @@ jobs:
name: aw_info.json
path: /tmp/gh-aw/aw_info.json
if-no-files-found: warn
- - name: Execute Claude Code CLI
+ - name: Execute GitHub Copilot CLI
id: agentic_execution
- # Allowed tools (sorted):
- # - ExitPlanMode
- # - Glob
- # - Grep
- # - LS
- # - NotebookRead
- # - Read
- # - Task
- # - TodoWrite
- # - WebFetch
- # - mcp__github__download_workflow_run_artifact
- # - mcp__github__get_code_scanning_alert
- # - mcp__github__get_commit
- # - mcp__github__get_dependabot_alert
- # - mcp__github__get_discussion
- # - mcp__github__get_discussion_comments
- # - mcp__github__get_file_contents
- # - mcp__github__get_issue
- # - mcp__github__get_issue_comments
- # - mcp__github__get_job_logs
- # - mcp__github__get_label
- # - mcp__github__get_latest_release
- # - mcp__github__get_me
- # - mcp__github__get_notification_details
- # - mcp__github__get_pull_request
- # - mcp__github__get_pull_request_comments
- # - mcp__github__get_pull_request_diff
- # - mcp__github__get_pull_request_files
- # - mcp__github__get_pull_request_review_comments
- # - mcp__github__get_pull_request_reviews
- # - mcp__github__get_pull_request_status
- # - mcp__github__get_release_by_tag
- # - mcp__github__get_secret_scanning_alert
- # - mcp__github__get_tag
- # - mcp__github__get_workflow_run
- # - mcp__github__get_workflow_run_logs
- # - mcp__github__get_workflow_run_usage
- # - mcp__github__list_branches
- # - mcp__github__list_code_scanning_alerts
- # - mcp__github__list_commits
- # - mcp__github__list_dependabot_alerts
- # - mcp__github__list_discussion_categories
- # - mcp__github__list_discussions
- # - mcp__github__list_issue_types
- # - mcp__github__list_issues
- # - mcp__github__list_label
- # - mcp__github__list_notifications
- # - mcp__github__list_pull_requests
- # - mcp__github__list_releases
- # - mcp__github__list_secret_scanning_alerts
- # - mcp__github__list_starred_repositories
- # - mcp__github__list_sub_issues
- # - mcp__github__list_tags
- # - mcp__github__list_workflow_jobs
- # - mcp__github__list_workflow_run_artifacts
- # - mcp__github__list_workflow_runs
- # - mcp__github__list_workflows
- # - mcp__github__pull_request_read
- # - mcp__github__search_code
- # - mcp__github__search_issues
- # - mcp__github__search_orgs
- # - mcp__github__search_pull_requests
- # - mcp__github__search_repositories
- # - mcp__github__search_users
+ # Copilot CLI tool arguments (sorted):
+ # --allow-tool github
+ # --allow-tool web-fetch
timeout-minutes: 5
run: |
set -o pipefail
- # Execute Claude Code CLI with prompt from file
- claude --print --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools "ExitPlanMode,Glob,Grep,LS,NotebookRead,Read,Task,TodoWrite,WebFetch,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_sub_issues,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users" --debug --verbose --permission-mode bypassPermissions --output-format stream-json --settings /tmp/gh-aw/.claude/settings.json 
"$(cat /tmp/gh-aw/aw-prompts/prompt.txt)" 2>&1 | tee /tmp/gh-aw/agent-stdio.log
+ sudo -E awf --env-all \
+ --allow-domains api.enterprise.githubcopilot.com,api.github.com,github.com,raw.githubusercontent.com,registry.npmjs.org \
+ --log-level debug \
+ 'npx -y @github/copilot@0.0.350 --add-dir /tmp/gh-aw/ --log-level all --disable-builtin-mcps --allow-tool github --allow-tool web-fetch --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"' \
+ 2>&1 | tee /tmp/gh-aw/agent-stdio.log
+
+ # Move preserved Copilot logs to expected location
+ COPILOT_LOGS_DIR=$(ls -td /tmp/copilot-logs-* 2>/dev/null | head -1)
+ if [ -n "$COPILOT_LOGS_DIR" ] && [ -d "$COPILOT_LOGS_DIR" ]; then
+ echo "Moving Copilot logs from $COPILOT_LOGS_DIR to /tmp/gh-aw/.copilot/logs/"
+ sudo mkdir -p /tmp/gh-aw/.copilot/logs/
+ sudo mv "$COPILOT_LOGS_DIR"/* /tmp/gh-aw/.copilot/logs/ || true
+ sudo rmdir "$COPILOT_LOGS_DIR" || true
+ fi
env:
- ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
- DISABLE_TELEMETRY: "1"
- DISABLE_ERROR_REPORTING: "1"
- DISABLE_BUG_COMMAND: "1"
+ COPILOT_AGENT_RUNNER_TYPE: STANDALONE
+ GH_AW_MCP_CONFIG: /home/runner/.copilot/mcp-config.json
GH_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
- GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
- MCP_TIMEOUT: "120000"
- MCP_TOOL_TIMEOUT: "60000"
- BASH_DEFAULT_TIMEOUT_MS: "60000"
- BASH_MAX_TIMEOUT_MS: "60000"
- - name: Clean up network proxy hook files
- if: always()
- run: |
- rm -rf .claude/hooks/network_permissions.py || true
- rm -rf .claude/hooks || true
- rm -rf .claude || true
+ GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
+ GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }}
+ GITHUB_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
+ XDG_CONFIG_HOME: /home/runner
- name: Redact secrets in logs
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
@@ -666,10 +533,17 @@ jobs:
}
await main();
env:
- GH_AW_SECRET_NAMES: 'ANTHROPIC_API_KEY,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN'
- SECRET_ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+ GH_AW_SECRET_NAMES: 'COPILOT_CLI_TOKEN,GH_AW_GITHUB_TOKEN,GITHUB_TOKEN'
+ SECRET_COPILOT_CLI_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }}
SECRET_GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
SECRET_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Upload engine output files
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+ with:
+ name: agent_outputs
+ path: |
+ /tmp/gh-aw/.copilot/logs/
+ if-no-files-found: ignore
- name: Upload MCP logs
if: always()
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
@@ -677,42 +551,392 @@ jobs:
name: mcp-logs
path: /tmp/gh-aw/mcp-logs/
if-no-files-found: ignore
+ - name: Agent Firewall logs
+ if: always()
+ run: |
+ # Squid logs are preserved in timestamped directories
+ SQUID_LOGS_DIR=$(ls -td /tmp/squid-logs-* 2>/dev/null | head -1)
+ if [ -n "$SQUID_LOGS_DIR" ] && [ -d "$SQUID_LOGS_DIR" ]; then
+ echo "Found Squid logs at: $SQUID_LOGS_DIR"
+ mkdir -p /tmp/gh-aw/squid-logs-firewall-test-agent/
+ sudo cp -r "$SQUID_LOGS_DIR"/* /tmp/gh-aw/squid-logs-firewall-test-agent/ || true
+ sudo chmod -R a+r /tmp/gh-aw/squid-logs-firewall-test-agent/ || true
+ fi
+ - name: Upload Firewall Logs
+ if: always()
+ uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+ with:
+ name: squid-logs-firewall-test-agent
+ path: /tmp/gh-aw/squid-logs-firewall-test-agent/
+ if-no-files-found: ignore
+ - name: Parse firewall logs for step summary
+ if: always()
+      uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
+ with:
+ script: |
+ function main() {
+
+ const fs = require("fs");
+
+ const path = require("path");
+
+ try {
+
+ const workflowName = process.env.GITHUB_WORKFLOW || "workflow";
+
+ const sanitizedName = sanitizeWorkflowName(workflowName);
+
+ const squidLogsDir = `/tmp/gh-aw/squid-logs-${sanitizedName}/`;
+
+ if (!fs.existsSync(squidLogsDir)) {
+
+ core.info(`No firewall logs directory found at: ${squidLogsDir}`);
+
+ return;
+
+ }
+
+ const files = fs.readdirSync(squidLogsDir).filter(file => file.endsWith(".log"));
+
+ if (files.length === 0) {
+
+ core.info(`No firewall log files found in: ${squidLogsDir}`);
+
+ return;
+
+ }
+
+ core.info(`Found ${files.length} firewall log file(s)`);
+
+ let totalRequests = 0;
+
+ let allowedRequests = 0;
+
+ let deniedRequests = 0;
+
+ const allowedDomains = new Set();
+
+ const deniedDomains = new Set();
+
+ const requestsByDomain = new Map();
+
+ for (const file of files) {
+
+ const filePath = path.join(squidLogsDir, file);
+
+ core.info(`Parsing firewall log: ${file}`);
+
+ const content = fs.readFileSync(filePath, "utf8");
+
+ const lines = content.split("\n").filter(line => line.trim());
+
+ for (const line of lines) {
+
+ const entry = parseFirewallLogLine(line);
+
+ if (!entry) {
+
+ continue;
+
+ }
+
+ totalRequests++;
+
+ const isAllowed = isRequestAllowed(entry.decision, entry.status);
+
+ if (isAllowed) {
+
+ allowedRequests++;
+
+ allowedDomains.add(entry.domain);
+
+ } else {
+
+ deniedRequests++;
+
+ deniedDomains.add(entry.domain);
+
+ }
+
+ if (!requestsByDomain.has(entry.domain)) {
+
+ requestsByDomain.set(entry.domain, { allowed: 0, denied: 0 });
+
+ }
+
+ const domainStats = requestsByDomain.get(entry.domain);
+
+ if (isAllowed) {
+
+ domainStats.allowed++;
+
+ } else {
+
+ domainStats.denied++;
+
+ }
+
+ }
+
+ }
+
+ const summary = generateFirewallSummary({
+
+ totalRequests,
+
+ allowedRequests,
+
+ deniedRequests,
+
+ allowedDomains: Array.from(allowedDomains).sort(),
+
+ deniedDomains: Array.from(deniedDomains).sort(),
+
+ requestsByDomain,
+
+ });
+
+ core.summary.addRaw(summary).write();
+
+ core.info("Firewall log summary generated successfully");
+
+ } catch (error) {
+
+ core.setFailed(error instanceof Error ? error : String(error));
+
+ }
+
+ }
+
+ function parseFirewallLogLine(line) {
+
+ const trimmed = line.trim();
+
+ if (!trimmed || trimmed.startsWith("#")) {
+
+ return null;
+
+ }
+
+ const fields = trimmed.match(/(?:[^\s"]+|"[^"]*")+/g);
+
+ if (!fields || fields.length < 10) {
+
+ return null;
+
+ }
+
+ return {
+
+ timestamp: fields[0],
+
+ clientIpPort: fields[1],
+
+ domain: fields[2],
+
+ destIpPort: fields[3],
+
+ proto: fields[4],
+
+ method: fields[5],
+
+ status: fields[6],
+
+ decision: fields[7],
+
+ url: fields[8],
+
+ userAgent: fields[9] ? fields[9].replace(/^"|"$/g, "") : "-",
+
+ };
+
+ }
+
+ function isRequestAllowed(decision, status) {
+
+ const statusCode = parseInt(status, 10);
+
+ if (statusCode === 200 || statusCode === 206 || statusCode === 304) {
+
+ return true;
+
+ }
+
+ if (decision.includes("TCP_TUNNEL") || decision.includes("TCP_HIT") || decision.includes("TCP_MISS")) {
+
+ return true;
+
+ }
+
+ if (decision.includes("NONE_NONE") || decision.includes("TCP_DENIED") || statusCode === 403 || statusCode === 407) {
+
+ return false;
+
+ }
+
+ return false;
+
+ }
+
+ function generateFirewallSummary(analysis) {
+
+ const { totalRequests, deniedRequests, deniedDomains, requestsByDomain } = analysis;
+
+ let summary = "# 🔥 Firewall Blocked Requests\n\n";
+
+ if (deniedRequests > 0) {
+
+ summary += `**${deniedRequests}** request${deniedRequests !== 1 ? "s" : ""} blocked across **${deniedDomains.length}** unique domain${deniedDomains.length !== 1 ? "s" : ""}`;
+
+ summary += ` (${totalRequests > 0 ? Math.round((deniedRequests / totalRequests) * 100) : 0}% of total traffic)\n\n`;
+
+ summary += "## 🚫 Blocked Domains\n\n";
+
+ summary += "| Domain | Blocked Requests |\n";
+
+ summary += "|--------|------------------|\n";
+
+ for (const domain of deniedDomains) {
+
+ const stats = requestsByDomain.get(domain);
+
+ summary += `| ${domain} | ${stats.denied} |\n`;
+
+ }
+
+ summary += "\n";
+
+ } else {
+
+ summary += "✅ **No blocked requests detected**\n\n";
+
+ if (totalRequests > 0) {
+
+ summary += `All ${totalRequests} request${totalRequests !== 1 ? "s" : ""} were allowed through the firewall.\n\n`;
+
+ } else {
+
+ summary += "No firewall activity detected.\n\n";
+
+ }
+
+ }
+
+ return summary;
+
+ }
+
+ function sanitizeWorkflowName(name) {
+
+ return name
+
+ .toLowerCase()
+
+ .replace(/[:\\/\s]/g, "-")
+
+ .replace(/[^a-z0-9._-]/g, "-");
+
+ }
+
+ if (typeof module !== "undefined" && module.exports) {
+
+ module.exports = {
+
+ parseFirewallLogLine,
+
+ isRequestAllowed,
+
+ generateFirewallSummary,
+
+ sanitizeWorkflowName,
+
+ main,
+
+ };
+
+ }
+
+ const isDirectExecution =
+
+ typeof module === "undefined" || (typeof require !== "undefined" && typeof require.main !== "undefined" && require.main === module);
+
+ if (isDirectExecution) {
+
+ main();
+
+ }
+
- name: Parse agent logs for step summary
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
env:
- GH_AW_AGENT_OUTPUT: /tmp/gh-aw/agent-stdio.log
+ GH_AW_AGENT_OUTPUT: /tmp/gh-aw/.copilot/logs/
with:
script: |
function main() {
const fs = require("fs");
+ const path = require("path");
try {
- const logFile = process.env.GH_AW_AGENT_OUTPUT;
- if (!logFile) {
+ const logPath = process.env.GH_AW_AGENT_OUTPUT;
+ if (!logPath) {
core.info("No agent log file specified");
return;
}
- if (!fs.existsSync(logFile)) {
- core.info(`Log file not found: ${logFile}`);
+ if (!fs.existsSync(logPath)) {
+ core.info(`Log path not found: ${logPath}`);
return;
}
- const logContent = fs.readFileSync(logFile, "utf8");
- const result = parseClaudeLog(logContent);
- core.info(result.markdown);
- core.summary.addRaw(result.markdown).write();
- if (result.mcpFailures && result.mcpFailures.length > 0) {
- const failedServers = result.mcpFailures.join(", ");
- core.setFailed(`MCP server(s) failed to launch: ${failedServers}`);
+ let content = "";
+ const stat = fs.statSync(logPath);
+ if (stat.isDirectory()) {
+ const files = fs.readdirSync(logPath);
+ const logFiles = files.filter(file => file.endsWith(".log") || file.endsWith(".txt"));
+ if (logFiles.length === 0) {
+ core.info(`No log files found in directory: ${logPath}`);
+ return;
+ }
+ logFiles.sort();
+ for (const file of logFiles) {
+ const filePath = path.join(logPath, file);
+ const fileContent = fs.readFileSync(filePath, "utf8");
+ content += fileContent;
+ if (content.length > 0 && !content.endsWith("\n")) {
+ content += "\n";
+ }
+ }
+ } else {
+ content = fs.readFileSync(logPath, "utf8");
}
- if (result.maxTurnsHit) {
- core.setFailed(`Agent execution stopped: max-turns limit reached. The agent did not complete its task successfully.`);
+ const parsedLog = parseCopilotLog(content);
+ if (parsedLog) {
+ core.info(parsedLog);
+ core.summary.addRaw(parsedLog).write();
+ core.info("Copilot log parsed successfully");
+ } else {
+ core.error("Failed to parse Copilot log");
}
} catch (error) {
- const errorMessage = error instanceof Error ? error.message : String(error);
- core.setFailed(errorMessage);
+ core.setFailed(error instanceof Error ? error : String(error));
}
}
- function parseClaudeLog(logContent) {
+ function extractPremiumRequestCount(logContent) {
+ const patterns = [
+ /premium\s+requests?\s+consumed:?\s*(\d+)/i,
+ /(\d+)\s+premium\s+requests?\s+consumed/i,
+ /consumed\s+(\d+)\s+premium\s+requests?/i,
+ ];
+ for (const pattern of patterns) {
+ const match = logContent.match(pattern);
+ if (match && match[1]) {
+ const count = parseInt(match[1], 10);
+ if (!isNaN(count) && count > 0) {
+ return count;
+ }
+ }
+ }
+ return 1;
+ }
+ function parseCopilotLog(logContent) {
try {
let logEntries;
try {
@@ -721,41 +945,42 @@ jobs:
throw new Error("Not a JSON array");
}
} catch (jsonArrayError) {
- logEntries = [];
- const lines = logContent.split("\n");
- for (const line of lines) {
- const trimmedLine = line.trim();
- if (trimmedLine === "") {
- continue;
- }
- if (trimmedLine.startsWith("[{")) {
- try {
- const arrayEntries = JSON.parse(trimmedLine);
- if (Array.isArray(arrayEntries)) {
- logEntries.push(...arrayEntries);
+ const debugLogEntries = parseDebugLogFormat(logContent);
+ if (debugLogEntries && debugLogEntries.length > 0) {
+ logEntries = debugLogEntries;
+ } else {
+ logEntries = [];
+ const lines = logContent.split("\n");
+ for (const line of lines) {
+ const trimmedLine = line.trim();
+ if (trimmedLine === "") {
+ continue;
+ }
+ if (trimmedLine.startsWith("[{")) {
+ try {
+ const arrayEntries = JSON.parse(trimmedLine);
+ if (Array.isArray(arrayEntries)) {
+ logEntries.push(...arrayEntries);
+ continue;
+ }
+ } catch (arrayParseError) {
continue;
}
- } catch (arrayParseError) {
+ }
+ if (!trimmedLine.startsWith("{")) {
+ continue;
+ }
+ try {
+ const jsonEntry = JSON.parse(trimmedLine);
+ logEntries.push(jsonEntry);
+ } catch (jsonLineError) {
continue;
}
- }
- if (!trimmedLine.startsWith("{")) {
- continue;
- }
- try {
- const jsonEntry = JSON.parse(trimmedLine);
- logEntries.push(jsonEntry);
- } catch (jsonLineError) {
- continue;
}
}
}
if (!Array.isArray(logEntries) || logEntries.length === 0) {
- return {
- markdown: "## Agent Log Summary\n\nLog format not recognized as Claude JSON array or JSONL.\n",
- mcpFailures: [],
- maxTurnsHit: false,
- };
+ return "## Agent Log Summary\n\nLog format not recognized as Copilot JSON array or JSONL.\n";
}
const toolUsePairs = new Map();
for (const entry of logEntries) {
@@ -768,13 +993,10 @@ jobs:
}
}
let markdown = "";
- const mcpFailures = [];
const initEntry = logEntries.find(entry => entry.type === "system" && entry.subtype === "init");
if (initEntry) {
markdown += "## 🚀 Initialization\n\n";
- const initResult = formatInitializationSummary(initEntry);
- markdown += initResult.markdown;
- mcpFailures.push(...initResult.mcpFailures);
+ markdown += formatInitializationSummary(initEntry);
markdown += "\n";
}
markdown += "\n## 🤖 Reasoning\n\n";
@@ -788,7 +1010,7 @@ jobs:
}
} else if (content.type === "tool_use") {
const toolResult = toolUsePairs.get(content.id);
- const toolMarkdown = formatToolUse(content, toolResult);
+ const toolMarkdown = formatToolUseWithDetails(content, toolResult);
if (toolMarkdown) {
markdown += toolMarkdown;
}
@@ -805,7 +1027,7 @@ jobs:
const toolName = content.name;
const input = content.input || {};
if (["Read", "Write", "Edit", "MultiEdit", "LS", "Grep", "Glob", "TodoWrite"].includes(toolName)) {
- continue;
+ continue;
}
const toolResult = toolUsePairs.get(content.id);
let statusIcon = "❓";
@@ -847,6 +1069,12 @@ jobs:
if (lastEntry.total_cost_usd) {
markdown += `**Total Cost:** $${lastEntry.total_cost_usd.toFixed(4)}\n\n`;
}
+ const isPremiumModel =
+ initEntry && initEntry.model_info && initEntry.model_info.billing && initEntry.model_info.billing.is_premium === true;
+ if (isPremiumModel) {
+ const premiumRequestCount = extractPremiumRequestCount(logContent);
+ markdown += `**Premium Requests Consumed:** ${premiumRequestCount}\n\n`;
+ }
if (lastEntry.usage) {
const usage = lastEntry.usage;
if (usage.input_tokens || usage.output_tokens) {
@@ -858,34 +1086,439 @@ jobs:
markdown += "\n";
}
}
- if (lastEntry.permission_denials && lastEntry.permission_denials.length > 0) {
- markdown += `**Permission Denials:** ${lastEntry.permission_denials.length}\n\n`;
+ }
+ return markdown;
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ return `## Agent Log Summary\n\nError parsing Copilot log (tried both JSON array and JSONL formats): ${errorMessage}\n`;
+ }
+ }
+ function scanForToolErrors(logContent) {
+ const toolErrors = new Map();
+ const lines = logContent.split("\n");
+ const recentToolCalls = [];
+ const MAX_RECENT_TOOLS = 10;
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i];
+ if (line.includes('"tool_calls":') && !line.includes('\\"tool_calls\\"')) {
+ for (let j = i + 1; j < Math.min(i + 30, lines.length); j++) {
+ const nextLine = lines[j];
+ const idMatch = nextLine.match(/"id":\s*"([^"]+)"/);
+ const nameMatch = nextLine.match(/"name":\s*"([^"]+)"/) && !nextLine.includes('\\"name\\"');
+ if (idMatch) {
+ const toolId = idMatch[1];
+ for (let k = j; k < Math.min(j + 10, lines.length); k++) {
+ const nameLine = lines[k];
+ const funcNameMatch = nameLine.match(/"name":\s*"([^"]+)"/);
+ if (funcNameMatch && !nameLine.includes('\\"name\\"')) {
+ const toolName = funcNameMatch[1];
+ recentToolCalls.unshift({ id: toolId, name: toolName });
+ if (recentToolCalls.length > MAX_RECENT_TOOLS) {
+ recentToolCalls.pop();
+ }
+ break;
+ }
+ }
+ }
}
}
- let maxTurnsHit = false;
- const maxTurns = process.env.GH_AW_MAX_TURNS;
- if (maxTurns && lastEntry && lastEntry.num_turns) {
- const configuredMaxTurns = parseInt(maxTurns, 10);
- if (!isNaN(configuredMaxTurns) && lastEntry.num_turns >= configuredMaxTurns) {
- maxTurnsHit = true;
+ const errorMatch = line.match(/\[ERROR\].*(?:Tool execution failed|Permission denied|Resource not accessible|Error executing tool)/i);
+ if (errorMatch) {
+ const toolNameMatch = line.match(/Tool execution failed:\s*([^\s]+)/i);
+ const toolIdMatch = line.match(/tool_call_id:\s*([^\s]+)/i);
+ if (toolNameMatch) {
+ const toolName = toolNameMatch[1];
+ toolErrors.set(toolName, true);
+ const matchingTool = recentToolCalls.find(t => t.name === toolName);
+ if (matchingTool) {
+ toolErrors.set(matchingTool.id, true);
+ }
+ } else if (toolIdMatch) {
+ toolErrors.set(toolIdMatch[1], true);
+ } else if (recentToolCalls.length > 0) {
+ const lastTool = recentToolCalls[0];
+ toolErrors.set(lastTool.id, true);
+ toolErrors.set(lastTool.name, true);
}
}
- return { markdown, mcpFailures, maxTurnsHit };
- } catch (error) {
- const errorMessage = error instanceof Error ? error.message : String(error);
- return {
- markdown: `## Agent Log Summary\n\nError parsing Claude log (tried both JSON array and JSONL formats): ${errorMessage}\n`,
- mcpFailures: [],
- maxTurnsHit: false,
+ }
+ return toolErrors;
+ }
+ function parseDebugLogFormat(logContent) {
+ const entries = [];
+ const lines = logContent.split("\n");
+ const toolErrors = scanForToolErrors(logContent);
+ let model = "unknown";
+ let sessionId = null;
+ let modelInfo = null;
+ let tools = [];
+ const modelMatch = logContent.match(/Starting Copilot CLI: ([\d.]+)/);
+ if (modelMatch) {
+ sessionId = `copilot-${modelMatch[1]}-${Date.now()}`;
+ }
+ const gotModelInfoIndex = logContent.indexOf("[DEBUG] Got model info: {");
+ if (gotModelInfoIndex !== -1) {
+ const jsonStart = logContent.indexOf("{", gotModelInfoIndex);
+ if (jsonStart !== -1) {
+ let braceCount = 0;
+ let inString = false;
+ let escapeNext = false;
+ let jsonEnd = -1;
+ for (let i = jsonStart; i < logContent.length; i++) {
+ const char = logContent[i];
+ if (escapeNext) {
+ escapeNext = false;
+ continue;
+ }
+ if (char === "\\") {
+ escapeNext = true;
+ continue;
+ }
+ if (char === '"' && !escapeNext) {
+ inString = !inString;
+ continue;
+ }
+ if (inString) continue;
+ if (char === "{") {
+ braceCount++;
+ } else if (char === "}") {
+ braceCount--;
+ if (braceCount === 0) {
+ jsonEnd = i + 1;
+ break;
+ }
+ }
+ }
+ if (jsonEnd !== -1) {
+ const modelInfoJson = logContent.substring(jsonStart, jsonEnd);
+ try {
+ modelInfo = JSON.parse(modelInfoJson);
+ } catch (e) {
+ }
+ }
+ }
+ }
+ const toolsIndex = logContent.indexOf("[DEBUG] Tools:");
+ if (toolsIndex !== -1) {
+ const afterToolsLine = logContent.indexOf("\n", toolsIndex);
+ let toolsStart = logContent.indexOf("[DEBUG] [", afterToolsLine);
+ if (toolsStart !== -1) {
+ toolsStart = logContent.indexOf("[", toolsStart + 7);
+ }
+ if (toolsStart !== -1) {
+ let bracketCount = 0;
+ let inString = false;
+ let escapeNext = false;
+ let toolsEnd = -1;
+ for (let i = toolsStart; i < logContent.length; i++) {
+ const char = logContent[i];
+ if (escapeNext) {
+ escapeNext = false;
+ continue;
+ }
+ if (char === "\\") {
+ escapeNext = true;
+ continue;
+ }
+ if (char === '"' && !escapeNext) {
+ inString = !inString;
+ continue;
+ }
+ if (inString) continue;
+ if (char === "[") {
+ bracketCount++;
+ } else if (char === "]") {
+ bracketCount--;
+ if (bracketCount === 0) {
+ toolsEnd = i + 1;
+ break;
+ }
+ }
+ }
+ if (toolsEnd !== -1) {
+ let toolsJson = logContent.substring(toolsStart, toolsEnd);
+ toolsJson = toolsJson.replace(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z \[DEBUG\] /gm, "");
+ try {
+ const toolsArray = JSON.parse(toolsJson);
+ if (Array.isArray(toolsArray)) {
+ tools = toolsArray
+ .map(tool => {
+ if (tool.type === "function" && tool.function && tool.function.name) {
+ let name = tool.function.name;
+ if (name.startsWith("github-")) {
+ name = "mcp__github__" + name.substring(7);
+ } else if (name.startsWith("safe_outputs-")) {
+ name = name;
+ }
+ return name;
+ }
+ return null;
+ })
+ .filter(name => name !== null);
+ }
+ } catch (e) {
+ }
+ }
+ }
+ }
+ let inDataBlock = false;
+ let currentJsonLines = [];
+ let turnCount = 0;
+ for (let i = 0; i < lines.length; i++) {
+ const line = lines[i];
+ if (line.includes("[DEBUG] data:")) {
+ inDataBlock = true;
+ currentJsonLines = [];
+ continue;
+ }
+ if (inDataBlock) {
+ const hasTimestamp = line.match(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z /);
+ if (hasTimestamp) {
+ const cleanLine = line.replace(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z \[DEBUG\] /, "");
+ const isJsonContent = /^[{\[}\]"]/.test(cleanLine) || cleanLine.trim().startsWith('"');
+ if (!isJsonContent) {
+ if (currentJsonLines.length > 0) {
+ try {
+ const jsonStr = currentJsonLines.join("\n");
+ const jsonData = JSON.parse(jsonStr);
+ if (jsonData.model) {
+ model = jsonData.model;
+ }
+ if (jsonData.choices && Array.isArray(jsonData.choices)) {
+ for (const choice of jsonData.choices) {
+ if (choice.message) {
+ const message = choice.message;
+ const content = [];
+ const toolResults = [];
+ if (message.content && message.content.trim()) {
+ content.push({
+ type: "text",
+ text: message.content,
+ });
+ }
+ if (message.tool_calls && Array.isArray(message.tool_calls)) {
+ for (const toolCall of message.tool_calls) {
+ if (toolCall.function) {
+ let toolName = toolCall.function.name;
+ const originalToolName = toolName;
+ const toolId = toolCall.id || `tool_${Date.now()}_${Math.random()}`;
+ let args = {};
+ if (toolName.startsWith("github-")) {
+ toolName = "mcp__github__" + toolName.substring(7);
+ } else if (toolName === "bash") {
+ toolName = "Bash";
+ }
+ try {
+ args = JSON.parse(toolCall.function.arguments);
+ } catch (e) {
+ args = {};
+ }
+ content.push({
+ type: "tool_use",
+ id: toolId,
+ name: toolName,
+ input: args,
+ });
+ const hasError = toolErrors.has(toolId) || toolErrors.has(originalToolName);
+ toolResults.push({
+ type: "tool_result",
+ tool_use_id: toolId,
+ content: hasError ? "Permission denied or tool execution failed" : "",
+ is_error: hasError,
+ });
+ }
+ }
+ }
+ if (content.length > 0) {
+ entries.push({
+ type: "assistant",
+ message: { content },
+ });
+ turnCount++;
+ if (toolResults.length > 0) {
+ entries.push({
+ type: "user",
+ message: { content: toolResults },
+ });
+ }
+ }
+ }
+ }
+ if (jsonData.usage) {
+ if (!entries._accumulatedUsage) {
+ entries._accumulatedUsage = {
+ input_tokens: 0,
+ output_tokens: 0,
+ };
+ }
+ if (jsonData.usage.prompt_tokens) {
+ entries._accumulatedUsage.input_tokens += jsonData.usage.prompt_tokens;
+ }
+ if (jsonData.usage.completion_tokens) {
+ entries._accumulatedUsage.output_tokens += jsonData.usage.completion_tokens;
+ }
+ entries._lastResult = {
+ type: "result",
+ num_turns: turnCount,
+ usage: entries._accumulatedUsage,
+ };
+ }
+ }
+ } catch (e) {
+ }
+ }
+ inDataBlock = false;
+ currentJsonLines = [];
+ continue;
+ } else if (hasTimestamp && isJsonContent) {
+ currentJsonLines.push(cleanLine);
+ }
+ } else {
+ const cleanLine = line.replace(/^\d{4}-\d{2}-\d{2}T[\d:.]+Z \[DEBUG\] /, "");
+ currentJsonLines.push(cleanLine);
+ }
+ }
+ }
+ if (inDataBlock && currentJsonLines.length > 0) {
+ try {
+ const jsonStr = currentJsonLines.join("\n");
+ const jsonData = JSON.parse(jsonStr);
+ if (jsonData.model) {
+ model = jsonData.model;
+ }
+ if (jsonData.choices && Array.isArray(jsonData.choices)) {
+ for (const choice of jsonData.choices) {
+ if (choice.message) {
+ const message = choice.message;
+ const content = [];
+ const toolResults = [];
+ if (message.content && message.content.trim()) {
+ content.push({
+ type: "text",
+ text: message.content,
+ });
+ }
+ if (message.tool_calls && Array.isArray(message.tool_calls)) {
+ for (const toolCall of message.tool_calls) {
+ if (toolCall.function) {
+ let toolName = toolCall.function.name;
+ const originalToolName = toolName;
+ const toolId = toolCall.id || `tool_${Date.now()}_${Math.random()}`;
+ let args = {};
+ if (toolName.startsWith("github-")) {
+ toolName = "mcp__github__" + toolName.substring(7);
+ } else if (toolName === "bash") {
+ toolName = "Bash";
+ }
+ try {
+ args = JSON.parse(toolCall.function.arguments);
+ } catch (e) {
+ args = {};
+ }
+ content.push({
+ type: "tool_use",
+ id: toolId,
+ name: toolName,
+ input: args,
+ });
+ const hasError = toolErrors.has(toolId) || toolErrors.has(originalToolName);
+ toolResults.push({
+ type: "tool_result",
+ tool_use_id: toolId,
+ content: hasError ? "Permission denied or tool execution failed" : "",
+ is_error: hasError,
+ });
+ }
+ }
+ }
+ if (content.length > 0) {
+ entries.push({
+ type: "assistant",
+ message: { content },
+ });
+ turnCount++;
+ if (toolResults.length > 0) {
+ entries.push({
+ type: "user",
+ message: { content: toolResults },
+ });
+ }
+ }
+ }
+ }
+ if (jsonData.usage) {
+ if (!entries._accumulatedUsage) {
+ entries._accumulatedUsage = {
+ input_tokens: 0,
+ output_tokens: 0,
+ };
+ }
+ if (jsonData.usage.prompt_tokens) {
+ entries._accumulatedUsage.input_tokens += jsonData.usage.prompt_tokens;
+ }
+ if (jsonData.usage.completion_tokens) {
+ entries._accumulatedUsage.output_tokens += jsonData.usage.completion_tokens;
+ }
+ entries._lastResult = {
+ type: "result",
+ num_turns: turnCount,
+ usage: entries._accumulatedUsage,
+ };
+ }
+ }
+ } catch (e) {
+ }
+ }
+ if (entries.length > 0) {
+ const initEntry = {
+ type: "system",
+ subtype: "init",
+ session_id: sessionId,
+ model: model,
+ tools: tools,
};
+ if (modelInfo) {
+ initEntry.model_info = modelInfo;
+ }
+ entries.unshift(initEntry);
+ if (entries._lastResult) {
+ entries.push(entries._lastResult);
+ delete entries._lastResult;
+ }
}
+ return entries;
}
function formatInitializationSummary(initEntry) {
let markdown = "";
- const mcpFailures = [];
if (initEntry.model) {
markdown += `**Model:** ${initEntry.model}\n\n`;
}
+ if (initEntry.model_info) {
+ const modelInfo = initEntry.model_info;
+ if (modelInfo.name) {
+ markdown += `**Model Name:** ${modelInfo.name}`;
+ if (modelInfo.vendor) {
+ markdown += ` (${modelInfo.vendor})`;
+ }
+ markdown += "\n\n";
+ }
+ if (modelInfo.billing) {
+ const billing = modelInfo.billing;
+ if (billing.is_premium === true) {
+ markdown += `**Premium Model:** Yes`;
+ if (billing.multiplier && billing.multiplier !== 1) {
+ markdown += ` (${billing.multiplier}x cost multiplier)`;
+ }
+ markdown += "\n";
+ if (billing.restricted_to && Array.isArray(billing.restricted_to) && billing.restricted_to.length > 0) {
+ markdown += `**Required Plans:** ${billing.restricted_to.join(", ")}\n`;
+ }
+ markdown += "\n";
+ } else if (billing.is_premium === false) {
+ markdown += `**Premium Model:** No\n\n`;
+ }
+ }
+ }
if (initEntry.session_id) {
markdown += `**Session ID:** ${initEntry.session_id}\n\n`;
}
@@ -898,9 +1531,6 @@ jobs:
for (const server of initEntry.mcp_servers) {
const statusIcon = server.status === "connected" ? "✅" : server.status === "failed" ? "❌" : "❓";
markdown += `- ${statusIcon} ${server.name} (${server.status})\n`;
- if (server.status === "failed") {
- mcpFailures.push(server.name);
- }
}
markdown += "\n";
}
@@ -938,17 +1568,7 @@ jobs:
}
markdown += "\n";
}
- if (initEntry.slash_commands && Array.isArray(initEntry.slash_commands)) {
- const commandCount = initEntry.slash_commands.length;
- markdown += `**Slash Commands:** ${commandCount} available\n`;
- if (commandCount <= 10) {
- markdown += `- ${initEntry.slash_commands.join(", ")}\n`;
- } else {
- markdown += `- ${initEntry.slash_commands.slice(0, 5).join(", ")}, and ${commandCount - 5} more\n`;
- }
- markdown += "\n";
- }
- return { markdown, mcpFailures };
+ return markdown;
}
function estimateTokens(text) {
if (!text) return 0;
@@ -967,11 +1587,11 @@ jobs:
}
return `${minutes}m ${remainingSeconds}s`;
}
- function formatToolUse(toolUse, toolResult) {
+ function formatToolUseWithDetails(toolUse, toolResult) {
const toolName = toolUse.name;
const input = toolUse.input || {};
if (toolName === "TodoWrite") {
- return "";
+ return "";
}
function getStatusIcon() {
if (toolResult) {
@@ -1012,7 +1632,7 @@ jobs:
break;
case "Read":
const filePath = input.file_path || input.path || "";
- const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, "");
+ const relativePath = filePath.replace(/^\/[^\/]*\/[^\/]*\/[^\/]*\/[^\/]*\//, "");
summary = `${statusIcon} Read ${relativePath}${metadata}`;
break;
case "Write":
@@ -1053,9 +1673,19 @@ jobs:
}
}
if (details && details.trim()) {
- const maxDetailsLength = 500;
- const truncatedDetails = details.length > maxDetailsLength ? details.substring(0, maxDetailsLength) + "..." : details;
- return `\n${summary}
\n\n\`\`\`\`\`\n${truncatedDetails}\n\`\`\`\`\`\n \n\n`;
+ let detailsContent = "";
+ const inputKeys = Object.keys(input);
+ if (inputKeys.length > 0) {
+ detailsContent += "**Parameters:**\n\n";
+ detailsContent += "``````json\n";
+ detailsContent += JSON.stringify(input, null, 2);
+ detailsContent += "\n``````\n\n";
+ }
+ detailsContent += "**Response:**\n\n";
+ detailsContent += "``````\n";
+ detailsContent += details;
+ detailsContent += "\n``````";
+ return `\n${summary}
\n\n${detailsContent}\n \n\n`;
} else {
return `${summary}\n\n`;
}
@@ -1064,8 +1694,8 @@ jobs:
if (toolName.startsWith("mcp__")) {
const parts = toolName.split("__");
if (parts.length >= 3) {
- const provider = parts[1];
- const method = parts.slice(2).join("_");
+ const provider = parts[1];
+ const method = parts.slice(2).join("_");
return `${provider}::${method}`;
}
}
@@ -1086,12 +1716,7 @@ jobs:
}
function formatBashCommand(command) {
if (!command) return "";
- let formatted = command
- .replace(/\n/g, " ")
- .replace(/\r/g, " ")
- .replace(/\t/g, " ")
- .replace(/\s+/g, " ")
- .trim();
+ let formatted = command.replace(/\n/g, " ").replace(/\r/g, " ").replace(/\t/g, " ").replace(/\s+/g, " ").trim();
formatted = formatted.replace(/`/g, "\\`");
const maxLength = 300;
if (formatted.length > maxLength) {
@@ -1106,11 +1731,14 @@ jobs:
}
if (typeof module !== "undefined" && module.exports) {
module.exports = {
- parseClaudeLog,
- formatToolUse,
+ parseCopilotLog,
+ extractPremiumRequestCount,
formatInitializationSummary,
+ formatToolUseWithDetails,
formatBashCommand,
truncateString,
+ formatMcpName,
+ formatMcpParameters,
estimateTokens,
formatDuration,
};
@@ -1123,12 +1751,15 @@ jobs:
name: agent-stdio.log
path: /tmp/gh-aw/agent-stdio.log
if-no-files-found: warn
+ - name: Cleanup awf resources
+ if: always()
+ run: ./scripts/ci/cleanup.sh || true
- name: Validate agent logs for errors
if: always()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd
env:
- GH_AW_AGENT_OUTPUT: /tmp/gh-aw/agent-stdio.log
- GH_AW_ERROR_PATTERNS: "[{\"id\":\"\",\"pattern\":\"::(error)(?:\\\\s+[^:]*)?::(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"GitHub Actions workflow command - error\"},{\"id\":\"\",\"pattern\":\"::(warning)(?:\\\\s+[^:]*)?::(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"GitHub Actions workflow command - warning\"},{\"id\":\"\",\"pattern\":\"::(notice)(?:\\\\s+[^:]*)?::(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"GitHub Actions workflow command - notice\"},{\"id\":\"\",\"pattern\":\"(ERROR|Error):\\\\s+(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"Generic ERROR messages\"},{\"id\":\"\",\"pattern\":\"(WARNING|Warning):\\\\s+(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"Generic WARNING messages\"}]"
+ GH_AW_AGENT_OUTPUT: /tmp/gh-aw/.copilot/logs/
+ GH_AW_ERROR_PATTERNS: "[{\"id\":\"\",\"pattern\":\"::(error)(?:\\\\s+[^:]*)?::(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"GitHub Actions workflow command - error\"},{\"id\":\"\",\"pattern\":\"::(warning)(?:\\\\s+[^:]*)?::(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"GitHub Actions workflow command - warning\"},{\"id\":\"\",\"pattern\":\"::(notice)(?:\\\\s+[^:]*)?::(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"GitHub Actions workflow command - notice\"},{\"id\":\"\",\"pattern\":\"(ERROR|Error):\\\\s+(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"Generic ERROR messages\"},{\"id\":\"\",\"pattern\":\"(WARNING|Warning):\\\\s+(.+)\",\"level_group\":1,\"message_group\":2,\"description\":\"Generic WARNING messages\"},{\"id\":\"\",\"pattern\":\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d{3}Z)\\\\s+\\\\[(ERROR)\\\\]\\\\s+(.+)\",\"level_group\":2,\"message_group\":3,\"description\":\"Copilot CLI timestamped ERROR messages\"},{\"id\":\"\",\"pattern\":\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d{3}Z)\\\\s+\\\\[(WARN|WARNING)\\\\]\\\\s+(.+)\",\"level_group\":2,\"message_group\":3,\"description\":\"Copilot CLI timestamped WARNING messages\"},{\"id\":\"\",\"pattern\":\"\\\\[(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d{3}Z)\\\\]\\\\s+(CRITICAL|ERROR):\\\\s+(.+)\",\"level_group\":2,\"message_group\":3,\"description\":\"Copilot CLI bracketed critical/error messages with timestamp\"},{\"id\":\"\",\"pattern\":\"\\\\[(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d{3}Z)\\\\]\\\\s+(WARNING):\\\\s+(.+)\",\"level_group\":2,\"message_group\":3,\"description\":\"Copilot CLI bracketed warning messages with timestamp\"},{\"id\":\"\",\"pattern\":\"✗\\\\s+(.+)\",\"level_group\":0,\"message_group\":1,\"description\":\"Copilot CLI failed command indicator\"},{\"id\":\"\",\"pattern\":\"(?:command not found|not found):\\\\s*(.+)|(.+):\\\\s*(?:command 
not found|not found)\",\"level_group\":0,\"message_group\":0,\"description\":\"Shell command not found error\"},{\"id\":\"\",\"pattern\":\"Cannot find module\\\\s+['\\\"](.+)['\\\"]\",\"level_group\":0,\"message_group\":1,\"description\":\"Node.js module not found error\"},{\"id\":\"\",\"pattern\":\"Permission denied and could not request permission from user\",\"level_group\":0,\"message_group\":0,\"description\":\"Copilot CLI permission denied warning (user interaction required)\"}]"
with:
script: |
function main() {
diff --git a/.github/workflows/firewall.md b/.github/workflows/firewall.md
index 1c2c88070b2..4f77e1d3f17 100644
--- a/.github/workflows/firewall.md
+++ b/.github/workflows/firewall.md
@@ -5,9 +5,11 @@ on:
permissions:
contents: read
-engine: claude
+engine: copilot
-# Network firewall: Allow only basic infrastructure, NOT example.com
+features:
+ firewall: true
+
network: defaults
tools: