diff --git a/.github/workflows/issue-triage-llm.lock.yml b/.github/workflows/issue-triage-llm.lock.yml
index 487d165020c..8330fc4e95a 100644
--- a/.github/workflows/issue-triage-llm.lock.yml
+++ b/.github/workflows/issue-triage-llm.lock.yml
@@ -1412,6 +1412,9 @@ jobs:
         run: |
           pip install llm llm-github-models llm-tools-mcp
           llm --version
+
+          # Show logs database path for debugging
+          echo "LLM logs database: $(llm logs path)"
         env:
           GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
@@ -1425,11 +1428,19 @@ jobs:
           if [ -n "$GITHUB_AW_MCP_CONFIG" ] && [ -f "$GITHUB_AW_MCP_CONFIG" ]; then
             # Create llm-tools-mcp config directory
             mkdir -p ~/.llm-tools-mcp
+            mkdir -p ~/.llm-tools-mcp/logs
 
             # Copy MCP configuration to the expected location for llm-tools-mcp
             cp "$GITHUB_AW_MCP_CONFIG" ~/.llm-tools-mcp/mcp.json
             echo "✓ MCP configuration installed at ~/.llm-tools-mcp/mcp.json"
 
+            echo "📋 MCP configuration:"
+            cat ~/.llm-tools-mcp/mcp.json
+            echo ""
+
+            # List available tools for debugging
+            echo "🔧 Listing available MCP tools:"
+            llm tools list || echo "⚠ Failed to list tools"
           else
             echo "ℹ No MCP configuration available"
           fi
@@ -1438,6 +1449,7 @@ jobs:
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
           GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LLM_TOOLS_MCP_FULL_ERRORS: "1"
       - name: Run llm CLI with prompt
         id: llm_execution
         run: |
@@ -1450,12 +1462,15 @@ jobs:
 
           # Run llm with the prompt from the file
           # Use -T MCP to enable MCP tools if configured
+          # Additional flags for debugging:
+          # --td: Show full details of tool executions
+          # -u: Show token usage
           if [ -f ~/.llm-tools-mcp/mcp.json ]; then
-            echo "Running with MCP tools enabled"
-            llm -m "$MODEL" -T MCP "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+            echo "🚀 Running with MCP tools enabled (debug mode)"
+            llm -m "$MODEL" -T MCP --td -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
           else
             echo "Running without MCP tools"
-            llm -m "$MODEL" "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+            llm -m "$MODEL" -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
           fi
 
           # Store output for safe-outputs processing if configured
@@ -1467,6 +1482,18 @@ jobs:
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
           GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LLM_TOOLS_MCP_FULL_ERRORS: "1"
+      - name: Upload MCP server logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        env:
+          GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
+          GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+          GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
+        with:
+          if-no-files-found: ignore
+          name: llm-mcp-logs
+          path: ~/.llm-tools-mcp/logs/
       - name: Ensure log file exists
         run: |
           echo "Custom steps execution completed" >> /tmp/gh-aw/agent-stdio.log
@@ -2285,7 +2312,7 @@ jobs:
           AGENT_OUTPUT: ${{ needs.agent.outputs.output }}
           WORKFLOW_NAME: "Issue Triage (LLM)"
           WORKFLOW_DESCRIPTION: "No description provided"
-          WORKFLOW_MARKDOWN: "\n\n# Issue Triage\n\nYou are an issue triage assistant. Your task is to analyze newly created issues and provide helpful triage information.\n\n## Current Issue\n\n- **Issue Number**: ${{ github.event.inputs.issue_number || github.event.issue.number }}\n- **Repository**: ${{ github.repository }}\n- **Issue Content**: \n ```\n ${{ needs.activation.outputs.text }}\n ```\n\n## Triage Guidelines\n\nPlease analyze the issue and provide:\n\n1. **Issue Type Classification**: Determine if this is a:\n - Bug report (something broken or not working)\n - Feature request (new functionality)\n - Documentation update\n - Question or support request\n - Enhancement (improvement to existing feature)\n\n2. **Priority Assessment**: Based on the content, suggest a priority level:\n - Critical (security, data loss, complete breakage)\n - High (major functionality affected)\n - Medium (important but not blocking)\n - Low (minor issue or nice-to-have)\n\n3. **Initial Analysis**: Provide:\n - A brief summary of the issue\n - Any missing information that would be helpful\n - Suggested next steps or questions to ask the reporter\n - Related components or areas of the codebase that might be affected\n\n## Your Task\n\nAnalyze the issue content above and create a triage comment that includes:\n- The issue type classification\n- The suggested priority level\n- Your initial analysis and recommendations\n\nFormat your response as a helpful comment that will be added to the issue. Use clear formatting with sections and bullet points. Be professional and constructive.\n\n**Important**: Generate a single, well-formatted comment that provides value to both the issue reporter and repository maintainers.\n"
+          WORKFLOW_MARKDOWN: "\n\n# Issue Triage\n\nYou are an issue triage assistant. Your task is to analyze newly created issues and provide helpful triage information.\n\n## Current Issue\n\n- **Issue Number**: ${{ github.event.inputs.issue_number || github.event.issue.number }}\n- **Repository**: ${{ github.repository }}\n- **Issue Content**: \n ```\n ${{ needs.activation.outputs.text }}\n ```\n\n## Triage Guidelines\n\nPlease analyze the issue and provide:\n\n1. **Issue Type Classification**: Determine if this is a:\n - Bug report (something broken or not working)\n - Feature request (new functionality)\n - Documentation update\n - Question or support request\n - Enhancement (improvement to existing feature)\n\n2. **Priority Assessment**: Based on the content, suggest a priority level:\n - Critical (security, data loss, complete breakage)\n - High (major functionality affected)\n - Medium (important but not blocking)\n - Low (minor issue or nice-to-have)\n\n3. **Initial Analysis**: Provide:\n - A brief summary of the issue\n - Any missing information that would be helpful\n - Suggested next steps or questions to ask the reporter\n - Related components or areas of the codebase that might be affected\n\n## Your Task\n\nAnalyze the issue content above and create a triage comment that includes:\n- The issue type classification\n- The suggested priority level\n- Your initial analysis and recommendations\n\nFormat your response as a helpful comment that will be added to the issue. Use clear formatting with sections and bullet points. Be professional and constructive.\n\n**Important**: Generate a single, well-formatted comment that provides value to both the issue reporter and repository maintainers.\n"
         with:
           script: |
             const fs = require('fs');
@@ -2370,6 +2397,9 @@ jobs:
         run: |
           pip install llm llm-github-models llm-tools-mcp
           llm --version
+
+          # Show logs database path for debugging
+          echo "LLM logs database: $(llm logs path)"
         env:
           GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
@@ -2383,11 +2413,19 @@ jobs:
           if [ -n "$GITHUB_AW_MCP_CONFIG" ] && [ -f "$GITHUB_AW_MCP_CONFIG" ]; then
             # Create llm-tools-mcp config directory
             mkdir -p ~/.llm-tools-mcp
+            mkdir -p ~/.llm-tools-mcp/logs
 
             # Copy MCP configuration to the expected location for llm-tools-mcp
             cp "$GITHUB_AW_MCP_CONFIG" ~/.llm-tools-mcp/mcp.json
             echo "✓ MCP configuration installed at ~/.llm-tools-mcp/mcp.json"
 
+            echo "📋 MCP configuration:"
+            cat ~/.llm-tools-mcp/mcp.json
+            echo ""
+
+            # List available tools for debugging
+            echo "🔧 Listing available MCP tools:"
+            llm tools list || echo "⚠ Failed to list tools"
           else
             echo "ℹ No MCP configuration available"
           fi
@@ -2396,6 +2434,7 @@ jobs:
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
           GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LLM_TOOLS_MCP_FULL_ERRORS: "1"
       - name: Run llm CLI with prompt
         id: llm_execution
         run: |
@@ -2408,12 +2447,15 @@ jobs:
 
           # Run llm with the prompt from the file
           # Use -T MCP to enable MCP tools if configured
+          # Additional flags for debugging:
+          # --td: Show full details of tool executions
+          # -u: Show token usage
           if [ -f ~/.llm-tools-mcp/mcp.json ]; then
-            echo "Running with MCP tools enabled"
-            llm -m "$MODEL" -T MCP "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+            echo "🚀 Running with MCP tools enabled (debug mode)"
+            llm -m "$MODEL" -T MCP --td -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
           else
             echo "Running without MCP tools"
-            llm -m "$MODEL" "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+            llm -m "$MODEL" -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
           fi
 
           # Store output for safe-outputs processing if configured
@@ -2425,6 +2467,17 @@ jobs:
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
           GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LLM_TOOLS_MCP_FULL_ERRORS: "1"
+      - name: Upload MCP server logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        env:
+          GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
+          GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+        with:
+          if-no-files-found: ignore
+          name: llm-mcp-logs
+          path: ~/.llm-tools-mcp/logs/
       - name: Ensure log file exists
         run: |
           echo "Custom steps execution completed" >> /tmp/gh-aw/threat-detection/detection.log
diff --git a/.github/workflows/shared/simonw-llm.lock.yml b/.github/workflows/shared/simonw-llm.lock.yml
index fe58c947b6a..0dd0dd5460f 100644
--- a/.github/workflows/shared/simonw-llm.lock.yml
+++ b/.github/workflows/shared/simonw-llm.lock.yml
@@ -211,7 +211,7 @@ jobs:
                 "--rm",
                 "-e",
                 "GITHUB_PERSONAL_ACCESS_TOKEN",
-                "ghcr.io/github/github-mcp-server:sha-09deac4"
+                "ghcr.io/github/github-mcp-server:v0.18.0"
               ],
               "env": {
                 "GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}"
@@ -344,9 +344,13 @@ jobs:
         run: |
           pip install llm llm-github-models llm-tools-mcp
           llm --version
+
+          # Show logs database path for debugging
+          echo "LLM logs database: $(llm logs path)"
         env:
           GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+          GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
       - name: Configure llm with GitHub Models and MCP
         run: |
           # GitHub Models uses GITHUB_TOKEN by default, no key setup needed
@@ -356,18 +360,28 @@ jobs:
           if [ -n "$GITHUB_AW_MCP_CONFIG" ] && [ -f "$GITHUB_AW_MCP_CONFIG" ]; then
             # Create llm-tools-mcp config directory
             mkdir -p ~/.llm-tools-mcp
+            mkdir -p ~/.llm-tools-mcp/logs
 
             # Copy MCP configuration to the expected location for llm-tools-mcp
             cp "$GITHUB_AW_MCP_CONFIG" ~/.llm-tools-mcp/mcp.json
             echo "✓ MCP configuration installed at ~/.llm-tools-mcp/mcp.json"
 
+            echo "📋 MCP configuration:"
+            cat ~/.llm-tools-mcp/mcp.json
+            echo ""
+
+            # List available tools for debugging
+            echo "🔧 Listing available MCP tools:"
+            llm tools list || echo "⚠ Failed to list tools"
           else
             echo "ℹ No MCP configuration available"
           fi
         env:
           GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+          GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LLM_TOOLS_MCP_FULL_ERRORS: "1"
       - name: Run llm CLI with prompt
         id: llm_execution
         run: |
@@ -380,12 +394,15 @@ jobs:
 
           # Run llm with the prompt from the file
           # Use -T MCP to enable MCP tools if configured
+          # Additional flags for debugging:
+          # --td: Show full details of tool executions
+          # -u: Show token usage
           if [ -f ~/.llm-tools-mcp/mcp.json ]; then
-            echo "Running with MCP tools enabled"
-            llm -m "$MODEL" -T MCP "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+            echo "🚀 Running with MCP tools enabled (debug mode)"
+            llm -m "$MODEL" -T MCP --td -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
           else
             echo "Running without MCP tools"
-            llm -m "$MODEL" "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+            llm -m "$MODEL" -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
           fi
 
           # Store output for safe-outputs processing if configured
@@ -397,6 +414,17 @@ jobs:
           GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
           GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          LLM_TOOLS_MCP_FULL_ERRORS: "1"
+      - name: Upload MCP server logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        env:
+          GITHUB_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/mcp-servers.json
+          GITHUB_AW_PROMPT: /tmp/gh-aw/aw-prompts/prompt.txt
+        with:
+          if-no-files-found: ignore
+          name: llm-mcp-logs
+          path: ~/.llm-tools-mcp/logs/
       - name: Ensure log file exists
         run: |
           echo "Custom steps execution completed" >> /tmp/gh-aw/agent-stdio.log
diff --git a/.github/workflows/shared/simonw-llm.md b/.github/workflows/shared/simonw-llm.md
index 6c4bf3cd9bc..50f7fd72c6f 100644
--- a/.github/workflows/shared/simonw-llm.md
+++ b/.github/workflows/shared/simonw-llm.md
@@ -6,8 +6,13 @@ engine:
       run: |
         pip install llm llm-github-models llm-tools-mcp
         llm --version
+
+        # Show logs database path for debugging
+        echo "LLM logs database: $(llm logs path)"
       env:
         GITHUB_AW_MCP_CONFIG: ${{ env.GITHUB_AW_MCP_CONFIG }}
+        GITHUB_AW_PROMPT: ${{ env.GITHUB_AW_PROMPT }}
+        GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
 
     - name: Configure llm with GitHub Models and MCP
       run: |
@@ -18,17 +23,28 @@ engine:
        if [ -n "$GITHUB_AW_MCP_CONFIG" ] && [ -f "$GITHUB_AW_MCP_CONFIG" ]; then
          # Create llm-tools-mcp config directory
          mkdir -p ~/.llm-tools-mcp
+          mkdir -p ~/.llm-tools-mcp/logs
 
          # Copy MCP configuration to the expected location for llm-tools-mcp
          cp "$GITHUB_AW_MCP_CONFIG" ~/.llm-tools-mcp/mcp.json
          echo "✓ MCP configuration installed at ~/.llm-tools-mcp/mcp.json"
 
+          echo "📋 MCP configuration:"
+          cat ~/.llm-tools-mcp/mcp.json
+          echo ""
+
+          # List available tools for debugging
+          echo "🔧 Listing available MCP tools:"
+          llm tools list || echo "⚠ Failed to list tools"
        else
          echo "ℹ No MCP configuration available"
        fi
      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        GITHUB_AW_MCP_CONFIG: ${{ env.GITHUB_AW_MCP_CONFIG }}
+        GITHUB_AW_PROMPT: ${{ env.GITHUB_AW_PROMPT }}
+        GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        LLM_TOOLS_MCP_FULL_ERRORS: "1"
 
     - name: Run llm CLI with prompt
       id: llm_execution
@@ -42,12 +58,15 @@ engine:
 
        # Run llm with the prompt from the file
        # Use -T MCP to enable MCP tools if configured
+        # Additional flags for debugging:
+        # --td: Show full details of tool executions
+        # -u: Show token usage
        if [ -f ~/.llm-tools-mcp/mcp.json ]; then
-          echo "Running with MCP tools enabled"
-          llm -m "$MODEL" -T MCP "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+          echo "🚀 Running with MCP tools enabled (debug mode)"
+          llm -m "$MODEL" -T MCP --td -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
        else
          echo "Running without MCP tools"
-          llm -m "$MODEL" "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
+          llm -m "$MODEL" -u "$(cat $GITHUB_AW_PROMPT)" 2>&1 | tee /tmp/gh-aw/llm-output.txt
        fi
 
        # Store output for safe-outputs processing if configured
@@ -59,6 +78,15 @@ engine:
        GITHUB_AW_SAFE_OUTPUTS: ${{ env.GITHUB_AW_SAFE_OUTPUTS }}
        GITHUB_AW_MCP_CONFIG: ${{ env.GITHUB_AW_MCP_CONFIG }}
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        LLM_TOOLS_MCP_FULL_ERRORS: "1"
+
+    - name: Upload MCP server logs
+      if: always()
+      uses: actions/upload-artifact@v4
+      with:
+        name: llm-mcp-logs
+        path: ~/.llm-tools-mcp/logs/
+        if-no-files-found: ignore
 
 ---