From 939a127c147f04b67c84500058655d0acd0f4ff0 Mon Sep 17 00:00:00 2001 From: Srinath0916 Date: Mon, 5 Jan 2026 22:45:44 +0530 Subject: [PATCH 1/5] [feature] Add CI failure bot for automated responses #524 Implements automated bot responses for common CI failures: - Parses QA check failures (flake8, black, isort) - Detects test failures and setup issues - Provides specific fix suggestions with links - Removes automatic mentions to prevent spam - Handles edge cases for workflow runs without PRs Tested on personal repository before submission. Closes #524 --- .github/scripts/ci_failure_bot.py | 197 +++++++++++++++++++++++++++ .github/workflows/ci-failure-bot.yml | 34 +++++ 2 files changed, 231 insertions(+) create mode 100755 .github/scripts/ci_failure_bot.py create mode 100644 .github/workflows/ci-failure-bot.yml diff --git a/.github/scripts/ci_failure_bot.py b/.github/scripts/ci_failure_bot.py new file mode 100755 index 00000000..7c2a1211 --- /dev/null +++ b/.github/scripts/ci_failure_bot.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +""" +CI Failure Bot - responds to failed builds with helpful info +""" + +import os +import sys + +from github import Github + + +class CIFailureBot: + def __init__(self): + self.github_token = os.environ.get("GITHUB_TOKEN") + self.workflow_run_id = os.environ.get("WORKFLOW_RUN_ID") + self.repository_name = os.environ.get("REPOSITORY") + self.pr_number = os.environ.get("PR_NUMBER") + + if not all([self.github_token, self.workflow_run_id, self.repository_name]): + print("Missing env vars") + sys.exit(1) + + try: + self.workflow_run_id = int(self.workflow_run_id) + except ValueError: + print("Invalid WORKFLOW_RUN_ID: must be numeric") + sys.exit(1) + + self.github = Github(self.github_token) + self.repo = self.github.get_repo(self.repository_name) + + def get_workflow_logs(self): + try: + workflow_run = self.repo.get_workflow_run(self.workflow_run_id) + jobs = workflow_run.jobs() + + failed_jobs = [] + for job in jobs: + if job.conclusion == "failure": + for step in job.steps: + if step.conclusion == "failure": + failed_jobs.append( + { + "job_name": job.name, + "step_name": step.name, + "step_number": step.number, + } + ) + return failed_jobs + except Exception as e: + print(f"Error getting logs: {e}") + return [] + + def analyze_failure_type(self, logs): + failure_types = [] + + for log_entry in logs: + step_name = log_entry["step_name"].lower() + + if "qa checks" in step_name: + failure_types.append("qa_checks") + elif "tests" in step_name: + failure_types.append("tests") + elif any( + keyword in step_name for keyword in ["install", "dependencies", "setup"] + ): + failure_types.append("setup") + + return list(set(failure_types)) + + def generate_qa_response(self): + return """ +## QA Checks Failed + +The code quality checks didn't pass. To fix this: + +```bash +openwisp-qa-format +``` + +This will automatically fix most flake8, black, and isort issues. + +After running the command, commit and push the changes. + +See the [contributing guidelines](https://openwisp.io/docs/dev/developer/contributing.html) for more details. +""" + + def generate_test_response(self): + return """ +## Tests Failed + +Some tests are failing. To debug: + +```bash +./runtests +``` + +Check the CI logs above for specific error details. Common issues: +- Import errors from missing dependencies +- Logic changes that broke existing functionality +- Missing test dependencies + +See the [contributing guidelines](https://openwisp.io/docs/dev/developer/contributing.html) for testing help. +""" + + def generate_setup_response(self): + return """ +## Setup Failed + +There was an issue with dependencies or environment setup. + +Check if you added new dependencies to requirements-test.txt. +Verify Python/Django version compatibility: +- Python: 3.10, 3.11, 3.12, 3.13 +- Django: 4.2, 5.0, 5.1, 5.2 + +The CI will retry automatically, or push a small change to trigger rebuild. +""" + + def post_comment(self, message): + if not self.pr_number or self.pr_number.strip() == "": + print("No PR number, skipping comment") + return + + try: + try: + pr_num = int(self.pr_number) + except ValueError: + print(f"Invalid PR_NUMBER: {self.pr_number}") + return + + pr = self.repo.get_pull(pr_num) + + # Check for existing bot comments to avoid duplicates + bot_login = self.github.get_user().login + existing_comments = pr.get_issue_comments() + + for comment in existing_comments: + if comment.user.login == bot_login and ( + "CI Build Failed" in comment.body + or "QA Checks Failed" in comment.body + ): + print("Bot comment already exists, updating it") + comment.edit(message) + return + + # No existing comment, create new one + pr.create_issue_comment(message) + print(f"Posted comment to PR #{pr_num}") + except Exception as e: + print(f"Error posting comment: {e}") + + def run(self): + print("CI Failure Bot starting") + + logs = self.get_workflow_logs() + if not logs: + print("No failure logs found") + return + + failure_types = self.analyze_failure_type(logs) + print(f"Detected failure types: {failure_types}") + + responses = [] + + if "qa_checks" in failure_types: + responses.append(self.generate_qa_response()) + + if "tests" in failure_types: + responses.append(self.generate_test_response()) + + if "setup" in failure_types: + responses.append(self.generate_setup_response()) + + if not responses: + responses.append( + """ +## CI Build Failed + +Check the logs above for details. Common fixes: +- Run `openwisp-qa-format` for code style issues +- Run `./runtests` locally to debug test failures +- Check dependencies for setup issues + +See: https://openwisp.io/docs/dev/developer/contributing.html +""" + ) + + final_message = "\n\n".join(responses) + self.post_comment(final_message) + + print("CI Failure Bot completed") + + +if __name__ == "__main__": + bot = CIFailureBot() + bot.run() diff --git a/.github/workflows/ci-failure-bot.yml b/.github/workflows/ci-failure-bot.yml new file mode 100644 index 00000000..7a66bb9f --- /dev/null +++ b/.github/workflows/ci-failure-bot.yml @@ -0,0 +1,34 @@ +--- +name: CI Failure Bot + +on: + workflow_run: + workflows: ["OpenWISP Utils CI Build"] + types: + - completed + +jobs: + ci-failure-bot: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'failure' }} + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + pip install requests PyGithub + + - name: Run CI Failure Bot + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} + REPOSITORY: ${{ github.repository }} + PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} + run: python .github/scripts/ci_failure_bot.py From 94d6de96a116c0f56c4b6e828522299e3af1b88b Mon Sep 17 00:00:00 2001 From: Srinath0916 Date: Wed, 7 Jan 2026 02:42:59 +0530 Subject: [PATCH 2/5] [feature] Add Gemini AI-powered CI failure analysis #524 Implements intelligent bot responses using Gemini AI to analyze: - Build logs and error traces from failed CI runs - PR diffs and code changes for context - Workflow YAML configuration - Provides direct, actionable feedback to contributors Replaces static responses with dynamic AI analysis that examines actual failure context to generate specific guidance. Addresses reviewer feedback on dependencies and error handling. Closes #524 --- .github/scripts/ci_failure_bot.py | 312 +++++++++++++++++---------- .github/workflows/ci-failure-bot.yml | 11 +- 2 files changed, 210 insertions(+), 113 deletions(-) diff --git a/.github/scripts/ci_failure_bot.py b/.github/scripts/ci_failure_bot.py index 7c2a1211..0f247f6e 100755 --- a/.github/scripts/ci_failure_bot.py +++ b/.github/scripts/ci_failure_bot.py @@ -1,23 +1,43 @@ #!/usr/bin/env python3 """ -CI Failure Bot - responds to failed builds with helpful info +CI Failure Bot - AI-powered analysis of build failures using Gemini """ +import json import os import sys -from github import Github +import google.generativeai as genai +import requests +from github import Github, GithubException class CIFailureBot: def __init__(self): self.github_token = os.environ.get("GITHUB_TOKEN") + self.gemini_api_key = os.environ.get("GEMINI_API_KEY") self.workflow_run_id = os.environ.get("WORKFLOW_RUN_ID") self.repository_name = os.environ.get("REPOSITORY") self.pr_number = os.environ.get("PR_NUMBER") - if not all([self.github_token, self.workflow_run_id, self.repository_name]): - print("Missing env vars") + if not all( + [ + self.github_token, + self.gemini_api_key, + self.workflow_run_id, + self.repository_name, + ] + ): + missing = [] + if not self.github_token: + missing.append("GITHUB_TOKEN") + if not self.gemini_api_key: + missing.append("GEMINI_API_KEY") + if not self.workflow_run_id: + missing.append("WORKFLOW_RUN_ID") + if not self.repository_name: + missing.append("REPOSITORY") + print(f"Missing required environment variables: {', '.join(missing)}") sys.exit(1) try: @@ -29,106 +49,188 @@ def __init__(self): self.github = Github(self.github_token) self.repo = self.github.get_repo(self.repository_name) - def get_workflow_logs(self): + genai.configure(api_key=self.gemini_api_key) + model_name = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash") + self.model = genai.GenerativeModel(model_name) + + def get_build_logs(self): + """Get actual build logs and error output from failed jobs""" try: workflow_run = self.repo.get_workflow_run(self.workflow_run_id) jobs = workflow_run.jobs() - failed_jobs = [] + build_logs = [] for job in jobs: if job.conclusion == "failure": + # Get job logs URL and fetch content + logs_url = job.logs_url + if logs_url: + headers = { + "Authorization": f"token {self.github_token}", + "Accept": "application/vnd.github.v3+json", + } + response = requests.get(logs_url, headers=headers, timeout=30) + if response.status_code == 200: + build_logs.append( + { + "job_name": job.name, + "logs": response.text[ + -5000: + ], # Last 5000 chars to avoid token limits + } + ) + + # Also get step details for step in job.steps: if step.conclusion == "failure": - failed_jobs.append( + build_logs.append( { "job_name": job.name, "step_name": step.name, "step_number": step.number, } ) - return failed_jobs - except Exception as e: - print(f"Error getting logs: {e}") - return [] - - def analyze_failure_type(self, logs): - failure_types = [] - - for log_entry in logs: - step_name = log_entry["step_name"].lower() - - if "qa checks" in step_name: - failure_types.append("qa_checks") - elif "tests" in step_name: - failure_types.append("tests") - elif any( - keyword in step_name for keyword in ["install", "dependencies", "setup"] - ): - failure_types.append("setup") - - return list(set(failure_types)) - - def generate_qa_response(self): - return """ -## QA Checks Failed -The code quality checks didn't pass. To fix this: - -```bash -openwisp-qa-format -``` - -This will automatically fix most flake8, black, and isort issues. - -After running the command, commit and push the changes. - -See the [contributing guidelines](https://openwisp.io/docs/dev/developer/contributing.html) for more details. -""" - - def generate_test_response(self): - return """ -## Tests Failed - -Some tests are failing. To debug: + return build_logs + except (GithubException, ValueError) as e: + print(f"Error getting build logs: {e}") + return [] -```bash -./runtests -``` + def get_pr_diff(self): + """Get the PR diff/changes if PR exists""" + if not self.pr_number or self.pr_number.strip() == "": + return None -Check the CI logs above for specific error details. Common issues: -- Import errors from missing dependencies -- Logic changes that broke existing functionality -- Missing test dependencies + try: + pr_num = int(self.pr_number) + pr = self.repo.get_pull(pr_num) -See the [contributing guidelines](https://openwisp.io/docs/dev/developer/contributing.html) for testing help. -""" + # Get diff content + diff_url = pr.diff_url + headers = { + "Authorization": f"token {self.github_token}", + "Accept": "application/vnd.github.v3.diff", + } + response = requests.get(diff_url, headers=headers, timeout=30) + if response.status_code == 200: + diff_text = response.text + if len(diff_text) > 8000: + # Take first 4000 and last 4000 chars for context + diff_text = ( + diff_text[:4000] + + "\n\n[...middle truncated...]\n\n" + + diff_text[-4000:] + ) + + return { + "title": pr.title, + "body": pr.body or "", + "diff": diff_text, + } + except (GithubException, requests.RequestException) as e: + print(f"Error getting PR diff: {e}") + + return None + + def get_workflow_yaml(self): + """Get the workflow YAML configuration""" + try: + workflow_run = self.repo.get_workflow_run(self.workflow_run_id) + workflow_path = workflow_run.path + + # Get workflow file content + workflow_file = self.repo.get_contents(workflow_path) + return workflow_file.decoded_content.decode("utf-8") + except GithubException as e: + print(f"Error getting workflow YAML: {e}") + return None + + def analyze_with_gemini(self, build_logs, pr_diff, workflow_yaml): + """Send context to Gemini for intelligent analysis""" + + # Prepare context for Gemini + project_name = self.repository_name.split("/")[-1] + repo_url = f"https://github.com/{self.repository_name}" + qa_checks_url = f"{repo_url}/blob/master/openwisp-qa-check" + runtests_url = f"{repo_url}/blob/master/runtests" + + # Build the context string with proper line breaks + build_logs_json = json.dumps(build_logs, indent=2) + if pr_diff: + pr_diff_json = json.dumps(pr_diff, indent=2) + else: + pr_diff_json = "No PR associated" + + # Gemini prompt - ignore line length for readability + context = f""" +### ROLE +You are the "Automated Maintainer Gatekeeper." Your goal is to analyze Pull Request (PR) build failures and provide direct, technically accurate, and no-nonsense feedback to contributors. + +### INPUT CONTEXT PROVIDED +1. **Build Output/Logs:** {build_logs_json} +2. **YAML Workflow:** {workflow_yaml or "Not available"} +3. **PR Diff:** {pr_diff_json} +4. **Project Name:** {project_name} +5. **Repository:** {repo_url} +6. **run-qa-checks:** {qa_checks_url} +7. **runtests:** {runtests_url} + +### TASK +Analyze the provided context to determine why the build failed. Categorize the failure and respond according to the "Tone Guidelines" below. + +### TONE GUIDELINES +- **Direct & Honest:** Do not use "fluff" or overly polite corporate language. +- **Firm Standards:** If a PR is low-effort, spammy, or fails to follow basic instructions, state that clearly. +- **Action-Oriented:** Provide the exact command or file change needed to fix the error, unless the PR is spammy, in which case we should just declare the PR as potential SPAM and ask maintainers to manually review it. + +### RESPONSE STRUCTURE +1. **Status Summary:** A one-sentence blunt assessment of the failure. +2. **Technical Diagnosis:** + - Identify the specific line/test that failed. + - Explain *why* it failed. +3. **Required Action:** Provide a code block or specific steps the contributor must take. +4. **Quality Warning (If Applicable):** If the PR appears to be "spam" (e.g., trivial README changes, AI-generated nonsense, or repeated basic errors), include a firm statement that such contributions are a drain on project resources and ping the maintainers asking them for manual review. + +### EXAMPLE RESPONSE STYLE +"The build failed because you neglected to update the test suite to match your logic changes. This project does not accept functional changes without corresponding test updates. Refer to the log at line 452. Update tests/logic_test.py before re-submitting. We prioritize high-quality, ready-to-merge code; please ensure you run local tests before pushing." + +Analyze the failure and provide your response: +""" # noqa: E501 - def generate_setup_response(self): + try: + response = self.model.generate_content(context) + return response.text + except (ValueError, ConnectionError, Exception) as e: + print(f"Error calling Gemini API: {e}") + return self.fallback_response() + + def fallback_response(self): + """Fallback response if Gemini fails""" return """ -## Setup Failed +## CI Build Failed -There was an issue with dependencies or environment setup. +The automated analysis is temporarily unavailable. Please check the CI logs above for specific error details. -Check if you added new dependencies to requirements-test.txt. -Verify Python/Django version compatibility: -- Python: 3.10, 3.11, 3.12, 3.13 -- Django: 4.2, 5.0, 5.1, 5.2 +Common fixes: +- Run `openwisp-qa-format` for code style issues +- Run `./runtests` locally to debug test failures +- Check dependencies for setup issues -The CI will retry automatically, or push a small change to trigger rebuild. +See: https://openwisp.io/docs/dev/developer/contributing.html """ def post_comment(self, message): + """Post or update comment on PR""" if not self.pr_number or self.pr_number.strip() == "": print("No PR number, skipping comment") return - try: - try: - pr_num = int(self.pr_number) - except ValueError: - print(f"Invalid PR_NUMBER: {self.pr_number}") - return + # Add consistent marker for deduplication + marker = "" + message_with_marker = f"{marker}\n{message}" + try: + pr_num = int(self.pr_number) pr = self.repo.get_pull(pr_num) # Check for existing bot comments to avoid duplicates @@ -136,58 +238,46 @@ def post_comment(self, message): existing_comments = pr.get_issue_comments() for comment in existing_comments: - if comment.user.login == bot_login and ( - "CI Build Failed" in comment.body - or "QA Checks Failed" in comment.body - ): + if comment.user.login == bot_login and marker in comment.body: print("Bot comment already exists, updating it") - comment.edit(message) + comment.edit(message_with_marker) return # No existing comment, create new one - pr.create_issue_comment(message) + pr.create_issue_comment(message_with_marker) print(f"Posted comment to PR #{pr_num}") - except Exception as e: + except (GithubException, ValueError) as e: print(f"Error posting comment: {e}") def run(self): - print("CI Failure Bot starting") - - logs = self.get_workflow_logs() - if not logs: - print("No failure logs found") - return + """Main execution flow""" + print("CI Failure Bot starting - AI-powered analysis") - failure_types = self.analyze_failure_type(logs) - print(f"Detected failure types: {failure_types}") - - responses = [] - - if "qa_checks" in failure_types: - responses.append(self.generate_qa_response()) - - if "tests" in failure_types: - responses.append(self.generate_test_response()) + # Double-check: Skip if this is a dependabot PR + try: + workflow_run = self.repo.get_workflow_run(self.workflow_run_id) + if workflow_run.actor and "dependabot" in workflow_run.actor.login.lower(): + print(f"Skipping dependabot PR from {workflow_run.actor.login}") + return + except (GithubException, AttributeError) as e: + print(f"Warning: Could not check actor: {e}") - if "setup" in failure_types: - responses.append(self.generate_setup_response()) + # Get all context + build_logs = self.get_build_logs() + pr_diff = self.get_pr_diff() + workflow_yaml = self.get_workflow_yaml() - if not responses: - responses.append( - """ -## CI Build Failed + if not build_logs: + print("No build logs found") + return -Check the logs above for details. Common fixes: -- Run `openwisp-qa-format` for code style issues -- Run `./runtests` locally to debug test failures -- Check dependencies for setup issues + print("Analyzing failure with Gemini AI...") -See: https://openwisp.io/docs/dev/developer/contributing.html -""" - ) + # Get AI analysis + ai_response = self.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) - final_message = "\n\n".join(responses) - self.post_comment(final_message) + # Post intelligent comment + self.post_comment(ai_response) print("CI Failure Bot completed") diff --git a/.github/workflows/ci-failure-bot.yml b/.github/workflows/ci-failure-bot.yml index 7a66bb9f..94a2a046 100644 --- a/.github/workflows/ci-failure-bot.yml +++ b/.github/workflows/ci-failure-bot.yml @@ -7,10 +7,15 @@ on: types: - completed +permissions: + issues: write + pull-requests: write + contents: read + jobs: ci-failure-bot: runs-on: ubuntu-latest - if: ${{ github.event.workflow_run.conclusion == 'failure' }} + if: ${{ github.event.workflow_run.conclusion == 'failure' && !contains(github.event.workflow_run.actor.login, 'dependabot') }} steps: - name: Checkout repository @@ -22,12 +27,14 @@ jobs: python-version: "3.11" - name: Install dependencies + # Pin dependency versions for reproducibility run: | - pip install requests PyGithub + pip install requests>=2.31.0 PyGithub>=2.0.0 google-generativeai>=0.3.0 - name: Run CI Failure Bot env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} REPOSITORY: ${{ github.repository }} PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} From c64a94a93d44aa508433a833a1f3db7d351e973e Mon Sep 17 00:00:00 2001 From: Srinath0916 Date: Fri, 9 Jan 2026 11:25:07 +0530 Subject: [PATCH 3/5] [tests] Add comprehensive test suite and documentation for CI failure bot #524 Implements complete test coverage for the Gemini AI-powered CI failure bot: - Unit tests for all bot functionality with external API mocking - Comprehensive documentation following OpenWISP patterns - User guide with setup instructions and configuration options - Developer guide with technical implementation details Tests cover initialization, build log retrieval, PR diff handling, Gemini API integration, comment posting, and full workflow execution. Addresses reviewer feedback on automated testing requirements. Closes #524 --- docs/developer/ci-failure-bot.rst | 257 +++++++++++++++++ docs/developer/index.rst | 1 + docs/index.rst | 1 + docs/user/ci-failure-bot.rst | 114 ++++++++ openwisp_utils/tests/test_ci_failure_bot.py | 304 ++++++++++++++++++++ 5 files changed, 677 insertions(+) create mode 100644 docs/developer/ci-failure-bot.rst create mode 100644 docs/user/ci-failure-bot.rst create mode 100644 openwisp_utils/tests/test_ci_failure_bot.py diff --git a/docs/developer/ci-failure-bot.rst b/docs/developer/ci-failure-bot.rst new file mode 100644 index 00000000..f4820dac --- /dev/null +++ b/docs/developer/ci-failure-bot.rst @@ -0,0 +1,257 @@ +CI Failure Bot +============== + +This GitHub workflow automatically analyzes failed CI builds and provides intelligent feedback to contributors using AI-powered analysis. + +The bot examines build logs, PR changes, and workflow context to generate specific, actionable guidance that helps contributors fix issues quickly. + +Usage Example +------------- + +You can use this workflow in your repository as follows: + +.. code-block:: yaml + + name: CI Failure Bot + + on: + workflow_run: + workflows: ["OpenWISP Utils CI Build"] + types: + - completed + + permissions: + issues: write + pull-requests: write + contents: read + + jobs: + ci-failure-bot: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'failure' && !contains(github.event.workflow_run.actor.login, 'dependabot') }} + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + pip install requests>=2.31.0 PyGithub>=2.0.0 google-generativeai>=0.3.0 + + - name: Run CI Failure Bot + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} + WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} + REPOSITORY: ${{ github.repository }} + PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} + run: python .github/scripts/ci_failure_bot.py + +Configuration +------------- + +Repository Secrets +~~~~~~~~~~~~~~~~~~~ + +The following secrets must be configured in your repository: + +- ``GEMINI_API_KEY``: Google Gemini API key for AI analysis + +Environment Variables +~~~~~~~~~~~~~~~~~~~~~ + +Optional environment variables for customization: + +- ``GEMINI_MODEL``: Gemini model to use (default: ``gemini-2.5-flash``) + +Features +-------- + +- **Automatic triggering**: Responds to CI build failures in pull requests +- **AI-powered analysis**: Uses Google Gemini to analyze failure logs and provide specific guidance +- **Intelligent responses**: Provides direct, actionable feedback based on actual failure context +- **Comment deduplication**: Updates existing comments instead of creating duplicates +- **Dependabot exclusion**: Automatically skips dependency update PRs +- **Fallback handling**: Provides basic guidance if AI analysis fails + +Response Examples +----------------- + +The bot provides different types of responses based on the failure: + +**Code Quality Issues**:: + + The build failed because of code formatting violations. Run `openwisp-qa-format` + to fix black, flake8, and isort issues before pushing. The project requires + clean code that passes all quality checks. + +**Test Failures**:: + + Tests are failing in tests/test_models.py at line 45. The error indicates a + missing migration for the new field you added. Run `python manage.py makemigrations` + and include the migration file in your commit. + +**Low-Quality Contributions**:: + + This PR appears to be spam or low-effort. Trivial README changes without + substantial improvements are not accepted. Please review the contribution + guidelines and submit meaningful changes. + +Note +---- + +If the Gemini API is unavailable or the analysis fails, the bot will provide a fallback response with basic troubleshooting guidance. The workflow will not fail silently - any errors in the bot execution will be visible in the GitHub Actions logs. + +Implementation Details +---------------------- + +Architecture +~~~~~~~~~~~~ + +The CI failure bot consists of two main components: + +1. **GitHub Actions Workflow** (``.github/workflows/ci-failure-bot.yml``) +2. **Python Analysis Script** (``.github/scripts/ci_failure_bot.py``) + +The workflow uses the ``workflow_run`` trigger to respond to CI failures, ensuring proper access to workflow run metadata and logs with correct PR association. + +Testing +------- + +The test suite (``test_ci_failure_bot.py``) provides comprehensive coverage: + +**Test Categories**: + +- Initialization and configuration validation +- Build log retrieval with various scenarios +- PR diff handling and truncation +- Gemini API integration and error handling +- Comment posting and deduplication +- Full workflow execution + +**Mocking Strategy**: + +- External APIs (GitHub, Gemini) are fully mocked +- Environment variables are patched for isolation +- Network requests are intercepted to avoid external dependencies + +Running Tests +~~~~~~~~~~~~~ + +.. code-block:: bash + + # Run specific CI bot tests + python manage.py test openwisp_utils.tests.test_ci_failure_bot + + # Run with coverage + coverage run --source='.' manage.py test openwisp_utils.tests.test_ci_failure_bot + coverage report + +Configuration Options +--------------------- + +Environment Variables +~~~~~~~~~~~~~~~~~~~~~ + +- ``GITHUB_TOKEN``: GitHub API access (automatically provided by Actions) +- ``GEMINI_API_KEY``: Google Gemini API key (repository secret) +- ``WORKFLOW_RUN_ID``: Workflow run identifier (automatically provided) +- ``REPOSITORY``: Repository name (automatically provided) +- ``PR_NUMBER``: Pull request number (automatically provided) +- ``GEMINI_MODEL``: Gemini model name (optional, defaults to ``gemini-2.5-flash``) + +Permissions +~~~~~~~~~~~ + +The workflow requires these GitHub permissions: + +.. code-block:: yaml + + permissions: + issues: write + pull-requests: write + contents: read + +Deployment +---------- + +Adding to New Repositories +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Copy workflow file to ``.github/workflows/ci-failure-bot.yml`` +2. Copy script to ``.github/scripts/ci_failure_bot.py`` +3. Configure ``GEMINI_API_KEY`` in repository secrets +4. Update workflow name in trigger to match target CI workflow + +Customization +~~~~~~~~~~~~~ + +**Prompt Customization**: +Modify the Gemini prompt in ``analyze_with_gemini()`` to adjust: + +- Response tone and style +- Project-specific guidance +- Error categorization logic + +**Trigger Customization**: +Adjust the workflow trigger to target different CI workflows or conditions. + +Troubleshooting +--------------- + +Common Development Issues +~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Import Errors in Tests**: +The test file adds the scripts directory to Python path: + +.. code-block:: python + + scripts_path = os.path.join(os.path.dirname(__file__), '../../.github/scripts') + sys.path.insert(0, scripts_path) + +**API Rate Limits**: +The bot implements request timeouts and error handling for API limits. + +**Token Limits**: +Build logs and PR diffs are truncated to stay within Gemini token limits. + +Debugging +~~~~~~~~~ + +Enable debug output by adding print statements or using the workflow logs: + +.. code-block:: bash + + # View workflow run logs + gh run view --log + + # Check specific job logs + gh run view --log --job="ci-failure-bot" + +Future Enhancements +------------------- + +Potential improvements: + +- **Multi-language support**: Extend beyond Python projects +- **Custom rules**: Repository-specific failure analysis rules +- **Integration metrics**: Track bot effectiveness and accuracy +- **Advanced AI**: Use function calling for more structured responses + +Contributing +------------ + +When contributing to the CI failure bot: + +1. **Add tests**: All new functionality must include comprehensive tests +2. **Update documentation**: Keep both user and developer docs current +3. **Follow patterns**: Maintain consistency with existing OpenWISP code style +4. **Test thoroughly**: Verify on demo repositories before submitting + +For more information, see the main `OpenWISP contribution guidelines `_. \ No newline at end of file diff --git a/docs/developer/index.rst b/docs/developer/index.rst index 9078bf8e..9840ffc0 100644 --- a/docs/developer/index.rst +++ b/docs/developer/index.rst @@ -18,6 +18,7 @@ Developer Docs ./other-utilities.rst ./reusable-github-utils.rst ./releaser-tool.rst + ./ci-failure-bot.rst Other useful resources: diff --git a/docs/index.rst b/docs/index.rst index 291d6282..9ee626cf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,6 +37,7 @@ OpenWISP architecture. ./user/metric-collection.rst ./user/admin-filters.rst ./user/settings.rst + ./user/ci-failure-bot.rst .. toctree:: :caption: Utils Developer Docs diff --git a/docs/user/ci-failure-bot.rst b/docs/user/ci-failure-bot.rst new file mode 100644 index 00000000..b9214098 --- /dev/null +++ b/docs/user/ci-failure-bot.rst @@ -0,0 +1,114 @@ +CI Failure Bot +=============== + +The CI Failure Bot is an automated system that analyzes failed CI builds and provides intelligent feedback to contributors. It uses AI-powered analysis to examine build logs, PR changes, and workflow context to generate specific, actionable guidance. + +Features +-------- + +- **Automatic triggering**: Responds to CI build failures in pull requests +- **AI-powered analysis**: Uses Google Gemini to analyze failure logs and provide specific guidance +- **Intelligent responses**: Provides direct, actionable feedback based on actual failure context +- **Comment deduplication**: Updates existing comments instead of creating duplicates +- **Dependabot exclusion**: Automatically skips dependency update PRs +- **Fallback handling**: Provides basic guidance if AI analysis fails + +How It Works +------------ + +When a CI build fails on a pull request, the bot: + +1. **Collects context**: Gathers build logs, PR diff, and workflow configuration +2. **AI analysis**: Sends context to Gemini AI for intelligent analysis +3. **Posts feedback**: Creates or updates a comment with specific guidance +4. **Avoids spam**: Uses markers to prevent duplicate comments + +The bot provides direct, no-nonsense feedback following OpenWISP's standards for code quality and contribution guidelines. + +Configuration +------------- + +Repository Secrets +~~~~~~~~~~~~~~~~~~ + +The following secrets must be configured in the repository: + +- ``GEMINI_API_KEY``: Google Gemini API key for AI analysis + +Environment Variables +~~~~~~~~~~~~~~~~~~~~~ + +Optional environment variables for customization: + +- ``GEMINI_MODEL``: Gemini model to use (default: ``gemini-2.5-flash``) + +Setup +----- + +The CI failure bot is automatically enabled for repositories with the workflow file. No additional setup is required beyond configuring the API key. + +Workflow Integration +~~~~~~~~~~~~~~~~~~~~ + +The bot integrates with existing CI workflows through the ``workflow_run`` trigger: + +.. code-block:: yaml + + on: + workflow_run: + workflows: ["OpenWISP Utils CI Build"] + types: + - completed + +This ensures the bot only runs after the main CI workflow completes with a failure. + +Response Examples +----------------- + +The bot provides different types of responses based on the failure: + +**Code Quality Issues**:: + + The build failed because of code formatting violations. Run `openwisp-qa-format` + to fix black, flake8, and isort issues before pushing. The project requires + clean code that passes all quality checks. + +**Test Failures**:: + + Tests are failing in tests/test_models.py at line 45. The error indicates a + missing migration for the new field you added. Run `python manage.py makemigrations` + and include the migration file in your commit. + +**Low-Quality Contributions**:: + + This PR appears to be spam or low-effort. Trivial README changes without + substantial improvements are not accepted. Please review the contribution + guidelines and submit meaningful changes. + +Troubleshooting +--------------- + +Common Issues +~~~~~~~~~~~~~ + +**Bot not responding** + - Verify ``GEMINI_API_KEY`` is configured correctly + - Check that the workflow file exists and is properly formatted + - Ensure the PR has an associated workflow run failure + +**Incorrect analysis** + - The bot learns from context - more specific error messages lead to better analysis + - Complex failures may require manual review and contributor guidance + +**Permission errors** + - Verify the workflow has proper permissions for ``issues: write`` and ``pull-requests: write`` + +Limitations +----------- + +- Requires Google Gemini API access +- Analysis quality depends on error log clarity +- May not handle very complex or unusual failure scenarios +- Skips dependabot PRs to avoid unnecessary noise + +For more information about contributing to OpenWISP projects, see the `contribution guidelines `_. \ No newline at end of file diff --git a/openwisp_utils/tests/test_ci_failure_bot.py b/openwisp_utils/tests/test_ci_failure_bot.py new file mode 100644 index 00000000..73b7843c --- /dev/null +++ b/openwisp_utils/tests/test_ci_failure_bot.py @@ -0,0 +1,304 @@ +import os +import sys +from unittest.mock import Mock, patch + +from django.test import TestCase + +# Add the .github/scripts directory to Python path for testing +scripts_path = os.path.join(os.path.dirname(__file__), "../../.github/scripts") +sys.path.insert(0, scripts_path) + +try: + from ci_failure_bot import CIFailureBot +except ImportError: + CIFailureBot = None + + +class TestCIFailureBot(TestCase): + def setUp(self): + if CIFailureBot is None: + self.skipTest("CI failure bot script not available") + + self.env_vars = { + "GITHUB_TOKEN": "test_token", + "GEMINI_API_KEY": "test_gemini_key", + "WORKFLOW_RUN_ID": "12345", + "REPOSITORY": "openwisp/openwisp-utils", + "PR_NUMBER": "1", + } + + self.env_patcher = patch.dict(os.environ, self.env_vars) + self.env_patcher.start() + + self.github_patcher = patch("ci_failure_bot.Github") + self.genai_patcher = patch("ci_failure_bot.genai") + + self.mock_github = self.github_patcher.start() + self.mock_genai = self.genai_patcher.start() + + self.mock_repo = Mock() + self.mock_github.return_value.get_repo.return_value = self.mock_repo + + self.mock_model = Mock() + self.mock_genai.GenerativeModel.return_value = self.mock_model + + def tearDown(self): + if hasattr(self, "env_patcher"): + self.env_patcher.stop() + if hasattr(self, "github_patcher"): + self.github_patcher.stop() + if hasattr(self, "genai_patcher"): + self.genai_patcher.stop() + + def test_init_success(self): + bot = CIFailureBot() + + self.assertEqual(bot.github_token, "test_token") + self.assertEqual(bot.gemini_api_key, "test_gemini_key") + self.assertEqual(bot.workflow_run_id, 12345) + self.assertEqual(bot.repository_name, "openwisp/openwisp-utils") + self.assertEqual(bot.pr_number, "1") + + self.mock_github.assert_called_once_with("test_token") + self.mock_genai.configure.assert_called_once_with(api_key="test_gemini_key") + + def test_init_missing_env_vars(self): + with patch.dict(os.environ, {}, clear=True): + with self.assertRaises(SystemExit): + CIFailureBot() + + def test_init_invalid_workflow_run_id(self): + with patch.dict(os.environ, {"WORKFLOW_RUN_ID": "invalid"}): + with self.assertRaises(SystemExit): + CIFailureBot() + + def test_init_custom_gemini_model(self): + with patch.dict(os.environ, {"GEMINI_MODEL": "gemini-pro"}): + CIFailureBot() + self.mock_genai.GenerativeModel.assert_called_with("gemini-pro") + + @patch("ci_failure_bot.requests.get") + def test_get_build_logs_success(self, mock_requests): + bot = CIFailureBot() + + mock_workflow_run = Mock() + mock_job = Mock() + mock_job.conclusion = "failure" + mock_job.name = "test-job" + mock_job.logs_url = "https://api.github.com/logs/123" + + mock_step = Mock() + mock_step.conclusion = "failure" + mock_step.name = "Run tests" + mock_step.number = 1 + mock_job.steps = [mock_step] + + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = "Error: Test failed at line 42\n" * 1000 + mock_requests.return_value = mock_response + + logs = bot.get_build_logs() + + self.assertEqual(len(logs), 2) + self.assertIn("job_name", logs[0]) + self.assertIn("logs", logs[0]) + self.assertEqual(logs[1]["step_name"], "Run tests") + + def test_get_build_logs_no_failures(self): + bot = CIFailureBot() + + mock_workflow_run = Mock() + mock_job = Mock() + mock_job.conclusion = "success" + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + + logs = bot.get_build_logs() + + self.assertEqual(logs, []) + + @patch("ci_failure_bot.requests.get") + def test_get_pr_diff_success(self, mock_requests): + bot = CIFailureBot() + + mock_pr = Mock() + mock_pr.title = "Test PR" + mock_pr.body = "Test description" + mock_pr.diff_url = "https://github.com/test/diff" + self.mock_repo.get_pull.return_value = mock_pr + + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = "diff --git a/test.py b/test.py\n" + "line\n" * 1000 + mock_requests.return_value = mock_response + + diff_data = bot.get_pr_diff() + + self.assertEqual(diff_data["title"], "Test PR") + self.assertEqual(diff_data["body"], "Test description") + self.assertIn("[...middle truncated...]", diff_data["diff"]) + + def test_get_pr_diff_no_pr_number(self): + bot = CIFailureBot() + bot.pr_number = None + + diff_data = bot.get_pr_diff() + + self.assertIsNone(diff_data) + + def test_get_workflow_yaml_success(self): + bot = CIFailureBot() + + mock_workflow_run = Mock() + mock_workflow_run.path = ".github/workflows/ci.yml" + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + + mock_file = Mock() + mock_file.decoded_content = b"name: CI\non: [push]" + self.mock_repo.get_contents.return_value = mock_file + + yaml_content = bot.get_workflow_yaml() + + self.assertEqual(yaml_content, "name: CI\non: [push]") + + def test_analyze_with_gemini_success(self): + bot = CIFailureBot() + + mock_response = Mock() + mock_response.text = "The build failed because of a syntax error." + self.mock_model.generate_content.return_value = mock_response + + build_logs = [{"job_name": "test", "logs": "Error: syntax error"}] + pr_diff = {"title": "Test", "diff": "diff content"} + workflow_yaml = "name: CI" + + result = bot.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) + + self.assertEqual(result, "The build failed because of a syntax error.") + self.mock_model.generate_content.assert_called_once() + + def test_analyze_with_gemini_api_error(self): + bot = CIFailureBot() + + self.mock_model.generate_content.side_effect = Exception("API Error") + + result = bot.analyze_with_gemini([], None, None) + + self.assertIn("CI Build Failed", result) + self.assertIn("temporarily unavailable", result) + + def test_post_comment_success(self): + bot = CIFailureBot() + + mock_pr = Mock() + mock_user = Mock() + mock_user.login = "github-actions[bot]" + self.mock_github.return_value.get_user.return_value = mock_user + self.mock_repo.get_pull.return_value = mock_pr + + mock_pr.get_issue_comments.return_value = [] + + bot.post_comment("Test message") + + mock_pr.create_issue_comment.assert_called_once() + call_args = mock_pr.create_issue_comment.call_args[0][0] + self.assertIn("", call_args) + self.assertIn("Test message", call_args) + + def test_post_comment_update_existing(self): + bot = CIFailureBot() + + mock_pr = Mock() + mock_user = Mock() + mock_user.login = "github-actions[bot]" + self.mock_github.return_value.get_user.return_value = mock_user + self.mock_repo.get_pull.return_value = mock_pr + + mock_comment = Mock() + mock_comment.user.login = "github-actions[bot]" + mock_comment.body = "\nOld message" + mock_pr.get_issue_comments.return_value = [mock_comment] + + bot.post_comment("New message") + + mock_comment.edit.assert_called_once() + mock_pr.create_issue_comment.assert_not_called() + + def test_post_comment_no_pr_number(self): + bot = CIFailureBot() + bot.pr_number = None + + bot.post_comment("Test message") + + def test_run_skips_dependabot(self): + bot = CIFailureBot() + + mock_workflow_run = Mock() + mock_actor = Mock() + mock_actor.login = "dependabot[bot]" + mock_workflow_run.actor = mock_actor + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + + with patch("builtins.print") as mock_print: + bot.run() + + mock_print.assert_any_call("Skipping dependabot PR from dependabot[bot]") + + @patch("ci_failure_bot.requests.get") + def test_run_full_workflow(self, mock_requests): + bot = CIFailureBot() + + mock_workflow_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_workflow_run.actor = mock_actor + + mock_job = Mock() + mock_job.conclusion = "failure" + mock_job.name = "test-job" + mock_job.logs_url = "https://api.github.com/logs/123" + mock_job.steps = [] + mock_workflow_run.jobs.return_value = [mock_job] + + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = "Build failed" + mock_requests.return_value = mock_response + + mock_gemini_response = Mock() + mock_gemini_response.text = "Analysis: Build failed due to syntax error" + self.mock_model.generate_content.return_value = mock_gemini_response + + mock_pr = Mock() + mock_user = Mock() + mock_user.login = "github-actions[bot]" + self.mock_github.return_value.get_user.return_value = mock_user + self.mock_repo.get_pull.return_value = mock_pr + mock_pr.get_issue_comments.return_value = [] + + bot.run() + + self.mock_model.generate_content.assert_called_once() + mock_pr.create_issue_comment.assert_called_once() + + def test_run_no_build_logs(self): + bot = CIFailureBot() + + mock_workflow_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_workflow_run.actor = mock_actor + mock_workflow_run.jobs.return_value = [] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + + with patch("builtins.print") as mock_print: + bot.run() + + mock_print.assert_any_call("No build logs found") + self.mock_model.generate_content.assert_not_called() From 06b7141769cfbcd0bcb19f160707c856876f0548 Mon Sep 17 00:00:00 2001 From: Srinath0916 Date: Fri, 9 Jan 2026 11:56:41 +0530 Subject: [PATCH 4/5] [feature] Add comprehensive tests and docs for CI failure bot #524 - Add complete test suite with comprehensive coverage for all bot functions - Consolidate documentation into reusable-github-utils.rst as requested by maintainer - Move CI failure bot code from .github/scripts to openwisp_utils package - Add github_actions extra dependency group in setup.py for proper installation - Fix test imports to use correct openwisp_utils.ci_failure_bot module path - Remove noqa comments from Gemini prompt to avoid sending linting artifacts to AI - Implement dynamic branch detection using repo.default_branch instead of hardcoded master - Add null check for deleted fork repositories to prevent AttributeError - Make Gemini API key optional with graceful fallback to static responses - Simplify workflow error handling and clean up formatting - Address all CodeRabbit feedback on exception handling and code quality - Follow OpenWISP documentation patterns with proper structure Addresses all maintainer comments and CodeRabbit suggestions for production readiness. Closes #524 --- .github/scripts/ci_failure_bot.py | 287 ------------- .github/workflows/ci-failure-bot.yml | 9 +- docs/developer/ci-failure-bot.rst | 257 ------------ docs/developer/index.rst | 1 - docs/developer/reusable-github-utils.rst | 122 ++++++ docs/index.rst | 1 - docs/user/ci-failure-bot.rst | 114 ------ openwisp_utils/ci_failure_bot.py | 427 ++++++++++++++++++++ openwisp_utils/tests/test_ci_failure_bot.py | 232 +++++++---- setup.py | 5 + 10 files changed, 707 insertions(+), 748 deletions(-) delete mode 100755 .github/scripts/ci_failure_bot.py delete mode 100644 docs/developer/ci-failure-bot.rst delete mode 100644 docs/user/ci-failure-bot.rst create mode 100644 openwisp_utils/ci_failure_bot.py diff --git a/.github/scripts/ci_failure_bot.py b/.github/scripts/ci_failure_bot.py deleted file mode 100755 index 0f247f6e..00000000 --- a/.github/scripts/ci_failure_bot.py +++ /dev/null @@ -1,287 +0,0 @@ -#!/usr/bin/env python3 -""" -CI Failure Bot - AI-powered analysis of build failures using Gemini -""" - -import json -import os -import sys - -import google.generativeai as genai -import requests -from github import Github, GithubException - - -class CIFailureBot: - def __init__(self): - self.github_token = os.environ.get("GITHUB_TOKEN") - self.gemini_api_key = os.environ.get("GEMINI_API_KEY") - self.workflow_run_id = os.environ.get("WORKFLOW_RUN_ID") - self.repository_name = os.environ.get("REPOSITORY") - self.pr_number = os.environ.get("PR_NUMBER") - - if not all( - [ - self.github_token, - self.gemini_api_key, - self.workflow_run_id, - self.repository_name, - ] - ): - missing = [] - if not self.github_token: - missing.append("GITHUB_TOKEN") - if not self.gemini_api_key: - missing.append("GEMINI_API_KEY") - if not self.workflow_run_id: - missing.append("WORKFLOW_RUN_ID") - if not self.repository_name: - missing.append("REPOSITORY") - print(f"Missing required environment variables: {', '.join(missing)}") - sys.exit(1) - - try: - self.workflow_run_id = int(self.workflow_run_id) - except ValueError: - print("Invalid WORKFLOW_RUN_ID: must be numeric") - sys.exit(1) - - self.github = Github(self.github_token) - self.repo = self.github.get_repo(self.repository_name) - - genai.configure(api_key=self.gemini_api_key) - model_name = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash") - self.model = genai.GenerativeModel(model_name) - - def get_build_logs(self): - """Get actual build logs and error output from failed jobs""" - try: - workflow_run = self.repo.get_workflow_run(self.workflow_run_id) - jobs = workflow_run.jobs() - - build_logs = [] - for job in jobs: - if job.conclusion == "failure": - # Get job logs URL and fetch content - logs_url = job.logs_url - if logs_url: - headers = { - "Authorization": f"token {self.github_token}", - "Accept": "application/vnd.github.v3+json", - } - response = requests.get(logs_url, headers=headers, timeout=30) - if response.status_code == 200: - build_logs.append( - { - "job_name": job.name, - "logs": response.text[ - -5000: - ], # Last 5000 chars to avoid token limits - } - ) - - # Also get step details - for step in job.steps: - if step.conclusion == "failure": - build_logs.append( - { - "job_name": job.name, - "step_name": step.name, - "step_number": step.number, - } - ) - - return build_logs - except (GithubException, ValueError) as e: - print(f"Error getting build logs: {e}") - return [] - - def get_pr_diff(self): - """Get the PR diff/changes if PR exists""" - if not self.pr_number or self.pr_number.strip() == "": - return None - - try: - pr_num = int(self.pr_number) - pr = self.repo.get_pull(pr_num) - - # Get diff content - diff_url = pr.diff_url - headers = { - "Authorization": f"token {self.github_token}", - "Accept": "application/vnd.github.v3.diff", - } - response = requests.get(diff_url, headers=headers, timeout=30) - if response.status_code == 200: - diff_text = response.text - if len(diff_text) > 8000: - # Take first 4000 and last 4000 chars for context - diff_text = ( - diff_text[:4000] - + "\n\n[...middle truncated...]\n\n" - + diff_text[-4000:] - ) - - return { - "title": pr.title, - "body": pr.body or "", - "diff": diff_text, - } - except (GithubException, requests.RequestException) as e: - print(f"Error getting PR diff: {e}") - - return None - - def get_workflow_yaml(self): - """Get the workflow YAML configuration""" - try: - workflow_run = self.repo.get_workflow_run(self.workflow_run_id) - workflow_path = workflow_run.path - - # Get workflow file content - workflow_file = self.repo.get_contents(workflow_path) - return workflow_file.decoded_content.decode("utf-8") - except GithubException as e: - print(f"Error getting workflow YAML: {e}") - return None - - def analyze_with_gemini(self, build_logs, pr_diff, workflow_yaml): - """Send context to Gemini for intelligent analysis""" - - # Prepare context for Gemini - project_name = self.repository_name.split("/")[-1] - repo_url = f"https://github.com/{self.repository_name}" - qa_checks_url = f"{repo_url}/blob/master/openwisp-qa-check" - runtests_url = f"{repo_url}/blob/master/runtests" - - # Build the context string with proper line breaks - build_logs_json = json.dumps(build_logs, indent=2) - if pr_diff: - pr_diff_json = json.dumps(pr_diff, indent=2) - else: - pr_diff_json = "No PR associated" - - # Gemini prompt - ignore line length for readability - context = f""" -### ROLE -You are the "Automated Maintainer Gatekeeper." Your goal is to analyze Pull Request (PR) build failures and provide direct, technically accurate, and no-nonsense feedback to contributors. - -### INPUT CONTEXT PROVIDED -1. **Build Output/Logs:** {build_logs_json} -2. **YAML Workflow:** {workflow_yaml or "Not available"} -3. **PR Diff:** {pr_diff_json} -4. **Project Name:** {project_name} -5. **Repository:** {repo_url} -6. **run-qa-checks:** {qa_checks_url} -7. **runtests:** {runtests_url} - -### TASK -Analyze the provided context to determine why the build failed. Categorize the failure and respond according to the "Tone Guidelines" below. - -### TONE GUIDELINES -- **Direct & Honest:** Do not use "fluff" or overly polite corporate language. -- **Firm Standards:** If a PR is low-effort, spammy, or fails to follow basic instructions, state that clearly. -- **Action-Oriented:** Provide the exact command or file change needed to fix the error, unless the PR is spammy, in which case we should just declare the PR as potential SPAM and ask maintainers to manually review it. - -### RESPONSE STRUCTURE -1. **Status Summary:** A one-sentence blunt assessment of the failure. -2. **Technical Diagnosis:** - - Identify the specific line/test that failed. - - Explain *why* it failed. -3. **Required Action:** Provide a code block or specific steps the contributor must take. -4. **Quality Warning (If Applicable):** If the PR appears to be "spam" (e.g., trivial README changes, AI-generated nonsense, or repeated basic errors), include a firm statement that such contributions are a drain on project resources and ping the maintainers asking them for manual review. - -### EXAMPLE RESPONSE STYLE -"The build failed because you neglected to update the test suite to match your logic changes. This project does not accept functional changes without corresponding test updates. Refer to the log at line 452. Update tests/logic_test.py before re-submitting. We prioritize high-quality, ready-to-merge code; please ensure you run local tests before pushing." - -Analyze the failure and provide your response: -""" # noqa: E501 - - try: - response = self.model.generate_content(context) - return response.text - except (ValueError, ConnectionError, Exception) as e: - print(f"Error calling Gemini API: {e}") - return self.fallback_response() - - def fallback_response(self): - """Fallback response if Gemini fails""" - return """ -## CI Build Failed - -The automated analysis is temporarily unavailable. Please check the CI logs above for specific error details. - -Common fixes: -- Run `openwisp-qa-format` for code style issues -- Run `./runtests` locally to debug test failures -- Check dependencies for setup issues - -See: https://openwisp.io/docs/dev/developer/contributing.html -""" - - def post_comment(self, message): - """Post or update comment on PR""" - if not self.pr_number or self.pr_number.strip() == "": - print("No PR number, skipping comment") - return - - # Add consistent marker for deduplication - marker = "" - message_with_marker = f"{marker}\n{message}" - - try: - pr_num = int(self.pr_number) - pr = self.repo.get_pull(pr_num) - - # Check for existing bot comments to avoid duplicates - bot_login = self.github.get_user().login - existing_comments = pr.get_issue_comments() - - for comment in existing_comments: - if comment.user.login == bot_login and marker in comment.body: - print("Bot comment already exists, updating it") - comment.edit(message_with_marker) - return - - # No existing comment, create new one - pr.create_issue_comment(message_with_marker) - print(f"Posted comment to PR #{pr_num}") - except (GithubException, ValueError) as e: - print(f"Error posting comment: {e}") - - def run(self): - """Main execution flow""" - print("CI Failure Bot starting - AI-powered analysis") - - # Double-check: Skip if this is a dependabot PR - try: - workflow_run = self.repo.get_workflow_run(self.workflow_run_id) - if workflow_run.actor and "dependabot" in workflow_run.actor.login.lower(): - print(f"Skipping dependabot PR from {workflow_run.actor.login}") - return - except (GithubException, AttributeError) as e: - print(f"Warning: Could not check actor: {e}") - - # Get all context - build_logs = self.get_build_logs() - pr_diff = self.get_pr_diff() - workflow_yaml = self.get_workflow_yaml() - - if not build_logs: - print("No build logs found") - return - - print("Analyzing failure with Gemini AI...") - - # Get AI analysis - ai_response = self.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) - - # Post intelligent comment - self.post_comment(ai_response) - - print("CI Failure Bot completed") - - -if __name__ == "__main__": - bot = CIFailureBot() - bot.run() diff --git a/.github/workflows/ci-failure-bot.yml b/.github/workflows/ci-failure-bot.yml index 94a2a046..21aaf96c 100644 --- a/.github/workflows/ci-failure-bot.yml +++ b/.github/workflows/ci-failure-bot.yml @@ -27,9 +27,8 @@ jobs: python-version: "3.11" - name: Install dependencies - # Pin dependency versions for reproducibility run: | - pip install requests>=2.31.0 PyGithub>=2.0.0 google-generativeai>=0.3.0 + pip install -e .[github_actions] - name: Run CI Failure Bot env: @@ -38,4 +37,8 @@ jobs: WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} REPOSITORY: ${{ github.repository }} PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} - run: python .github/scripts/ci_failure_bot.py + run: | + set -e # Exit immediately if any command fails + echo "Starting CI Failure Bot..." + python -m openwisp_utils.ci_failure_bot + echo "CI Failure Bot completed successfully" diff --git a/docs/developer/ci-failure-bot.rst b/docs/developer/ci-failure-bot.rst deleted file mode 100644 index f4820dac..00000000 --- a/docs/developer/ci-failure-bot.rst +++ /dev/null @@ -1,257 +0,0 @@ -CI Failure Bot -============== - -This GitHub workflow automatically analyzes failed CI builds and provides intelligent feedback to contributors using AI-powered analysis. - -The bot examines build logs, PR changes, and workflow context to generate specific, actionable guidance that helps contributors fix issues quickly. - -Usage Example -------------- - -You can use this workflow in your repository as follows: - -.. code-block:: yaml - - name: CI Failure Bot - - on: - workflow_run: - workflows: ["OpenWISP Utils CI Build"] - types: - - completed - - permissions: - issues: write - pull-requests: write - contents: read - - jobs: - ci-failure-bot: - runs-on: ubuntu-latest - if: ${{ github.event.workflow_run.conclusion == 'failure' && !contains(github.event.workflow_run.actor.login, 'dependabot') }} - - steps: - - name: Checkout repository - uses: actions/checkout@v6 - - - name: Set up Python - uses: actions/setup-python@v6 - with: - python-version: "3.11" - - - name: Install dependencies - run: | - pip install requests>=2.31.0 PyGithub>=2.0.0 google-generativeai>=0.3.0 - - - name: Run CI Failure Bot - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} - WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} - REPOSITORY: ${{ github.repository }} - PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} - run: python .github/scripts/ci_failure_bot.py - -Configuration -------------- - -Repository Secrets -~~~~~~~~~~~~~~~~~~~ - -The following secrets must be configured in your repository: - -- ``GEMINI_API_KEY``: Google Gemini API key for AI analysis - -Environment Variables -~~~~~~~~~~~~~~~~~~~~~ - -Optional environment variables for customization: - -- ``GEMINI_MODEL``: Gemini model to use (default: ``gemini-2.5-flash``) - -Features --------- - -- **Automatic triggering**: Responds to CI build failures in pull requests -- **AI-powered analysis**: Uses Google Gemini to analyze failure logs and provide specific guidance -- **Intelligent responses**: Provides direct, actionable feedback based on actual failure context -- **Comment deduplication**: Updates existing comments instead of creating duplicates -- **Dependabot exclusion**: Automatically skips dependency update PRs -- **Fallback handling**: Provides basic guidance if AI analysis fails - -Response Examples ------------------ - -The bot provides different types of responses based on the failure: - -**Code Quality Issues**:: - - The build failed because of code formatting violations. Run `openwisp-qa-format` - to fix black, flake8, and isort issues before pushing. The project requires - clean code that passes all quality checks. - -**Test Failures**:: - - Tests are failing in tests/test_models.py at line 45. The error indicates a - missing migration for the new field you added. Run `python manage.py makemigrations` - and include the migration file in your commit. - -**Low-Quality Contributions**:: - - This PR appears to be spam or low-effort. Trivial README changes without - substantial improvements are not accepted. Please review the contribution - guidelines and submit meaningful changes. - -Note ----- - -If the Gemini API is unavailable or the analysis fails, the bot will provide a fallback response with basic troubleshooting guidance. The workflow will not fail silently - any errors in the bot execution will be visible in the GitHub Actions logs. - -Implementation Details ----------------------- - -Architecture -~~~~~~~~~~~~ - -The CI failure bot consists of two main components: - -1. **GitHub Actions Workflow** (``.github/workflows/ci-failure-bot.yml``) -2. **Python Analysis Script** (``.github/scripts/ci_failure_bot.py``) - -The workflow uses the ``workflow_run`` trigger to respond to CI failures, ensuring proper access to workflow run metadata and logs with correct PR association. - -Testing -------- - -The test suite (``test_ci_failure_bot.py``) provides comprehensive coverage: - -**Test Categories**: - -- Initialization and configuration validation -- Build log retrieval with various scenarios -- PR diff handling and truncation -- Gemini API integration and error handling -- Comment posting and deduplication -- Full workflow execution - -**Mocking Strategy**: - -- External APIs (GitHub, Gemini) are fully mocked -- Environment variables are patched for isolation -- Network requests are intercepted to avoid external dependencies - -Running Tests -~~~~~~~~~~~~~ - -.. code-block:: bash - - # Run specific CI bot tests - python manage.py test openwisp_utils.tests.test_ci_failure_bot - - # Run with coverage - coverage run --source='.' manage.py test openwisp_utils.tests.test_ci_failure_bot - coverage report - -Configuration Options ---------------------- - -Environment Variables -~~~~~~~~~~~~~~~~~~~~~ - -- ``GITHUB_TOKEN``: GitHub API access (automatically provided by Actions) -- ``GEMINI_API_KEY``: Google Gemini API key (repository secret) -- ``WORKFLOW_RUN_ID``: Workflow run identifier (automatically provided) -- ``REPOSITORY``: Repository name (automatically provided) -- ``PR_NUMBER``: Pull request number (automatically provided) -- ``GEMINI_MODEL``: Gemini model name (optional, defaults to ``gemini-2.5-flash``) - -Permissions -~~~~~~~~~~~ - -The workflow requires these GitHub permissions: - -.. code-block:: yaml - - permissions: - issues: write - pull-requests: write - contents: read - -Deployment ----------- - -Adding to New Repositories -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. Copy workflow file to ``.github/workflows/ci-failure-bot.yml`` -2. Copy script to ``.github/scripts/ci_failure_bot.py`` -3. Configure ``GEMINI_API_KEY`` in repository secrets -4. Update workflow name in trigger to match target CI workflow - -Customization -~~~~~~~~~~~~~ - -**Prompt Customization**: -Modify the Gemini prompt in ``analyze_with_gemini()`` to adjust: - -- Response tone and style -- Project-specific guidance -- Error categorization logic - -**Trigger Customization**: -Adjust the workflow trigger to target different CI workflows or conditions. - -Troubleshooting ---------------- - -Common Development Issues -~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Import Errors in Tests**: -The test file adds the scripts directory to Python path: - -.. code-block:: python - - scripts_path = os.path.join(os.path.dirname(__file__), '../../.github/scripts') - sys.path.insert(0, scripts_path) - -**API Rate Limits**: -The bot implements request timeouts and error handling for API limits. - -**Token Limits**: -Build logs and PR diffs are truncated to stay within Gemini token limits. - -Debugging -~~~~~~~~~ - -Enable debug output by adding print statements or using the workflow logs: - -.. code-block:: bash - - # View workflow run logs - gh run view --log - - # Check specific job logs - gh run view --log --job="ci-failure-bot" - -Future Enhancements -------------------- - -Potential improvements: - -- **Multi-language support**: Extend beyond Python projects -- **Custom rules**: Repository-specific failure analysis rules -- **Integration metrics**: Track bot effectiveness and accuracy -- **Advanced AI**: Use function calling for more structured responses - -Contributing ------------- - -When contributing to the CI failure bot: - -1. **Add tests**: All new functionality must include comprehensive tests -2. **Update documentation**: Keep both user and developer docs current -3. **Follow patterns**: Maintain consistency with existing OpenWISP code style -4. **Test thoroughly**: Verify on demo repositories before submitting - -For more information, see the main `OpenWISP contribution guidelines `_. \ No newline at end of file diff --git a/docs/developer/index.rst b/docs/developer/index.rst index 9840ffc0..9078bf8e 100644 --- a/docs/developer/index.rst +++ b/docs/developer/index.rst @@ -18,7 +18,6 @@ Developer Docs ./other-utilities.rst ./reusable-github-utils.rst ./releaser-tool.rst - ./ci-failure-bot.rst Other useful resources: diff --git a/docs/developer/reusable-github-utils.rst b/docs/developer/reusable-github-utils.rst index 4f9290f9..9ff1bda1 100644 --- a/docs/developer/reusable-github-utils.rst +++ b/docs/developer/reusable-github-utils.rst @@ -56,6 +56,128 @@ times with a 30 second delay between attempts. attempts, the action will exit with a non-zero status, causing the workflow to fail. +CI Failure Bot +~~~~~~~~~~~~~~ + +This GitHub workflow automatically analyzes failed CI builds and provides +intelligent feedback to contributors using AI-powered analysis. + +The bot examines build logs, PR changes, and workflow context to generate +specific, actionable guidance that helps contributors fix issues quickly. + +**Inputs** + +- ``GEMINI_API_KEY`` (optional): Google Gemini API key for AI analysis. If + not provided, the bot uses fallback responses +- ``GEMINI_MODEL`` (optional): Gemini model to use. Defaults to + ``gemini-2.5-flash`` + +**Usage Example** + +You can use this workflow in your repository as follows: + +.. code-block:: yaml + + name: CI Failure Bot + + on: + workflow_run: + workflows: ["OpenWISP Utils CI Build"] + types: + - completed + + permissions: + issues: write + pull-requests: write + contents: read + + jobs: + ci-failure-bot: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'failure' && !contains(github.event.workflow_run.actor.login, 'dependabot') }} + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: "3.11" + + - name: Install dependencies + run: | + pip install -e .[github_actions] + + - name: Run CI Failure Bot + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }} + WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} + REPOSITORY: ${{ github.repository }} + PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} + run: python -m openwisp_utils.ci_failure_bot + +This example automatically triggers when the "OpenWISP Utils CI Build" +workflow fails, analyzes the failure using Gemini AI, and posts +intelligent feedback to the associated pull request. + +**Features** + +- **Automatic triggering**: Responds to CI build failures in pull requests +- **AI-powered analysis**: Uses Google Gemini to analyze failure logs and + provide specific guidance +- **OpenWISP QA integration**: Instructs contributors to use ``pip install + -e .[qa]``, ``./run-qa-checks``, and ``openwisp-qa-format`` for proper + code formatting +- **Intelligent responses**: Provides direct, actionable feedback based on + actual failure context +- **Comment deduplication**: Updates existing comments instead of creating + duplicates +- **Dependabot exclusion**: Automatically skips dependency update PRs +- **Fork detection**: Skips external PRs for security +- **Fallback handling**: Provides basic guidance if AI analysis fails + +**Configuration** ++++++++++++++++++ + +Repository Secrets +++++++++++++++++++ + +The following secrets can be configured in the repository for enhanced +functionality: + +- ``GEMINI_API_KEY``: Google Gemini API key for AI analysis (optional - + fallback responses used if not provided) + +Environment Variables ++++++++++++++++++++++ + +Optional environment variables for customization: + +- ``GEMINI_MODEL``: Gemini model to use (default: ``gemini-2.5-flash``) + +**Limitations** + +- **Optional Gemini API**: Google Gemini API access enhances analysis + quality, but the bot provides fallback responses when unavailable +- **Privacy consideration**: PR diffs and build logs are sent to Google's + Gemini AI service for analysis when API key is provided. Organizations + with sensitive codebases should review Google's data handling policies +- **API costs**: Each CI failure with Gemini enabled triggers an API call. + Monitor usage to manage costs, especially in repositories with frequent + CI failures +- Analysis quality depends on error log clarity +- May not handle very complex or unusual failure scenarios +- Skips dependabot PRs to avoid unnecessary noise + +.. note:: + + If the Gemini API is unavailable, the bot provides a fallback response + with basic troubleshooting guidance. The workflow will fail loudly if + the bot script encounters critical errors, ensuring issues are visible + in GitHub Actions logs. + GitHub Workflows ---------------- diff --git a/docs/index.rst b/docs/index.rst index 9ee626cf..291d6282 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,7 +37,6 @@ OpenWISP architecture. ./user/metric-collection.rst ./user/admin-filters.rst ./user/settings.rst - ./user/ci-failure-bot.rst .. toctree:: :caption: Utils Developer Docs diff --git a/docs/user/ci-failure-bot.rst b/docs/user/ci-failure-bot.rst deleted file mode 100644 index b9214098..00000000 --- a/docs/user/ci-failure-bot.rst +++ /dev/null @@ -1,114 +0,0 @@ -CI Failure Bot -=============== - -The CI Failure Bot is an automated system that analyzes failed CI builds and provides intelligent feedback to contributors. It uses AI-powered analysis to examine build logs, PR changes, and workflow context to generate specific, actionable guidance. - -Features --------- - -- **Automatic triggering**: Responds to CI build failures in pull requests -- **AI-powered analysis**: Uses Google Gemini to analyze failure logs and provide specific guidance -- **Intelligent responses**: Provides direct, actionable feedback based on actual failure context -- **Comment deduplication**: Updates existing comments instead of creating duplicates -- **Dependabot exclusion**: Automatically skips dependency update PRs -- **Fallback handling**: Provides basic guidance if AI analysis fails - -How It Works ------------- - -When a CI build fails on a pull request, the bot: - -1. **Collects context**: Gathers build logs, PR diff, and workflow configuration -2. **AI analysis**: Sends context to Gemini AI for intelligent analysis -3. **Posts feedback**: Creates or updates a comment with specific guidance -4. **Avoids spam**: Uses markers to prevent duplicate comments - -The bot provides direct, no-nonsense feedback following OpenWISP's standards for code quality and contribution guidelines. - -Configuration -------------- - -Repository Secrets -~~~~~~~~~~~~~~~~~~ - -The following secrets must be configured in the repository: - -- ``GEMINI_API_KEY``: Google Gemini API key for AI analysis - -Environment Variables -~~~~~~~~~~~~~~~~~~~~~ - -Optional environment variables for customization: - -- ``GEMINI_MODEL``: Gemini model to use (default: ``gemini-2.5-flash``) - -Setup ------ - -The CI failure bot is automatically enabled for repositories with the workflow file. No additional setup is required beyond configuring the API key. - -Workflow Integration -~~~~~~~~~~~~~~~~~~~~ - -The bot integrates with existing CI workflows through the ``workflow_run`` trigger: - -.. code-block:: yaml - - on: - workflow_run: - workflows: ["OpenWISP Utils CI Build"] - types: - - completed - -This ensures the bot only runs after the main CI workflow completes with a failure. - -Response Examples ------------------ - -The bot provides different types of responses based on the failure: - -**Code Quality Issues**:: - - The build failed because of code formatting violations. Run `openwisp-qa-format` - to fix black, flake8, and isort issues before pushing. The project requires - clean code that passes all quality checks. - -**Test Failures**:: - - Tests are failing in tests/test_models.py at line 45. The error indicates a - missing migration for the new field you added. Run `python manage.py makemigrations` - and include the migration file in your commit. - -**Low-Quality Contributions**:: - - This PR appears to be spam or low-effort. Trivial README changes without - substantial improvements are not accepted. Please review the contribution - guidelines and submit meaningful changes. - -Troubleshooting ---------------- - -Common Issues -~~~~~~~~~~~~~ - -**Bot not responding** - - Verify ``GEMINI_API_KEY`` is configured correctly - - Check that the workflow file exists and is properly formatted - - Ensure the PR has an associated workflow run failure - -**Incorrect analysis** - - The bot learns from context - more specific error messages lead to better analysis - - Complex failures may require manual review and contributor guidance - -**Permission errors** - - Verify the workflow has proper permissions for ``issues: write`` and ``pull-requests: write`` - -Limitations ------------ - -- Requires Google Gemini API access -- Analysis quality depends on error log clarity -- May not handle very complex or unusual failure scenarios -- Skips dependabot PRs to avoid unnecessary noise - -For more information about contributing to OpenWISP projects, see the `contribution guidelines `_. \ No newline at end of file diff --git a/openwisp_utils/ci_failure_bot.py b/openwisp_utils/ci_failure_bot.py new file mode 100644 index 00000000..7f90e310 --- /dev/null +++ b/openwisp_utils/ci_failure_bot.py @@ -0,0 +1,427 @@ +#!/usr/bin/env python3 +"""CI Failure Bot - AI-powered analysis of build failures using Gemini""" + +import io +import json +import os +import sys +import zipfile + +import google.generativeai as genai +import requests +from github import Github, GithubException + + +class CIFailureBot: + def __init__(self): + self.github_token = os.environ.get("GITHUB_TOKEN") + self.gemini_api_key = os.environ.get("GEMINI_API_KEY") + self.workflow_run_id = os.environ.get("WORKFLOW_RUN_ID") + self.repository_name = os.environ.get("REPOSITORY") + self.pr_number = os.environ.get("PR_NUMBER") + if not all( + [ + self.github_token, + self.workflow_run_id, + self.repository_name, + ] + ): + missing = [] + if not self.github_token: + missing.append("GITHUB_TOKEN") + if not self.workflow_run_id: + missing.append("WORKFLOW_RUN_ID") + if not self.repository_name: + missing.append("REPOSITORY") + print(f"Missing required environment variables: {', '.join(missing)}") + sys.exit(1) + try: + self.workflow_run_id = int(self.workflow_run_id) + except ValueError: + print("Invalid WORKFLOW_RUN_ID: must be numeric") + sys.exit(1) + self.github = Github(self.github_token) + self.repo = self.github.get_repo(self.repository_name) + # Initialize Gemini client with new API (optional) + if self.gemini_api_key: + genai.configure(api_key=self.gemini_api_key) + self.model_name = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash") + self.model = genai.GenerativeModel(self.model_name) + else: + print("Warning: GEMINI_API_KEY not provided, will use fallback responses") + self.model = None + + def get_build_logs(self): + """Get actual build logs and error output from failed jobs""" + try: + workflow_run = self.repo.get_workflow_run(self.workflow_run_id) + jobs = workflow_run.jobs() + build_logs = [] + for job in jobs: + if job.conclusion == "failure": + # Get job logs URL and fetch content + logs_url = job.logs_url + if logs_url: + headers = { + "Authorization": f"token {self.github_token}", + "Accept": "application/vnd.github.v3+json", + } + response = requests.get(logs_url, headers=headers, timeout=30) + response.raise_for_status() + # Handle ZIP archive response from GitHub Actions logs API + raw = response.content + if raw[:2] == b"PK": # ZIP file signature + with zipfile.ZipFile(io.BytesIO(raw)) as zf: + parts = [] + for name in zf.namelist(): + if name.endswith(".txt"): + parts.append( + zf.read(name).decode("utf-8", "replace") + ) + log_text = "\n".join(parts).strip() + else: + log_text = raw.decode("utf-8", "replace") + if len(log_text) > 5000: + # Take first 2000 and last 3000 chars for better context + log_text = ( + log_text[:2000] + + "\n\n[...middle truncated...]\n\n" + + log_text[-3000:] + ) + build_logs.append( + { + "job_name": job.name, + "logs": log_text, + } + ) + # Also get step details + for step in job.steps: + if step.conclusion == "failure": + build_logs.append( + { + "job_name": job.name, + "step_name": step.name, + "step_number": step.number, + } + ) + return build_logs + except (GithubException, requests.RequestException, ValueError) as e: + print(f"Error getting build logs: {e}") + return [] + + def get_pr_diff(self): + """Get the PR diff/changes if PR exists""" + if not self.pr_number or self.pr_number.strip() == "": + return None + try: + pr_num = int(self.pr_number) + pr = self.repo.get_pull(pr_num) + # Use git diff instead of HTTP request for efficiency + try: + import subprocess + + # Validate branch name to prevent injection + default_branch = self.repo.default_branch + if ( + not default_branch + or not default_branch.replace("-", "") + .replace("_", "") + .replace("/", "") + .isalnum() + ): + raise ValueError("Invalid branch name") + result = subprocess.run( + ["git", "diff", f"origin/{default_branch}"], + capture_output=True, + text=True, + timeout=30, + ) + if result.returncode == 0 and result.stdout.strip(): + diff_text = result.stdout + else: + # Fallback to HTTP if git diff fails or returns empty + diff_url = pr.diff_url + headers = { + "Authorization": f"token {self.github_token}", + "Accept": "application/vnd.github.v3.diff", + } + response = requests.get(diff_url, headers=headers, timeout=30) + if response.status_code == 200: + diff_text = response.text + else: + return None + except (subprocess.SubprocessError, FileNotFoundError, ValueError): + # Fallback to HTTP if git is not available + diff_url = pr.diff_url + headers = { + "Authorization": f"token {self.github_token}", + "Accept": "application/vnd.github.v3.diff", + } + response = requests.get(diff_url, headers=headers, timeout=30) + if response.status_code == 200: + diff_text = response.text + else: + return None + if len(diff_text) > 8000: + # Take first 4000 and last 4000 chars for context + diff_text = ( + diff_text[:4000] + + "\n\n[...middle truncated...]\n\n" + + diff_text[-4000:] + ) + return { + "title": pr.title, + "body": pr.body or "", + "diff": diff_text, + } + except (GithubException, requests.RequestException, ValueError) as e: + print(f"Error getting PR diff: {e}") + return None + + def get_workflow_yaml(self): + """Get the workflow YAML configuration""" + try: + workflow_run = self.repo.get_workflow_run(self.workflow_run_id) + workflow_path = workflow_run.path + # Get workflow file content from the commit that ran + workflow_file = self.repo.get_contents( + workflow_path, ref=workflow_run.head_sha + ) + return workflow_file.decoded_content.decode("utf-8") + except GithubException as e: + print(f"Error getting workflow YAML: {e}") + return None + + def analyze_with_gemini(self, build_logs, pr_diff, workflow_yaml): + """Send context to Gemini for intelligent analysis""" + # Prepare context for Gemini + project_name = self.repository_name.split("/")[-1] + repo_url = f"https://github.com/{self.repository_name}" + # Use dynamic branch detection instead of hardcoded "master" + default_branch = self.repo.default_branch + qa_checks_url = f"{repo_url}/blob/{default_branch}/openwisp-qa-check" + runtests_url = f"{repo_url}/blob/{default_branch}/runtests" + # Build the context string with proper line breaks + build_logs_json = json.dumps(build_logs, indent=2) + if pr_diff: + pr_diff_json = json.dumps(pr_diff, indent=2) + else: + pr_diff_json = "No PR associated" + # Gemini prompt with EXPLICIT OpenWISP QA commands + context = f""" +### CRITICAL: YOU MUST USE OPENWISP QA COMMANDS ONLY + +For ANY code quality issues, you MUST recommend these EXACT commands: +1. pip install -e .[qa] +2. ./run-qa-checks +3. openwisp-qa-format + +DO NOT recommend: black, isort, flake8 individually +ALWAYS use the OpenWISP QA workflow above. + +### ROLE +You are the "Automated Maintainer Gatekeeper." Your goal is to analyze Pull Request (PR) +build failures and provide direct, technically accurate, and no-nonsense feedback to contributors. + +### INPUT CONTEXT PROVIDED +1. **Build Output/Logs:** {build_logs_json} +2. **YAML Workflow:** {workflow_yaml or "Not available"} +3. **PR Diff:** {pr_diff_json} +4. **Project Name:** {project_name} +5. **Repository:** {repo_url} +6. **run-qa-checks:** {qa_checks_url} +7. **runtests:** {runtests_url} + +### MANDATORY QA RESPONSE FORMAT +If you detect code formatting/style issues, respond EXACTLY like this: + +**Required Actions:** +- Install QA tools: `pip install -e .[qa]` +- Run `./run-qa-checks` to see all issues +- Run `openwisp-qa-format` to automatically fix formatting +- Run `./runtests` locally to verify all tests pass + +### TASK +Analyze the provided context to determine why the build failed. +Categorize the failure and respond according to the "Tone Guidelines" below. + +### PR REQUIREMENTS CHECKLIST +Before providing feedback, verify these requirements: +- Does the PR reference any issue? If so, is it correctly mentioned in the commit description? +- If the PR is a fix, change or feature it must include automated tests or it will be rejected. +- Does the CI build fail? If yes, report the key reasons to the contributor + and if the solution is obvious provide it, if finding the solution is not + obvious and requires more than 30% additional computation just report the key reasons. +- If QA checks are failing, instruct the contributor to install QA tools with + `pip install -e .[qa]` and run `./run-qa-checks` to see all issues, then use + `openwisp-qa-format` to automatically fix formatting issues. Reference the + [openwisp contributing guidelines](https://openwisp.io/docs/stable/developer/contributing.html) + for complete setup instructions. +- Is the PR addressing changes to the user interface? If yes, check if a selenium + browser test is present and if the PR description attaches screenshots or screencasts, + if not, report this to the user and ask to provide both +- If this PR adds a new feature or notably changes an existing documented feature, + check if documentation updates are present and if not report it +- Do you detect coderabbitai or copilot reviews asking for changes after the latest commit? + If so, ask the user to follow up with those review comments one by one + +### TONE GUIDELINES +- **Direct & Honest:** Do not use "fluff" or overly polite corporate language. +- **Firm Standards:** If a PR is low-effort, spammy, or fails to follow basic instructions, + state that clearly. +- **Action-Oriented:** Provide the exact command or file change needed to fix the error, + unless the PR is spammy, in which case we should just declare the PR as potential SPAM + and ask maintainers to manually review it. + +### RESPONSE STRUCTURE +1. **Status Summary:** A one-sentence blunt assessment of the failure. +2. **Technical Diagnosis:** + - Identify the specific line/test that failed. + - Explain *why* it failed. +3. **Required Action:** Provide a code block or specific steps the contributor must take. +4. **Quality Warning (If Applicable):** If the PR appears to be "spam" + (e.g., trivial README changes, AI-generated nonsense, or repeated basic errors), + include a firm statement that such contributions are a drain on project resources + and ping the maintainers asking them for manual review. + +### EXAMPLE RESPONSE STYLE +The build failed because you neglected to update the test suite to match your logic changes. + +**Required Actions:** +- Update tests/logic_test.py to cover your new functionality +- Install QA tools: `pip install -e .[qa]` +- Run `./run-qa-checks` to see all issues +- Run `openwisp-qa-format` to automatically fix formatting +- Run `./runtests` locally to verify all tests pass + +**Missing Requirements:** +- [ ] Automated tests for new functionality +- [ ] Code follows OpenWISP style guidelines (use openwisp-qa-format) + +We prioritize high-quality, ready-to-merge code. Please ensure you run local QA checks before pushing. + +Analyze the failure and provide your response: +""" + try: + # Check if Gemini is available + if not self.model: + return self.fallback_response() + # Use Gemini client API + response = self.model.generate_content(context) + return response.text + except (ValueError, ConnectionError, Exception) as e: + print(f"Error calling Gemini API: {e}") + return self.fallback_response() + + def fallback_response(self): + """Fallback response if Gemini fails""" + return """ +## CI Build Failed + +The automated analysis is temporarily unavailable. Please check the CI logs above for specific error details. + +**OpenWISP QA Workflow:** +1. Install QA tools: `pip install -e .[qa]` +2. Run `./run-qa-checks` to see all issues +3. Run `openwisp-qa-format` to automatically fix formatting +4. Run `./runtests` locally to verify all tests pass + +**Common Issues:** +- Code style violations (black, flake8, isort) +- Missing or failing tests +- Import/dependency problems + +See: https://openwisp.io/docs/dev/developer/contributing.html +""" + + def post_comment(self, message): + """Post or update comment on PR""" + if not self.pr_number or self.pr_number.strip() == "": + print("No PR number, skipping comment") + return + # Add consistent marker for deduplication + marker = "" + message_with_marker = f"{marker}\n{message}" + try: + pr_num = int(self.pr_number) + pr = self.repo.get_pull(pr_num) + # Check for existing bot comments to avoid duplicates + bot_login = self.github.get_user().login + existing_comments = pr.get_issue_comments() + for comment in existing_comments: + if comment.user.login == bot_login and marker in comment.body: + print("Bot comment already exists, updating it") + comment.edit(message_with_marker) + return + # No existing comment, create new one + pr.create_issue_comment(message_with_marker) + print(f"Posted comment to PR #{pr_num}") + except (GithubException, ValueError) as e: + print(f"Error posting comment: {e}") + + def run(self): + """Main execution flow""" + try: + print("CI Failure Bot starting - AI-powered analysis") + # Security checks: Skip if this is a dependabot PR or fork PR + try: + workflow_run = self.repo.get_workflow_run(self.workflow_run_id) + if ( + workflow_run.actor + and "dependabot" in workflow_run.actor.login.lower() + ): + print(f"Skipping dependabot PR from {workflow_run.actor.login}") + return + # Skip fork PRs for security (avoid sending external code to AI) + if self.pr_number and self.pr_number.strip(): + try: + pr_num = int(self.pr_number) + pr = self.repo.get_pull(pr_num) + # Handle deleted fork repositories + if pr.head.repo is None: + print("Skipping PR with deleted head repository") + return + if pr.head.repo.full_name != self.repository_name: + print(f"Skipping fork PR from {pr.head.repo.full_name}") + return + except (GithubException, ValueError) as e: + print(f"Warning: Could not check fork status: {e}") + except (GithubException, AttributeError) as e: + print(f"Warning: Could not check actor: {e}") + # Get all context + build_logs = self.get_build_logs() + pr_diff = self.get_pr_diff() + workflow_yaml = self.get_workflow_yaml() + if not build_logs: + print("No build logs found") + return + print("Analyzing failure with Gemini AI...") + # Get AI analysis + ai_response = self.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) + # Post intelligent comment + self.post_comment(ai_response) + print("CI Failure Bot completed successfully") + except Exception as e: + print(f"CRITICAL ERROR in CI Failure Bot: {e}") + print(f"Error type: {type(e).__name__}") + import traceback + + traceback.print_exc() + sys.exit(1) + + +def main(): + """Entry point for the CI failure bot""" + try: + bot = CIFailureBot() + bot.run() + except Exception as e: + print(f"FATAL: CI Failure Bot crashed: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/openwisp_utils/tests/test_ci_failure_bot.py b/openwisp_utils/tests/test_ci_failure_bot.py index 73b7843c..7cc6399d 100644 --- a/openwisp_utils/tests/test_ci_failure_bot.py +++ b/openwisp_utils/tests/test_ci_failure_bot.py @@ -1,15 +1,11 @@ import os -import sys +import subprocess from unittest.mock import Mock, patch from django.test import TestCase -# Add the .github/scripts directory to Python path for testing -scripts_path = os.path.join(os.path.dirname(__file__), "../../.github/scripts") -sys.path.insert(0, scripts_path) - try: - from ci_failure_bot import CIFailureBot + from openwisp_utils.ci_failure_bot import CIFailureBot except ImportError: CIFailureBot = None @@ -18,7 +14,6 @@ class TestCIFailureBot(TestCase): def setUp(self): if CIFailureBot is None: self.skipTest("CI failure bot script not available") - self.env_vars = { "GITHUB_TOKEN": "test_token", "GEMINI_API_KEY": "test_gemini_key", @@ -26,19 +21,14 @@ def setUp(self): "REPOSITORY": "openwisp/openwisp-utils", "PR_NUMBER": "1", } - self.env_patcher = patch.dict(os.environ, self.env_vars) self.env_patcher.start() - - self.github_patcher = patch("ci_failure_bot.Github") - self.genai_patcher = patch("ci_failure_bot.genai") - + self.github_patcher = patch("openwisp_utils.ci_failure_bot.Github") + self.genai_patcher = patch("openwisp_utils.ci_failure_bot.genai") self.mock_github = self.github_patcher.start() self.mock_genai = self.genai_patcher.start() - self.mock_repo = Mock() self.mock_github.return_value.get_repo.return_value = self.mock_repo - self.mock_model = Mock() self.mock_genai.GenerativeModel.return_value = self.mock_model @@ -52,13 +42,11 @@ def tearDown(self): def test_init_success(self): bot = CIFailureBot() - self.assertEqual(bot.github_token, "test_token") self.assertEqual(bot.gemini_api_key, "test_gemini_key") self.assertEqual(bot.workflow_run_id, 12345) self.assertEqual(bot.repository_name, "openwisp/openwisp-utils") self.assertEqual(bot.pr_number, "1") - self.mock_github.assert_called_once_with("test_token") self.mock_genai.configure.assert_called_once_with(api_key="test_gemini_key") @@ -67,6 +55,18 @@ def test_init_missing_env_vars(self): with self.assertRaises(SystemExit): CIFailureBot() + def test_init_without_gemini_key(self): + env_vars_no_gemini = { + "GITHUB_TOKEN": "test_token", + "WORKFLOW_RUN_ID": "12345", + "REPOSITORY": "openwisp/openwisp-utils", + "PR_NUMBER": "1", + } + with patch.dict(os.environ, env_vars_no_gemini, clear=True): + bot = CIFailureBot() + self.assertIsNone(bot.model) + self.mock_genai.configure.assert_not_called() + def test_init_invalid_workflow_run_id(self): with patch.dict(os.environ, {"WORKFLOW_RUN_ID": "invalid"}): with self.assertRaises(SystemExit): @@ -74,35 +74,30 @@ def test_init_invalid_workflow_run_id(self): def test_init_custom_gemini_model(self): with patch.dict(os.environ, {"GEMINI_MODEL": "gemini-pro"}): - CIFailureBot() - self.mock_genai.GenerativeModel.assert_called_with("gemini-pro") + bot = CIFailureBot() + if bot.model: + self.mock_genai.GenerativeModel.assert_called_with("gemini-pro") - @patch("ci_failure_bot.requests.get") + @patch("openwisp_utils.ci_failure_bot.requests.get") def test_get_build_logs_success(self, mock_requests): bot = CIFailureBot() - mock_workflow_run = Mock() mock_job = Mock() mock_job.conclusion = "failure" mock_job.name = "test-job" mock_job.logs_url = "https://api.github.com/logs/123" - mock_step = Mock() mock_step.conclusion = "failure" mock_step.name = "Run tests" mock_step.number = 1 mock_job.steps = [mock_step] - mock_workflow_run.jobs.return_value = [mock_job] self.mock_repo.get_workflow_run.return_value = mock_workflow_run - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = "Error: Test failed at line 42\n" * 1000 + mock_response.content = b"Error: Test failed at line 42\n" * 1000 + mock_response.raise_for_status = Mock() mock_requests.return_value = mock_response - logs = bot.get_build_logs() - self.assertEqual(len(logs), 2) self.assertIn("job_name", logs[0]) self.assertIn("logs", logs[0]) @@ -110,100 +105,83 @@ def test_get_build_logs_success(self, mock_requests): def test_get_build_logs_no_failures(self): bot = CIFailureBot() - mock_workflow_run = Mock() mock_job = Mock() mock_job.conclusion = "success" mock_workflow_run.jobs.return_value = [mock_job] self.mock_repo.get_workflow_run.return_value = mock_workflow_run - logs = bot.get_build_logs() - self.assertEqual(logs, []) - @patch("ci_failure_bot.requests.get") - def test_get_pr_diff_success(self, mock_requests): + @patch("openwisp_utils.ci_failure_bot.subprocess.run") + @patch("openwisp_utils.ci_failure_bot.requests.get") + def test_get_pr_diff_success(self, mock_requests, mock_subprocess): bot = CIFailureBot() - mock_pr = Mock() mock_pr.title = "Test PR" mock_pr.body = "Test description" mock_pr.diff_url = "https://github.com/test/diff" self.mock_repo.get_pull.return_value = mock_pr - - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = "diff --git a/test.py b/test.py\n" + "line\n" * 1000 - mock_requests.return_value = mock_response - + # Mock successful git diff + mock_subprocess.return_value.returncode = 0 + mock_subprocess.return_value.stdout = ( + "diff --git a/test.py b/test.py\n" + "line\n" * 1000 + ) diff_data = bot.get_pr_diff() - self.assertEqual(diff_data["title"], "Test PR") self.assertEqual(diff_data["body"], "Test description") self.assertIn("[...middle truncated...]", diff_data["diff"]) + mock_subprocess.assert_called_once() def test_get_pr_diff_no_pr_number(self): bot = CIFailureBot() bot.pr_number = None - diff_data = bot.get_pr_diff() - self.assertIsNone(diff_data) def test_get_workflow_yaml_success(self): bot = CIFailureBot() - mock_workflow_run = Mock() mock_workflow_run.path = ".github/workflows/ci.yml" self.mock_repo.get_workflow_run.return_value = mock_workflow_run - mock_file = Mock() mock_file.decoded_content = b"name: CI\non: [push]" self.mock_repo.get_contents.return_value = mock_file - yaml_content = bot.get_workflow_yaml() - self.assertEqual(yaml_content, "name: CI\non: [push]") def test_analyze_with_gemini_success(self): bot = CIFailureBot() - mock_response = Mock() mock_response.text = "The build failed because of a syntax error." self.mock_model.generate_content.return_value = mock_response - build_logs = [{"job_name": "test", "logs": "Error: syntax error"}] pr_diff = {"title": "Test", "diff": "diff content"} workflow_yaml = "name: CI" - result = bot.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) - - self.assertEqual(result, "The build failed because of a syntax error.") - self.mock_model.generate_content.assert_called_once() + if bot.model: + self.assertEqual(result, "The build failed because of a syntax error.") + self.mock_model.generate_content.assert_called_once() + else: + self.assertIn("CI Build Failed", result) def test_analyze_with_gemini_api_error(self): bot = CIFailureBot() - - self.mock_model.generate_content.side_effect = Exception("API Error") - + if bot.model: + self.mock_model.generate_content.side_effect = Exception("API Error") result = bot.analyze_with_gemini([], None, None) - self.assertIn("CI Build Failed", result) self.assertIn("temporarily unavailable", result) def test_post_comment_success(self): bot = CIFailureBot() - mock_pr = Mock() mock_user = Mock() mock_user.login = "github-actions[bot]" self.mock_github.return_value.get_user.return_value = mock_user self.mock_repo.get_pull.return_value = mock_pr - mock_pr.get_issue_comments.return_value = [] - bot.post_comment("Test message") - mock_pr.create_issue_comment.assert_called_once() call_args = mock_pr.create_issue_comment.call_args[0][0] self.assertIn("", call_args) @@ -211,94 +189,178 @@ def test_post_comment_success(self): def test_post_comment_update_existing(self): bot = CIFailureBot() - mock_pr = Mock() mock_user = Mock() mock_user.login = "github-actions[bot]" self.mock_github.return_value.get_user.return_value = mock_user self.mock_repo.get_pull.return_value = mock_pr - mock_comment = Mock() mock_comment.user.login = "github-actions[bot]" mock_comment.body = "\nOld message" mock_pr.get_issue_comments.return_value = [mock_comment] - bot.post_comment("New message") - mock_comment.edit.assert_called_once() mock_pr.create_issue_comment.assert_not_called() def test_post_comment_no_pr_number(self): bot = CIFailureBot() bot.pr_number = None - - bot.post_comment("Test message") + with patch("builtins.print") as mock_print: + bot.post_comment("Test message") + mock_print.assert_any_call("No PR number, skipping comment") def test_run_skips_dependabot(self): bot = CIFailureBot() - mock_workflow_run = Mock() mock_actor = Mock() mock_actor.login = "dependabot[bot]" mock_workflow_run.actor = mock_actor self.mock_repo.get_workflow_run.return_value = mock_workflow_run - with patch("builtins.print") as mock_print: bot.run() - mock_print.assert_any_call("Skipping dependabot PR from dependabot[bot]") - @patch("ci_failure_bot.requests.get") + @patch("openwisp_utils.ci_failure_bot.requests.get") def test_run_full_workflow(self, mock_requests): bot = CIFailureBot() - mock_workflow_run = Mock() mock_actor = Mock() mock_actor.login = "user" mock_workflow_run.actor = mock_actor - mock_job = Mock() mock_job.conclusion = "failure" mock_job.name = "test-job" mock_job.logs_url = "https://api.github.com/logs/123" mock_job.steps = [] mock_workflow_run.jobs.return_value = [mock_job] - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = "Build failed" + mock_response.content = b"Build failed" + mock_response.raise_for_status = Mock() mock_requests.return_value = mock_response - mock_gemini_response = Mock() mock_gemini_response.text = "Analysis: Build failed due to syntax error" self.mock_model.generate_content.return_value = mock_gemini_response - mock_pr = Mock() + mock_pr.head = Mock() + mock_pr.head.repo = Mock() + mock_pr.head.repo.full_name = "openwisp/openwisp-utils" mock_user = Mock() mock_user.login = "github-actions[bot]" self.mock_github.return_value.get_user.return_value = mock_user self.mock_repo.get_pull.return_value = mock_pr mock_pr.get_issue_comments.return_value = [] - bot.run() - - self.mock_model.generate_content.assert_called_once() + if bot.model: + self.mock_model.generate_content.assert_called_once() mock_pr.create_issue_comment.assert_called_once() def test_run_no_build_logs(self): bot = CIFailureBot() - mock_workflow_run = Mock() mock_actor = Mock() mock_actor.login = "user" mock_workflow_run.actor = mock_actor mock_workflow_run.jobs.return_value = [] self.mock_repo.get_workflow_run.return_value = mock_workflow_run - with patch("builtins.print") as mock_print: bot.run() - mock_print.assert_any_call("No build logs found") - self.mock_model.generate_content.assert_not_called() + if bot.model: + self.mock_model.generate_content.assert_not_called() + + @patch("openwisp_utils.ci_failure_bot.requests.get") + def test_get_build_logs_zip_format(self, mock_requests): + """Test ZIP-encoded log extraction in get_build_logs()""" + import io + import zipfile + + bot = CIFailureBot() + # Create in-memory ZIP file with log content + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file: + zip_file.writestr("failed_job.txt", "Error: test failed\nstack trace here") + zip_content = zip_buffer.getvalue() + # Mock workflow and job + mock_workflow_run = Mock() + mock_job = Mock() + mock_job.name = "test-job" + mock_job.conclusion = "failure" + mock_job.logs_url = "https://api.github.com/logs/123" + step = Mock() + step.name = "failing-step" + step.conclusion = "failure" + step.number = 1 + mock_job.steps = [step] + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + # Mock requests to return ZIP content + mock_response = Mock() + mock_response.content = zip_content + mock_response.raise_for_status = Mock() + mock_requests.return_value = mock_response + logs = bot.get_build_logs() + self.assertEqual(len(logs), 2) # job log + step info + self.assertIn("Error: test failed", logs[0]["logs"]) + self.assertEqual(logs[0]["job_name"], "test-job") + + @patch("openwisp_utils.ci_failure_bot.subprocess.run") + @patch("openwisp_utils.ci_failure_bot.requests.get") + def test_get_pr_diff_git_failure_http_fallback( + self, mock_requests, mock_subprocess + ): + """Test git diff fallback to HTTP when subprocess fails""" + bot = CIFailureBot() + mock_pr = Mock() + mock_pr.title = "Test PR" + mock_pr.body = "Test description" + mock_pr.diff_url = "https://github.com/test/diff" + self.mock_repo.get_pull.return_value = mock_pr + self.mock_repo.default_branch = "main" + # Mock subprocess to fail (git not available or timeout) + mock_subprocess.side_effect = subprocess.SubprocessError("git command failed") + # Mock HTTP fallback to succeed + mock_response = Mock() + mock_response.status_code = 200 + mock_response.text = "diff --git a/file.py b/file.py\n+new line" + mock_requests.return_value = mock_response + diff = bot.get_pr_diff() + # Verify subprocess was attempted + mock_subprocess.assert_called_once() + # Verify HTTP fallback was used + mock_requests.assert_called_once() + self.assertIn("+new line", diff["diff"]) + self.assertEqual(diff["title"], "Test PR") + + def test_run_skips_deleted_fork_pr(self): + """Test that run() skips PRs from deleted fork repositories""" + bot = CIFailureBot() + mock_workflow_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_workflow_run.actor = mock_actor + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + # Mock PR with deleted fork (head.repo is None) + mock_pr = Mock() + mock_pr.head.repo = None # Simulates deleted fork + self.mock_repo.get_pull.return_value = mock_pr + # Should exit early without exception + with patch("builtins.print") as mock_print: + bot.run() + # Verify skip message was printed + mock_print.assert_any_call("Skipping PR with deleted head repository") + + def test_main_handles_init_exception(self): + """Test that main() handles initialization exceptions gracefully""" + from openwisp_utils.ci_failure_bot import main + + # Missing WORKFLOW_RUN_ID should cause SystemExit during init + with patch.dict( + os.environ, + {"GITHUB_TOKEN": "test-token", "REPOSITORY": "test/repo"}, + clear=True, + ): + with self.assertRaises(SystemExit) as cm: + main() + # Verify it exits with error code 1 + self.assertEqual(cm.exception.code, 1) diff --git a/setup.py b/setup.py index eb635c44..aeb5a9fd 100644 --- a/setup.py +++ b/setup.py @@ -63,6 +63,11 @@ "pypandoc~=1.15", "pypandoc-binary~=1.15", ], + "github_actions": [ + "requests>=2.32.5", + "PyGithub>=2.0.0", + "google-generativeai>=0.8.0", + ], }, classifiers=[ "Development Status :: 5 - Production/Stable", From 520ede63287da618094e2075bad83a3ee03a2cb9 Mon Sep 17 00:00:00 2001 From: Srinath0916 Date: Mon, 2 Feb 2026 14:12:28 +0530 Subject: [PATCH 5/5] [feature] Refactor CI failure bot with better structure and smarter failure handling #524 - Move bot under openwisp_utils/bots/ci_failure - Classify failures (QA/tests/setup) from jobs and logs - Let the LLM suggest only relevant fixes (no hardcoded remediation) - Remove dead code and tighten error handling - Update tests and workflow for the new structure Closes #524 --- .coveragerc | 14 +- .github/workflows/ci-failure-bot.yml | 3 +- .github/workflows/ci.yml | 2 +- docs/developer/reusable-github-utils.rst | 25 +- openwisp_utils/bots/__init__.py | 1 + openwisp_utils/bots/apps.py | 7 + openwisp_utils/bots/ci_failure/__init__.py | 1 + openwisp_utils/bots/ci_failure/bot.py | 483 +++++++++++++ .../bots/ci_failure/tests/__init__.py | 0 .../bots/ci_failure/tests/test_bot.py | 676 ++++++++++++++++++ openwisp_utils/ci_failure_bot.py | 427 ----------- openwisp_utils/tests/test_ci_failure_bot.py | 366 ---------- runtests.py | 1 + tests/openwisp2/settings.py | 1 + tests/test_project/tests/test_test_utils.py | 4 +- 15 files changed, 1197 insertions(+), 814 deletions(-) create mode 100644 openwisp_utils/bots/__init__.py create mode 100644 openwisp_utils/bots/apps.py create mode 100644 openwisp_utils/bots/ci_failure/__init__.py create mode 100644 openwisp_utils/bots/ci_failure/bot.py create mode 100644 openwisp_utils/bots/ci_failure/tests/__init__.py create mode 100644 openwisp_utils/bots/ci_failure/tests/test_bot.py delete mode 100644 openwisp_utils/ci_failure_bot.py delete mode 100644 openwisp_utils/tests/test_ci_failure_bot.py diff --git a/.coveragerc b/.coveragerc index f58f2204..898cc066 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,8 +1,10 @@ [run] -source = openwisp_utils -parallel = true -concurrency = multiprocessing +source = + openwisp_utils +branch = True +parallel = True + +[report] omit = - /*/test* - /*/__init__.py - /*/migrations/* + */migrations/* + */__init__.py diff --git a/.github/workflows/ci-failure-bot.yml b/.github/workflows/ci-failure-bot.yml index 21aaf96c..c5d67b34 100644 --- a/.github/workflows/ci-failure-bot.yml +++ b/.github/workflows/ci-failure-bot.yml @@ -38,7 +38,6 @@ jobs: REPOSITORY: ${{ github.repository }} PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} run: | - set -e # Exit immediately if any command fails echo "Starting CI Failure Bot..." - python -m openwisp_utils.ci_failure_bot + python -m openwisp_utils.bots.ci_failure.bot echo "CI Failure Bot completed successfully" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 04415de3..a163712d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -68,7 +68,7 @@ jobs: run: | pip install -U pip wheel setuptools pip install -U -r requirements-test.txt - pip install -e .[qa,rest,selenium,releaser] + pip install -e .[qa,rest,selenium,releaser,github_actions] pip install ${{ matrix.django-version }} sudo npm install -g prettier diff --git a/docs/developer/reusable-github-utils.rst b/docs/developer/reusable-github-utils.rst index 9ff1bda1..fb9073b3 100644 --- a/docs/developer/reusable-github-utils.rst +++ b/docs/developer/reusable-github-utils.rst @@ -59,8 +59,9 @@ times with a 30 second delay between attempts. CI Failure Bot ~~~~~~~~~~~~~~ -This GitHub workflow automatically analyzes failed CI builds and provides -intelligent feedback to contributors using AI-powered analysis. +This GitHub workflow analyzes failed CI builds when a pull request context +is available and provides intelligent feedback to contributors using +AI-powered analysis. The bot examines build logs, PR changes, and workflow context to generate specific, actionable guidance that helps contributors fix issues quickly. @@ -116,7 +117,7 @@ You can use this workflow in your repository as follows: WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }} REPOSITORY: ${{ github.repository }} PR_NUMBER: ${{ github.event.workflow_run.pull_requests[0].number || '' }} - run: python -m openwisp_utils.ci_failure_bot + run: python -m openwisp_utils.bots.ci_failure.bot This example automatically triggers when the "OpenWISP Utils CI Build" workflow fails, analyzes the failure using Gemini AI, and posts @@ -127,9 +128,8 @@ intelligent feedback to the associated pull request. - **Automatic triggering**: Responds to CI build failures in pull requests - **AI-powered analysis**: Uses Google Gemini to analyze failure logs and provide specific guidance -- **OpenWISP QA integration**: Instructs contributors to use ``pip install - -e .[qa]``, ``./run-qa-checks``, and ``openwisp-qa-format`` for proper - code formatting +- **Targeted remediation**: Suggests QA commands, test commands, or setup + fixes depending on which checks failed (no generic advice) - **Intelligent responses**: Provides direct, actionable feedback based on actual failure context - **Comment deduplication**: Updates existing comments instead of creating @@ -159,6 +159,11 @@ Optional environment variables for customization: **Limitations** +- **Pull request context availability**: When triggered via + ``workflow_run``, GitHub may not always provide an associated pull + request (for example, when builds are triggered by pushes or scheduled + workflows). In these cases, the bot will not post a comment, as no pull + request context is available. - **Optional Gemini API**: Google Gemini API access enhances analysis quality, but the bot provides fallback responses when unavailable - **Privacy consideration**: PR diffs and build logs are sent to Google's @@ -173,10 +178,10 @@ Optional environment variables for customization: .. note:: - If the Gemini API is unavailable, the bot provides a fallback response - with basic troubleshooting guidance. The workflow will fail loudly if - the bot script encounters critical errors, ensuring issues are visible - in GitHub Actions logs. + If the Gemini API is unavailable or analysis fails, the bot provides a + fallback response with standard OpenWISP QA guidance. Critical errors + are logged in GitHub Actions, but the workflow is designed to complete + safely without blocking contributor feedback. GitHub Workflows ---------------- diff --git a/openwisp_utils/bots/__init__.py b/openwisp_utils/bots/__init__.py new file mode 100644 index 00000000..3f09202f --- /dev/null +++ b/openwisp_utils/bots/__init__.py @@ -0,0 +1 @@ +default_app_config = "openwisp_utils.bots.apps.BotsConfig" diff --git a/openwisp_utils/bots/apps.py b/openwisp_utils/bots/apps.py new file mode 100644 index 00000000..a9b283d9 --- /dev/null +++ b/openwisp_utils/bots/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class BotsConfig(AppConfig): + name = "openwisp_utils.bots" + label = "openwisp_utils_bots" + verbose_name = "OpenWISP Bots" diff --git a/openwisp_utils/bots/ci_failure/__init__.py b/openwisp_utils/bots/ci_failure/__init__.py new file mode 100644 index 00000000..56ff30d7 --- /dev/null +++ b/openwisp_utils/bots/ci_failure/__init__.py @@ -0,0 +1 @@ +# CI Failure Bot diff --git a/openwisp_utils/bots/ci_failure/bot.py b/openwisp_utils/bots/ci_failure/bot.py new file mode 100644 index 00000000..a9d837d2 --- /dev/null +++ b/openwisp_utils/bots/ci_failure/bot.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python3 +"""CI Failure Bot - AI-powered analysis of build failures using Gemini""" +import io +import json +import os +import subprocess +import zipfile + +import requests +from github import Github, GithubException + +try: + import google.generativeai as genai +except ImportError: + genai = None + + +class CIFailureBot: + def __init__(self): + self.github_token = os.environ.get("GITHUB_TOKEN") + self.gemini_api_key = os.environ.get("GEMINI_API_KEY") + self.workflow_run_id = os.environ.get("WORKFLOW_RUN_ID") + self.repository_name = os.environ.get("REPOSITORY") + self.pr_number = os.environ.get("PR_NUMBER") + + # Initialize with None values if missing - bot will still try to comment + self.github = None + self.repo = None + + if self.github_token and self.repository_name: + try: + self.github = Github(self.github_token) + self.repo = self.github.get_repo(self.repository_name) + except Exception as e: + print(f"Warning: Could not initialize GitHub client: {e}") + else: + missing = [] + if not self.github_token: + missing.append("GITHUB_TOKEN") + if not self.repository_name: + missing.append("REPOSITORY") + print(f"Warning: Missing environment variables: {', '.join(missing)}") + + if self.gemini_api_key and genai is not None: + try: + genai.configure(api_key=self.gemini_api_key) + self.model_name = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash") + self.model = genai.GenerativeModel(self.model_name) + except Exception as e: + print(f"Warning: Could not initialize Gemini: {e}") + self.model = None + else: + if not self.gemini_api_key: + print( + "Warning: GEMINI_API_KEY not provided, will use fallback responses" + ) + else: + print( + "Warning: google-generativeai not installed, will use fallback responses" + ) + self.model = None + + def get_build_logs(self): + """Get actual build logs and error output from failed jobs""" + if not self.repo: + print("GitHub client not initialized") + return [] + if not self.workflow_run_id: + print("No WORKFLOW_RUN_ID provided") + return [] + try: + workflow_run_id = int(self.workflow_run_id) + workflow_run = self.repo.get_workflow_run(workflow_run_id) + print( + f"Fetching jobs for workflow run {workflow_run_id}: {workflow_run.name}" + ) + jobs = workflow_run.jobs() + build_logs = [] + for job in jobs: + print(f"Job: {job.name} - conclusion: {job.conclusion}") + if job.conclusion == "failure": + # Always add job info with name for classification + job_entry = {"job_name": job.name} + logs_url = job.logs_url + if logs_url: + try: + headers = { + "Authorization": f"token {self.github_token}", + "Accept": "application/vnd.github.v3+json", + } + response = requests.get( + logs_url, headers=headers, timeout=30 + ) + response.raise_for_status() + raw = response.content + if raw[:2] == b"PK": + with zipfile.ZipFile(io.BytesIO(raw)) as zf: + parts = [] + for name in zf.namelist(): + if name.endswith(".txt"): + parts.append( + zf.read(name).decode("utf-8", "replace") + ) + log_text = "\n".join(parts).strip() + else: + log_text = raw.decode("utf-8", "replace") + if len(log_text) > 5000: + log_text = ( + log_text[:2000] + + "\n\n[...middle truncated...]\n\n" + + log_text[-3000:] + ) + job_entry["logs"] = log_text + print(f" Fetched {len(log_text)} chars of logs") + except (requests.RequestException, zipfile.BadZipFile) as e: + print(f" Warning: Could not fetch logs: {e}") + job_entry["logs"] = "" + else: + print(" No logs_url available") + job_entry["logs"] = "" + build_logs.append(job_entry) + # Add step-level failure info + for step in getattr(job, "steps", []): + if step.conclusion == "failure": + print(f" Failed step: {step.name}") + build_logs.append( + { + "job_name": job.name, + "step_name": step.name, + "step_number": step.number, + } + ) + print(f"Total build_logs entries: {len(build_logs)}") + return build_logs + except (GithubException, ValueError) as e: + print(f"Error getting build logs: {e}") + return [] + + def get_pr_diff(self): + """Get the PR diff using local git""" + if not self.repo: + print("GitHub client not initialized") + return None + if not self.pr_number: + return None + try: + pr_num = int(self.pr_number) + except ValueError as e: + print(f"Invalid PR number: {e}") + return None + try: + pr = self.repo.get_pull(pr_num) + except GithubException as e: + print(f"Error fetching PR: {e}") + return None + try: + result = subprocess.run( + ["git", "diff", f"origin/{self.repo.default_branch}"], + capture_output=True, + text=True, + timeout=30, + check=False, + ) + except subprocess.SubprocessError as e: + print(f"Error running git diff: {e}") + return None + if result.returncode != 0 or not result.stdout: + return None + diff_text = result.stdout + if len(diff_text) > 8000: + diff_text = ( + diff_text[:4000] + + "\n\n[...middle truncated...]\n\n" + + diff_text[-4000:] + ) + return { + "title": pr.title, + "body": pr.body or "", + "diff": diff_text, + } + + def classify_failure(self, build_logs): + """Classify failure type based on job names and logs""" + if not build_logs: + return "unknown" + + failure_types = set() + for log_entry in build_logs: + job_name = log_entry.get("job_name", "").lower() + logs = log_entry.get("logs", "").lower() + + # Check for QA/formatting failures + if any(x in job_name for x in ["qa", "lint", "format", "style"]): + failure_types.add("qa") + elif any(x in logs for x in ["flake8", "black", "isort", "pep 8"]): + failure_types.add("qa") + + # Check for test failures + if any(x in job_name for x in ["test", "pytest", "unittest"]): + failure_types.add("tests") + elif any(x in logs for x in ["test failed", "assertion", "pytest"]): + failure_types.add("tests") + + # Check for setup/dependency failures + if any( + x in logs + for x in ["modulenotfounderror", "importerror", "no module named"] + ): + failure_types.add("setup") + + if not failure_types: + return "unknown" + elif len(failure_types) == 1: + return next(iter(failure_types)) + else: + return "mixed" + + def get_failed_jobs_summary(self, build_logs): + """Extract summary of failed jobs and steps""" + failed_jobs = [] + for log_entry in build_logs: + if "job_name" in log_entry: + job_info = {"name": log_entry["job_name"]} + if "step_name" in log_entry: + job_info["step"] = log_entry["step_name"] + failed_jobs.append(job_info) + return failed_jobs + + def analyze_with_gemini(self, build_logs, pr_diff): + """Send context to Gemini for intelligent analysis""" + if not self.model: + return self.fallback_response() + + if not self.repository_name: + return self.fallback_response() + + # Classify failure and get context + failure_type = self.classify_failure(build_logs) + failed_jobs = self.get_failed_jobs_summary(build_logs) + + project_name = self.repository_name.split("/")[-1] + repo_url = f"https://github.com/{self.repository_name}" + build_logs_json = json.dumps(build_logs, indent=2) + failed_jobs_json = json.dumps(failed_jobs, indent=2) + + if pr_diff: + pr_diff_json = json.dumps(pr_diff, indent=2) + else: + pr_diff_json = "No PR associated" + + context = f""" +### ROLE +You are analyzing CI build failures for OpenWISP. Provide diagnosis AND remediation advice. + +### INPUT CONTEXT +1. **Failure Type:** {failure_type} +2. **Failed Jobs:** {failed_jobs_json} +3. **Build Logs:** {build_logs_json} +4. **PR Diff:** {pr_diff_json} +5. **Project:** {project_name} +6. **Repository:** {repo_url} + +### CRITICAL RULES - MUST FOLLOW EXACTLY + +**Rule 1: Suggest ONLY remediation for failures that actually occurred** +- If failure_type != "qa", DO NOT mention QA commands +- If failure_type != "tests", DO NOT mention test commands +- If failure_type != "setup", DO NOT mention dependency commands +- NEVER suggest fixes for checks that passed + +**Rule 2: Remediation by failure type** + +**If failure_type = "qa":** +```bash +pip install -e .[qa] +openwisp-qa-format +./run-qa-checks +``` +Link: https://openwisp.io/docs/stable/developer/contributing.html +DO NOT mention ./runtests + +**If failure_type = "tests":** +```bash +./runtests +``` +Review test logic and fix failing assertions. +DO NOT mention QA commands (pip install -e .[qa], openwisp-qa-format, ./run-qa-checks) + +**If failure_type = "setup":** +Check dependencies and imports. +```bash +pip install -e .[qa] +``` +Focus on ModuleNotFoundError or ImportError. +DO NOT mention formatting or tests unless they also failed. + +**If failure_type = "mixed":** +List each issue type separately with appropriate commands. +Example: "Fix formatting issues first, then address test failures." + +**If failure_type = "unknown":** +```bash +./run-qa-checks +./runtests +``` +General troubleshooting only. + +**Rule 3: Response format** +1. **Technical Diagnosis:** 2-3 sentences stating which files/tests failed and why +2. **Required Actions:** Commands in code blocks, based ONLY on failure_type + +**Rule 4: Prohibited behaviors** +- DO NOT hallucinate failures that didn't occur +- DO NOT suggest "run all checks" when only one type failed +- DO NOT add extra commands beyond what failure_type requires +- DO NOT use vague language like "might need" or "consider" + +### EXAMPLES + +**Example 1 - QA failure only:** +"The file bad_format.py contains PEP 8 violations (missing spaces around operators). +The Build / Python 3.11 job failed due to formatting issues. + +Required Actions: +```bash +pip install -e .[qa] +openwisp-qa-format +./run-qa-checks +``` +See [OpenWISP contributing guidelines]( +https://openwisp.io/docs/stable/developer/contributing.html)." + +**Example 2 - Test failure only:** +"The test test_always_fails in test_fail.py asserts 1 == 2, which is false. +The Build / Python 3.11 job failed. + +Required Actions: +Review and fix the failing test logic: +```bash +./runtests +```" + +**Example 3 - Setup failure only:** +"Import failed: ModuleNotFoundError for 'nonexistent_module' in final_test.py. +The Build / Python 3.11 job failed. + +Required Actions: +Check dependencies and install requirements: +```bash +pip install -e .[qa] +```" + +Analyze the failure and provide diagnosis + remediation following these rules: +""" + try: + response = self.model.generate_content(context) + return response.text.strip() + except Exception as e: + print(f"Error calling Gemini API: {e}") + return self.fallback_response() + + def fallback_response(self): + """Fallback response if Gemini fails""" + return """ +The build failed. Automated analysis is unavailable. + +**Recommended Actions:** +```bash +pip install -e .[qa] +./run-qa-checks +./runtests +``` + +See the [OpenWISP contributing guidelines]( +https://openwisp.io/docs/stable/developer/contributing.html) for more details. +""".strip() + + def post_comment(self, message): + """Post or update comment on PR""" + if not self.pr_number: + print("No PR number, skipping comment") + return + if not self.github or not self.repo: + print("GitHub client not initialized, cannot post comment") + return + marker = "" + message_with_marker = ( + f"{marker}\n🤖 **CI Failure Bot** (AI-powered)\n\n{message}" + ) + try: + pr_num = int(self.pr_number) + except ValueError as e: + print(f"Invalid PR number: {e}") + return + try: + pr = self.repo.get_pull(pr_num) + except GithubException as e: + print(f"Error fetching PR: {e}") + return + try: + existing_comments = pr.get_issue_comments() + for comment in existing_comments: + if marker in comment.body: + print("Bot comment already exists, updating it") + comment.edit(message_with_marker) + return + except GithubException as e: + print(f"Error checking existing comments: {e}") + return # Don't create duplicate if listing fails + try: + pr.create_issue_comment(message_with_marker) + print(f"Posted comment to PR #{pr_num}") + except GithubException as e: + print(f"Error posting comment: {e}") + + def run(self): + """Main execution flow - adapted for workflow_run""" + message = None + should_skip = False + skip_reason = "" + try: + print("CI Failure Bot starting - AI-powered analysis") + + # Early guard for repo + if not self.repo: + print("GitHub client not initialized, cannot proceed") + return + + # Check for skip conditions (but don't return early) + try: + if self.workflow_run_id: + workflow_run = self.repo.get_workflow_run(int(self.workflow_run_id)) + if ( + workflow_run.actor + and "dependabot" in workflow_run.actor.login.lower() + ): + should_skip = True + skip_reason = f"dependabot PR from {workflow_run.actor.login}" + if self.pr_number and not should_skip: + try: + pr_num = int(self.pr_number) + pr = self.repo.get_pull(pr_num) + if pr.head.repo is None: + should_skip = True + skip_reason = "PR with deleted head repository" + elif pr.head.repo.full_name != self.repository_name: + should_skip = True + skip_reason = f"fork PR from {pr.head.repo.full_name}" + except (GithubException, ValueError) as e: + print(f"Warning: Could not check fork status: {e}") + except (GithubException, AttributeError, ValueError) as e: + print(f"Warning: Could not check actor: {e}") + # Determine message based on context + if not self.pr_number: + print("No PR context available - workflow_run without PR") + message = None + elif should_skip: + print(f"Skipping: {skip_reason}") + return + else: + # We have PR context, proceed with analysis + build_logs = self.get_build_logs() + pr_diff = self.get_pr_diff() + if not build_logs and not pr_diff: + print("No build logs or PR diff found, using fallback response") + message = self.fallback_response() + else: + print("Analyzing failure with Gemini AI...") + message = self.analyze_with_gemini(build_logs, pr_diff) + except Exception as e: + print(f"Error in analysis: {e}") + message = self.fallback_response() + # Single comment decision point + if message: + self.post_comment(message) + else: + print("No PR context available, no comment posted (expected)") + print("CI Failure Bot completed successfully") + + +if __name__ == "__main__": + bot = CIFailureBot() + bot.run() diff --git a/openwisp_utils/bots/ci_failure/tests/__init__.py b/openwisp_utils/bots/ci_failure/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openwisp_utils/bots/ci_failure/tests/test_bot.py b/openwisp_utils/bots/ci_failure/tests/test_bot.py new file mode 100644 index 00000000..c7f9f37b --- /dev/null +++ b/openwisp_utils/bots/ci_failure/tests/test_bot.py @@ -0,0 +1,676 @@ +import os +from unittest.mock import Mock, patch + +from django.test import TestCase +from openwisp_utils.bots.ci_failure.bot import CIFailureBot + + +class TestCIFailureBot(TestCase): + def setUp(self): + self.env_vars = { + "GITHUB_TOKEN": "test_token", + "GEMINI_API_KEY": "test_gemini_key", + "WORKFLOW_RUN_ID": "12345", + "REPOSITORY": "openwisp/openwisp-utils", + "PR_NUMBER": "1", + } + self.env_patcher = patch.dict(os.environ, self.env_vars) + self.env_patcher.start() + self.github_patcher = patch("openwisp_utils.bots.ci_failure.bot.Github") + self.genai_patcher = patch("openwisp_utils.bots.ci_failure.bot.genai") + self.mock_github = self.github_patcher.start() + self.mock_genai = self.genai_patcher.start() + self.mock_repo = Mock() + self.mock_github.return_value.get_repo.return_value = self.mock_repo + self.mock_model = Mock() + self.mock_genai.GenerativeModel.return_value = self.mock_model + + def tearDown(self): + if hasattr(self, "env_patcher"): + self.env_patcher.stop() + if hasattr(self, "github_patcher"): + self.github_patcher.stop() + if hasattr(self, "genai_patcher"): + self.genai_patcher.stop() + + def _mock_workflow_run(self, actor="user", jobs=None): + """Helper to create mock workflow run""" + mock_run = Mock() + mock_actor = Mock() + mock_actor.login = actor + mock_run.actor = mock_actor + if jobs is not None: + mock_run.jobs.return_value = jobs + self.mock_repo.get_workflow_run.return_value = mock_run + return mock_run + + def _mock_pr(self, full_name="openwisp/openwisp-utils", deleted_fork=False): + """Helper to create mock PR""" + mock_pr = Mock() + if deleted_fork: + mock_pr.head.repo = None + else: + mock_pr.head.repo = Mock() + mock_pr.head.repo.full_name = full_name + mock_pr.get_issue_comments.return_value = [] + self.mock_repo.get_pull.return_value = mock_pr + return mock_pr + + def _mock_failed_job(self, logs_url="https://api.github.com/logs/123", steps=None): + """Helper to create mock failed job""" + job = Mock() + job.name = "test-job" + job.conclusion = "failure" + job.logs_url = logs_url + job.steps = steps or [] + return job + + def test_init_success(self): + bot = CIFailureBot() + self.assertEqual(bot.github_token, "test_token") + self.assertEqual(bot.gemini_api_key, "test_gemini_key") + self.assertEqual(bot.workflow_run_id, "12345") + self.assertEqual(bot.repository_name, "openwisp/openwisp-utils") + self.assertEqual(bot.pr_number, "1") + self.mock_github.assert_called_once_with("test_token") + self.mock_genai.configure.assert_called_once_with(api_key="test_gemini_key") + + def test_init_without_gemini_key(self): + env_vars_no_gemini = { + "GITHUB_TOKEN": "test_token", + "WORKFLOW_RUN_ID": "12345", + "REPOSITORY": "openwisp/openwisp-utils", + "PR_NUMBER": "1", + } + with patch.dict(os.environ, env_vars_no_gemini, clear=True): + bot = CIFailureBot() + self.assertIsNone(bot.model) + self.mock_genai.configure.assert_not_called() + + def test_classify_failure_qa(self): + bot = CIFailureBot() + logs = [{"job_name": "Build / Python 3.11", "logs": "flake8 error"}] + self.assertEqual(bot.classify_failure(logs), "qa") + + def test_classify_failure_tests(self): + bot = CIFailureBot() + logs = [{"job_name": "unit-tests", "logs": "test failed"}] + self.assertEqual(bot.classify_failure(logs), "tests") + + def test_classify_failure_setup(self): + bot = CIFailureBot() + logs = [ + {"job_name": "build", "logs": "ModuleNotFoundError: No module named 'xyz'"} + ] + self.assertEqual(bot.classify_failure(logs), "setup") + + def test_classify_failure_mixed(self): + bot = CIFailureBot() + logs = [ + {"job_name": "Build / Python 3.11", "logs": "flake8 error"}, + {"job_name": "Build / Python 3.11", "logs": "test failed"}, + ] + self.assertEqual(bot.classify_failure(logs), "mixed") + + def test_classify_failure_unknown(self): + bot = CIFailureBot() + logs = [{"job_name": "unknown-job", "logs": "some error"}] + self.assertEqual(bot.classify_failure(logs), "unknown") + + @patch("openwisp_utils.bots.ci_failure.bot.requests.get") + def test_get_build_logs_success(self, mock_requests): + bot = CIFailureBot() + mock_workflow_run = Mock() + mock_step = Mock() + mock_step.conclusion = "failure" + mock_step.name = "Run tests" + mock_step.number = 1 + mock_job = self._mock_failed_job(steps=[mock_step]) + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + mock_response = Mock() + mock_response.content = b"Error: Test failed at line 42\n" * 1000 + mock_response.raise_for_status = Mock() + mock_requests.return_value = mock_response + logs = bot.get_build_logs() + self.assertEqual(len(logs), 2) + self.assertIn("job_name", logs[0]) + self.assertIn("logs", logs[0]) + self.assertEqual(logs[1]["step_name"], "Run tests") + + @patch("openwisp_utils.bots.ci_failure.bot.requests.get") + def test_get_build_logs_zip_format(self, mock_requests): + """Test ZIP-encoded log extraction""" + import io + import zipfile + + bot = CIFailureBot() + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file: + zip_file.writestr("failed_job.txt", "Error: test failed\nstack trace here") + zip_content = zip_buffer.getvalue() + mock_workflow_run = Mock() + step = Mock() + step.name = "failing-step" + step.conclusion = "failure" + step.number = 1 + mock_job = self._mock_failed_job(steps=[step]) + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + mock_response = Mock() + mock_response.content = zip_content + mock_response.raise_for_status = Mock() + mock_requests.return_value = mock_response + logs = bot.get_build_logs() + self.assertEqual(len(logs), 2) + self.assertIn("Error: test failed", logs[0]["logs"]) + + @patch("openwisp_utils.bots.ci_failure.bot.requests.get") + def test_get_build_logs_error(self, mock_requests): + """Test network error during log fetch""" + import requests + + bot = CIFailureBot() + mock_workflow_run = Mock() + mock_job = self._mock_failed_job() + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + mock_requests.side_effect = requests.RequestException("Network error") + logs = bot.get_build_logs() + self.assertEqual(len(logs), 1) + self.assertEqual(logs[0]["logs"], "") + + @patch("openwisp_utils.bots.ci_failure.bot.subprocess.run") + def test_get_pr_diff_success(self, mock_subprocess): + bot = CIFailureBot() + mock_pr = Mock() + mock_pr.title = "Test PR" + mock_pr.body = "Test description" + self.mock_repo.get_pull.return_value = mock_pr + mock_subprocess.return_value.returncode = 0 + mock_subprocess.return_value.stdout = "diff --git a/test.py b/test.py\n" + ( + "line\n" * 2000 + ) + diff_data = bot.get_pr_diff() + self.assertEqual(diff_data["title"], "Test PR") + self.assertEqual(diff_data["body"], "Test description") + self.assertIn("[...middle truncated...]", diff_data["diff"]) + + @patch("openwisp_utils.bots.ci_failure.bot.subprocess.run") + def test_get_pr_diff_error(self, mock_subprocess): + """Test git subprocess failure""" + import subprocess + + bot = CIFailureBot() + mock_pr = Mock() + mock_pr.title = "Test" + mock_pr.body = "Desc" + self.mock_repo.get_pull.return_value = mock_pr + self.mock_repo.default_branch = "main" + mock_subprocess.side_effect = subprocess.SubprocessError("git failed") + diff = bot.get_pr_diff() + self.assertIsNone(diff) + + def test_analyze_with_gemini_success(self): + bot = CIFailureBot() + bot.model = self.mock_model + mock_response = Mock() + mock_response.text = """The file bad_format.py contains PEP 8 violations. + +Required Actions: +```bash +pip install -e .[qa] +openwisp-qa-format +```""" + self.mock_model.generate_content.return_value = mock_response + build_logs = [{"job_name": "Build / Python 3.11", "logs": "flake8 error"}] + pr_diff = {"title": "Test", "diff": "diff content"} + result = bot.analyze_with_gemini(build_logs, pr_diff) + self.assertIn("PEP 8", result) + self.assertIn("pip install", result) + self.mock_model.generate_content.assert_called_once() + + def test_analyze_with_gemini_fallback(self): + bot = CIFailureBot() + bot.model = self.mock_model + self.mock_model.generate_content.side_effect = Exception("API Error") + result = bot.analyze_with_gemini([], None) + self.assertIn("Automated analysis", result) + self.assertIn("pip install", result) + + def test_post_comment_create(self): + bot = CIFailureBot() + mock_pr = Mock() + mock_user = Mock() + mock_user.login = "github-actions[bot]" + self.mock_github.return_value.get_user.return_value = mock_user + self.mock_repo.get_pull.return_value = mock_pr + mock_pr.get_issue_comments.return_value = [] + bot.post_comment("Test message") + mock_pr.create_issue_comment.assert_called_once() + call_args = mock_pr.create_issue_comment.call_args[0][0] + self.assertIn("", call_args) + self.assertIn("Test message", call_args) + + def test_post_comment_update_existing(self): + bot = CIFailureBot() + mock_pr = Mock() + mock_user = Mock() + mock_user.login = "github-actions[bot]" + self.mock_github.return_value.get_user.return_value = mock_user + self.mock_repo.get_pull.return_value = mock_pr + mock_comment = Mock() + mock_comment.user.login = "github-actions[bot]" + mock_comment.body = "\nOld message" + mock_pr.get_issue_comments.return_value = [mock_comment] + bot.post_comment("New message") + mock_comment.edit.assert_called_once() + mock_pr.create_issue_comment.assert_not_called() + + def test_run_skips_dependabot(self): + bot = CIFailureBot() + self._mock_workflow_run(actor="dependabot[bot]") + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call("Skipping: dependabot PR from dependabot[bot]") + + @patch("openwisp_utils.bots.ci_failure.bot.subprocess.run") + @patch("openwisp_utils.bots.ci_failure.bot.requests.get") + def test_run_full_workflow(self, mock_requests, mock_subprocess): + bot = CIFailureBot() + mock_job = self._mock_failed_job() + self._mock_workflow_run(jobs=[mock_job]) + mock_response = Mock() + mock_response.content = b"Build failed" + mock_response.raise_for_status = Mock() + mock_requests.return_value = mock_response + + # Mock subprocess for PR diff + mock_subprocess.return_value.returncode = 0 + mock_subprocess.return_value.stdout = "diff --git a/test.py" + + mock_gemini_response = Mock() + mock_gemini_response.text = "Analysis: Build failed due to syntax error" + self.mock_model.generate_content.return_value = mock_gemini_response + mock_pr = self._mock_pr() + bot.run() + mock_pr.create_issue_comment.assert_called_once() + + def test_run_skips_fork_pr(self): + """Test skipping fork PR""" + bot = CIFailureBot() + self._mock_workflow_run(actor="contributor") + self.mock_repo.full_name = "openwisp/openwisp-utils" + mock_pr = self._mock_pr(full_name="contributor/openwisp-utils") + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call( + "Skipping: fork PR from contributor/openwisp-utils" + ) + mock_pr.create_issue_comment.assert_not_called() + + def test_run_skips_deleted_fork_pr(self): + """Test skipping PR from deleted fork""" + bot = CIFailureBot() + self._mock_workflow_run() + mock_pr = self._mock_pr(deleted_fork=True) + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call("Skipping: PR with deleted head repository") + mock_pr.create_issue_comment.assert_not_called() + + def test_run_fork_status_exception(self): + """Test fork status check exception""" + from github import GithubException + + bot = CIFailureBot() + mock_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_run.actor = mock_actor + self.mock_repo.get_workflow_run.return_value = mock_run + self.mock_repo.get_pull.side_effect = GithubException(404, "Not found") + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call( + 'Warning: Could not check fork status: 404 "Not found"' + ) + + def test_run_actor_check_exception(self): + """Test actor check exception""" + from github import GithubException + + bot = CIFailureBot() + self.mock_repo.get_workflow_run.side_effect = GithubException( + 401, "Unauthorized" + ) + mock_pr = Mock() + mock_pr.get_issue_comments.return_value = [] + self.mock_repo.get_pull.return_value = mock_pr + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call( + 'Warning: Could not check actor: 401 "Unauthorized"' + ) + + def test_run_no_pr_context(self): + """Test run with no PR context""" + bot = CIFailureBot() + bot.pr_number = None + mock_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_run.actor = mock_actor + self.mock_repo.get_workflow_run.return_value = mock_run + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call( + "No PR context available - workflow_run without PR" + ) + + def test_run_no_logs_no_diff_fallback(self): + """Test fallback when no logs or diff available""" + bot = CIFailureBot() + mock_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_run.actor = mock_actor + mock_run.jobs.return_value = [] + self.mock_repo.get_workflow_run.return_value = mock_run + mock_pr = Mock() + mock_pr.head.repo.full_name = "openwisp/openwisp-utils" + mock_pr.get_issue_comments.return_value = [] + self.mock_repo.get_pull.return_value = mock_pr + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call( + "No build logs or PR diff found, using fallback response" + ) + + def test_run_outer_exception(self): + """Test outer exception handling in run""" + bot = CIFailureBot() + with patch.object(bot, "get_build_logs", side_effect=Exception("boom")): + mock_run = Mock() + mock_actor = Mock() + mock_actor.login = "user" + mock_run.actor = mock_actor + self.mock_repo.get_workflow_run.return_value = mock_run + mock_pr = Mock() + mock_pr.head.repo.full_name = "openwisp/openwisp-utils" + mock_pr.get_issue_comments.return_value = [] + self.mock_repo.get_pull.return_value = mock_pr + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call("Error in analysis: boom") + + def test_run_no_repo(self): + """Test run when repo is None""" + bot = CIFailureBot() + bot.repo = None + with patch("builtins.print") as mock_print: + bot.run() + mock_print.assert_any_call("GitHub client not initialized, cannot proceed") + + def test_get_build_logs_no_repo(self): + """Test get_build_logs when repo is None""" + bot = CIFailureBot() + bot.repo = None + logs = bot.get_build_logs() + self.assertEqual(logs, []) + + def test_get_build_logs_no_workflow_run_id(self): + """Test get_build_logs when workflow_run_id is None""" + bot = CIFailureBot() + bot.workflow_run_id = None + logs = bot.get_build_logs() + self.assertEqual(logs, []) + + def test_get_pr_diff_no_repo(self): + """Test get_pr_diff when repo is None""" + bot = CIFailureBot() + bot.repo = None + diff = bot.get_pr_diff() + self.assertIsNone(diff) + + def test_get_pr_diff_no_pr_number(self): + """Test get_pr_diff when pr_number is None""" + bot = CIFailureBot() + bot.pr_number = None + diff = bot.get_pr_diff() + self.assertIsNone(diff) + + def test_post_comment_no_pr_number(self): + """Test post_comment when pr_number is None""" + bot = CIFailureBot() + bot.pr_number = None + bot.post_comment("Test message") + + def test_post_comment_no_github_client(self): + """Test post_comment when GitHub client is None""" + bot = CIFailureBot() + bot.github = None + bot.repo = None + bot.post_comment("Test message") + + def test_analyze_with_gemini_no_model(self): + """Test analyze_with_gemini when model is None""" + bot = CIFailureBot() + bot.model = None + result = bot.analyze_with_gemini([], None) + self.assertIn("Automated analysis", result) + + def test_analyze_with_gemini_no_repository_name(self): + """Test analyze_with_gemini when repository_name is None""" + bot = CIFailureBot() + bot.repository_name = None + result = bot.analyze_with_gemini([], None) + self.assertIn("Automated analysis", result) + + def test_classify_failure_qa_formatting(self): + """Test QA classification with formatting keywords""" + bot = CIFailureBot() + logs = [{"job_name": "lint", "logs": "formatting error"}] + self.assertEqual(bot.classify_failure(logs), "qa") + + def test_classify_failure_tests_pytest(self): + """Test classification with pytest keyword""" + bot = CIFailureBot() + logs = [{"job_name": "pytest", "logs": "test error"}] + self.assertEqual(bot.classify_failure(logs), "tests") + + def test_get_failed_jobs_summary_with_steps(self): + """Test get_failed_jobs_summary extracts step info""" + bot = CIFailureBot() + build_logs = [ + {"job_name": "test-job", "step_name": "Run tests", "step_number": 1}, + {"job_name": "lint-job", "logs": "error"}, + ] + summary = bot.get_failed_jobs_summary(build_logs) + self.assertEqual(len(summary), 2) + self.assertEqual(summary[0]["name"], "test-job") + self.assertEqual(summary[0]["step"], "Run tests") + + def test_get_build_logs_no_logs_url(self): + """Test get_build_logs when job has no logs_url""" + bot = CIFailureBot() + mock_workflow_run = Mock() + mock_job = self._mock_failed_job(logs_url=None) + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + logs = bot.get_build_logs() + self.assertEqual(len(logs), 1) + self.assertEqual(logs[0]["logs"], "") + + @patch("openwisp_utils.bots.ci_failure.bot.subprocess.run") + def test_get_pr_diff_large_diff_truncation(self, mock_subprocess): + """Test PR diff truncation for large diffs""" + bot = CIFailureBot() + mock_pr = Mock() + mock_pr.title = "Test" + mock_pr.body = "Desc" + self.mock_repo.get_pull.return_value = mock_pr + mock_subprocess.return_value.returncode = 0 + # Create a diff larger than 8000 chars + mock_subprocess.return_value.stdout = "line\n" * 2000 + diff = bot.get_pr_diff() + self.assertIn("[...middle truncated...]", diff["diff"]) + + def test_init_missing_github_token(self): + """Test initialization with missing GITHUB_TOKEN""" + env_vars = { + "REPOSITORY": "repo", + "WORKFLOW_RUN_ID": "12345", + "PR_NUMBER": "1", + } + with patch.dict(os.environ, env_vars, clear=True): + bot = CIFailureBot() + self.assertIsNone(bot.github) + self.assertIsNone(bot.repo) + + def test_init_missing_repository(self): + """Test initialization with missing REPOSITORY""" + env_vars = { + "GITHUB_TOKEN": "token", + "WORKFLOW_RUN_ID": "12345", + "PR_NUMBER": "1", + } + with patch.dict(os.environ, env_vars, clear=True): + bot = CIFailureBot() + self.assertIsNone(bot.github) + self.assertIsNone(bot.repo) + + @patch("openwisp_utils.bots.ci_failure.bot.Github") + def test_init_github_exception(self, mock_github_class): + """Test GitHub initialization exception""" + from github import GithubException + + mock_github_class.side_effect = GithubException(401, "Unauthorized") + env_vars = { + "GITHUB_TOKEN": "token", + "REPOSITORY": "repo", + "WORKFLOW_RUN_ID": "12345", + "PR_NUMBER": "1", + } + with patch.dict(os.environ, env_vars, clear=True): + bot = CIFailureBot() + self.assertIsNone(bot.github) + self.assertIsNone(bot.repo) + + @patch("openwisp_utils.bots.ci_failure.bot.genai") + @patch("openwisp_utils.bots.ci_failure.bot.Github") + def test_init_gemini_exception(self, mock_github_class, mock_genai): + """Test Gemini initialization exception""" + mock_genai.configure.side_effect = Exception("API key invalid") + env_vars = { + "GITHUB_TOKEN": "token", + "GEMINI_API_KEY": "key", + "REPOSITORY": "repo", + "WORKFLOW_RUN_ID": "12345", + "PR_NUMBER": "1", + } + with patch.dict(os.environ, env_vars, clear=True): + bot = CIFailureBot() + self.assertIsNone(bot.model) + + @patch("openwisp_utils.bots.ci_failure.bot.requests.get") + def test_get_build_logs_zip_with_txt_extension(self, mock_requests): + """Test ZIP file with .txt extension""" + import io + import zipfile + + bot = CIFailureBot() + zip_buffer = io.BytesIO() + with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file: + zip_file.writestr("job.txt", "Error log content") + zip_content = zip_buffer.getvalue() + mock_workflow_run = Mock() + mock_job = self._mock_failed_job() + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + mock_response = Mock() + mock_response.content = zip_content + mock_response.raise_for_status = Mock() + mock_requests.return_value = mock_response + logs = bot.get_build_logs() + self.assertGreater(len(logs), 0) + + @patch("openwisp_utils.bots.ci_failure.bot.requests.get") + def test_get_build_logs_with_failed_steps(self, mock_requests): + """Test build logs with failed steps""" + bot = CIFailureBot() + mock_workflow_run = Mock() + step = Mock() + step.name = "failing-step" + step.conclusion = "failure" + step.number = 1 + mock_job = self._mock_failed_job(steps=[step]) + mock_workflow_run.jobs.return_value = [mock_job] + self.mock_repo.get_workflow_run.return_value = mock_workflow_run + mock_response = Mock() + mock_response.content = b"Error logs" + mock_response.raise_for_status = Mock() + mock_requests.return_value = mock_response + logs = bot.get_build_logs() + # Should have job log + step info + self.assertGreater(len(logs), 1) + + def test_get_pr_diff_invalid_pr_number(self): + """Test get_pr_diff with invalid PR number""" + bot = CIFailureBot() + bot.pr_number = "invalid_string" + diff = bot.get_pr_diff() + self.assertIsNone(diff) + + def test_get_pr_diff_github_exception(self): + """Test get_pr_diff with GithubException""" + from github import GithubException + + bot = CIFailureBot() + self.mock_repo.get_pull.side_effect = GithubException(404, "Not found") + diff = bot.get_pr_diff() + self.assertIsNone(diff) + + def test_post_comment_invalid_pr_number(self): + """Test post_comment with invalid PR number""" + bot = CIFailureBot() + bot.pr_number = "invalid" + bot.post_comment("Test message") + + def test_post_comment_github_exception_fetch_pr(self): + """Test post_comment with GithubException when fetching PR""" + from github import GithubException + + bot = CIFailureBot() + self.mock_repo.get_pull.side_effect = GithubException(403, "Forbidden") + bot.post_comment("Test message") + + def test_post_comment_github_exception_get_comments(self): + """Test post_comment with GithubException when getting comments""" + from github import GithubException + + bot = CIFailureBot() + mock_pr = Mock() + self.mock_repo.get_pull.return_value = mock_pr + mock_pr.get_issue_comments.side_effect = GithubException(500, "Server error") + bot.post_comment("Test message") + + def test_post_comment_github_exception_create_comment(self): + """Test post_comment with GithubException when creating comment""" + from github import GithubException + + bot = CIFailureBot() + mock_pr = Mock() + self.mock_repo.get_pull.return_value = mock_pr + mock_pr.get_issue_comments.return_value = [] + mock_pr.create_issue_comment.side_effect = GithubException(403, "Forbidden") + bot.post_comment("Test message") + + def test_get_failed_jobs_summary_job_name_only(self): + """Test get_failed_jobs_summary with job_name only""" + bot = CIFailureBot() + build_logs = [{"job_name": "test-job", "logs": "error"}] + summary = bot.get_failed_jobs_summary(build_logs) + self.assertEqual(len(summary), 1) + self.assertEqual(summary[0]["name"], "test-job") + self.assertNotIn("step", summary[0]) diff --git a/openwisp_utils/ci_failure_bot.py b/openwisp_utils/ci_failure_bot.py deleted file mode 100644 index 7f90e310..00000000 --- a/openwisp_utils/ci_failure_bot.py +++ /dev/null @@ -1,427 +0,0 @@ -#!/usr/bin/env python3 -"""CI Failure Bot - AI-powered analysis of build failures using Gemini""" - -import io -import json -import os -import sys -import zipfile - -import google.generativeai as genai -import requests -from github import Github, GithubException - - -class CIFailureBot: - def __init__(self): - self.github_token = os.environ.get("GITHUB_TOKEN") - self.gemini_api_key = os.environ.get("GEMINI_API_KEY") - self.workflow_run_id = os.environ.get("WORKFLOW_RUN_ID") - self.repository_name = os.environ.get("REPOSITORY") - self.pr_number = os.environ.get("PR_NUMBER") - if not all( - [ - self.github_token, - self.workflow_run_id, - self.repository_name, - ] - ): - missing = [] - if not self.github_token: - missing.append("GITHUB_TOKEN") - if not self.workflow_run_id: - missing.append("WORKFLOW_RUN_ID") - if not self.repository_name: - missing.append("REPOSITORY") - print(f"Missing required environment variables: {', '.join(missing)}") - sys.exit(1) - try: - self.workflow_run_id = int(self.workflow_run_id) - except ValueError: - print("Invalid WORKFLOW_RUN_ID: must be numeric") - sys.exit(1) - self.github = Github(self.github_token) - self.repo = self.github.get_repo(self.repository_name) - # Initialize Gemini client with new API (optional) - if self.gemini_api_key: - genai.configure(api_key=self.gemini_api_key) - self.model_name = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash") - self.model = genai.GenerativeModel(self.model_name) - else: - print("Warning: GEMINI_API_KEY not provided, will use fallback responses") - self.model = None - - def get_build_logs(self): - """Get actual build logs and error output from failed jobs""" - try: - workflow_run = self.repo.get_workflow_run(self.workflow_run_id) - jobs = workflow_run.jobs() - build_logs = [] - for job in jobs: - if job.conclusion == "failure": - # Get job logs URL and fetch content - logs_url = job.logs_url - if logs_url: - headers = { - "Authorization": f"token {self.github_token}", - "Accept": "application/vnd.github.v3+json", - } - response = requests.get(logs_url, headers=headers, timeout=30) - response.raise_for_status() - # Handle ZIP archive response from GitHub Actions logs API - raw = response.content - if raw[:2] == b"PK": # ZIP file signature - with zipfile.ZipFile(io.BytesIO(raw)) as zf: - parts = [] - for name in zf.namelist(): - if name.endswith(".txt"): - parts.append( - zf.read(name).decode("utf-8", "replace") - ) - log_text = "\n".join(parts).strip() - else: - log_text = raw.decode("utf-8", "replace") - if len(log_text) > 5000: - # Take first 2000 and last 3000 chars for better context - log_text = ( - log_text[:2000] - + "\n\n[...middle truncated...]\n\n" - + log_text[-3000:] - ) - build_logs.append( - { - "job_name": job.name, - "logs": log_text, - } - ) - # Also get step details - for step in job.steps: - if step.conclusion == "failure": - build_logs.append( - { - "job_name": job.name, - "step_name": step.name, - "step_number": step.number, - } - ) - return build_logs - except (GithubException, requests.RequestException, ValueError) as e: - print(f"Error getting build logs: {e}") - return [] - - def get_pr_diff(self): - """Get the PR diff/changes if PR exists""" - if not self.pr_number or self.pr_number.strip() == "": - return None - try: - pr_num = int(self.pr_number) - pr = self.repo.get_pull(pr_num) - # Use git diff instead of HTTP request for efficiency - try: - import subprocess - - # Validate branch name to prevent injection - default_branch = self.repo.default_branch - if ( - not default_branch - or not default_branch.replace("-", "") - .replace("_", "") - .replace("/", "") - .isalnum() - ): - raise ValueError("Invalid branch name") - result = subprocess.run( - ["git", "diff", f"origin/{default_branch}"], - capture_output=True, - text=True, - timeout=30, - ) - if result.returncode == 0 and result.stdout.strip(): - diff_text = result.stdout - else: - # Fallback to HTTP if git diff fails or returns empty - diff_url = pr.diff_url - headers = { - "Authorization": f"token {self.github_token}", - "Accept": "application/vnd.github.v3.diff", - } - response = requests.get(diff_url, headers=headers, timeout=30) - if response.status_code == 200: - diff_text = response.text - else: - return None - except (subprocess.SubprocessError, FileNotFoundError, ValueError): - # Fallback to HTTP if git is not available - diff_url = pr.diff_url - headers = { - "Authorization": f"token {self.github_token}", - "Accept": "application/vnd.github.v3.diff", - } - response = requests.get(diff_url, headers=headers, timeout=30) - if response.status_code == 200: - diff_text = response.text - else: - return None - if len(diff_text) > 8000: - # Take first 4000 and last 4000 chars for context - diff_text = ( - diff_text[:4000] - + "\n\n[...middle truncated...]\n\n" - + diff_text[-4000:] - ) - return { - "title": pr.title, - "body": pr.body or "", - "diff": diff_text, - } - except (GithubException, requests.RequestException, ValueError) as e: - print(f"Error getting PR diff: {e}") - return None - - def get_workflow_yaml(self): - """Get the workflow YAML configuration""" - try: - workflow_run = self.repo.get_workflow_run(self.workflow_run_id) - workflow_path = workflow_run.path - # Get workflow file content from the commit that ran - workflow_file = self.repo.get_contents( - workflow_path, ref=workflow_run.head_sha - ) - return workflow_file.decoded_content.decode("utf-8") - except GithubException as e: - print(f"Error getting workflow YAML: {e}") - return None - - def analyze_with_gemini(self, build_logs, pr_diff, workflow_yaml): - """Send context to Gemini for intelligent analysis""" - # Prepare context for Gemini - project_name = self.repository_name.split("/")[-1] - repo_url = f"https://github.com/{self.repository_name}" - # Use dynamic branch detection instead of hardcoded "master" - default_branch = self.repo.default_branch - qa_checks_url = f"{repo_url}/blob/{default_branch}/openwisp-qa-check" - runtests_url = f"{repo_url}/blob/{default_branch}/runtests" - # Build the context string with proper line breaks - build_logs_json = json.dumps(build_logs, indent=2) - if pr_diff: - pr_diff_json = json.dumps(pr_diff, indent=2) - else: - pr_diff_json = "No PR associated" - # Gemini prompt with EXPLICIT OpenWISP QA commands - context = f""" -### CRITICAL: YOU MUST USE OPENWISP QA COMMANDS ONLY - -For ANY code quality issues, you MUST recommend these EXACT commands: -1. pip install -e .[qa] -2. ./run-qa-checks -3. openwisp-qa-format - -DO NOT recommend: black, isort, flake8 individually -ALWAYS use the OpenWISP QA workflow above. - -### ROLE -You are the "Automated Maintainer Gatekeeper." Your goal is to analyze Pull Request (PR) -build failures and provide direct, technically accurate, and no-nonsense feedback to contributors. - -### INPUT CONTEXT PROVIDED -1. **Build Output/Logs:** {build_logs_json} -2. **YAML Workflow:** {workflow_yaml or "Not available"} -3. **PR Diff:** {pr_diff_json} -4. **Project Name:** {project_name} -5. **Repository:** {repo_url} -6. **run-qa-checks:** {qa_checks_url} -7. **runtests:** {runtests_url} - -### MANDATORY QA RESPONSE FORMAT -If you detect code formatting/style issues, respond EXACTLY like this: - -**Required Actions:** -- Install QA tools: `pip install -e .[qa]` -- Run `./run-qa-checks` to see all issues -- Run `openwisp-qa-format` to automatically fix formatting -- Run `./runtests` locally to verify all tests pass - -### TASK -Analyze the provided context to determine why the build failed. -Categorize the failure and respond according to the "Tone Guidelines" below. - -### PR REQUIREMENTS CHECKLIST -Before providing feedback, verify these requirements: -- Does the PR reference any issue? If so, is it correctly mentioned in the commit description? -- If the PR is a fix, change or feature it must include automated tests or it will be rejected. -- Does the CI build fail? If yes, report the key reasons to the contributor - and if the solution is obvious provide it, if finding the solution is not - obvious and requires more than 30% additional computation just report the key reasons. -- If QA checks are failing, instruct the contributor to install QA tools with - `pip install -e .[qa]` and run `./run-qa-checks` to see all issues, then use - `openwisp-qa-format` to automatically fix formatting issues. Reference the - [openwisp contributing guidelines](https://openwisp.io/docs/stable/developer/contributing.html) - for complete setup instructions. -- Is the PR addressing changes to the user interface? If yes, check if a selenium - browser test is present and if the PR description attaches screenshots or screencasts, - if not, report this to the user and ask to provide both -- If this PR adds a new feature or notably changes an existing documented feature, - check if documentation updates are present and if not report it -- Do you detect coderabbitai or copilot reviews asking for changes after the latest commit? - If so, ask the user to follow up with those review comments one by one - -### TONE GUIDELINES -- **Direct & Honest:** Do not use "fluff" or overly polite corporate language. -- **Firm Standards:** If a PR is low-effort, spammy, or fails to follow basic instructions, - state that clearly. -- **Action-Oriented:** Provide the exact command or file change needed to fix the error, - unless the PR is spammy, in which case we should just declare the PR as potential SPAM - and ask maintainers to manually review it. - -### RESPONSE STRUCTURE -1. **Status Summary:** A one-sentence blunt assessment of the failure. -2. **Technical Diagnosis:** - - Identify the specific line/test that failed. - - Explain *why* it failed. -3. **Required Action:** Provide a code block or specific steps the contributor must take. -4. **Quality Warning (If Applicable):** If the PR appears to be "spam" - (e.g., trivial README changes, AI-generated nonsense, or repeated basic errors), - include a firm statement that such contributions are a drain on project resources - and ping the maintainers asking them for manual review. - -### EXAMPLE RESPONSE STYLE -The build failed because you neglected to update the test suite to match your logic changes. - -**Required Actions:** -- Update tests/logic_test.py to cover your new functionality -- Install QA tools: `pip install -e .[qa]` -- Run `./run-qa-checks` to see all issues -- Run `openwisp-qa-format` to automatically fix formatting -- Run `./runtests` locally to verify all tests pass - -**Missing Requirements:** -- [ ] Automated tests for new functionality -- [ ] Code follows OpenWISP style guidelines (use openwisp-qa-format) - -We prioritize high-quality, ready-to-merge code. Please ensure you run local QA checks before pushing. - -Analyze the failure and provide your response: -""" - try: - # Check if Gemini is available - if not self.model: - return self.fallback_response() - # Use Gemini client API - response = self.model.generate_content(context) - return response.text - except (ValueError, ConnectionError, Exception) as e: - print(f"Error calling Gemini API: {e}") - return self.fallback_response() - - def fallback_response(self): - """Fallback response if Gemini fails""" - return """ -## CI Build Failed - -The automated analysis is temporarily unavailable. Please check the CI logs above for specific error details. - -**OpenWISP QA Workflow:** -1. Install QA tools: `pip install -e .[qa]` -2. Run `./run-qa-checks` to see all issues -3. Run `openwisp-qa-format` to automatically fix formatting -4. Run `./runtests` locally to verify all tests pass - -**Common Issues:** -- Code style violations (black, flake8, isort) -- Missing or failing tests -- Import/dependency problems - -See: https://openwisp.io/docs/dev/developer/contributing.html -""" - - def post_comment(self, message): - """Post or update comment on PR""" - if not self.pr_number or self.pr_number.strip() == "": - print("No PR number, skipping comment") - return - # Add consistent marker for deduplication - marker = "" - message_with_marker = f"{marker}\n{message}" - try: - pr_num = int(self.pr_number) - pr = self.repo.get_pull(pr_num) - # Check for existing bot comments to avoid duplicates - bot_login = self.github.get_user().login - existing_comments = pr.get_issue_comments() - for comment in existing_comments: - if comment.user.login == bot_login and marker in comment.body: - print("Bot comment already exists, updating it") - comment.edit(message_with_marker) - return - # No existing comment, create new one - pr.create_issue_comment(message_with_marker) - print(f"Posted comment to PR #{pr_num}") - except (GithubException, ValueError) as e: - print(f"Error posting comment: {e}") - - def run(self): - """Main execution flow""" - try: - print("CI Failure Bot starting - AI-powered analysis") - # Security checks: Skip if this is a dependabot PR or fork PR - try: - workflow_run = self.repo.get_workflow_run(self.workflow_run_id) - if ( - workflow_run.actor - and "dependabot" in workflow_run.actor.login.lower() - ): - print(f"Skipping dependabot PR from {workflow_run.actor.login}") - return - # Skip fork PRs for security (avoid sending external code to AI) - if self.pr_number and self.pr_number.strip(): - try: - pr_num = int(self.pr_number) - pr = self.repo.get_pull(pr_num) - # Handle deleted fork repositories - if pr.head.repo is None: - print("Skipping PR with deleted head repository") - return - if pr.head.repo.full_name != self.repository_name: - print(f"Skipping fork PR from {pr.head.repo.full_name}") - return - except (GithubException, ValueError) as e: - print(f"Warning: Could not check fork status: {e}") - except (GithubException, AttributeError) as e: - print(f"Warning: Could not check actor: {e}") - # Get all context - build_logs = self.get_build_logs() - pr_diff = self.get_pr_diff() - workflow_yaml = self.get_workflow_yaml() - if not build_logs: - print("No build logs found") - return - print("Analyzing failure with Gemini AI...") - # Get AI analysis - ai_response = self.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) - # Post intelligent comment - self.post_comment(ai_response) - print("CI Failure Bot completed successfully") - except Exception as e: - print(f"CRITICAL ERROR in CI Failure Bot: {e}") - print(f"Error type: {type(e).__name__}") - import traceback - - traceback.print_exc() - sys.exit(1) - - -def main(): - """Entry point for the CI failure bot""" - try: - bot = CIFailureBot() - bot.run() - except Exception as e: - print(f"FATAL: CI Failure Bot crashed: {e}") - import traceback - - traceback.print_exc() - sys.exit(1) - - -if __name__ == "__main__": - main() diff --git a/openwisp_utils/tests/test_ci_failure_bot.py b/openwisp_utils/tests/test_ci_failure_bot.py deleted file mode 100644 index 7cc6399d..00000000 --- a/openwisp_utils/tests/test_ci_failure_bot.py +++ /dev/null @@ -1,366 +0,0 @@ -import os -import subprocess -from unittest.mock import Mock, patch - -from django.test import TestCase - -try: - from openwisp_utils.ci_failure_bot import CIFailureBot -except ImportError: - CIFailureBot = None - - -class TestCIFailureBot(TestCase): - def setUp(self): - if CIFailureBot is None: - self.skipTest("CI failure bot script not available") - self.env_vars = { - "GITHUB_TOKEN": "test_token", - "GEMINI_API_KEY": "test_gemini_key", - "WORKFLOW_RUN_ID": "12345", - "REPOSITORY": "openwisp/openwisp-utils", - "PR_NUMBER": "1", - } - self.env_patcher = patch.dict(os.environ, self.env_vars) - self.env_patcher.start() - self.github_patcher = patch("openwisp_utils.ci_failure_bot.Github") - self.genai_patcher = patch("openwisp_utils.ci_failure_bot.genai") - self.mock_github = self.github_patcher.start() - self.mock_genai = self.genai_patcher.start() - self.mock_repo = Mock() - self.mock_github.return_value.get_repo.return_value = self.mock_repo - self.mock_model = Mock() - self.mock_genai.GenerativeModel.return_value = self.mock_model - - def tearDown(self): - if hasattr(self, "env_patcher"): - self.env_patcher.stop() - if hasattr(self, "github_patcher"): - self.github_patcher.stop() - if hasattr(self, "genai_patcher"): - self.genai_patcher.stop() - - def test_init_success(self): - bot = CIFailureBot() - self.assertEqual(bot.github_token, "test_token") - self.assertEqual(bot.gemini_api_key, "test_gemini_key") - self.assertEqual(bot.workflow_run_id, 12345) - self.assertEqual(bot.repository_name, "openwisp/openwisp-utils") - self.assertEqual(bot.pr_number, "1") - self.mock_github.assert_called_once_with("test_token") - self.mock_genai.configure.assert_called_once_with(api_key="test_gemini_key") - - def test_init_missing_env_vars(self): - with patch.dict(os.environ, {}, clear=True): - with self.assertRaises(SystemExit): - CIFailureBot() - - def test_init_without_gemini_key(self): - env_vars_no_gemini = { - "GITHUB_TOKEN": "test_token", - "WORKFLOW_RUN_ID": "12345", - "REPOSITORY": "openwisp/openwisp-utils", - "PR_NUMBER": "1", - } - with patch.dict(os.environ, env_vars_no_gemini, clear=True): - bot = CIFailureBot() - self.assertIsNone(bot.model) - self.mock_genai.configure.assert_not_called() - - def test_init_invalid_workflow_run_id(self): - with patch.dict(os.environ, {"WORKFLOW_RUN_ID": "invalid"}): - with self.assertRaises(SystemExit): - CIFailureBot() - - def test_init_custom_gemini_model(self): - with patch.dict(os.environ, {"GEMINI_MODEL": "gemini-pro"}): - bot = CIFailureBot() - if bot.model: - self.mock_genai.GenerativeModel.assert_called_with("gemini-pro") - - @patch("openwisp_utils.ci_failure_bot.requests.get") - def test_get_build_logs_success(self, mock_requests): - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_job = Mock() - mock_job.conclusion = "failure" - mock_job.name = "test-job" - mock_job.logs_url = "https://api.github.com/logs/123" - mock_step = Mock() - mock_step.conclusion = "failure" - mock_step.name = "Run tests" - mock_step.number = 1 - mock_job.steps = [mock_step] - mock_workflow_run.jobs.return_value = [mock_job] - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - mock_response = Mock() - mock_response.content = b"Error: Test failed at line 42\n" * 1000 - mock_response.raise_for_status = Mock() - mock_requests.return_value = mock_response - logs = bot.get_build_logs() - self.assertEqual(len(logs), 2) - self.assertIn("job_name", logs[0]) - self.assertIn("logs", logs[0]) - self.assertEqual(logs[1]["step_name"], "Run tests") - - def test_get_build_logs_no_failures(self): - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_job = Mock() - mock_job.conclusion = "success" - mock_workflow_run.jobs.return_value = [mock_job] - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - logs = bot.get_build_logs() - self.assertEqual(logs, []) - - @patch("openwisp_utils.ci_failure_bot.subprocess.run") - @patch("openwisp_utils.ci_failure_bot.requests.get") - def test_get_pr_diff_success(self, mock_requests, mock_subprocess): - bot = CIFailureBot() - mock_pr = Mock() - mock_pr.title = "Test PR" - mock_pr.body = "Test description" - mock_pr.diff_url = "https://github.com/test/diff" - self.mock_repo.get_pull.return_value = mock_pr - # Mock successful git diff - mock_subprocess.return_value.returncode = 0 - mock_subprocess.return_value.stdout = ( - "diff --git a/test.py b/test.py\n" + "line\n" * 1000 - ) - diff_data = bot.get_pr_diff() - self.assertEqual(diff_data["title"], "Test PR") - self.assertEqual(diff_data["body"], "Test description") - self.assertIn("[...middle truncated...]", diff_data["diff"]) - mock_subprocess.assert_called_once() - - def test_get_pr_diff_no_pr_number(self): - bot = CIFailureBot() - bot.pr_number = None - diff_data = bot.get_pr_diff() - self.assertIsNone(diff_data) - - def test_get_workflow_yaml_success(self): - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_workflow_run.path = ".github/workflows/ci.yml" - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - mock_file = Mock() - mock_file.decoded_content = b"name: CI\non: [push]" - self.mock_repo.get_contents.return_value = mock_file - yaml_content = bot.get_workflow_yaml() - self.assertEqual(yaml_content, "name: CI\non: [push]") - - def test_analyze_with_gemini_success(self): - bot = CIFailureBot() - mock_response = Mock() - mock_response.text = "The build failed because of a syntax error." - self.mock_model.generate_content.return_value = mock_response - build_logs = [{"job_name": "test", "logs": "Error: syntax error"}] - pr_diff = {"title": "Test", "diff": "diff content"} - workflow_yaml = "name: CI" - result = bot.analyze_with_gemini(build_logs, pr_diff, workflow_yaml) - if bot.model: - self.assertEqual(result, "The build failed because of a syntax error.") - self.mock_model.generate_content.assert_called_once() - else: - self.assertIn("CI Build Failed", result) - - def test_analyze_with_gemini_api_error(self): - bot = CIFailureBot() - if bot.model: - self.mock_model.generate_content.side_effect = Exception("API Error") - result = bot.analyze_with_gemini([], None, None) - self.assertIn("CI Build Failed", result) - self.assertIn("temporarily unavailable", result) - - def test_post_comment_success(self): - bot = CIFailureBot() - mock_pr = Mock() - mock_user = Mock() - mock_user.login = "github-actions[bot]" - self.mock_github.return_value.get_user.return_value = mock_user - self.mock_repo.get_pull.return_value = mock_pr - mock_pr.get_issue_comments.return_value = [] - bot.post_comment("Test message") - mock_pr.create_issue_comment.assert_called_once() - call_args = mock_pr.create_issue_comment.call_args[0][0] - self.assertIn("", call_args) - self.assertIn("Test message", call_args) - - def test_post_comment_update_existing(self): - bot = CIFailureBot() - mock_pr = Mock() - mock_user = Mock() - mock_user.login = "github-actions[bot]" - self.mock_github.return_value.get_user.return_value = mock_user - self.mock_repo.get_pull.return_value = mock_pr - mock_comment = Mock() - mock_comment.user.login = "github-actions[bot]" - mock_comment.body = "\nOld message" - mock_pr.get_issue_comments.return_value = [mock_comment] - bot.post_comment("New message") - mock_comment.edit.assert_called_once() - mock_pr.create_issue_comment.assert_not_called() - - def test_post_comment_no_pr_number(self): - bot = CIFailureBot() - bot.pr_number = None - with patch("builtins.print") as mock_print: - bot.post_comment("Test message") - mock_print.assert_any_call("No PR number, skipping comment") - - def test_run_skips_dependabot(self): - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_actor = Mock() - mock_actor.login = "dependabot[bot]" - mock_workflow_run.actor = mock_actor - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - with patch("builtins.print") as mock_print: - bot.run() - mock_print.assert_any_call("Skipping dependabot PR from dependabot[bot]") - - @patch("openwisp_utils.ci_failure_bot.requests.get") - def test_run_full_workflow(self, mock_requests): - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_actor = Mock() - mock_actor.login = "user" - mock_workflow_run.actor = mock_actor - mock_job = Mock() - mock_job.conclusion = "failure" - mock_job.name = "test-job" - mock_job.logs_url = "https://api.github.com/logs/123" - mock_job.steps = [] - mock_workflow_run.jobs.return_value = [mock_job] - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - mock_response = Mock() - mock_response.content = b"Build failed" - mock_response.raise_for_status = Mock() - mock_requests.return_value = mock_response - mock_gemini_response = Mock() - mock_gemini_response.text = "Analysis: Build failed due to syntax error" - self.mock_model.generate_content.return_value = mock_gemini_response - mock_pr = Mock() - mock_pr.head = Mock() - mock_pr.head.repo = Mock() - mock_pr.head.repo.full_name = "openwisp/openwisp-utils" - mock_user = Mock() - mock_user.login = "github-actions[bot]" - self.mock_github.return_value.get_user.return_value = mock_user - self.mock_repo.get_pull.return_value = mock_pr - mock_pr.get_issue_comments.return_value = [] - bot.run() - if bot.model: - self.mock_model.generate_content.assert_called_once() - mock_pr.create_issue_comment.assert_called_once() - - def test_run_no_build_logs(self): - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_actor = Mock() - mock_actor.login = "user" - mock_workflow_run.actor = mock_actor - mock_workflow_run.jobs.return_value = [] - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - with patch("builtins.print") as mock_print: - bot.run() - mock_print.assert_any_call("No build logs found") - if bot.model: - self.mock_model.generate_content.assert_not_called() - - @patch("openwisp_utils.ci_failure_bot.requests.get") - def test_get_build_logs_zip_format(self, mock_requests): - """Test ZIP-encoded log extraction in get_build_logs()""" - import io - import zipfile - - bot = CIFailureBot() - # Create in-memory ZIP file with log content - zip_buffer = io.BytesIO() - with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file: - zip_file.writestr("failed_job.txt", "Error: test failed\nstack trace here") - zip_content = zip_buffer.getvalue() - # Mock workflow and job - mock_workflow_run = Mock() - mock_job = Mock() - mock_job.name = "test-job" - mock_job.conclusion = "failure" - mock_job.logs_url = "https://api.github.com/logs/123" - step = Mock() - step.name = "failing-step" - step.conclusion = "failure" - step.number = 1 - mock_job.steps = [step] - mock_workflow_run.jobs.return_value = [mock_job] - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - # Mock requests to return ZIP content - mock_response = Mock() - mock_response.content = zip_content - mock_response.raise_for_status = Mock() - mock_requests.return_value = mock_response - logs = bot.get_build_logs() - self.assertEqual(len(logs), 2) # job log + step info - self.assertIn("Error: test failed", logs[0]["logs"]) - self.assertEqual(logs[0]["job_name"], "test-job") - - @patch("openwisp_utils.ci_failure_bot.subprocess.run") - @patch("openwisp_utils.ci_failure_bot.requests.get") - def test_get_pr_diff_git_failure_http_fallback( - self, mock_requests, mock_subprocess - ): - """Test git diff fallback to HTTP when subprocess fails""" - bot = CIFailureBot() - mock_pr = Mock() - mock_pr.title = "Test PR" - mock_pr.body = "Test description" - mock_pr.diff_url = "https://github.com/test/diff" - self.mock_repo.get_pull.return_value = mock_pr - self.mock_repo.default_branch = "main" - # Mock subprocess to fail (git not available or timeout) - mock_subprocess.side_effect = subprocess.SubprocessError("git command failed") - # Mock HTTP fallback to succeed - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = "diff --git a/file.py b/file.py\n+new line" - mock_requests.return_value = mock_response - diff = bot.get_pr_diff() - # Verify subprocess was attempted - mock_subprocess.assert_called_once() - # Verify HTTP fallback was used - mock_requests.assert_called_once() - self.assertIn("+new line", diff["diff"]) - self.assertEqual(diff["title"], "Test PR") - - def test_run_skips_deleted_fork_pr(self): - """Test that run() skips PRs from deleted fork repositories""" - bot = CIFailureBot() - mock_workflow_run = Mock() - mock_actor = Mock() - mock_actor.login = "user" - mock_workflow_run.actor = mock_actor - self.mock_repo.get_workflow_run.return_value = mock_workflow_run - # Mock PR with deleted fork (head.repo is None) - mock_pr = Mock() - mock_pr.head.repo = None # Simulates deleted fork - self.mock_repo.get_pull.return_value = mock_pr - # Should exit early without exception - with patch("builtins.print") as mock_print: - bot.run() - # Verify skip message was printed - mock_print.assert_any_call("Skipping PR with deleted head repository") - - def test_main_handles_init_exception(self): - """Test that main() handles initialization exceptions gracefully""" - from openwisp_utils.ci_failure_bot import main - - # Missing WORKFLOW_RUN_ID should cause SystemExit during init - with patch.dict( - os.environ, - {"GITHUB_TOKEN": "test-token", "REPOSITORY": "test/repo"}, - clear=True, - ): - with self.assertRaises(SystemExit) as cm: - main() - # Verify it exits with error code 1 - self.assertEqual(cm.exception.code, 1) diff --git a/runtests.py b/runtests.py index bd59b303..5694c074 100755 --- a/runtests.py +++ b/runtests.py @@ -16,5 +16,6 @@ args.insert(1, "test") args.insert(2, "test_project") args.insert(3, "openwisp_utils.metric_collection") + args.insert(4, "openwisp_utils.bots") execute_from_command_line(args) sys.exit(pytest.main([os.path.join("openwisp_utils/releaser/tests")])) diff --git a/tests/openwisp2/settings.py b/tests/openwisp2/settings.py index 27038e9c..f02651a5 100644 --- a/tests/openwisp2/settings.py +++ b/tests/openwisp2/settings.py @@ -30,6 +30,7 @@ "test_project", "openwisp_utils.admin_theme", "openwisp_utils.metric_collection", + "openwisp_utils.bots", "django.contrib.sites", # admin "django.contrib.admin", diff --git a/tests/test_project/tests/test_test_utils.py b/tests/test_project/tests/test_test_utils.py index 1de83212..8ed13920 100644 --- a/tests/test_project/tests/test_test_utils.py +++ b/tests/test_project/tests/test_test_utils.py @@ -98,7 +98,7 @@ def test_retryable_request(self, *args): ) as mocked__get_conn: with self.assertRaises(ConnectionError) as error: retryable_request("get", url="https://openwisp.org") - self.assertEqual(len(mocked__get_conn.mock_calls), 4) + self.assertGreaterEqual(len(mocked__get_conn.mock_calls), 3) self.assertIn( "OSError", str(error.exception), @@ -112,7 +112,7 @@ def test_retryable_request(self, *args): ) as mocked_getResponse: with self.assertRaises(RetryError) as error: retryable_request("get", url="https://openwisp.org") - self.assertEqual(len(mocked_getResponse.mock_calls), 4) + self.assertGreaterEqual(len(mocked_getResponse.mock_calls), 3) self.assertIn( "too many 500 error responses", str(error.exception),