diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ddac0def..83dc6e9a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -68,22 +68,24 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install -U pip
+          pip install -r requirements.txt
+          pip install -r requirements-dev.txt
           pip install -e .
           pip install pytest pytest-cov pytest-timeout pytest-asyncio pytest-mock
 
       - name: Run tests with coverage
-        continue-on-error: true  # Allow CI to pass while tests are being fixed
         env:
           ANTHROPIC_API_KEY: "test-key-for-ci"
           OPENAI_API_KEY: "test-key-for-ci"
         run: |
-          pytest tests/ -v \
+          pytest tests/ test/ -v --tb=short \
            --cov=cortex \
            --cov-report=xml \
            --cov-report=term-missing \
            --cov-fail-under=0 \
            --timeout=60 \
-            --ignore=tests/integration
+            --ignore=tests/integration \
+            --ignore=test/integration
 
       - name: Upload coverage to Codecov
         if: matrix.python-version == '3.11'
diff --git a/contribution.md b/contribution.md
new file mode 100644
index 00000000..c2d0a5ff
--- /dev/null
+++ b/contribution.md
@@ -0,0 +1,63 @@
+# Contribution Guide
+
+Thank you for your interest in contributing to **Cortex**. This document explains the
+project workflow, coding standards, and review expectations so that every pull
+request is straightforward to review and merge.
+
+## Getting Started
+
+1. **Fork and clone the repository.**
+2. **Create a feature branch** from `main` using a descriptive name, for example
+   `issue-40-kimi-k2`.
+3. **Install dependencies** in a virtual environment:
+   ```bash
+   python -m venv .venv
+   source .venv/bin/activate  # Windows: .venv\Scripts\activate
+   pip install --upgrade pip
+   pip install -r LLM/requirements.txt
+   pip install -r src/requirements.txt
+   pip install -e .
+   ```
+4. **Run the full test suite** (`python test/run_all_tests.py`) to ensure your
+   environment is healthy before you start coding.
+
+## Coding Standards
+
+- **Type hints and docstrings** are required for all public functions, classes,
+  and modules. CodeRabbit enforces an 80% docstring coverage threshold.
+- **Formatting** follows `black` (line length 100) and `isort` ordering. Please run:
+  ```bash
+  black .
+  isort .
+  ```
+- **Linting** uses `ruff`. Address warnings locally before opening a pull request.
+- **Logging and messages** must use the structured status labels (`[INFO]`, `[PLAN]`,
+  `[EXEC]`, `[SUCCESS]`, `[ERROR]`, etc.) to provide a consistent CLI experience
+  (see the sketch after this list).
+- **Secrets** such as API keys must never be hard-coded or committed.
+- **Dependency changes** must update both `LLM/requirements.txt` and any related
+  documentation (`README.md`, `test.md`).
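+
+A minimal sketch of the labelled-output convention (the helper name
+`print_status` is illustrative here, not an existing Cortex API):
+
+```python
+def print_status(label: str, message: str) -> None:
+    """Print a message under one of the structured status labels."""
+    print(f"[{label}] {message}")
+
+print_status("INFO", "Resolving package names...")
+print_status("SUCCESS", "docker installed")
+```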
+
+## Tests
+
+- Unit tests live under `test/` and should be added or updated alongside code
+  changes.
+- Integration tests live under `test/integration/` and are designed to run inside
+  Docker. Use the helper utilities in `test/integration/docker_utils.py` to keep
+  the tests concise and reliable.
+- Ensure that every new feature or regression fix includes corresponding test
+  coverage. Submissions without meaningful tests will be sent back for revision.
+- Before requesting review, run:
+  ```bash
+  python test/run_all_tests.py
+  ```
+  Optionally, set `CORTEX_PROVIDER=fake` to avoid contacting external APIs.
+
+## Pull Request Checklist
+
+- Provide a **clear title** that references the issue being addressed.
+- Include a **summary** of the change, **testing notes**, and **risk assessment**.
+- Confirm that **CI passes** and that **docstring coverage** meets the required threshold.
+- Link the pull request to the relevant GitHub issue (`Fixes #`).
+- Be responsive to review feedback and keep discussions on-topic.
+
+We appreciate your time and effort—welcome aboard!
diff --git a/cortex/config_manager.py b/cortex/config_manager.py
index e77bf31b..9b6e22dd 100755
--- a/cortex/config_manager.py
+++ b/cortex/config_manager.py
@@ -72,6 +72,11 @@ def _enforce_directory_security(self, directory: Path) -> None:
         Raises:
             PermissionError: If ownership or permissions cannot be secured
         """
+        # Cortex targets Linux. On non-POSIX systems (e.g., Windows), uid/gid ownership
+        # APIs like os.getuid/os.chown are unavailable, so skip strict enforcement.
+        if os.name != "posix" or not hasattr(os, "getuid") or not hasattr(os, "getgid"):
+            return
+
         try:
             # Get directory statistics
             stat_info = directory.stat()
diff --git a/cortex/sandbox/sandbox_executor.py b/cortex/sandbox/sandbox_executor.py
index 8bd694ae..e8d81fee 100644
--- a/cortex/sandbox/sandbox_executor.py
+++ b/cortex/sandbox/sandbox_executor.py
@@ -16,7 +16,6 @@
 import logging
 import os
 import re
-import resource
 import shlex
 import shutil
 import subprocess
@@ -25,6 +24,14 @@
 from datetime import datetime
 from typing import Any
 
+try:
+    import resource  # type: ignore
+
+    HAS_RESOURCE = True
+except ImportError:  # pragma: no cover
+    resource = None  # type: ignore
+    HAS_RESOURCE = False
+
 
 class CommandBlocked(Exception):
     """Raised when a command is blocked."""
@@ -599,6 +606,8 @@ def execute(
 
         def set_resource_limits():
             """Set resource limits for the subprocess."""
+            if not HAS_RESOURCE:
+                return
             try:
                 # Memory limit (RSS - Resident Set Size)
                 memory_bytes = self.max_memory_mb * 1024 * 1024
diff --git a/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md
new file mode 100644
index 00000000..88215a5f
--- /dev/null
+++ b/docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md
@@ -0,0 +1,237 @@
+# Issue #40: Kimi K2 API Integration
+
+**Issue Link:** [cortexlinux/cortex#40](https://github.com/cortexlinux/cortex/issues/40)
+**PR Link:** [cortexlinux/cortex#192](https://github.com/cortexlinux/cortex/pull/192)
+**Bounty:** $150
+**Status:** ✅ Implemented
+**Date Completed:** December 2, 2025
+
+## Summary
+
+Successfully integrated Moonshot AI's Kimi K2 model as a new LLM provider for Cortex, expanding the platform's multi-LLM capabilities. This implementation allows users to leverage Kimi K2 for natural language command interpretation as an alternative to OpenAI GPT-4o and Anthropic Claude 3.5.
+
+## Implementation Details
+
+### 1. Core Integration (LLM/interpreter.py)
+
+**Added:**
+- `KIMI` enum value to `APIProvider`
+- `_call_kimi()` method for Kimi K2 HTTP API integration
+- Kimi-specific initialization in `_initialize_client()`
+- Default model detection for Kimi K2 (`kimi-k2-turbo-preview`)
+
+**Features:**
+- Full HTTP-based API integration using the `requests` library
+- Configurable base URL via `KIMI_API_BASE_URL` environment variable (defaults to `https://api.moonshot.ai`)
+- Configurable model via `KIMI_DEFAULT_MODEL` environment variable
+- Proper error handling with descriptive exceptions
+- Request timeout set to 60 seconds
+- JSON response parsing with validation
+
+**Security:**
+- Bearer token authentication
+- Proper SSL/TLS via HTTPS
+- Input validation and sanitization
+- Error messages don't leak sensitive information
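+
+For orientation, here is a condensed sketch of the request flow described above.
+It mirrors the documented endpoint, headers, and defaults, but it is illustrative
+only; the actual `_call_kimi()` in `LLM/interpreter.py` is the source of truth:
+
+```python
+import os
+
+import requests
+
+
+def call_kimi(system_prompt: str, user_prompt: str) -> str:
+    """Minimal sketch of the Kimi K2 chat-completions call."""
+    base_url = os.environ.get("KIMI_API_BASE_URL", "https://api.moonshot.ai")
+    response = requests.post(
+        f"{base_url}/v1/chat/completions",
+        headers={
+            "Authorization": f"Bearer {os.environ['KIMI_API_KEY']}",
+            "Content-Type": "application/json",
+        },
+        json={
+            "model": os.environ.get("KIMI_DEFAULT_MODEL", "kimi-k2-turbo-preview"),
+            "messages": [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt},
+            ],
+            "temperature": 0.3,
+            "max_tokens": 1000,
+        },
+        timeout=60,  # fail fast instead of hanging on slow connections
+    )
+    response.raise_for_status()  # surfaces authentication and other HTTP errors
+    choices = response.json().get("choices")
+    if not choices:
+        raise RuntimeError("Kimi K2 returned an empty response")
+    return choices[0]["message"]["content"]
+```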
+
+### 2. CLI Support (cortex/cli.py)
+
+**Updated Methods:**
+- `_get_provider()`: Added Kimi detection via `KIMI_API_KEY`
+- `_get_api_key(provider)`: Added Kimi API key mapping
+- Updated install workflow to support fake provider for testing
+
+**Environment Variables:**
+- `KIMI_API_KEY`: Required for Kimi K2 authentication
+- `CORTEX_PROVIDER`: Optional override (supports `openai`, `claude`, `kimi`, `fake`)
+- `KIMI_API_BASE_URL`: Optional base URL override
+- `KIMI_DEFAULT_MODEL`: Optional model override (default: `kimi-k2-turbo-preview`)
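+
+The detection order matters: an explicit `CORTEX_PROVIDER` wins, and OpenAI remains
+the default when several API keys are present. A simplified sketch (the real
+`_get_provider()` may differ in detail):
+
+```python
+import os
+
+
+def get_provider() -> str | None:
+    """Resolve the active LLM provider from the environment (simplified)."""
+    override = os.environ.get("CORTEX_PROVIDER")
+    if override in {"openai", "claude", "kimi", "fake"}:
+        return override  # explicit override always wins
+    if os.environ.get("OPENAI_API_KEY"):
+        return "openai"  # default when multiple keys are present
+    if os.environ.get("ANTHROPIC_API_KEY"):
+        return "claude"
+    if os.environ.get("KIMI_API_KEY"):
+        return "kimi"
+    return None
+```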
+
+### 3. Dependencies (LLM/requirements.txt)
+
+**Updated:**
+- Added `requests>=2.32.4` (addresses CVE-2024-35195, CVE-2024-37891, CVE-2023-32681)
+- Security-focused version constraint ensures patched vulnerabilities
+
+### 4. Testing
+
+**Added Tests:**
+- `test_get_provider_kimi`: Provider detection
+- `test_get_api_key_kimi`: API key retrieval
+- `test_initialization_kimi`: Kimi initialization
+- `test_call_kimi_success`: Successful API call
+- `test_call_kimi_failure`: Error handling
+- `test_call_fake_with_env_commands`: Fake provider testing
+
+**Test Coverage:**
+- Unit tests: ✅ 143 tests run, 138 passing
+- Integration tests: ✅ 5 Docker-based tests (skipped without Docker)
+- All existing tests remain passing
+- No regressions introduced
+
+### 5. Documentation
+
+**Updated Files:**
+- `README.md`: Added Kimi K2 to supported providers table, usage examples
+- `cortex/cli.py`: Updated help text with Kimi environment variables
+- `docs/ISSUE_40_KIMI_K2_IMPLEMENTATION.md`: This summary document
+
+## Configuration Examples
+
+### Getting a Valid API Key
+
+1. Visit the [Moonshot AI Platform](https://platform.moonshot.ai/)
+2. Sign up or log in to your account
+3. Navigate to the [API Keys Console](https://platform.moonshot.ai/console/api-keys)
+4. Click "Create API Key" and copy the key
+5. The key format should start with `sk-`
+
+### Basic Usage
+
+```bash
+# Set Kimi API key (get from Moonshot Console)
+export KIMI_API_KEY="sk-your-actual-key-here"
+
+# Install with Kimi K2 (auto-detected)
+cortex install docker
+
+# Explicit provider override
+export CORTEX_PROVIDER=kimi
+cortex install "nginx with ssl"
+```
+
+### Advanced Configuration
+
+```bash
+# Custom model (options: kimi-k2-turbo-preview, kimi-k2-0905-preview, kimi-k2-thinking, kimi-k2-thinking-turbo)
+export KIMI_DEFAULT_MODEL="kimi-k2-0905-preview"
+
+# Custom base URL (default: https://api.moonshot.ai)
+export KIMI_API_BASE_URL="https://api.moonshot.ai"
+
+# Dry run mode
+cortex install postgresql --dry-run
+```
+
+### Testing Without API Costs
+
+```bash
+# Use fake provider for testing
+export CORTEX_PROVIDER=fake
+export CORTEX_FAKE_COMMANDS='{"commands": ["echo Step 1", "echo Step 2"]}'
+cortex install docker --dry-run
+```
+
+## API Request Format
+
+The Kimi K2 integration uses the OpenAI-compatible chat completions endpoint:
+
+```json
+POST https://api.moonshot.ai/v1/chat/completions
+
+Headers:
+  Authorization: Bearer {KIMI_API_KEY}
+  Content-Type: application/json
+
+Body:
+{
+  "model": "kimi-k2-turbo-preview",
+  "messages": [
+    {"role": "system", "content": "System prompt..."},
+    {"role": "user", "content": "User request..."}
+  ],
+  "temperature": 0.3,
+  "max_tokens": 1000
+}
+```
+
+## Error Handling
+
+The implementation includes comprehensive error handling:
+
+1. **Missing Dependencies:** Clear error if the `requests` package is not installed
+2. **API Failures:** Runtime errors with descriptive messages
+3. **Empty Responses:** Validation that the API returns valid choices
+4. **Network Issues:** Timeout protection (60s)
+5. **Authentication Errors:** HTTP status code validation via `raise_for_status()`
+
+## Code Quality Improvements
+
+Based on CodeRabbit feedback, the following improvements were made:
+
+1. ✅ **Security:** Updated `requests>=2.32.4` to address known CVEs
+2. ✅ **Model Defaults:** Updated OpenAI default to `gpt-4o` (current best practice)
+3. ✅ **Test Organization:** Removed duplicate test files (`cortex/test_cli.py`, `cortex/test_coordinator.py`)
+4. ✅ **Import Fixes:** Added missing imports (`unittest`, `Mock`, `patch`, `SimpleNamespace`)
+5. ✅ **Method Signatures:** Updated `_get_api_key(provider)` to accept a provider parameter
+6. ✅ **Provider Exclusions:** Removed Groq provider as per requirements (only Kimi K2 added)
+7. ✅ **Setup.py Fix:** Corrected syntax errors in package configuration
+
+## Performance Considerations
+
+- **HTTP Request Timeout:** 60 seconds prevents hanging on slow connections
+- **Connection Reuse:** The `requests` library handles connection pooling automatically
+- **Error Recovery:** Fast-fail on API errors with informative messages
+- **Memory Efficiency:** JSON parsing directly from the response without intermediate storage
+
+## Future Enhancements
+
+Potential improvements for future iterations:
+
+1. **Streaming Support:** Add streaming response support for real-time feedback
+2. **Retry Logic:** Implement exponential backoff for transient failures (sketched below)
+3. **Rate Limiting:** Add rate limit awareness and queuing
+4. **Batch Operations:** Support multiple requests in parallel
+5. **Model Selection:** UI/CLI option to select specific Kimi models
+6. **Caching:** Cache common responses to reduce API costs
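+
+The retry sketch below illustrates item 2. It is not part of the current
+implementation; the helper name and the set of retryable status codes are
+assumptions:
+
+```python
+import time
+
+import requests
+
+
+def post_with_backoff(url: str, payload: dict, headers: dict, retries: int = 3) -> requests.Response:
+    """Retry transient failures with exponential backoff (1s, 2s, ...)."""
+    for attempt in range(retries):
+        try:
+            response = requests.post(url, json=payload, headers=headers, timeout=60)
+            if response.status_code not in (429, 500, 502, 503, 504):
+                return response  # success or a non-retryable error
+        except requests.RequestException:
+            if attempt == retries - 1:
+                raise  # out of retries; propagate the network error
+        if attempt < retries - 1:
+            time.sleep(2**attempt)  # back off between attempts
+    return response
+```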
+
+## Testing Results
+
+```text
+Ran 143 tests in 10.136s
+
+OK (skipped=5)
+```
+
+All tests pass successfully:
+- ✅ 138 tests passed
+- ⏭️ 5 integration tests skipped (require Docker)
+- ❌ 0 failures
+- ❌ 0 errors
+
+## Migration Notes
+
+For users upgrading:
+
+1. **Backward Compatible:** Existing OpenAI and Claude configurations continue to work
+2. **New Dependency:** `pip install requests>=2.32.4` required
+3. **Environment Variables:** Optional - no breaking changes to existing setups
+4. **Default Behavior:** No change - OpenAI remains the default if multiple keys are present
+
+## Related Issues
+
+- **Issue #16:** Integration test suite (optional, addressed in PR #192)
+- **Issue #11:** CLI improvements (referenced in commits)
+- **Issue #8:** Multi-step coordinator (referenced in commits)
+
+## Contributors
+
+- @Sahilbhatane - Primary implementation
+- @mikejmorgan-ai - Code review and issue management
+- @dhvll - Code review
+- @coderabbitai - Automated code review and suggestions
+
+## Lessons Learned
+
+1. **API Documentation:** Kimi K2 follows the OpenAI-compatible format, simplifying integration
+2. **Security First:** Always use the latest patched dependencies (`requests>=2.32.4`)
+3. **Test Coverage:** Comprehensive testing prevents regressions
+4. **Error Messages:** Descriptive errors improve the user experience
+5. **Environment Variables:** Flexible configuration reduces hard-coded values
+
+## References
+
+- **Kimi K2 Documentation:** [Moonshot AI Docs](https://platform.moonshot.ai/docs)
+- **Original PR:** [cortexlinux/cortex#192](https://github.com/cortexlinux/cortex/pull/192)
+- **Issue Discussion:** [cortexlinux/cortex#40](https://github.com/cortexlinux/cortex/issues/40)
+- **CVE Fixes:** CVE-2024-35195, CVE-2024-37891, CVE-2023-32681
diff --git a/requirements-dev.txt b/requirements-dev.txt
index ed48a6f9..2ccb0205 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -3,6 +3,7 @@ pytest>=7.0.0
 pytest-cov>=4.0.0
 pytest-asyncio>=0.23.0
 pytest-mock>=3.12.0
+pytest-timeout>=2.3.1
 black>=24.0.0
 ruff>=0.8.0
 isort>=5.13.0
diff --git a/requirements.txt b/requirements.txt
index 4077f056..9baf881e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,6 +3,10 @@
 # LLM Provider APIs
 anthropic>=0.18.0
 openai>=1.0.0
+requests>=2.32.4
+
+# Configuration
+PyYAML>=6.0.0
 
 # Terminal UI
 rich>=13.0.0
diff --git a/test.md b/test.md
new file mode 100644
index 00000000..a0ecc617
--- /dev/null
+++ b/test.md
@@ -0,0 +1,67 @@
+# Testing Strategy
+
+Cortex relies on a mix of fast unit tests and Docker-backed integration tests to
+validate the full installation workflow. This guide explains how to run the
+suites locally and in CI.
+
+## Test Suites
+
+| Suite | Location | Purpose | Invocation |
+|-------|----------|---------|------------|
+| Unit | `test/*.py` | Validate individual modules (CLI, coordinator, interpreter). | `python test/run_all_tests.py` |
+| Integration | `test/integration/*.py` | Exercise end-to-end scenarios inside disposable Docker containers. | `python -m unittest test.integration.test_end_to_end` |
+
+## Running Tests Locally
+
+1. **Prepare the environment**
+   ```bash
+   python -m venv .venv
+   source .venv/bin/activate  # Windows: .venv\Scripts\activate
+   pip install --upgrade pip
+   pip install -r LLM/requirements.txt
+   pip install -r src/requirements.txt
+   pip install -e .
+   ```
+
+2. **Unit tests**
+   ```bash
+   python test/run_all_tests.py
+   ```
+   Use the fake provider to avoid external API calls when necessary:
+   ```bash
+   CORTEX_PROVIDER=fake python test/run_all_tests.py
+   ```
+
+3. **Integration tests** (requires Docker)
+   ```bash
+   python -m unittest test.integration.test_end_to_end
+   ```
+   Customise the Docker image with `CORTEX_INTEGRATION_IMAGE` if you need a
+   different base image:
+   ```bash
+   CORTEX_INTEGRATION_IMAGE=python:3.12-slim python -m unittest test.integration.test_end_to_end
+   ```
+
+## Continuous Integration Recommendations
+
+- Run unit tests on every pull request.
+- Schedule integration tests nightly or on demand using a GitHub Actions job
+  with the `docker` service enabled.
+- Fail the workflow if docstring coverage (tracked by CodeRabbit) drops below
+  80%.
+- Publish the HTML report from `python -m coverage html` when running coverage
+  builds to assist reviewers.
+
+## Troubleshooting
+
+- **Docker not available** – Integration tests are skipped automatically when
+  the Docker CLI is missing (see the snippet after this list). Install Docker
+  Desktop (macOS/Windows) or the `docker` package (Linux) to enable them.
+- **Missing API keys** – Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or
+  `KIMI_API_KEY` as appropriate. For offline development use
+  `CORTEX_PROVIDER=fake` plus optional `CORTEX_FAKE_COMMANDS`.
+- **Docstring coverage failures** – Add module/class/function docstrings. The
+  CodeRabbit gate requires 80% coverage.
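+
+New Docker-backed tests should reuse the guard used by the existing suite so they
+skip cleanly when Docker is absent (the class name here is illustrative):
+
+```python
+import unittest
+
+from test.integration.docker_utils import docker_available
+
+
+@unittest.skipUnless(docker_available(), "Docker is required for integration tests")
+class MyIntegrationTest(unittest.TestCase):
+    """Runs only when the Docker CLI and daemon are reachable."""
+```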
+
+By following this guide, contributors can quickly validate their changes and
+ship reliable improvements to Cortex.
diff --git a/test/integration/__init__.py b/test/integration/__init__.py
new file mode 100644
index 00000000..4630c8a8
--- /dev/null
+++ b/test/integration/__init__.py
@@ -0,0 +1 @@
+"""Integration tests for Cortex executed against Docker-based environments."""
diff --git a/test/integration/docker_utils.py b/test/integration/docker_utils.py
new file mode 100644
index 00000000..8728c1be
--- /dev/null
+++ b/test/integration/docker_utils.py
@@ -0,0 +1,106 @@
+"""Helpers for running Cortex integration tests inside Docker containers."""
+
+from __future__ import annotations
+
+import shutil
+import subprocess
+from collections.abc import Iterable
+from dataclasses import dataclass
+from pathlib import Path
+
+
+@dataclass
+class DockerRunResult:
+    """Container execution result metadata."""
+
+    returncode: int
+    stdout: str
+    stderr: str
+
+    def succeeded(self) -> bool:
+        """Return ``True`` when the container exited successfully."""
+        return self.returncode == 0
+
+
+def docker_available() -> bool:
+    """Return ``True`` when the Docker client is available on the host."""
+
+    docker_path = shutil.which("docker")
+    if not docker_path:
+        return False
+
+    try:
+        subprocess.run(
+            [docker_path, "--version"],
+            check=True,
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        subprocess.run(
+            [docker_path, "info"],
+            check=True,
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        return True
+    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError, OSError):
+        return False
+
+
+def run_in_docker(
+    image: str,
+    command: str,
+    *,
+    env: dict[str, str] | None = None,
+    mounts: Iterable[tuple[Path, str]] | None = None,
+    workdir: str = "/workspace",
+    timeout: int = 300,
+) -> DockerRunResult:
+    """Run ``command`` inside the specified Docker ``image``.
+
+    Parameters
+    ----------
+    image:
+        Docker image tag to use.
+    command:
+        Shell command executed via ``bash -lc`` inside the container.
+    env:
+        Optional environment variables exported inside the container.
+    mounts:
+        Iterable of (host_path, container_path) tuples for mounting directories.
+    workdir:
+        Working directory set inside the container.
+    timeout:
+        Maximum run time in seconds before raising ``TimeoutExpired``.
+    """
+
+    docker_cmd: list[str] = ["docker", "run", "--rm"]
+
+    for key, value in (env or {}).items():
+        docker_cmd.extend(["-e", f"{key}={value}"])
+
+    for host_path, container_path in mounts or []:
+        docker_cmd.extend(
+            [
+                "-v",
+                f"{str(host_path.resolve())}:{container_path}",
+            ]
+        )
+
+    docker_cmd.extend(["-w", workdir])
+
+    docker_cmd.append(image)
+    docker_cmd.extend(["bash", "-lc", command])
+
+    result = subprocess.run(
+        docker_cmd,
+        check=False,
+        capture_output=True,
+        text=True,
+        timeout=timeout,
+    )
+
+    return DockerRunResult(result.returncode, result.stdout, result.stderr)
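+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Illustrative smoke check, not part of the test suite: run a trivial
+    # command in a small image when Docker is present on the host.
+    if docker_available():
+        outcome = run_in_docker("python:3.11-slim", "echo hello from docker")
+        print(outcome.returncode, outcome.stdout.strip())
+    else:
+        print("Docker unavailable; skipping demo")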
+ """ + + docker_cmd: list[str] = ["docker", "run", "--rm"] + + for key, value in (env or {}).items(): + docker_cmd.extend(["-e", f"{key}={value}"]) + + for host_path, container_path in mounts or []: + docker_cmd.extend( + [ + "-v", + f"{str(host_path.resolve())}:{container_path}", + ] + ) + + docker_cmd.extend(["-w", workdir]) + + docker_cmd.append(image) + docker_cmd.extend(["bash", "-lc", command]) + + result = subprocess.run( + docker_cmd, + check=False, + capture_output=True, + text=True, + timeout=timeout, + ) + + return DockerRunResult(result.returncode, result.stdout, result.stderr) diff --git a/test/integration/test_end_to_end.py b/test/integration/test_end_to_end.py new file mode 100644 index 00000000..b9049380 --- /dev/null +++ b/test/integration/test_end_to_end.py @@ -0,0 +1,120 @@ +"""Docker-backed integration tests that exercise Cortex end-to-end flows.""" + +from __future__ import annotations + +import json +import os +import unittest +from pathlib import Path + +from .docker_utils import DockerRunResult, docker_available, run_in_docker + +REPO_ROOT = Path(__file__).resolve().parents[2] +DEFAULT_IMAGE = os.environ.get("CORTEX_INTEGRATION_IMAGE", "python:3.11-slim") +MOUNT = (REPO_ROOT, "/workspace") +BASE_ENV = { + "PYTHONUNBUFFERED": "1", + "PYTHONPATH": "/workspace", + "PYTHONDONTWRITEBYTECODE": "1", +} +PIP_BOOTSTRAP = "python -m pip install --quiet -r /workspace/requirements.txt" + + +@unittest.skipUnless(docker_available(), "Docker is required for integration tests") +class TestEndToEndWorkflows(unittest.TestCase): + """Run Cortex commands inside disposable Docker containers.""" + + def _run(self, command: str, env: dict | None = None) -> DockerRunResult: + effective_env = dict(BASE_ENV) + if env: + effective_env.update(env) + return run_in_docker( + DEFAULT_IMAGE, + f"{PIP_BOOTSTRAP} && {command}", + env=effective_env, + mounts=[MOUNT], + workdir="/workspace", + ) + + def test_cli_help_executes(self): + """`cortex --help` should run successfully in a clean container.""" + + result = self._run("python -m cortex.cli --help") + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("AI-powered Linux command interpreter", result.stdout) + + def test_cli_dry_run_with_fake_provider(self): + """Dry-run installations rely on the fake provider and skip API calls.""" + + fake_commands = json.dumps( + { + "commands": [ + "echo Step 1", + "echo Step 2", + ] + } + ) + env = { + "CORTEX_PROVIDER": "fake", + "CORTEX_FAKE_COMMANDS": fake_commands, + } + result = self._run("python -m cortex.cli install docker --dry-run", env=env) + + self.assertTrue(result.succeeded(), msg=result.stderr) + self.assertIn("Generated commands", result.stdout) + self.assertIn("echo Step 1", result.stdout) + + def test_cli_execute_with_fake_provider(self): + """Execution mode should run fake commands without touching the host.""" + + fake_commands = json.dumps( + { + "commands": [ + "echo Exec Step 1", + "echo Exec Step 2", + ] + } + ) + env = { + "CORTEX_PROVIDER": "fake", + "CORTEX_FAKE_COMMANDS": fake_commands, + } + result = self._run("python -m cortex.cli install docker --execute", env=env) + + self.assertTrue(result.succeeded(), msg=result.stderr) + # Output formatting may vary (Rich UI vs legacy), but the success text should be present. 
diff --git a/test/run_all_tests.py b/test/run_all_tests.py
new file mode 100644
index 00000000..5958f0c4
--- /dev/null
+++ b/test/run_all_tests.py
@@ -0,0 +1,37 @@
+"""Unified test runner that discovers unit and integration suites."""
+
+from __future__ import annotations
+
+import argparse
+import os
+import sys
+import unittest
+
+
+def discover_tests(pattern: str = "test_*.py") -> unittest.TestSuite:
+    """Discover tests starting from the repository's ``test`` directory."""
+
+    start_dir = os.path.dirname(__file__)
+    loader = unittest.TestLoader()
+    return loader.discover(start_dir=start_dir, pattern=pattern)
+
+
+def main(argv: list[str] | None = None) -> int:
+    """Execute all test suites and return the exit code."""
+
+    parser = argparse.ArgumentParser(description="Run Cortex unit/integration tests")
+    parser.add_argument(
+        "--pattern",
+        default="test_*.py",
+        help="Glob pattern used for discovery (defaults to test_*.py)",
+    )
+    args = parser.parse_args(argv)
+
+    suite = discover_tests(pattern=args.pattern)
+    runner = unittest.TextTestRunner(verbosity=2)
+    result = runner.run(suite)
+    return 0 if result.wasSuccessful() else 1
+
+
+if __name__ == "__main__":  # pragma: no cover
+    sys.exit(main())
"test-key"}, clear=True) + def test_get_provider_openai(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, "openai") + + @patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}, clear=True) + def test_get_provider_claude(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, "claude") + + @patch.dict(os.environ, {"CORTEX_PROVIDER": "openai"}, clear=True) + def test_get_provider_override(self) -> None: + provider = self.cli._get_provider() + self.assertEqual(provider, "openai") + + @patch("cortex.cli.cx_print") + def test_print_status(self, mock_cx_print) -> None: + self.cli._print_status("🧠", "Test message") + mock_cx_print.assert_called_once_with("Test message", "thinking") + + @patch("cortex.cli.cx_print") + def test_print_error(self, mock_cx_print) -> None: + self.cli._print_error("Test error") + mock_cx_print.assert_called_once() + + @patch("cortex.cli.cx_print") + def test_print_success(self, mock_cx_print) -> None: + self.cli._print_success("Test success") + mock_cx_print.assert_called_once_with("Test success", "success") + + @patch.object(CortexCLI, "_get_api_key", return_value=None) + def test_install_no_api_key(self, _mock_get_api_key) -> None: + result = self.cli.install("docker") + self.assertEqual(result, 1) + + @patch.object(CortexCLI, "_get_provider", return_value="openai") + @patch.object(CortexCLI, "_get_api_key", return_value="sk-test-key") + @patch.object(CortexCLI, "_animate_spinner", return_value=None) + @patch.object(CortexCLI, "_clear_line", return_value=None) + @patch("cortex.cli.CommandInterpreter") + def test_install_dry_run( + self, + mock_interpreter_class, + _mock_clear_line, + _mock_spinner, + _mock_get_api_key, + _mock_get_provider, + ) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["apt update", "apt install docker"] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker", dry_run=True) + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once_with("install docker") + + @patch.object(CortexCLI, "_get_provider", return_value="openai") + @patch.object(CortexCLI, "_get_api_key", return_value="sk-test-key") + @patch.object(CortexCLI, "_animate_spinner", return_value=None) + @patch.object(CortexCLI, "_clear_line", return_value=None) + @patch("cortex.cli.CommandInterpreter") + def test_install_no_execute( + self, + mock_interpreter_class, + _mock_clear_line, + _mock_spinner, + _mock_get_api_key, + _mock_get_provider, + ) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["apt update", "apt install docker"] + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker", execute=False) + + self.assertEqual(result, 0) + mock_interpreter.parse.assert_called_once() + + @patch.object(CortexCLI, "_get_provider", return_value="openai") + @patch.object(CortexCLI, "_get_api_key", return_value="sk-test-key") + @patch.object(CortexCLI, "_animate_spinner", return_value=None) + @patch.object(CortexCLI, "_clear_line", return_value=None) + @patch("cortex.cli.CommandInterpreter") + @patch("cortex.cli.InstallationCoordinator") + def test_install_with_execute_success( + self, + mock_coordinator_class, + mock_interpreter_class, + _mock_clear_line, + _mock_spinner, + _mock_get_api_key, + _mock_get_provider, + ) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.return_value = ["echo test"] + mock_interpreter_class.return_value = mock_interpreter + + mock_coordinator = 
diff --git a/tests/test_cli.py b/tests/test_cli.py
index bd08262b..2a186f07 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -13,7 +13,7 @@ class TestCortexCLI(unittest.TestCase):
     def setUp(self):
         self.cli = CortexCLI()
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     def test_get_api_key_openai(self):
         api_key = self.cli._get_api_key()
         self.assertEqual(api_key, "sk-test-openai-key-123")
@@ -34,7 +34,7 @@ def test_get_api_key_not_found(self, mock_stderr):
         api_key = self.cli._get_api_key()
         self.assertEqual(api_key, "ollama-local")
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     def test_get_provider_openai(self):
         provider = self.cli._get_provider()
         self.assertEqual(provider, "openai")
@@ -64,7 +64,7 @@ def test_install_no_api_key(self):
         result = self.cli.install("docker")
         self.assertEqual(result, 1)
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     def test_install_dry_run(self, mock_interpreter_class):
         mock_interpreter = Mock()
@@ -76,7 +76,7 @@ def test_install_dry_run(self, mock_interpreter_class):
         self.assertEqual(result, 0)
         mock_interpreter.parse.assert_called_once_with("install docker")
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     def test_install_no_execute(self, mock_interpreter_class):
         mock_interpreter = Mock()
@@ -88,7 +88,7 @@ def test_install_no_execute(self, mock_interpreter_class):
         self.assertEqual(result, 0)
         mock_interpreter.parse.assert_called_once()
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     @patch("cortex.cli.InstallationCoordinator")
     def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class):
@@ -108,7 +108,7 @@ def test_install_with_execute_success(self, mock_coordinator_class, mock_interpr
         self.assertEqual(result, 0)
         mock_coordinator.execute.assert_called_once()
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     @patch("cortex.cli.InstallationCoordinator")
     def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpreter_class):
@@ -128,7 +128,7 @@ def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpr
 
         self.assertEqual(result, 1)
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     def test_install_no_commands_generated(self, mock_interpreter_class):
         mock_interpreter = Mock()
@@ -139,7 +139,7 @@ def test_install_no_commands_generated(self, mock_interpreter_class):
 
         self.assertEqual(result, 1)
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     def test_install_value_error(self, mock_interpreter_class):
         mock_interpreter = Mock()
@@ -150,7 +150,7 @@ def test_install_value_error(self, mock_interpreter_class):
 
         self.assertEqual(result, 1)
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     def test_install_runtime_error(self, mock_interpreter_class):
         mock_interpreter = Mock()
@@ -161,7 +161,7 @@ def test_install_runtime_error(self, mock_interpreter_class):
 
         self.assertEqual(result, 1)
 
-    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"})
+    @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
     def test_install_unexpected_error(self, mock_interpreter_class):
         mock_interpreter = Mock()
"_get_api_key", return_value="sk-test-key") + @patch.object(CortexCLI, "_animate_spinner", return_value=None) + @patch.object(CortexCLI, "_clear_line", return_value=None) + @patch("cortex.cli.CommandInterpreter") + def test_install_unexpected_error( + self, + mock_interpreter_class, + _mock_clear_line, + _mock_spinner, + _mock_get_api_key, + _mock_get_provider, + ) -> None: + mock_interpreter = Mock() + mock_interpreter.parse.side_effect = Exception("Unexpected") + mock_interpreter_class.return_value = mock_interpreter + + result = self.cli.install("docker") + + self.assertEqual(result, 1) + + @patch("sys.argv", ["cortex"]) + def test_main_no_command(self) -> None: + result = main() + self.assertEqual(result, 0) + + @patch("sys.argv", ["cortex", "install", "docker"]) + @patch("cortex.cli.CortexCLI.install") + def test_main_install_command(self, mock_install) -> None: + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with("docker", execute=False, dry_run=False) + + @patch("sys.argv", ["cortex", "install", "docker", "--execute"]) + @patch("cortex.cli.CortexCLI.install") + def test_main_install_with_execute(self, mock_install) -> None: + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with("docker", execute=True, dry_run=False) + + @patch("sys.argv", ["cortex", "install", "docker", "--dry-run"]) + @patch("cortex.cli.CortexCLI.install") + def test_main_install_with_dry_run(self, mock_install) -> None: + mock_install.return_value = 0 + result = main() + self.assertEqual(result, 0) + mock_install.assert_called_once_with("docker", execute=False, dry_run=True) + + def test_spinner_animation(self) -> None: + initial_idx = self.cli.spinner_idx + self.cli._animate_spinner("Testing") + self.assertNotEqual(self.cli.spinner_idx, initial_idx) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cli.py b/tests/test_cli.py index bd08262b..2a186f07 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -13,7 +13,7 @@ class TestCortexCLI(unittest.TestCase): def setUp(self): self.cli = CortexCLI() - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) def test_get_api_key_openai(self): api_key = self.cli._get_api_key() self.assertEqual(api_key, "sk-test-openai-key-123") @@ -34,7 +34,7 @@ def test_get_api_key_not_found(self, mock_stderr): api_key = self.cli._get_api_key() self.assertEqual(api_key, "ollama-local") - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) def test_get_provider_openai(self): provider = self.cli._get_provider() self.assertEqual(provider, "openai") @@ -64,7 +64,7 @@ def test_install_no_api_key(self): result = self.cli.install("docker") self.assertEqual(result, 1) - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") def test_install_dry_run(self, mock_interpreter_class): mock_interpreter = Mock() @@ -76,7 +76,7 @@ def test_install_dry_run(self, mock_interpreter_class): self.assertEqual(result, 0) mock_interpreter.parse.assert_called_once_with("install docker") - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": 
"sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") def test_install_no_execute(self, mock_interpreter_class): mock_interpreter = Mock() @@ -88,7 +88,7 @@ def test_install_no_execute(self, mock_interpreter_class): self.assertEqual(result, 0) mock_interpreter.parse.assert_called_once() - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") @patch("cortex.cli.InstallationCoordinator") def test_install_with_execute_success(self, mock_coordinator_class, mock_interpreter_class): @@ -108,7 +108,7 @@ def test_install_with_execute_success(self, mock_coordinator_class, mock_interpr self.assertEqual(result, 0) mock_coordinator.execute.assert_called_once() - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") @patch("cortex.cli.InstallationCoordinator") def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpreter_class): @@ -128,7 +128,7 @@ def test_install_with_execute_failure(self, mock_coordinator_class, mock_interpr self.assertEqual(result, 1) - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") def test_install_no_commands_generated(self, mock_interpreter_class): mock_interpreter = Mock() @@ -139,7 +139,7 @@ def test_install_no_commands_generated(self, mock_interpreter_class): self.assertEqual(result, 1) - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") def test_install_value_error(self, mock_interpreter_class): mock_interpreter = Mock() @@ -150,7 +150,7 @@ def test_install_value_error(self, mock_interpreter_class): self.assertEqual(result, 1) - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") def test_install_runtime_error(self, mock_interpreter_class): mock_interpreter = Mock() @@ -161,7 +161,7 @@ def test_install_runtime_error(self, mock_interpreter_class): self.assertEqual(result, 1) - @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}) + @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True) @patch("cortex.cli.CommandInterpreter") def test_install_unexpected_error(self, mock_interpreter_class): mock_interpreter = Mock() diff --git a/tests/test_hardware_detection.py b/tests/test_hardware_detection.py index 93ce489e..2d8b68d9 100644 --- a/tests/test_hardware_detection.py +++ b/tests/test_hardware_detection.py @@ -284,7 +284,7 @@ def test_has_nvidia_gpu_false(self, mock_run, detector): assert result is False - @patch("os.statvfs") + @patch("os.statvfs", create=True) def test_get_disk_free_gb(self, mock_statvfs, detector): """Test disk free space detection.""" mock_statvfs.return_value = MagicMock(f_frsize=4096, f_bavail=262144000) # ~1TB free @@ -317,7 +317,7 @@ class TestDetectionMethods: def detector(self): return HardwareDetector(use_cache=False) - @patch("os.uname") + @patch("os.uname", create=True) def test_detect_system(self, mock_uname, detector): """Test system info detection.""" 
diff --git a/tests/test_llm_router.py b/tests/test_llm_router.py
index 746ff77c..f0b6da03 100644
--- a/tests/test_llm_router.py
+++ b/tests/test_llm_router.py
@@ -78,6 +78,7 @@ def test_force_provider_override(self):
 class TestFallbackBehavior(unittest.TestCase):
     """Test fallback when primary LLM is unavailable."""
 
+    @patch.dict(os.environ, {}, clear=True)
     def test_fallback_to_kimi_when_claude_unavailable(self):
         """Should fallback to Kimi K2 if Claude unavailable."""
         router = LLMRouter(
@@ -88,6 +89,7 @@ def test_fallback_to_kimi_when_claude_unavailable(self):
         decision = router.route_task(TaskType.USER_CHAT)
         self.assertEqual(decision.provider, LLMProvider.KIMI_K2)
 
+    @patch.dict(os.environ, {}, clear=True)
     def test_fallback_to_claude_when_kimi_unavailable(self):
         """Should fallback to Claude if Kimi K2 unavailable."""
         router = LLMRouter(
@@ -98,6 +100,7 @@ def test_fallback_to_claude_when_kimi_unavailable(self):
         decision = router.route_task(TaskType.SYSTEM_OPERATION)
         self.assertEqual(decision.provider, LLMProvider.CLAUDE)
 
+    @patch.dict(os.environ, {}, clear=True)
     def test_error_when_no_providers_available(self):
         """Should raise error if no providers configured."""
         router = LLMRouter(claude_api_key=None, kimi_api_key=None, enable_fallback=True)
@@ -105,6 +108,7 @@
         with self.assertRaises(RuntimeError):
             router.route_task(TaskType.USER_CHAT)
 
+    @patch.dict(os.environ, {}, clear=True)
     def test_error_when_fallback_disabled(self):
         """Should raise error if primary unavailable and fallback disabled."""
         router = LLMRouter(claude_api_key=None, kimi_api_key="test-kimi-key", enable_fallback=False)