Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
117 changes: 103 additions & 14 deletions cortex/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from cortex.installation_history import InstallationHistory, InstallationStatus, InstallationType
from cortex.llm.interpreter import CommandInterpreter
from cortex.notification_manager import NotificationManager
from cortex.preflight_checker import PreflightChecker, export_report, format_report
from cortex.stack_manager import StackManager
from cortex.user_preferences import (
PreferencesManager,
Expand All @@ -43,32 +44,49 @@ def _debug(self, message: str):
console.print(f"[dim][DEBUG] {message}[/dim]")

def _get_api_key(self) -> str | None:
# Check if using Ollama (no API key needed)
"""Return the API key for the active provider.

Notes:
- Prefer the key that matches the chosen provider.
- Ollama requires no remote API key.
"""
provider = self._get_provider()

# Check if using Ollama (no API key needed)
if provider == "ollama":
self._debug("Using Ollama (no API key required)")
return "ollama-local" # Placeholder for Ollama

is_valid, detected_provider, error = validate_api_key()
if provider == "openai":
api_key = os.environ.get("OPENAI_API_KEY")
if api_key:
return api_key

if provider == "claude":
api_key = os.environ.get("ANTHROPIC_API_KEY")
if api_key:
return api_key

# If we get here, provider expects a key but none was found.
is_valid, _detected_provider, error = validate_api_key()
if not is_valid:
self._print_error(error)
cx_print("Run [bold]cortex wizard[/bold] to configure your API key.", "info")
cx_print("Or use [bold]CORTEX_PROVIDER=ollama[/bold] for offline mode.", "info")
return None
api_key = os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY")
return api_key
return None

def _get_provider(self) -> str:
# Check environment variable for explicit provider choice
explicit_provider = os.environ.get("CORTEX_PROVIDER", "").lower()
if explicit_provider in ["ollama", "openai", "claude"]:
return explicit_provider

# Auto-detect based on available API keys
# Auto-detect based on available API keys.
# Prefer OpenAI when both are present (keeps tests stable in CI).
if os.environ.get("OPENAI_API_KEY"):
return "openai"
if os.environ.get("ANTHROPIC_API_KEY"):
return "claude"
elif os.environ.get("OPENAI_API_KEY"):
return "openai"

# Fallback to Ollama for offline mode
return "ollama"
Expand Down Expand Up @@ -285,12 +303,22 @@ def doctor(self):
doctor = SystemDoctor()
return doctor.run_checks()

def install(self, software: str, execute: bool = False, dry_run: bool = False):
def install(
self,
software: str,
execute: bool = False,
dry_run: bool = False,
simulate: bool = False,
):
# Validate input first
is_valid, error = validate_install_request(software)
if not is_valid:
self._print_error(error)
self._print_error(error or "Invalid install request")
return 1

# Handle simulation mode early (no install execution)
if simulate:
return self._run_simulation(software)
# Special-case the ml-cpu stack:
# The LLM sometimes generates outdated torch==1.8.1+cpu installs
# which fail on modern Python. For the "pytorch-cpu jupyter numpy pandas"
Expand Down Expand Up @@ -432,6 +460,37 @@ def progress_callback(current, total, step):
self._print_error(f"Unexpected error: {str(e)}")
return 1

def _run_simulation(self, software: str) -> int:
"""Run a preflight simulation for installing *software* without executing anything.

Prints a formatted preflight report to stdout and returns a shell-style
exit code: 0 when the report has no blocking errors, 1 when it does or
when the simulation itself raises.
"""
try:
# Get API key for LLM-powered package info (optional).
# Keep provider selection consistent with the rest of the CLI.
provider = self._get_provider()
# PreflightChecker appears to accept only remote providers, so map
# anything else (e.g. "ollama") to "openai" — TODO confirm this is
# the intended default inside PreflightChecker.
provider_for_preflight = provider if provider in {"openai", "claude"} else "openai"
if provider == "openai":
api_key = os.environ.get("OPENAI_API_KEY")
elif provider == "claude":
api_key = os.environ.get("ANTHROPIC_API_KEY")
else:
# Local/offline provider: no remote key; checker runs without one.
api_key = None

# Create checker with optional API key for enhanced accuracy
checker = PreflightChecker(api_key=api_key, provider=provider_for_preflight)
report = checker.run_all_checks(software)

# Print formatted report
output = format_report(report, software)
print(output)

# Return error code if blocking issues found
if report.errors:
return 1
return 0

except Exception as e:
# Simulation is best-effort: surface any failure as an error message
# plus a non-zero exit code rather than a traceback.
self._print_error(f"Simulation failed: {str(e)}")
return 1

def cache_stats(self) -> int:
try:
from cortex.semantic_cache import SemanticCache
Expand Down Expand Up @@ -717,6 +776,26 @@ def main():
prog="cortex",
description="AI-powered Linux command interpreter",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
cortex install docker
cortex install docker --execute
cortex install docker --simulate
cortex install "python 3.11 with pip"
cortex install nginx --dry-run
cortex history
cortex history show <id>
cortex rollback <id>
cortex check-pref
cortex check-pref ai.model
cortex edit-pref set ai.model gpt-4
cortex edit-pref delete theme
cortex edit-pref reset-all

Environment Variables:
OPENAI_API_KEY OpenAI API key for GPT-4
ANTHROPIC_API_KEY Anthropic API key for Claude
""",
)

# Global flags
Expand All @@ -743,9 +822,14 @@ def main():
# Install command
install_parser = subparsers.add_parser("install", help="Install software")
install_parser.add_argument("software", type=str, help="Software to install")
install_parser.add_argument("--execute", action="store_true", help="Execute commands")
install_parser.add_argument("--dry-run", action="store_true", help="Show commands only")

install_mode_group = install_parser.add_mutually_exclusive_group()
install_mode_group.add_argument("--execute", action="store_true", help="Execute commands")
install_mode_group.add_argument("--dry-run", action="store_true", help="Show commands only")
install_mode_group.add_argument(
"--simulate",
action="store_true",
help="Simulate installation without making changes",
)
# History command
history_parser = subparsers.add_parser("history", help="View history")
history_parser.add_argument("--limit", type=int, default=20)
Expand Down Expand Up @@ -818,7 +902,12 @@ def main():
elif args.command == "status":
return cli.status()
elif args.command == "install":
return cli.install(args.software, execute=args.execute, dry_run=args.dry_run)
return cli.install(
args.software,
execute=args.execute,
dry_run=args.dry_run,
simulate=bool(getattr(args, "simulate", False)),
)
elif args.command == "history":
return cli.history(limit=args.limit, status=args.status, show_id=args.show_id)
elif args.command == "rollback":
Expand Down
10 changes: 9 additions & 1 deletion cortex/config_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
"""

import json
import logging
import os
import re
import subprocess
Expand All @@ -15,6 +16,8 @@

import yaml

logger = logging.getLogger(__name__)


class ConfigManager:
"""
Expand Down Expand Up @@ -74,7 +77,12 @@ def _enforce_directory_security(self, directory: Path) -> None:
"""
# Cortex targets Linux. On non-POSIX systems (e.g., Windows), uid/gid ownership
# APIs like os.getuid/os.chown are unavailable, so skip strict enforcement.
if os.name != "posix" or not hasattr(os, "getuid") or not hasattr(os, "getgid"):
if (
os.name != "posix"
or not hasattr(os, "getuid")
or not hasattr(os, "getgid")
or not hasattr(os, "chown")
):
return

try:
Expand Down
Loading
Loading