74 changes: 63 additions & 11 deletions cortex/cli.py
@@ -852,6 +852,28 @@ def _confirm_risky_operation(self, prediction: FailurePrediction) -> bool:

# --- End Sandbox Commands ---

def monitor(self, args: argparse.Namespace) -> int:
"""
Monitor system resource usage (CPU, RAM, Disk, Network) in real-time.

Args:
args: Parsed command-line arguments

Returns:
Exit code (0 for success)
"""
from cortex.monitor.monitor_ui import run_standalone_monitor

duration = getattr(args, "duration", None)
interval = getattr(args, "interval", 1.0)
export_path = getattr(args, "export", None)

return run_standalone_monitor(
duration=duration,
interval=interval,
export_path=export_path,
)

def ask(self, question: str) -> int:
"""Answer a natural language question about the system."""
api_key = self._get_api_key()
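For reference, cli.monitor() only unpacks the parsed options and hands them to run_standalone_monitor, which lives in cortex/monitor/monitor_ui.py and is not part of this diff. A minimal sketch of such an entry point, assuming nothing beyond the ResourceSampler API that coordinator.py exercises below (start, stop, get_peak_usage); the peak-only JSON export is illustrative:

# Hypothetical sketch -- the real run_standalone_monitor is in cortex/monitor/monitor_ui.py.
import json
import time

from cortex.monitor.sampler import ResourceSampler


def run_standalone_monitor(duration=None, interval=1.0, export_path=None):
    sampler = ResourceSampler(interval=interval)
    sampler.start()
    start = time.time()
    try:
        # Run continuously until Ctrl+C unless a fixed duration was requested.
        while duration is None or time.time() - start < duration:
            time.sleep(interval)
    except KeyboardInterrupt:
        pass
    finally:
        sampler.stop()

    peak = sampler.get_peak_usage()
    print(f"Peak CPU {peak.cpu_percent:.0f}%, peak RAM {peak.ram_used_gb:.1f} GB")
    if export_path:
        # Peak-only export; the shipped implementation may also export per-sample data or CSV.
        with open(export_path, "w") as f:
            json.dump(
                {
                    "cpu_percent": peak.cpu_percent,
                    "ram_percent": peak.ram_percent,
                    "ram_used_gb": peak.ram_used_gb,
                },
                f,
                indent=2,
            )
    return 0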
@@ -1445,8 +1467,8 @@ def install(
dry_run: bool = False,
parallel: bool = False,
json_output: bool = False,
monitor: bool = False,
) -> int:
"""Install software using the LLM-powered package manager."""
# Initialize installation history
history = InstallationHistory()
install_id = None
@@ -1670,6 +1692,7 @@ def parallel_log_callback(message: str, level: str = "info"):
timeout=300,
stop_on_error=True,
progress_callback=progress_callback,
enable_monitoring=monitor,
)

result = coordinator.execute()
@@ -1678,6 +1701,10 @@
self._print_success(t("install.package_installed", package=software))
print(f"\n{t('progress.completed_in', seconds=f'{result.total_duration:.2f}')}")

# Display peak usage if monitoring was enabled
if monitor and result.peak_cpu is not None:
print(f"\n📊 Peak usage: CPU {result.peak_cpu:.0f}%, RAM {result.peak_ram_gb:.1f} GB")

# Record successful installation
if install_id:
history.update_installation(install_id, InstallationStatus.SUCCESS)
@@ -4810,14 +4837,40 @@ def main():
help="Enable parallel execution for multi-step installs",
)
install_parser.add_argument(
"--json",
"--monitor",
action="store_true",
help="Output as JSON",
help="Monitor system resources during installation",
)
install_parser.add_argument(
"--mic",
action="store_true",
help="Use voice input for software name (press F9 to record)",
)

# Monitor command - real-time system resource monitoring
# Note: Monitoring is client-side using psutil. Daemon integration is intentionally
# out of scope to keep the feature self-contained and avoid cortexd dependencies.
monitor_parser = subparsers.add_parser(
"monitor",
help="Monitor system resource usage",
description="Track CPU, RAM, Disk, and Network usage in real-time.",
)
monitor_parser.add_argument(
"--duration",
"-d",
type=int,
metavar="SECONDS",
help="Run for fixed duration (seconds); omit for continuous monitoring",
)
monitor_parser.add_argument(
"--interval",
"-i",
type=float,
default=1.0,
metavar="SECONDS",
help="Sampling interval in seconds (default: 1.0)",
)
monitor_parser.add_argument(
"--export",
"-e",
type=str,
metavar="FILE",
help="Export metrics to file (JSON or CSV). Experimental feature.",
)

# Remove command - uninstall with impact analysis
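The monitor subparser above accepts --duration/-d, --interval/-i, and --export/-e. Purely for illustration, running `cortex monitor --duration 60 --interval 0.5 --export metrics.json` would yield a namespace roughly like the one below, which main() routes to cli.monitor(args):

# Illustrative only: approximate argparse result for
#   cortex monitor --duration 60 --interval 0.5 --export metrics.json
import argparse

args = argparse.Namespace(
    command="monitor",
    duration=60,            # None when omitted -> continuous monitoring
    interval=0.5,           # sampling interval in seconds (default 1.0)
    export="metrics.json",  # None when omitted
)
# main() dispatches: cli.monitor(args) -> run_standalone_monitor(...)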
@@ -5492,9 +5545,8 @@ def main():
return cli.printer(
action=getattr(args, "action", "status"), verbose=getattr(args, "verbose", False)
)
elif args.command == "voice":
model = getattr(args, "model", None)
return cli.voice(continuous=not getattr(args, "single", False), model=model)
elif args.command == "monitor":
return cli.monitor(args)
elif args.command == "ask":
# Handle --mic flag for voice input
if getattr(args, "mic", False):
@@ -5561,7 +5613,7 @@ def main():
execute=args.execute,
dry_run=args.dry_run,
parallel=args.parallel,
json_output=args.json,
monitor=getattr(args, "monitor", False),
)
elif args.command == "remove":
# Handle --execute flag to override default dry-run
53 changes: 52 additions & 1 deletion cortex/coordinator.py
@@ -46,6 +46,10 @@ class InstallationResult:
total_duration: float
failed_step: int | None = None
error_message: str | None = None
# Monitoring data (optional)
peak_cpu: float | None = None
peak_ram_percent: float | None = None
peak_ram_gb: float | None = None


class InstallationCoordinator:
@@ -60,13 +64,17 @@ def __init__(
enable_rollback: bool = False,
log_file: str | None = None,
progress_callback: Callable[[int, int, InstallationStep], None] | None = None,
enable_monitoring: bool = False,
):
"""Initialize an installation run with optional logging and rollback."""
"""Initialize an installation run with optional logging, rollback, and monitoring."""
self.timeout = timeout
self.stop_on_error = stop_on_error
self.enable_rollback = enable_rollback
self.log_file = log_file
self.progress_callback = progress_callback
self.enable_monitoring = enable_monitoring
self._sampler: "ResourceSampler | None" = None # type: ignore[name-defined]
self._peak_usage: "PeakUsage | None" = None # type: ignore[name-defined]

if descriptions and len(descriptions) != len(commands):
raise ValueError("Number of descriptions must match number of commands")
@@ -90,6 +98,7 @@ def from_plan(
enable_rollback: bool | None = None,
log_file: str | None = None,
progress_callback: Callable[[int, int, InstallationStep], None] | None = None,
enable_monitoring: bool = False,
) -> "InstallationCoordinator":
"""Create a coordinator from a structured plan produced by an LLM.

@@ -124,6 +133,7 @@ def from_plan(
),
log_file=log_file,
progress_callback=progress_callback,
enable_monitoring=enable_monitoring,
)

for rollback_cmd in rollback_commands:
@@ -227,13 +237,40 @@ def add_rollback_command(self, command: str):
"""Register a rollback command executed if a step fails."""
self.rollback_commands.append(command)

def _stop_monitoring_and_get_peaks(self) -> tuple[float | None, float | None, float | None]:
"""Stop the sampler and return (peak_cpu, peak_ram_percent, peak_ram_gb)."""
if not self._sampler:
return None, None, None
self._sampler.stop()
self._peak_usage = self._sampler.get_peak_usage()
return (
self._peak_usage.cpu_percent,
self._peak_usage.ram_percent,
self._peak_usage.ram_used_gb,
)

def execute(self) -> InstallationResult:
"""Run each installation step and capture structured results."""
start_time = time.time()
failed_step_index = None

self._log(f"Starting installation with {len(self.steps)} steps")

# Start monitoring if enabled
if self.enable_monitoring:
try:
from cortex.monitor.sampler import ResourceSampler
self._sampler = ResourceSampler(interval=1.0)
self._sampler.start()
# Only log if sampler actually started
if self._sampler.is_running:
self._log("Resource monitoring started")
else:
self._sampler = None
except ImportError:
self._log("Monitor module not available, skipping monitoring")
self._sampler = None

for i, step in enumerate(self.steps):
if self.progress_callback:
self.progress_callback(i + 1, len(self.steps), step)
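execute() imports ResourceSampler lazily and logs-then-skips monitoring when the module is missing, so the coordinator keeps working without psutil. The sampler itself is not in this diff; given the cli.py comment that monitoring is client-side via psutil, a rough sketch that would satisfy the API used here (interval argument, start/stop, is_running, get_peak_usage returning cpu_percent/ram_percent/ram_used_gb) could look like:

# Assumed shape of cortex/monitor/sampler.py -- a sketch under stated assumptions,
# not the actual implementation shipped in this PR.
import threading
from dataclasses import dataclass

import psutil


@dataclass
class PeakUsage:
    cpu_percent: float = 0.0
    ram_percent: float = 0.0
    ram_used_gb: float = 0.0


class ResourceSampler:
    def __init__(self, interval: float = 1.0):
        self.interval = interval
        self.is_running = False
        self._peak = PeakUsage()
        self._stop = threading.Event()
        self._thread: threading.Thread | None = None

    def _loop(self):
        # Track running maxima until stop() is called.
        while not self._stop.wait(self.interval):
            mem = psutil.virtual_memory()
            self._peak.cpu_percent = max(self._peak.cpu_percent, psutil.cpu_percent(interval=None))
            self._peak.ram_percent = max(self._peak.ram_percent, mem.percent)
            self._peak.ram_used_gb = max(self._peak.ram_used_gb, mem.used / 1024**3)

    def start(self):
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()
        self.is_running = True

    def stop(self):
        self._stop.set()
        if self._thread:
            self._thread.join(timeout=2 * self.interval)
        self.is_running = False

    def get_peak_usage(self) -> PeakUsage:
        return self._peak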
@@ -249,6 +286,9 @@ def execute(self) -> InstallationResult:
if self.enable_rollback:
self._rollback()

# Stop monitoring on failure
peak_cpu, peak_ram_percent, peak_ram_gb = self._stop_monitoring_and_get_peaks()

total_duration = time.time() - start_time
self._log(f"Installation failed at step {i + 1}")

@@ -258,11 +298,19 @@
total_duration=total_duration,
failed_step=i,
error_message=step.error or "Command failed",
peak_cpu=peak_cpu,
peak_ram_percent=peak_ram_percent,
peak_ram_gb=peak_ram_gb,
)

total_duration = time.time() - start_time
all_success = all(s.status == StepStatus.SUCCESS for s in self.steps)

# Stop monitoring and capture peak usage
peak_cpu, peak_ram_percent, peak_ram_gb = self._stop_monitoring_and_get_peaks()
if peak_cpu is not None:
self._log(f"Monitoring stopped. Peak CPU: {peak_cpu:.1f}%, Peak RAM: {peak_ram_gb:.1f}GB")

if all_success:
self._log("Installation completed successfully")
else:
@@ -276,6 +324,9 @@
error_message=(
self.steps[failed_step_index].error if failed_step_index is not None else None
),
peak_cpu=peak_cpu,
peak_ram_percent=peak_ram_percent,
peak_ram_gb=peak_ram_gb,
)

def verify_installation(self, verify_commands: list[str]) -> dict[str, bool]:
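End to end, a caller opts in by passing enable_monitoring=True and then reads the optional peak_* fields off the result; they stay None when monitoring was disabled or unavailable. A minimal sketch (constructor arguments other than enable_monitoring follow the names already visible in this diff and in cli.py):

from cortex.coordinator import InstallationCoordinator

coordinator = InstallationCoordinator(
    commands=["sudo apt-get update", "sudo apt-get install -y htop"],
    descriptions=["Refresh package index", "Install htop"],
    timeout=300,
    stop_on_error=True,
    enable_monitoring=True,
)
result = coordinator.execute()
if result.peak_cpu is not None:  # None when monitoring was off or the sampler never ran
    print(f"Peak CPU {result.peak_cpu:.0f}%, peak RAM {result.peak_ram_gb:.1f} GB")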
19 changes: 19 additions & 0 deletions cortex/monitor/__init__.py
@@ -0,0 +1,19 @@
"""
Cortex Monitor Module
Real-time system resource monitoring for Cortex Linux.
"""

from cortex.monitor.sampler import (
AlertThresholds,
PeakUsage,
ResourceSample,
ResourceSampler,
)

__all__ = [
"AlertThresholds",
"PeakUsage",
"ResourceSample",
"ResourceSampler",
]
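Since the package __init__ re-exports the sampler types, callers can import them straight from cortex.monitor. A short usage example built only on the calls coordinator.py makes above (AlertThresholds is also re-exported, but its fields are not shown in this diff, so it is left out here):

import time

from cortex.monitor import ResourceSampler

sampler = ResourceSampler(interval=0.5)
sampler.start()
time.sleep(3)  # stand-in for real work being measured
sampler.stop()

peak = sampler.get_peak_usage()
print(f"Peak CPU {peak.cpu_percent:.0f}%  RAM {peak.ram_percent:.0f}% ({peak.ram_used_gb:.1f} GB)")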