diff --git a/cortex/cli.py b/cortex/cli.py
index c808d5e4..f944da41 100644
--- a/cortex/cli.py
+++ b/cortex/cli.py
@@ -180,6 +180,121 @@ def demo(self):
         """
         return run_demo()
 
+    def config(self):
+        """Interactive setup wizard for Cortex.
+
+        Detects hardware, asks the user to choose a default LLM provider,
+        collects an API key for cloud providers, and persists the result
+        to ~/.cortex/config.yaml.
+
+        Returns:
+            0 on success, 1 when the user supplies invalid input.
+        """
+        from pathlib import Path
+
+        import yaml
+
+        from cortex.hardware_detection import detect_hardware
+
+        print("\n🧠 CORTEX INTERACTIVE SETUP")
+        print("=" * 32)
+
+        # 1. Detect hardware first so provider hints can be tailored to it.
+        print("\nšŸ” Detecting hardware...")
+        hw = detect_hardware()
+
+        gpu_info = None
+        if getattr(hw, "gpu", None):
+            print(f"āœ” GPU detected: {hw.gpu}")
+            gpu_info = str(hw.gpu)
+            has_gpu = True
+        else:
+            print("āš ļø No GPU detected (CPU mode)")
+            has_gpu = False
+
+        cpu_model = getattr(hw.cpu, "model", None) if hw.cpu else None
+        if cpu_model:
+            print(f"āœ” CPU: {cpu_model}")
+        else:
+            print("āœ” CPU detected")
+
+        # RAM (safe detection, same logic as demo). Keep the value so the
+        # saved config matches what is reported here.
+        ram_gb = None
+        if getattr(hw, "memory", None):
+            ram_gb = getattr(hw.memory, "total_gb", None)
+            if ram_gb:
+                print(f"āœ” RAM: {ram_gb} GB")
+            else:
+                print("āœ” RAM detected")
+        else:
+            print("āœ” RAM: Unknown")
+
+        # 2. Provider selection.
+        print("\nšŸ¤– Select default LLM provider:\n")
+        print("[1] Anthropic Claude (cloud)")
+        print("[2] OpenAI GPT (cloud)")
+        if has_gpu:
+            print("[3] Ollama (local) - recommended for your hardware")
+        else:
+            print("[3] Ollama (local)")
+
+        choice = input("\nChoice (1/2/3): ").strip()
+        provider_map = {"1": "anthropic", "2": "openai", "3": "ollama"}
+        provider = provider_map.get(choice)
+        if not provider:
+            print("āŒ Invalid choice. Please re-run `cortex config`.")
+            return 1
+
+        print(f"\nāœ” Selected provider: {provider}\n")
+
+        # 3. API key configuration (cloud providers only).
+        api_key = None
+        if provider in ("anthropic", "openai"):
+            env_var = "ANTHROPIC_API_KEY" if provider == "anthropic" else "OPENAI_API_KEY"
+            print(f"šŸ”‘ Enter your {env_var}:")
+            api_key = input("> ").strip()
+
+            # Very light validation — real validation happens on first use.
+            if len(api_key) < 10:
+                print("āŒ API key looks invalid. Please re-run `cortex config`.")
+                return 1
+            print("āœ” API key accepted\n")
+        else:
+            print("ā„¹ļø Ollama selected — no API key required\n")
+
+        print("Setup step complete.\n")
+
+        # 4. Save configuration to ~/.cortex/config.yaml.
+        config_dir = Path.home() / ".cortex"
+        config_dir.mkdir(parents=True, exist_ok=True)
+        config_path = config_dir / "config.yaml"
+
+        config_data = {
+            "provider": provider,
+            "hardware": {
+                "gpu": gpu_info,
+                "cpu": str(getattr(hw, "cpu", None)),
+                # Reuse the value reported above; hw has no memory_gb attribute.
+                "memory_gb": ram_gb,
+            },
+            "preferences": {
+                "verbose": False,
+                "dry_run_default": False,
+            },
+        }
+        # Persist the API key so the wizard's input is not discarded; the
+        # file is chmod'ed 0600 below because it now holds a secret.
+        if api_key:
+            config_data["api_key"] = api_key
+
+        with open(config_path, "w") as f:
+            yaml.safe_dump(config_data, f)
+        config_path.chmod(0o600)
+
+        print(f"šŸ’¾ Configuration saved to {config_path}")
+        return 0
+
     def stack(self, args: argparse.Namespace) -> int:
         """Handle `cortex stack` commands (list/describe/install/dry-run)."""
         try:
@@ -824,6 +939,11 @@ def main():
     # Wizard command
     wizard_parser = subparsers.add_parser("wizard", help="Configure API key interactively")
 
+    # Config command
+    config_parser = subparsers.add_parser(
+        "config", help="Interactive setup wizard for Cortex configuration"
+    )
+
     # Status command
     status_parser = subparsers.add_parser("status", help="Show system status")
 
@@ -910,6 +1030,8 @@ def main():
         return cli.demo()
     elif args.command == "wizard":
         return cli.wizard()
+    elif args.command == "config":
+        return cli.config()
     elif args.command == "status":
         return cli.status()
     elif args.command == "install":