diff --git a/.github/workflows/python-build.yml b/.github/workflows/python-build.yml new file mode 100644 index 0000000..91659ac --- /dev/null +++ b/.github/workflows/python-build.yml @@ -0,0 +1,28 @@ +name: Build + +on: + push: + branches: ["master", "main"] + pull_request: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pylint pytest pyright + - name: Lint + run: pylint zdeploy + - name: Type check + run: pyright zdeploy + - name: Test + run: pytest + - name: Compile + run: python -m compileall -q zdeploy diff --git a/README.md b/README.md index 3877878..34d5b37 100644 --- a/README.md +++ b/README.md @@ -92,12 +92,23 @@ Here is a list of all supported config parameters to date: | cache | Deployment cache directory path | No | String | cache | | logs | Deployment logs directory path | No | String | logs | | installer | Default installer, used when an unrecognized dependency is found in the `require` file | No | String | apt-get install -y | -| force | Force entire deployment every time (default is to pick up with a previous failing deployment left off | No | String | no | +| force | Force entire deployment every time (default is to pick up where a previous failing deployment left off) | No | Boolean | False | | user | Default username (used for recipes that don't specify a username, i.e. RECIPE_USER). | No | String | root | | password | Default password (used in case a private key isn't auto-detected). | No | String | None | | port | Default port number (used for recipes that don't specify a port number, i.e. RECIPE_PORT). | No | Integer | 22 | -> NOTE: This table will be updated to always support the most recent release of Zdeploy. +> NOTE: This table will be updated to always support the most recent release of Zdeploy.
+ +## Development + +Install development tools and run lint, type checks, and the test suite: + +```bash +pip install -r requirements.txt +pylint zdeploy +pyright zdeploy +pytest +``` ## Author [Fadi Hanna Al-Kass](https://github.com/alkass) diff --git a/TODO b/TODO deleted file mode 100644 index 2831134..0000000 --- a/TODO +++ /dev/null @@ -1,3 +0,0 @@ -* hooks -* apps -* links diff --git a/requirements.txt b/requirements.txt index 6b13d8f..3142875 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,8 @@ -paramiko -requests -python-dotenv -scp -cryptography>=39.0.1 # not directly required, pinned by Snyk to avoid a vulnerability +paramiko==3.5.1 +requests==2.32.4 +python-dotenv==1.1.1 +scp==0.15.0 +cryptography>=45.0.4 # not directly required, pinned to avoid a vulnerability +pylint==3.3.7 +pytest==8.4.1 +pyright==1.1.402 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..8abafde --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,3 @@ +import os +import sys +sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 0000000..bf78757 --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,9 @@ +from zdeploy import config + +def test_load_defaults(tmp_path): + cfg = config.load(str(tmp_path / 'missing.json')) + assert cfg.configs == 'configs' + assert cfg.recipes == 'recipes' + assert cfg.cache == 'cache' + assert cfg.logs == 'logs' + assert cfg.force is False diff --git a/tests/test_recipeset.py b/tests/test_recipeset.py new file mode 100644 index 0000000..2045def --- /dev/null +++ b/tests/test_recipeset.py @@ -0,0 +1,25 @@ +import zdeploy.recipe as recipe_mod +from zdeploy.recipeset import RecipeSet +import logging +from zdeploy.config import Config + + +def test_recipeset_iterable(tmp_path): + cfg = Config(recipes=str(tmp_path)) + log = logging.getLogger("test") + r = recipe_mod.Recipe( + "pkg1", + None, + tmp_path 
/ "cfg", + "host", + "user", + None, + 22, + log, + cfg, + ) + rs = RecipeSet(cfg, log) + rs.add(r) + for item in rs: + assert item is r + assert len(list(rs)) == 1 diff --git a/tests/test_shell.py b/tests/test_shell.py new file mode 100644 index 0000000..d1cf9fe --- /dev/null +++ b/tests/test_shell.py @@ -0,0 +1,6 @@ +from zdeploy.shell import execute + +def test_execute_echo(): + output, rc = execute('echo hello') + assert output.strip() == 'hello' + assert rc == 0 diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..bdac598 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,12 @@ +import zdeploy.utils as utils + +def test_reformat_time(): + assert utils.reformat_time('1:02:03') == '1h, 2m, and 3s' + + +def test_str2bool(): + assert utils.str2bool('yes') is True + assert utils.str2bool('true') is True + assert utils.str2bool('enable') is True + assert utils.str2bool('no') is False + assert utils.str2bool('maybe') is False diff --git a/zdeploy/__init__.py b/zdeploy/__init__.py index 29b2310..9944196 100644 --- a/zdeploy/__init__.py +++ b/zdeploy/__init__.py @@ -1,60 +1,71 @@ -from argparse import ArgumentParser -from os.path import isdir -from os import listdir, makedirs -from sys import stdout +"""Command line interface for zdeploy.""" + +from argparse import ArgumentParser, Namespace from datetime import datetime -from zdeploy.log import Log +import logging +from os import listdir +from pathlib import Path +from sys import stdout + +from zdeploy.utils import str2bool + from zdeploy.app import deploy -from zdeploy.config import load as load_config - -def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'y'): - return True - elif v.lower() in ('no', 'n'): - return False - raise Exception('Invalid value: %s' % v) - -def handle_config(config_name, args, cfg): - # TODO: document - log_dir_path = '%s/%s' % (cfg.logs, config_name) - cache_dir_path = '%s/%s' % (cfg.cache, config_name) - if not 
isdir(log_dir_path): - makedirs(log_dir_path) - if not isdir(cache_dir_path): - makedirs(cache_dir_path) - log = Log() - log.register_logger(stdout) - log.register_logger(open('%s/%s.log' % (log_dir_path, '{0:%Y-%m-%d %H:%M:%S}'.format(datetime.now())), 'w')) - deploy(config_name, cache_dir_path, log, args, cfg) - -def handle_configs(args, cfg): - ''' - Iterate over all retrieved configs and deploy them in a pipelined order. - ''' +from zdeploy.config import load as load_config, Config + + +def deploy_config(config_name: str, args: Namespace, cfg: Config) -> None: + """Deploy a single configuration.""" + log_dir_path = Path(cfg.logs) / config_name + cache_dir_path = Path(cfg.cache) / config_name + if not log_dir_path.is_dir(): + log_dir_path.mkdir(parents=True) + if not cache_dir_path.is_dir(): + cache_dir_path.mkdir(parents=True) + log_file_path = log_dir_path / f"{datetime.now():%Y-%m-%d %H:%M:%S}.log" + + logger = logging.getLogger(config_name) + logger.setLevel(logging.INFO) + formatter = logging.Formatter("%(message)s") + stream_handler = logging.StreamHandler(stdout) + stream_handler.setFormatter(formatter) + file_handler = logging.FileHandler(log_file_path, encoding="utf-8") + file_handler.setFormatter(formatter) + logger.addHandler(stream_handler) + logger.addHandler(file_handler) + + try: + deploy(config_name, cache_dir_path, logger, args, cfg) + finally: + logger.removeHandler(file_handler) + file_handler.close() + + +def deploy_configs(args: Namespace, cfg: Config) -> None: + """Deploy each config provided on the command line.""" for config_name in args.configs: - handle_config(config_name, args, cfg) + deploy_config(config_name, args, cfg) + -def main(): - # Default config file name is config.json, so it needs not be specified in our case. 
+def main() -> None: + """CLI entry point.""" cfg = load_config() parser = ArgumentParser() parser.add_argument( - '-c', - '--configs', - help='Deployment destination(s)', - nargs='+', + "-c", + "--configs", + help="Deployment destination(s)", + nargs="+", required=True, - choices=listdir(cfg.configs) if isdir(cfg.configs) else ()) + choices=listdir(cfg.configs) if Path(cfg.configs).is_dir() else (), + ) parser.add_argument( - '-f', - '--force', - help='Force full deployment (overlooks the cache)', - nargs='?', + "-f", + "--force", + help="Force full deployment (overlooks the cache)", + nargs="?", required=False, - default=cfg.force, # Default behavior can be defined by the user in a config file + default=cfg.force, const=True, - type=str2bool + type=str2bool, ) - handle_configs(parser.parse_args(), cfg) + deploy_configs(parser.parse_args(), cfg) diff --git a/zdeploy/app.py b/zdeploy/app.py index 8f03390..f6e67bc 100644 --- a/zdeploy/app.py +++ b/zdeploy/app.py @@ -1,81 +1,138 @@ -from os import listdir, makedirs, environ -from os.path import isdir, isfile +"""Deployment core logic.""" + +from os import environ +from pathlib import Path from shutil import rmtree from datetime import datetime +from argparse import Namespace +import logging + from dotenv import load_dotenv from zdeploy.recipe import Recipe from zdeploy.recipeset import RecipeSet from zdeploy.utils import reformat_time +from zdeploy.config import Config -def deploy(config_name, cache_dir_path, log, args, cfg): - config_path = '%s/%s' % (cfg.configs, config_name) - print('Config:', config_path) - load_dotenv(config_path) - recipes = RecipeSet(cfg, log) +def _load_recipes(config_path: Path, log: logging.Logger, cfg: Config) -> RecipeSet: + """Return a ``RecipeSet`` loaded from environment variables.""" - recipe_names = environ.get('RECIPES') - if recipe_names.startswith('(') and recipe_names.endswith(')'): + load_dotenv(str(config_path)) + + recipes = RecipeSet(cfg, log) + recipe_names = 
environ.get("RECIPES", "") + if recipe_names.startswith("(") and recipe_names.endswith(")"): recipe_names = recipe_names[1:-1] - for recipe_name in recipe_names.split(' '): + for recipe_name in recipe_names.split(" "): recipe_name = recipe_name.strip() - HOST_IP = environ.get(recipe_name) - if HOST_IP is None: - log.fatal('%s is undefined in %s' % (recipe_name, config_path)) - HOST_USER = environ.get('%s_USER' % recipe_name, cfg.user) - HOST_PASSWORD = environ.get('%s_PASSWORD' % recipe_name, cfg.password) - HOST_PORT = environ.get('%s_PORT' % recipe_name, cfg.port) - recipe = Recipe(recipe_name, None, config_path, HOST_IP, HOST_USER, HOST_PASSWORD, HOST_PORT, log, cfg) + host_ip = environ.get(recipe_name) + if host_ip is None: + log.error(f"{recipe_name} is undefined in {config_path}") + raise RuntimeError("undefined host") + host_user = environ.get(f"{recipe_name}_USER", cfg.user) + host_password = environ.get(f"{recipe_name}_PASSWORD", cfg.password) + host_port_str = environ.get(f"{recipe_name}_PORT") + host_port = int(host_port_str) if host_port_str is not None else cfg.port + + recipe = Recipe( + recipe_name, + None, + config_path, + host_ip, + host_user, + host_password, + host_port, + log, + cfg, + ) + for env in environ: if env.startswith(recipe_name) and env != recipe_name: - # Properties aren't used anywhere internally. We only - # monitor them so hashes are generated properly. That - # said, if a recipe-name-related environment variable - # changes, we should assume a level of relevancy at - # the recipe level. 
recipe.set_property(env, environ.get(env)) - recipes.add_recipes(recipe.get_requirements()) - recipes.add_recipe(recipe) + + recipes.update(recipe.load_requirements()) + recipes.add(recipe) + + return recipes + + +def _clean_cache(cache_dir_path: Path, deployment_cache_path: Path, log: logging.Logger) -> None: + """Remove stale cache directories inside ``cache_dir_path``.""" + + if not deployment_cache_path.is_dir(): + deployment_cache_path.mkdir(parents=True) + for directory in cache_dir_path.iterdir(): + if directory != deployment_cache_path: + log.info(f"Removing stale cache directory {directory}") + rmtree(directory) + + +def _deploy_recipe( + recipe: Recipe, + deployment_cache_path: Path, + force: bool, + started_all: datetime, + log: logging.Logger, +) -> None: + """Deploy a single ``recipe`` and update its cache entry.""" + + recipe_cache_path = deployment_cache_path / recipe.name + if recipe_cache_path.is_file(): + with recipe_cache_path.open("r", encoding="utf-8") as fp: + cache_contents = fp.read() + if recipe.deep_hash() in cache_contents and not force: + log.warning( + f"Skipping {recipe.name} because it is already deployed" + ) + return + + started_recipe = datetime.now() + log.info( + f"Starting recipe '{recipe.name}' at " + f"{started_recipe:%H:%M:%S} on {started_all:%Y-%m-%d}" + ) + recipe.deploy() + ended_recipe = datetime.now() + log.info( + f"Finished recipe '{recipe.name}' at " + f"{ended_recipe:%H:%M:%S} on {started_all:%Y-%m-%d}" + ) + + total_recipe_time = ended_recipe - started_recipe + log.info(f"{recipe.name} finished in {reformat_time(total_recipe_time)}") + with recipe_cache_path.open("w", encoding="utf-8") as fp: + fp.write(recipe.deep_hash()) + + +def deploy( + config_name: str, + cache_dir_path: Path, + log: logging.Logger, + args: Namespace, + cfg: Config, +) -> None: + """Deploy recipes defined in ``config_name``.""" + + config_path = Path(cfg.configs) / config_name + log.info("Config: %s", config_path) + + recipes = 
_load_recipes(config_path, log, cfg) started_all = datetime.now() - log.info('Started %s deployment at %s on %s' % - (config_path, - started_all.strftime('%H:%M:%S'), - started_all.strftime('%Y-%m-%d'))) - deployment_cache_path = '%s/%s' % (cache_dir_path, recipes.get_hash()) - if not isdir(deployment_cache_path): - makedirs(deployment_cache_path) - for dir in listdir(cache_dir_path): - # Delete all stale cache tracks so we don't run into issues - # when reverting deployments. - dir = '%s/%s' % (cache_dir_path, dir) - if dir != deployment_cache_path: - log.info('Deleting %s' % dir) - rmtree(dir) + log.info( + f"Starting deployment of {config_path} at {started_all:%H:%M:%S} on {started_all:%Y-%m-%d}" + ) + + deployment_cache_path = cache_dir_path / recipes.get_hash() + _clean_cache(cache_dir_path, deployment_cache_path, log) + for recipe in recipes: - recipe_cache_path = '%s/%s' % (deployment_cache_path, recipe.get_name()) - if isfile(recipe_cache_path) and recipe.get_deep_hash() in open(recipe_cache_path, 'r').read() and not args.force: - log.warn('%s is already deployed. Skipping...' 
% recipe.get_name()) - continue - started_recipe = datetime.now() - log.info('Started %s recipe deployment at %s on %s' % - (recipe.get_name(), - started_recipe.strftime('%H:%M:%S'), - started_all.strftime('%Y-%m-%d'))) - recipe.deploy() - ended_recipe = datetime.now() - log.info('Ended %s recipe deployment at %s on %s' % - (recipe.get_name(), - ended_recipe.strftime('%H:%M:%S'), - started_all.strftime('%Y-%m-%d'))) - total_recipe_time = ended_recipe - started_recipe - log.success('%s finished in %s' % (recipe.get_name(), reformat_time(total_recipe_time))) - open(recipe_cache_path, 'w').write(recipe.get_deep_hash()) + _deploy_recipe(recipe, deployment_cache_path, args.force, started_all, log) + ended_all = datetime.now() total_deployment_time = ended_all - started_all - log.info('Ended %s deployment at %s on %s' % - (config_path, - ended_all.strftime('%H:%M:%S'), - started_all.strftime('%Y-%m-%d'))) - log.success('%s finished in %s' % (config_path, reformat_time(total_deployment_time))) - log.info('Deployment hash is %s' % recipes.get_hash()) + log.info( + f"Completed deployment of {config_path} at {ended_all:%H:%M:%S} on {started_all:%Y-%m-%d}" + ) + log.info(f"{config_path} finished in {reformat_time(total_deployment_time)}") + log.info(f"Deployment hash is {recipes.get_hash()}") diff --git a/zdeploy/clients.py b/zdeploy/clients.py index 7b4fe91..fc20259 100644 --- a/zdeploy/clients.py +++ b/zdeploy/clients.py @@ -1,42 +1,76 @@ -from paramiko import SSHClient, AutoAddPolicy +"""Remote client helpers for recipes.""" + +import logging +from paramiko import SSHClient, AutoAddPolicy, Transport from scp import SCPClient + class SSH(SSHClient): - def __init__(self, recipe, log, hostname, username, password, port): - SSHClient.__init__(self) + """SSH helper that exposes a simple execute function.""" + + # pylint: disable=too-many-arguments,too-many-positional-arguments + def __init__( + self, + recipe: str, + log: logging.Logger, + hostname: str, + username: str, + 
password: str | None, + port: int, + ) -> None: + """Establish an SSH connection to ``hostname``.""" + + super().__init__() self.load_system_host_keys() self.set_missing_host_key_policy(AutoAddPolicy()) self.connect(hostname=hostname, port=port, username=username, password=password) self.recipe = recipe self.log = log - def __del__(self): - ''' - Auto close connection - ''' + + def __del__(self) -> None: + """Ensure the SSH connection is closed.""" + self.close() - def execute(self, *args, bail_on_failure=True, show_command=True, show_output=True, show_error=True): - cmd = ' '.join(args) + + def execute( + self, + *args: str, + bail_on_failure: bool = True, + show_command: bool = True, + show_output: bool = True, + show_error: bool = True, + ) -> int: + """Run ``args`` over SSH and return the exit code.""" + cmd = " ".join(args) if show_command: - self.log.info('Running', cmd) - _, stdout, _ = self.exec_command('%s 2>&1' % cmd) + self.log.info("Running %s", cmd) + _, stdout, _ = self.exec_command(f"{cmd} 2>&1") if show_output: for line in stdout: - self.log.info('%s: %s' % (self.recipe, line.rstrip())) + self.log.info(f"{self.recipe}: {line.rstrip()}") rc = stdout.channel.recv_exit_status() if rc != 0: if show_error: - self.log.fail("Failed to run '%s'. Exit code: %d" % (cmd, rc)) + self.log.error("Failed to run '%s'. 
Exit code: %s", cmd, rc) if bail_on_failure: - raise Exception('failed to execute %s' % cmd) + raise RuntimeError(f"failed to execute {cmd}") return rc + class SCP(SCPClient): - def __init__(self, transport): - SCPClient.__init__(self, transport) - def __del__(self): - ''' - Auto close connection - ''' + """SCP helper for transferring files over SSH.""" + + def __init__(self, transport: Transport) -> None: + """Create an SCP client using ``transport``.""" + + super().__init__(transport) + + def __del__(self) -> None: + """Ensure the SCP connection is closed.""" + self.close() - def upload(self, src, dest): - pass + + def upload(self, src: str, dest: str) -> None: + """Upload ``src`` to ``dest`` on the remote host.""" + + self.put(src, remote_path=dest) diff --git a/zdeploy/config.py b/zdeploy/config.py index 0328028..b9f3bb2 100644 --- a/zdeploy/config.py +++ b/zdeploy/config.py @@ -1,40 +1,66 @@ +"""Configuration loader for zdeploy.""" +# pylint: disable=too-many-instance-attributes + +from dataclasses import dataclass from json import loads from os.path import isfile +from typing import Any, Dict, cast + +from zdeploy.utils import str2bool + + +@dataclass # pylint: disable=too-many-instance-attributes +class Config: + """Simple container for configuration settings.""" + + configs: str = "configs" + recipes: str = "recipes" + cache: str = "cache" + logs: str = "logs" + installer: str = "apt-get install -y" + force: bool = False + user: str = "root" + password: str | None = None + port: int = 22 + + +def load(cfg_path: str = "config.json") -> Config: + """Load configuration from ``cfg_path`` or return defaults.""" + + # Initiate an empty config dictionary in case a config file isn't present. + cfg: Dict[str, Any] = {} + + if isfile(cfg_path): + # Load data in JSON form. 
+ with open(cfg_path, "r", encoding="utf-8") as fp: + cfg = loads(fp.read()) + + # Set defaults + cfg["configs"] = cfg.get("configs", Config.configs) + cfg["recipes"] = cfg.get("recipes", Config.recipes) + cfg["cache"] = cfg.get("cache", Config.cache) + cfg["logs"] = cfg.get("logs", Config.logs) + + # Default installer is apt-get. This is used for virtual + # recipes (recipes that aren't defined by a directory + # structure). + cfg["installer"] = cfg.get("installer", Config.installer) + + # Force is disabled by default. This sets the behavior to + # only deploy undeployed recipes and/or pick up where a + # previous deployment was halted or had crashed. + force_value = cfg.get("force", Config.force) + if isinstance(force_value, str): + force_value = str2bool(force_value) + cfg["force"] = force_value + + # Default username is root + cfg["user"] = cfg.get("user", Config.user) + + # The default is no password (private key present) + cfg["password"] = cfg.get("password", Config.password) + + cfg["port"] = cfg.get("port", Config.port) -def load(cfg_path = 'config.json'): - # Initiate an empty config dictionary in case a config - # file isn't present. - cfg = {} - - if isfile(cfg_path): - # Load data in JSON form. - # TODO: try..catch with an informative exception - cfg = loads(open(cfg_path).read()) - - # Set defaults - cfg['configs'] = cfg.get('configs', 'configs') - cfg['recipes'] = cfg.get('recipes', 'recipes') - cfg['cache'] = cfg.get('cache', 'cache') - cfg['logs'] = cfg.get('logs', 'logs') - - # Default installer is apt-get. This is used for virtual - # recipes (recipes that aren't defined by a directory - # structure). - cfg['installer'] = cfg.get('installer', 'apt-get install -y') - - # Force is disabled by default. This sets the behavior to - # only deploy undeployed recipes and/or pick up where a - # previous deployment was halted or had crashed. 
- cfg['force'] = cfg.get('force', 'no') - - # Default username is root - cfg['user'] = cfg.get('user', 'root') - - # The default is no password (private key present) - cfg['password'] = cfg.get('password', None) - - cfg['port'] = cfg.get('port', 22) - - # Turn cfg into a class allowing us to reference all the field via the dot operator, - # e.g.: cfg.logs instead of cfg['logs'] - return type('', (), cfg) + # Convert the dictionary into a Config instance to allow attribute access + return Config(**cast(Dict[str, Any], cfg)) diff --git a/zdeploy/log.py b/zdeploy/log.py deleted file mode 100644 index 158392a..0000000 --- a/zdeploy/log.py +++ /dev/null @@ -1,36 +0,0 @@ -class Log: - ''' - Log is a generic logging class that can write to anything with - a `write` function. The recommended use of this class is to log - to the standard output (stdout) and to a file, e.g.: - logger = Log() - logger.register_logger(sys.stdout) - logger.register_logger(open('mylog.log', 'w')) - ''' - def __init__(self, *loggers): - self.loggers = list(loggers) - def register_logger(self, logger): - self.loggers.append(logger) - def register_loggers(self, loggers): - for logger in loggers: - self.register_logger(logger) - def write(self, *args): - message = ' '.join(args) - for logger in self.loggers: - logger.write('%s\n' % message) - def fatal(self, *args): - self.fail(*args) - exit(1) - def fail(self, *args): - self.write('\033[0;31m', *args, '\033[0;00m') - def warn(self, *args): - self.write('\033[1;33m', *args, '\033[0;00m') - def success(self, *args): - self.write('\033[0;32m', *args, '\033[0;00m') - def info(self, *args): - self.write('\033[1;35m', *args, '\033[0;00m') - def close(self): - for logger in self.loggers: - logger.close() - def __del__(self): - self.close() \ No newline at end of file diff --git a/zdeploy/recipe.py b/zdeploy/recipe.py index bc9b16a..732a542 100644 --- a/zdeploy/recipe.py +++ b/zdeploy/recipe.py @@ -1,141 +1,223 @@ +"""Recipe abstraction for deploying and 
tracking dependencies.""" +# pylint: disable=too-many-instance-attributes,too-few-public-methods,too-many-arguments,too-many-positional-arguments + from os import listdir -from os.path import isdir, isfile -from datetime import datetime +from pathlib import Path from hashlib import md5 +from typing import Dict, List, Optional +import logging + from zdeploy.clients import SSH, SCP from zdeploy.shell import execute as shell_execute +from zdeploy.config import Config + class Recipe: + """Represents a deployable script or package.""" + class Type: + """Supported recipe types.""" + DEFINED = 1 VIRTUAL = 2 - def __init__(self, recipe, parent_recipe, config, hostname, username, password, port, log, cfg): + + def __init__( + self, + recipe: str, + parent_recipe: Optional[str], + config: Path, + hostname: str, + username: str, + password: str | None, + port: int, + log: logging.Logger, + cfg: Config, + ) -> None: + """Initialize a recipe instance.""" + self.log = log - if not config or not len(config.strip()): - self.log.fatal('Invalid value for config') - if not recipe or not len(recipe.strip()): - self.log.fatal('Invalid value for recipe') - if not hostname or not len(hostname.strip()): - self.log.fatal('Invalid value for hostname') + if not str(config).strip(): + self.log.error("Invalid value for config") + raise ValueError("invalid config") + if not recipe or not recipe.strip(): + self.log.error("Invalid value for recipe") + raise ValueError("invalid recipe") + if not hostname or not hostname.strip(): + self.log.error("Invalid value for hostname") + raise ValueError("invalid hostname") try: self.port = int(port) except ValueError: - self.log.fatal('Invalid value for port: %s' % port) + self.log.error("Invalid value for port: %s", port) + raise self.cfg = cfg self.parent_recipe = parent_recipe - self.set_recipe_name_and_type(recipe) + self._resolve_name_and_type(recipe) self.config = config self.hostname = hostname self.username = username self.password = password - 
self.properties = {} - def set_property(self, key, value): + self.properties: Dict[str, str | None] = {} + + def set_property(self, key: str, value: str | None) -> None: + """Store an arbitrary ``key``/``value`` pair.""" + self.properties[key] = value - def set_recipe_name_and_type(self, recipe): + + def _resolve_name_and_type(self, recipe: str) -> None: + """Resolve ``recipe`` name and determine if it is defined or virtual.""" + for r in listdir(self.cfg.recipes): if recipe.lower() == r.lower(): self.recipe = r if self.parent_recipe == r: # Recipe references itself - self.log.fatal('Invalid recipe: %s references itself' % r) - else: - self._type = self.Type.DEFINED + self.log.error("Invalid recipe: %s references itself", r) + raise ValueError("recipe references itself") + self._type = self.Type.DEFINED return self.recipe = recipe self._type = self.Type.VIRTUAL - def __str__(self): - return '%s -> %s@%s:%d :: %s' % (self.recipe, self.username, self.hostname, self.port, self.properties) - def get_name(self): + + def __str__(self) -> str: + """Return a string representation of this recipe.""" + + return f"{self.recipe} -> {self.username}@{self.hostname}:{self.port} :: {self.properties}" + + @property + def name(self) -> str: + """Return the recipe name.""" + return self.recipe - def __hash__(self): + + def __hash__(self) -> int: + """Return a hash so recipes can be used in sets and dictionaries.""" + return hash(str(self)) - def __eq__(self, other): + + def __eq__(self, other: object) -> bool: + """Compare recipes by their hash.""" + return hash(self) == hash(other) - def get_deep_hash(self, dir_path=None): - hashes = '' + + def is_virtual(self) -> bool: + """Return ``True`` if this is a virtual recipe.""" + + return self._type == self.Type.VIRTUAL + + def deep_hash(self, dir_path: Path | None = None) -> str: + """Return an MD5 hash representing the recipe and its requirements.""" + + hashes = "" if self._type == self.Type.VIRTUAL: hashes = 
md5(self.recipe.encode()).hexdigest() elif self._type == self.Type.DEFINED: if dir_path is None: - dir_path = '%s/%s' % (self.cfg.recipes, self.recipe) + dir_path = Path(self.cfg.recipes) / self.recipe # Execute the hash script and copy its output into our hashes variable. # NOTE: We perform this check specifically inside this block because when # dir_path is None, we know we're at the main recipe directory path. - if isfile('%s/hash' % dir_path): - cmd_out, cmd_rc = shell_execute('chmod +x %s/hash && bash %s && ./%s/hash' % (dir_path, self.config, dir_path)) + hash_path = dir_path / "hash" + if hash_path.is_file(): + cmd_out, cmd_rc = shell_execute( + f"chmod +x {hash_path} && bash {self.config} && ./{hash_path}" + ) if cmd_rc != 0: - raise Exception(cmd_out) + raise RuntimeError(cmd_out) hashes += cmd_out - for recipe in self.get_requirements(): - hashes += recipe.get_deep_hash() + for recipe in self.load_requirements(): + hashes += recipe.deep_hash() hashes += md5(str(self).encode()).hexdigest() for node in listdir(dir_path): - rel_path = '%s/%s' % (dir_path, node) - if isfile(rel_path): - file_hash = md5(open(rel_path, 'rb').read()).hexdigest() + rel_path = dir_path / node + if rel_path.is_file(): + with rel_path.open("rb") as fp: + file_hash = md5(fp.read()).hexdigest() hashes += file_hash - elif isdir(rel_path): - hashes += self.get_deep_hash(rel_path) + elif rel_path.is_dir(): + hashes += self.deep_hash(rel_path) return md5(hashes.encode()).hexdigest() - def get_requirements(self): - req_file = '%s/%s/require' % (self.cfg.recipes, self.recipe) - requirements = [] - if isfile(req_file): - for requirement in open(req_file).read().split('\n'): - requirement = requirement.strip() - if requirement == '' or requirement.startswith('#'): - continue - recipe = Recipe( - recipe=requirement, - parent_recipe=self.recipe, - config=self.config, - hostname=self.hostname, - username=self.username, - password=self.password, - port=self.port, - log=self.log, - cfg=self.cfg) - 
for req in recipe.get_requirements(): - requirements.append(req) - requirements.append(recipe) + + def load_requirements(self) -> List["Recipe"]: + """Return a list of Recipe objects this recipe depends on.""" + + req_file = Path(self.cfg.recipes) / self.recipe / "require" + requirements: List["Recipe"] = [] + if req_file.is_file(): + with req_file.open("r", encoding="utf-8") as req_fp: + for requirement in req_fp.read().split("\n"): + requirement = requirement.strip() + if requirement == "" or requirement.startswith("#"): + continue + recipe = Recipe( + recipe=requirement, + parent_recipe=self.recipe, + config=self.config, + hostname=self.hostname, + username=self.username, + password=self.password, + port=self.port, + log=self.log, + cfg=self.cfg, + ) + for req in recipe.load_requirements(): + requirements.append(req) + requirements.append(recipe) return requirements - def deploy(self): - self.log.info('Deploying %s to %s' % (self.recipe, self.hostname)) - ssh = SSH(recipe=self.recipe, + + def deploy(self) -> None: + """Deploy this recipe using SSH/SCP.""" + + self.log.info(f"Deploying '{self.recipe}' to {self.hostname}") + ssh = SSH( + recipe=self.recipe, log=self.log, hostname=self.hostname, username=self.username, password=self.password, - port=self.port) + port=self.port, + ) if self._type == self.Type.DEFINED: - ssh.execute('rm -rf /opt/%s' % self.recipe, show_command=False) + ssh.execute(f"rm -rf /opt/{self.recipe}", show_command=False) - scp = SCP(ssh.get_transport()) - scp.put('%s/%s' % (self.cfg.recipes, self.recipe), remote_path='/opt/%s' % self.recipe, recursive=True) - scp.put(self.config, remote_path='/opt/%s/config' % self.recipe) + transport = ssh.get_transport() + assert transport is not None + scp = SCP(transport) + scp.put( + str(Path(self.cfg.recipes) / self.recipe), + remote_path=f"/opt/{self.recipe}", + recursive=True, + ) + scp.put(str(self.config), remote_path=f"/opt/{self.recipe}/config") try: if self._type == self.Type.VIRTUAL: - 
ssh.execute('%s %s' % (self.cfg.installer, self.recipe)) + ssh.execute(f"{self.cfg.installer} {self.recipe}") elif self._type == self.Type.DEFINED: - if not isfile('%s/%s/run' % (self.cfg.recipes, self.recipe)): + if not (Path(self.cfg.recipes) / self.recipe / "run").is_file(): # Recipes with no run file are acceptable since they (may) have a require file # and don't necessarily require the execution of anything of their own. - self.log.warn("%s doesn't have a run file. Continuing..." % self.recipe) + self.log.warning( + "Recipe '%s' has no run file; continuing", self.recipe + ) else: - ssh.execute('cd /opt/%s && chmod +x ./run && ./run' % self.recipe, show_command=False) + ssh.execute( + f"cd /opt/{self.recipe} && chmod +x ./run && ./run", + show_command=False, + ) passed = True - except Exception: + except Exception as exc: # pylint: disable=broad-except + self.log.error(str(exc)) passed = False finally: if self._type == self.Type.DEFINED: - self.log.info('Deleting /opt/%s from remote host' % self.recipe) - ssh.execute('rm -rf /opt/%s' % self.recipe, show_command=False) + self.log.info(f"Removing /opt/{self.recipe} from remote host") + ssh.execute(f"rm -rf /opt/{self.recipe}", show_command=False) if not passed: - self.log.fatal('Failed to deploy %s' % self.recipe) - self.log.success('Done with %s' % self.recipe) + self.log.error("Failed to deploy %s", self.recipe) + self.log.info("Done with %s", self.recipe) diff --git a/zdeploy/recipeset.py b/zdeploy/recipeset.py index 7c9f607..fa258f1 100644 --- a/zdeploy/recipeset.py +++ b/zdeploy/recipeset.py @@ -1,37 +1,57 @@ +"""Helper for managing sets of ``Recipe`` objects.""" + from hashlib import md5 +from typing import Iterable +import logging + +from zdeploy.config import Config from zdeploy.recipe import Recipe -class RecipeSet: - ''' - RecipeSet is a unique set of Recipes maintainer. 
- ''' - def __init__(self, cfg, log): - self.recipes = [] + +class RecipeSet(set[Recipe]): + """Container for ``Recipe`` objects with convenience helpers.""" + + def __init__(self, cfg: Config, log: logging.Logger) -> None: + """Create an empty ``RecipeSet`` using ``cfg`` and ``log``.""" + + super().__init__() self.cfg = cfg self.log = log - def add_recipes(self, recipes): + + def update(self, recipes: Iterable[Recipe]) -> None: # type: ignore[override] + """Add a sequence of ``recipes`` to the set.""" + for recipe in recipes: - self.add_recipe(recipe) - def add_recipe(self, recipe): - if recipe in self.recipes: - self.log.warn('%s is already added to the recipes list. Skipping...' % recipe.get_name()) + self.add(recipe) + + def add(self, recipe: Recipe) -> None: # type: ignore[override] + """Add a single ``recipe`` if not already present.""" + + if recipe in self: + self.log.warning("Recipe '%s' is already added; skipping", recipe.name) return - self.log.info("Adding '%s' to the recipes list" % recipe.get_name()) - self.recipes.append(recipe) - if recipe._type == Recipe.Type.VIRTUAL: - self.log.warn("'%s' doesn't correspond to anything defined under the %s directory" % (recipe.recipe, self.cfg.recipes)) - self.log.warn("this recipe will be marked virtual and execute as `%s %s`" % (recipe.cfg.installer, recipe.recipe)) - self.log.warn("If you want to use a different package manager, add an 'installer' field to the config.json file") - def get_hash(self): - ''' - Return an MD5 hash out of the hash of all recipes combined. - The end result is used to create a cache directory under deployments cache. 
- ''' - return md5(' '.join([str(recipe) for recipe in self.recipes]).encode()).hexdigest() - def __iter__(self): - ''' - Allow caller to iterate over recipes with a regular for loop, e.g.: - for recipe in recipes: - print(recipe) - ''' - return iter(self.recipes) + self.log.info("Registering recipe '%s'", recipe.name) + super().add(recipe) + if recipe.is_virtual(): + self.log.warning( + ( + f"Recipe '{recipe.recipe}' is not found under {self.cfg.recipes} " + "and will be treated as a system package" + ) + ) + self.log.warning( + ( + f"The package will be installed using `{recipe.cfg.installer} {recipe.recipe}`" + ) + ) + self.log.warning( + ( + "To use a different package manager, specify " + "an 'installer' entry in config.json" + ) + ) + + def get_hash(self) -> str: + """Return an MD5 hash of all recipes combined.""" + + return md5(" ".join(str(recipe) for recipe in self).encode()).hexdigest() diff --git a/zdeploy/shell.py b/zdeploy/shell.py index d70727f..405e397 100644 --- a/zdeploy/shell.py +++ b/zdeploy/shell.py @@ -1,11 +1,19 @@ +"""Simple shell helpers.""" + import subprocess +from typing import Tuple + + +def execute(cmd: str) -> Tuple[str, int]: + """Execute ``cmd`` in a shell and return output and return code.""" -def execute(cmd): - proc = subprocess.Popen('%s 2>&1' % cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True, - universal_newlines=True) - std_out, _ = proc.communicate() - rc = proc.returncode + with subprocess.Popen( + f"{cmd} 2>&1", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + universal_newlines=True, + ) as proc: + std_out, _ = proc.communicate() + rc = proc.returncode return std_out, rc diff --git a/zdeploy/utils.py b/zdeploy/utils.py index 390fb11..ab134b2 100644 --- a/zdeploy/utils.py +++ b/zdeploy/utils.py @@ -1,3 +1,26 @@ -def reformat_time(time): - h, m, s = [int(float(x)) for x in ('%s' % time).split(':')] - return '%sh, %sm, and %ds' % (h, m, s) +"""Utility helpers for zdeploy.""" + + +from 
datetime import timedelta +from typing import Union +import logging + + +def str2bool(value: str) -> bool: + """Return ``True`` if ``value`` is a truthy string.""" + + value = value.lower() + if value in {"yes", "y", "true", "t", "e", "enable"}: + return True + + logging.warning("'%s' is not recognized as a true value, defaulting to False", value) + return False + + +def reformat_time(time: Union[str, timedelta]) -> str: + """Return ``time`` in "Nh, Nm, and Ns" format.""" + + if isinstance(time, timedelta): + time = str(time) + h, m, s = [int(float(x)) for x in time.split(":")] + return f"{h}h, {m}m, and {s}s"