diff --git a/.flake8 b/.flake8
index d0ed9fc5..12bd9cf3 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
[flake8]
max-line-length = 100
-exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,venv
+exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,venv,helper-scripts
diff --git a/.gitignore b/.gitignore
index 96c24b74..707381fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -109,7 +109,11 @@ venv.bak/
.DS_Store
# info.py file generated automatically during package build
-cli/info.py
+node_cli/cli/info.py
meta.json
-resource_allocation.json
\ No newline at end of file
+
+disk_mountpoint.txt
+sgx_server_url.txt
+resource_allocation.json
+conf.json
\ No newline at end of file
diff --git a/README.md b/README.md
index 1d1b5d0c..0d9e31e3 100644
--- a/README.md
+++ b/README.md
@@ -11,18 +11,16 @@ SKALE Node CLI, part of the SKALE suite of validator tools, is the command line
1. [Installation](#installation)
2. [CLI usage](#cli-usage)
2.1 [Top level commands](#top-level-commands)
- 2.2 [User](#user-commands)
- 2.3 [Node](#node-commands)
- 2.4 [Wallet](#wallet-commands)
- 2.5 [sChains](#schain-commands)
- 2.6 [Containers](#containers-commands)
- 2.7 [SGX](#sgx-commands)
- 2.8 [SSL](#ssl-commands)
- 2.9 [Logs](#logs-commands)
- 2.10 [Resources allocation](#resources-allocation-commands)
- 2.11 [Validate](#validate-commands)
-
-3. [Development](#development)
+ 2.2 [Node](#node-commands)
+ 2.3 [Wallet](#wallet-commands)
+ 2.4 [sChains](#schain-commands)
+ 2.5 [Health](#health-commands)
+ 2.6 [SSL](#ssl-commands)
+ 2.7 [Logs](#logs-commands)
+ 2.8 [Resources allocation](#resources-allocation-commands)
+ 2.9 [Validate](#validate-commands)
+3. [Exit codes](#exit-codes)
+4. [Development](#development)
## Installation
@@ -108,11 +106,7 @@ Arguments:
- `ENV_FILE` - path to .env file (required parameters are listed in the `skale init` command)
-Required options:
-
-- `--dry-run` - create only needed files and directories and don't create containers
-
-You should also specify the following environment variables:
+You should specify the following environment variables:
- `SGX_SERVER_URL` - SGX server URL
- `DISK_MOUNTPOINT` - disk mount point for storing sChains data
@@ -123,10 +117,7 @@ You should also specify the following environment variables:
- `MANAGER_CONTRACTS_ABI_URL` - URL to SKALE Manager contracts ABI and addresses
- `IMA_CONTRACTS_ABI_URL` - URL to IMA contracts ABI and addresses
- `FILEBEAT_URL` - URL to the Filebeat log server
-- `DB_USER`' - MySQL user for local node database
-- `DB_PASSWORD` - Password for root user of node internal database
- (equal to user password by default)
-- `DB_PORT` - Port for node internal database (default is `3306`)
+
Optional variables:
@@ -158,12 +149,7 @@ skale node backup [BACKUP_FOLDER_PATH] [ENV_FILE]
Arguments:
- `BACKUP_FOLDER_PATH` - path to the folder where the backup file will be saved
-- `ENV_FILE` - path to .env file (required parameters are listed in the `skale init` command)
-`
-
-Optional arguments:
-- `--no-database` - skip mysql database backup (in case if mysql container is not started)
#### Node Registration
@@ -305,7 +291,7 @@ Optional arguments:
`--yes` - Send without additional confirmation
-### SKALE Chain commands
+### sChain commands
> Prefix: `skale schains`
@@ -351,58 +337,40 @@ Turn on repair mode for SKALE Chain
skale schains repair SCHAIN_NAME
```
-#### SKALE Chain healthcheck
-
-Show healthcheck results for all SKALE Chains on the node
-
-```shell
-skale schains checks
-```
-
-Options:
-
-- `--json` - Show data in JSON format
-
-### Container commands
+### Health commands
-Node container commands
+> Prefix: `skale health`
-> Prefix: `skale containers`
-
-#### List containers
+#### SKALE containers
List all SKALE containers running on the connected node
```shell
-skale containers ls
+skale health containers
```
Options:
- `-a/--all` - list all containers (by default - only running)
-#### SKALE Chain containers
+#### sChains healthchecks
-List of SKALE chain containers running on the connected node
+Show health check results for all SKALE Chains on the node
```shell
-skale containers schains
+skale health schains
```
Options:
-- `-a/--all` - list all SKALE chain containers (by default - only running)
-
-### SGX commands
-
-> Prefix: `skale sgx`
+- `--json` - Show data in JSON format
-#### Status
+#### SGX
Status of the SGX server. Returns the SGX server URL and connection status.
```shell
-$ skale sgx status
+$ skale health sgx
SGX server status:
┌────────────────┬────────────────────────────┐
@@ -412,8 +380,6 @@ SGX server status:
└────────────────┴────────────────────────────┘
```
-Admin API URL: \[GET] `/api/ssl/sgx`
-
### SSL commands
> Prefix: `skale ssl`
@@ -534,6 +500,22 @@ Options:
- `--json` - show validation result in json format
+## Exit codes
+
+Exit codes conventions for SKALE CLI tools
+
+- `0` - Everything is OK
+- `1` - General error exit code
+- `3` - Bad API response**
+- `4` - Script execution error**
+- `5` - Transaction error*
+- `6` - Revert error*
+- `7` - Bad user error**
+- `8` - Node state error**
+
+`*` - `validator-cli` only
+`**` - `node-cli` only
+
## Development
### Setup repo
diff --git a/cli/containers.py b/cli/containers.py
deleted file mode 100644
index 700888d6..00000000
--- a/cli/containers.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import click
-from core.helper import get_request
-from core.print_formatters import (print_containers,
- print_err_response)
-
-
-@click.group()
-def containers_cli():
- pass
-
-
-@containers_cli.group('containers', help="Node containers commands")
-def containers():
- pass
-
-
-@containers.command(help="List of sChain containers running on connected node")
-@click.option('--all', '-a', is_flag=True)
-def schains(all):
- status, payload = get_request('schains_containers', {'all': all})
- if status == 'ok':
- print_containers(payload)
- else:
- print_err_response(payload)
-
-
-@containers.command(help="List of SKALE containers running on connected node")
-@click.option('--all', '-a', is_flag=True)
-def ls(all):
- status, payload = get_request('skale_containers', {'all': all})
- if status == 'ok':
- print_containers(payload.get('containers', []))
- else:
- print_err_response(payload)
diff --git a/cli/exit.py b/cli/exit.py
deleted file mode 100644
index 7f0ac612..00000000
--- a/cli/exit.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import click
-import logging
-from core.helper import get_request, post_request, abort_if_false
-from core.print_formatters import print_err_response, print_exit_status
-from tools.texts import Texts
-
-logger = logging.getLogger(__name__)
-TEXTS = Texts()
-
-
-@click.group()
-def exit_cli():
- pass
-
-
-@exit_cli.group('exit', help="Exit commands")
-def node_exit():
- pass
-
-
-@node_exit.command('start', help="Start exiting process")
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to destroy your SKALE node?')
-def start():
- status, payload = post_request('start_exit')
- if status == 'ok':
- msg = TEXTS['exit']['start']
- logger.info(msg)
- print(msg)
- else:
- print_err_response(payload)
-
-
-@node_exit.command('status', help="Get exit process status")
-@click.option('--format', '-f', type=click.Choice(['json', 'text']))
-def status(format):
- status, payload = get_request('exit_status')
- if status == 'ok':
- exit_status = payload
- if format == 'json':
- print(exit_status)
- else:
- print_exit_status(exit_status)
- else:
- print_err_response(payload)
-
-
-@node_exit.command('finalize', help="Finalize exit process")
-def finalize():
- pass
diff --git a/cli/sgx.py b/cli/sgx.py
deleted file mode 100644
index 32c81af3..00000000
--- a/cli/sgx.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import click
-from terminaltables import SingleTable
-
-from core.helper import get_request, safe_load_texts
-from core.print_formatters import print_err_response
-
-
-TEXTS = safe_load_texts()
-
-
-@click.group()
-def sgx_cli():
- pass
-
-
-@sgx_cli.group('sgx', help="SGX commands")
-def sgx():
- pass
-
-
-@sgx.command(help="Info about connected SGX server")
-def info():
- status, payload = get_request('sgx_info')
- if status == 'ok':
- data = payload
- table_data = [
- ['SGX info', ''],
- ['Server URL', data['sgx_server_url']],
- ['SGXWallet Version', data['sgx_wallet_version']],
- ['Node SGX keyname', data['sgx_keyname']],
- ['Status', data['status_name']]
- ]
- table = SingleTable(table_data)
- print(table.table)
- else:
- print_err_response(payload)
diff --git a/configs/routes.py b/configs/routes.py
deleted file mode 100644
index e32ac4d1..00000000
--- a/configs/routes.py
+++ /dev/null
@@ -1,43 +0,0 @@
-ROUTES = {
- 'login': '/login',
- 'logout': '/logout',
- 'register': '/join',
- 'node_info': '/node-info',
- 'node_about': '/about-node',
- 'create_node': '/create-node',
- 'node_signature': '/node-signature',
- 'test_host': '/test-host',
-
- 'wallet_info': '/load-wallet',
- 'validators_info': '/validators-info',
- 'send_eth': '/api/send-eth',
-
- 'schains_containers': '/containers/schains/list',
- 'schains_healthchecks': '/api/schains/healthchecks',
- 'node_schains': '/schains/list',
- 'schain_config': '/schain-config',
- 'skale_containers': '/containers/list',
-
- 'logs_dump': '/logs/dump',
-
- 'ssl_status': '/api/ssl/status',
- 'ssl_upload': '/api/ssl/upload',
-
- 'dkg_statuses': '/api/dkg/statuses',
-
- 'sgx_info': '/api/sgx/info',
-
- 'start_exit': '/api/exit/start',
- 'exit_status': '/api/exit/status',
- 'finalize_exit': '/api/exit/finalize',
- 'get_schain_firewall_rules': '/api/schains/firewall/show',
- 'turn_on_schain_firewall_rules': '/api/schains/firewall/on',
- 'turn_off_schain_firewall_rules': '/api/schains/firewall/off',
-
- 'maintenance_on': '/api/node/maintenance-on',
- 'maintenance_off': '/api/node/maintenance-off',
- 'repair_schain': '/api/schains/repair',
- 'describe_schain': '/api/schains/get',
-
- 'set_domain_name': '/api/node/set-domain-name'
-}
diff --git a/core/core.py b/core/core.py
deleted file mode 100644
index ecd50009..00000000
--- a/core/core.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import inspect
-from enum import Enum
-from configs import LONG_LINE
-from core.helper import get_request, safe_load_texts
-from core.print_formatters import print_err_response
-
-
-class NodeStatuses(Enum):
- """This class contains possible node statuses"""
- ACTIVE = 0
- LEAVING = 1
- FROZEN = 2
- IN_MAINTENANCE = 3
- LEFT = 4
- NOT_CREATED = 5
-
-
-TEXTS = safe_load_texts()
-
-
-def get_node_info(config, format):
- status, payload = get_request('node_info')
- if status == 'ok':
- node_info = payload['node_info']
- if format == 'json':
- print(node_info)
- elif node_info['status'] == NodeStatuses.NOT_CREATED.value:
- print(TEXTS['service']['node_not_registered'])
- else:
- print_node_info(node_info)
- else:
- print_err_response(payload)
-
-
-def get_node_about(config, format):
- status, payload = get_request('node_about')
- if status == 'ok':
- print(payload)
- else:
- print_err_response(payload)
-
-
-def get_node_status(status):
- node_status = NodeStatuses(status).name
- return TEXTS['node']['status'][node_status]
-
-
-def print_node_info(node):
- print(inspect.cleandoc(f'''
- {LONG_LINE}
- Node info
- Name: {node['name']}
- ID: {node['id']}
- IP: {node['ip']}
- Public IP: {node['publicIP']}
- Port: {node['port']}
- Domain name: {node['domain_name']}
- Status: {get_node_status(int(node['status']))}
- {LONG_LINE}
- '''))
diff --git a/core/mysql_backup.py b/core/mysql_backup.py
deleted file mode 100644
index 77578865..00000000
--- a/core/mysql_backup.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import logging
-import subprocess
-import shlex
-
-from configs import MYSQL_BACKUP_CONTAINER_PATH, MYSQL_BACKUP_PATH
-from tools.helper import run_cmd, extract_env_params
-
-
-logger = logging.getLogger(__name__)
-
-
-def run_mysql_cmd(cmd, env_filepath):
- mysql_creds_str = mysql_creds_for_cmd(env_filepath)
- cmd_str = f'docker exec -t skale_mysql bash -c "{cmd} {mysql_creds_str}"'
- cmd = shlex.split(cmd_str)
- return run_cmd(cmd, secure=True)
-
-
-def mysql_creds_for_cmd(env_filepath: str) -> str:
- """Returns string with user and password flags for MySQL CLI.
-
- :param env_filepath: Path to the environment params file
- :type address: str
- :returns: Formatted string
- :rtype: str
- """
- env_params = extract_env_params(env_filepath)
- return f'-u \'{env_params["DB_USER"]}\' -p\'{env_params["DB_PASSWORD"]}\''
-
-
-def create_mysql_backup(env_filepath: str) -> bool:
- try:
- print('Creating MySQL backup...')
- run_mysql_cmd(
- f'mysqldump --all-databases --single-transaction --no-tablespaces '
- f'--quick --lock-tables=false > {MYSQL_BACKUP_CONTAINER_PATH}',
- env_filepath
- )
- print(f'MySQL backup successfully created: {MYSQL_BACKUP_PATH}')
- return True
- except subprocess.CalledProcessError as e:
- logger.error(e)
- return False
-
-
-def restore_mysql_backup(env_filepath: str) -> bool:
- try:
- print('Restoring MySQL from backup...')
- run_mysql_cmd(
- f'mysql < {MYSQL_BACKUP_CONTAINER_PATH}',
- env_filepath
- )
- print(f'MySQL DB was successfully restored from backup: {MYSQL_BACKUP_PATH}')
- return True
- except subprocess.CalledProcessError:
- logger.exception('MySQL restore command failed')
- return False
diff --git a/core/node.py b/core/node.py
deleted file mode 100644
index 4bdaf824..00000000
--- a/core/node.py
+++ /dev/null
@@ -1,349 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import datetime
-import logging
-import os
-import shlex
-import subprocess
-import time
-
-import docker
-
-from cli.info import VERSION
-from configs import (SKALE_DIR, INSTALL_SCRIPT, UNINSTALL_SCRIPT,
- BACKUP_INSTALL_SCRIPT,
- DATAFILES_FOLDER, INIT_ENV_FILEPATH,
- BACKUP_ARCHIVE_NAME, HOME_DIR, RESTORE_SLEEP_TIMEOUT,
- TURN_OFF_SCRIPT, TURN_ON_SCRIPT, TM_INIT_TIMEOUT)
-from configs.cli_logger import LOG_DIRNAME
-
-from core.operations import update_op
-from core.helper import get_request, post_request
-from core.mysql_backup import create_mysql_backup, restore_mysql_backup
-from core.host import (is_node_inited, prepare_host,
- save_env_params, get_flask_secret_key)
-from core.print_formatters import print_err_response, print_node_cmd_error
-from core.resources import update_resource_allocation
-from tools.meta import update_meta
-from tools.helper import run_cmd, extract_env_params
-from tools.texts import Texts
-
-logger = logging.getLogger(__name__)
-TEXTS = Texts()
-
-BASE_CONTAINERS_AMOUNT = 5
-
-
-def register_node(config, name, p2p_ip,
- public_ip, port, domain_name,
- gas_limit=None,
- gas_price=None,
- skip_dry_run=False):
-
- if not is_node_inited():
- print(TEXTS['node']['not_inited'])
- return
-
- # todo: add name, ips and port checks
- json_data = {
- 'name': name,
- 'ip': p2p_ip,
- 'publicIP': public_ip,
- 'port': port,
- 'domain_name': domain_name,
- 'gas_limit': gas_limit,
- 'gas_price': gas_price,
- 'skip_dry_run': skip_dry_run
- }
- status, payload = post_request('create_node',
- json=json_data)
- if status == 'ok':
- msg = TEXTS['node']['registered']
- logger.info(msg)
- print(msg)
- else:
- error_msg = payload
- logger.error(f'Registration error {error_msg}')
- print_err_response(error_msg)
-
-
-def init(env_filepath, dry_run=False):
- if is_node_inited():
- print(TEXTS['node']['already_inited'])
- return
- env_params = extract_env_params(env_filepath)
- if env_params is None:
- return
- prepare_host(
- env_filepath,
- env_params['DISK_MOUNTPOINT'],
- env_params['SGX_SERVER_URL'],
- env_params['ENV_TYPE']
- )
- update_meta(
- VERSION,
- env_params['CONTAINER_CONFIGS_STREAM'],
- env_params['DOCKER_LVMPY_STREAM']
- )
- dry_run = 'yes' if dry_run else ''
- env = {
- 'SKALE_DIR': SKALE_DIR,
- 'DATAFILES_FOLDER': DATAFILES_FOLDER,
- 'DRY_RUN': dry_run,
- **env_params
- }
- try:
- run_cmd(['bash', INSTALL_SCRIPT], env=env)
- except Exception:
- logger.exception('Install script process errored')
- print_node_cmd_error()
- return
- print('Waiting for transaction manager initialization ...')
- time.sleep(TM_INIT_TIMEOUT)
- if not is_base_containers_alive():
- print_node_cmd_error()
- return
- logger.info('Generating resource allocation file ...')
- update_resource_allocation(env_params['ENV_TYPE'])
- print('Init procedure finished')
-
-
-def restore(backup_path, env_filepath):
- env_params = extract_env_params(env_filepath)
- if env_params is None:
- return
- save_env_params(env_filepath)
- if not run_restore_script(backup_path, env_params):
- return
- time.sleep(RESTORE_SLEEP_TIMEOUT)
- if not restore_mysql_backup(env_filepath):
- print('WARNING: MySQL data restoring failed. '
- 'Check < skale logs cli > for more information')
- logger.info('Generating resource allocation file ...')
- update_resource_allocation(env_params['ENV_TYPE'])
- print('Node is restored from backup')
-
-
-def run_restore_script(backup_path, env_params) -> bool:
- env = {
- 'SKALE_DIR': SKALE_DIR,
- 'DATAFILES_FOLDER': DATAFILES_FOLDER,
- 'BACKUP_RUN': 'True',
- 'BACKUP_PATH': backup_path,
- 'HOME_DIR': HOME_DIR,
- **env_params
- }
- try:
- run_cmd(['bash', BACKUP_INSTALL_SCRIPT], env=env)
- except Exception:
- logger.exception('Restore script process errored')
- print_node_cmd_error()
- return False
- return True
-
-
-def purge():
- # todo: check that node is installed
- run_cmd(['sudo', 'bash', UNINSTALL_SCRIPT])
- print('Success')
-
-
-def get_inited_node_env(env_filepath, sync_schains):
- if env_filepath is not None:
- env_params = extract_env_params(env_filepath)
- if env_params is None:
- return
- save_env_params(env_filepath)
- else:
- env_params = extract_env_params(INIT_ENV_FILEPATH)
- flask_secret_key = get_flask_secret_key()
- env = {
- 'SKALE_DIR': SKALE_DIR,
- 'FLASK_SECRET_KEY': flask_secret_key,
- 'DATAFILES_FOLDER': DATAFILES_FOLDER,
- **env_params
- }
- if sync_schains:
- env['BACKUP_RUN'] = 'True'
- return env
-
-
-def update(env_filepath):
- if not is_node_inited():
- print(TEXTS['node']['not_inited'])
- return
- logger.info('Node update started')
- env = get_inited_node_env(env_filepath, sync_schains=False)
- # todo: tmp fix for update procedure
- clear_env = {k: v for k, v in env.items() if v != ''}
- update_op(env_filepath, clear_env)
- logger.info('Waiting for transaction manager initialization')
- time.sleep(TM_INIT_TIMEOUT)
- if not is_base_containers_alive():
- print_node_cmd_error()
- return
- logger.info('Node update finished')
-
-
-def get_node_signature(validator_id):
- params = {'validator_id': validator_id}
- status, payload = get_request('node_signature', params=params)
- if status == 'ok':
- return payload['signature']
- else:
- return payload
-
-
-def backup(path, env_filepath, mysql_backup=True):
- if mysql_backup:
- if not create_mysql_backup(env_filepath):
- print('Something went wrong while trying to create MySQL backup, '
- 'check out < skale logs cli > output')
- return
- backup_filepath = get_backup_filepath(path)
- create_backup_archive(backup_filepath)
-
-
-def get_backup_filename():
- time = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
- return f'{BACKUP_ARCHIVE_NAME}-{time}.tar.gz'
-
-
-def get_backup_filepath(base_path):
- return os.path.join(base_path, get_backup_filename())
-
-
-def create_backup_archive(backup_filepath):
- print('Creating backup archive...')
- log_skale_path = os.path.join('.skale', LOG_DIRNAME)
- cmd = shlex.split(
- f'tar -zcvf {backup_filepath} -C {HOME_DIR} '
- f'--exclude {log_skale_path} .skale'
- )
- try:
- run_cmd(cmd)
- print(f'Backup archive successfully created: {backup_filepath}')
- except subprocess.CalledProcessError:
- logger.exception('Backup archive creation failed')
- print_node_cmd_error()
-
-
-def set_maintenance_mode_on():
- print('Setting maintenance mode on...')
- status, payload = post_request('maintenance_on')
- if status == 'ok':
- msg = TEXTS['node']['maintenance_on']
- logger.info(msg)
- print(msg)
- else:
- error_msg = payload
- logger.error(f'Set maintenance mode error {error_msg}')
- print_err_response(error_msg)
-
-
-def set_maintenance_mode_off():
- print('Setting maintenance mode off...')
- status, payload = post_request('maintenance_off')
- if status == 'ok':
- msg = TEXTS['node']['maintenance_off']
- logger.info(msg)
- print(msg)
- else:
- error_msg = payload
- logger.error(f'Remove from maintenance mode error {error_msg}')
- print_err_response(error_msg)
-
-
-def run_turn_off_script():
- print('Turing off the node...')
- cmd_env = {
- 'SKALE_DIR': SKALE_DIR,
- 'DATAFILES_FOLDER': DATAFILES_FOLDER
- }
- try:
- run_cmd(['bash', TURN_OFF_SCRIPT], env=cmd_env)
- except Exception:
- logger.exception('Turning off failed')
- print_node_cmd_error()
- return
- print('Node was successfully turned off')
-
-
-def run_turn_on_script(sync_schains, env_filepath):
- print('Turning on the node...')
- env = get_inited_node_env(env_filepath, sync_schains)
- try:
- run_cmd(['bash', TURN_ON_SCRIPT], env=env)
- except Exception:
- logger.exception('Turning on failed')
- print_node_cmd_error()
- return
- print('Waiting for transaction manager initialization ...')
- time.sleep(TM_INIT_TIMEOUT)
- print('Node was successfully turned on')
-
-
-def turn_off(maintenance_on):
- if not is_node_inited():
- print(TEXTS['node']['not_inited'])
- return
- if maintenance_on:
- set_maintenance_mode_on()
- run_turn_off_script()
-
-
-def turn_on(maintenance_off, sync_schains, env_file):
- if not is_node_inited():
- print(TEXTS['node']['not_inited'])
- return
- run_turn_on_script(sync_schains, env_file)
- # TODO: Handle error from turn on script
- if maintenance_off:
- set_maintenance_mode_off()
-
-
-def is_base_containers_alive():
- dclient = docker.from_env()
- containers = dclient.containers.list()
- skale_containers = list(filter(
- lambda c: c.name.startswith('skale_'), containers
- ))
- return len(skale_containers) >= BASE_CONTAINERS_AMOUNT
-
-
-def set_domain_name(domain_name):
- if not is_node_inited():
- print(TEXTS['node']['not_inited'])
- return
- print(f'Setting new domain name: {domain_name}')
- status, payload = post_request(
- url_name='set_domain_name',
- json={
- 'domain_name': domain_name
- }
- )
- if status == 'ok':
- msg = TEXTS['node']['domain_name_changed']
- logger.info(msg)
- print(msg)
- else:
- error_msg = payload
- logger.error(f'Domain name change error: {error_msg}')
- print_err_response(error_msg)
diff --git a/core/operations/base.py b/core/operations/base.py
deleted file mode 100644
index 8ca40d27..00000000
--- a/core/operations/base.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-from cli.info import VERSION
-from core.host import prepare_host
-from core.operations.common import (
- remove_dynamic_containers, backup_old_contracts,
- download_contracts, docker_lvmpy_update,
- update_skale_node, download_filestorage_artifacts
-)
-from tools.docker_utils import compose_rm, compose_up
-from tools.meta import update_meta
-
-
-def update(env_filepath: str, env: str) -> None:
- compose_rm(env)
- remove_dynamic_containers()
-
- backup_old_contracts()
- download_contracts(env)
- docker_lvmpy_update(env)
- update_skale_node(env)
-
- prepare_host(
- env_filepath,
- env['DISK_MOUNTPOINT'],
- env['SGX_SERVER_URL'],
- env['ENV_TYPE'],
- allocation=True
- )
- update_meta(
- VERSION,
- env['CONTAINER_CONFIGS_STREAM'],
- env['DOCKER_LVMPY_STREAM']
- )
- download_filestorage_artifacts()
- compose_up(env)
-
-
-def init(env):
- pass
-
-
-def backup_init(env):
- pass
-
-
-def turn_off(env):
- pass
-
-
-def turn_on(env):
- pass
diff --git a/core/operations/common.py b/core/operations/common.py
deleted file mode 100644
index 4fba5b59..00000000
--- a/core/operations/common.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import os
-import logging
-
-import urllib.request
-from distutils.dir_util import copy_tree
-
-from core.operations.git_helper import sync_repo, rm_local_repo
-from tools.docker_utils import (rm_all_schain_containers, rm_all_ima_containers, compose_pull,
- compose_build)
-from configs import (CONTRACTS_PATH, BACKUP_CONTRACTS_PATH,
- MANAGER_CONTRACTS_FILEPATH, IMA_CONTRACTS_FILEPATH, DOCKER_LVMPY_PATH,
- CONTAINER_CONFIG_PATH, FILESTORAGE_INFO_FILE, FILESTORAGE_ARTIFACTS_FILE,
- DOCKER_LVMPY_REPO_URL, SKALE_NODE_REPO_URL)
-from tools.helper import run_cmd, read_json
-
-logger = logging.getLogger(__name__)
-
-
-def remove_dynamic_containers():
- logger.info(f'Removing sChains containers')
- rm_all_schain_containers()
- logger.info(f'Removing IMA containers')
- rm_all_ima_containers()
-
-
-def backup_old_contracts():
- logging.info('Copying old contracts ABIs')
- copy_tree(CONTRACTS_PATH, BACKUP_CONTRACTS_PATH)
-
-
-def download_contracts(env):
- urllib.request.urlretrieve(env['MANAGER_CONTRACTS_ABI_URL'], MANAGER_CONTRACTS_FILEPATH)
- urllib.request.urlretrieve(env['IMA_CONTRACTS_ABI_URL'], IMA_CONTRACTS_FILEPATH)
-
-
-def docker_lvmpy_update(env):
- rm_local_repo(DOCKER_LVMPY_PATH)
- sync_repo(DOCKER_LVMPY_REPO_URL, DOCKER_LVMPY_PATH, env["DOCKER_LVMPY_STREAM"])
-
- logging.info('Running docker-lvmpy update script')
- env['PHYSICAL_VOLUME'] = env['DISK_MOUNTPOINT']
- env['VOLUME_GROUP'] = 'schains'
- env['PATH'] = os.environ.get('PATH', None)
- run_cmd(
- cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/update.sh'.split(),
- env=env
- )
- logging.info('docker-lvmpy update done')
-
-
-def download_filestorage_artifacts():
- logger.info(f'Updating filestorage artifacts')
- fs_artifacts_url = read_json(FILESTORAGE_INFO_FILE)['artifacts_url']
- logger.debug(f'Downloading {fs_artifacts_url} to {FILESTORAGE_ARTIFACTS_FILE}')
- urllib.request.urlretrieve(fs_artifacts_url, FILESTORAGE_ARTIFACTS_FILE)
-
-
-def update_skale_node(env):
- if 'CONTAINER_CONFIGS_DIR' in env:
- update_skale_node_dev(env)
- else:
- update_skale_node_git(env)
-
-
-def update_skale_node_git(env):
- rm_local_repo(CONTAINER_CONFIG_PATH)
- sync_repo(SKALE_NODE_REPO_URL, CONTAINER_CONFIG_PATH, env["CONTAINER_CONFIGS_STREAM"])
- compose_pull()
-
-
-def update_skale_node_dev(env):
- sync_skale_node_dev(env)
- compose_build()
-
-
-def sync_skale_node_dev(env):
- logger.info(f'Syncing {CONTAINER_CONFIG_PATH} with {env["CONTAINER_CONFIGS_DIR"]}')
- run_cmd(
- cmd=f'rsync -r {env["CONTAINER_CONFIGS_DIR"]}/ {CONTAINER_CONFIG_PATH}'.split()
- )
- run_cmd(
- cmd=f'rsync -r {env["CONTAINER_CONFIGS_DIR"]}/.git {CONTAINER_CONFIG_PATH}'.split()
- )
diff --git a/core/schains.py b/core/schains.py
deleted file mode 100644
index d119e40d..00000000
--- a/core/schains.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import json
-import logging
-import pprint
-
-from core.helper import get_request, post_request
-from core.print_formatters import (
- print_dkg_statuses,
- print_err_response,
- print_firewall_rules,
- print_schain_info,
- print_schains,
- print_schains_healthchecks
-)
-
-
-logger = logging.getLogger(__name__)
-
-
-def get_schain_firewall_rules(schain: str) -> None:
- status, payload = get_request('get_schain_firewall_rules',
- {'schain': schain})
- if status == 'ok':
- print_firewall_rules(payload['endpoints'])
- else:
- print_err_response(payload)
-
-
-def show_schains() -> None:
- status, payload = get_request('node_schains')
- if status == 'ok':
- schains = payload
- if not schains:
- print('No sChains found')
- return
- print_schains(schains)
- else:
- print_err_response(payload)
-
-
-def show_dkg_info(all_: bool = False) -> None:
- params = {'all': all_}
- status, payload = get_request('dkg_statuses', params=params)
- if status == 'ok':
- print_dkg_statuses(payload)
- else:
- print_err_response(payload)
-
-
-def show_config(name: str) -> None:
- status, payload = get_request('schain_config', {'schain-name': name})
- if status == 'ok':
- pprint.pprint(payload)
- else:
- print_err_response(payload)
-
-
-def show_checks(json_format: bool = False) -> None:
- status, payload = get_request('schains_healthchecks')
- if status == 'ok':
- if not payload:
- print('No sChains found')
- return
- if json_format:
- print(json.dumps(payload))
- else:
- print_schains_healthchecks(payload)
- else:
- print_err_response(payload)
-
-
-def toggle_schain_repair_mode(schain: str) -> None:
- status, payload = post_request('repair_schain', {'schain': schain})
- if status == 'ok':
- print('Schain has been set for repair')
- else:
- print_err_response(payload)
-
-
-def describe(schain: str, raw=False) -> None:
- status, payload = get_request('describe_schain', {'schain': schain})
- if status == 'ok':
- print_schain_info(payload, raw=raw)
- else:
- print_err_response(payload)
diff --git a/datafiles/backup-install.sh b/datafiles/backup-install.sh
deleted file mode 100644
index d8ae74db..00000000
--- a/datafiles/backup-install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-CONFIG_DIR="$SKALE_DIR"/config
-CONTRACTS_DIR="$SKALE_DIR"/contracts_info
-NODE_DATA_DIR=$SKALE_DIR/node_data
-
-
-source "$DATAFILES_FOLDER"/helper.sh
-
-tar -zxvf $BACKUP_PATH -C $HOME_DIR
-
-echo "Creating .env symlink to $CONFIG_DIR/.env ..."
-if [[ -f $CONFIG_DIR/.env ]]; then
- rm "$CONFIG_DIR/.env"
-fi
-
-ln -sf "$SKALE_DIR/.env" "$CONFIG_DIR/.env"
-
-cd $SKALE_DIR
-
-iptables_configure
-docker_lvmpy_install
-
-cd $CONFIG_DIR
-if [[ ! -z $CONTAINER_CONFIGS_DIR ]]; then
- echo "Building containers ..."
- SKALE_DIR=$SKALE_DIR docker-compose -f docker-compose.yml build
-fi
-up_compose
diff --git a/datafiles/dependencies.sh b/datafiles/dependencies.sh
deleted file mode 100644
index 7a56e49e..00000000
--- a/datafiles/dependencies.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-sudo apt-get update
-# sudo apt-get install \
-# apt-transport-https \
-# ca-certificates \
-# curl \
-# gnupg-agent \
-# software-properties-common
-#
-# curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
-# sudo apt-get update
-# sudo apt-get install docker-ce docker-ce-cli containerd.io
-
-# For skipping installation dialog
-echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
-echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
-sudo apt install iptables-persistent -y
-
-
-while ! (ps -ef | grep "[d]ocker" | awk {'print $2'});
-do
- echo "Waiting for docker daemon file..."
- sleep 5
-done
diff --git a/datafiles/helper.sh b/datafiles/helper.sh
deleted file mode 100644
index 4250b753..00000000
--- a/datafiles/helper.sh
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env bash
-
-export FLASK_SECRET_KEY_FILE=$NODE_DATA_DIR/flask_db_key.txt
-export DISK_MOUNTPOINT_FILE=$NODE_DATA_DIR/disk_mountpoint.txt
-export SGX_CERTIFICATES_DIR_NAME=sgx_certs
-
-export FILESTORAGE_INFO_FILE=$SKALE_DIR/config/filestorage_info.json
-export FILESTORAGE_ARTIFACTS_FILE=$NODE_DATA_DIR/filestorage_artifacts.json
-
-export CONTRACTS_DIR="$SKALE_DIR"/contracts_info
-export BACKUP_CONTRACTS_DIR="$SKALE_DIR"/.old_contracts_info
-
-export BASE_SERVICES="transaction-manager skale-admin skale-api mysql bounty nginx watchdog filebeat"
-export NOTIFICATION_SERVICES="celery redis"
-
-remove_dynamic_containers () {
- echo 'Removing schains containers ...'
- SCHAIN_CONTAINERS="$(docker ps -a --format '{{.Names}}' | grep '^skale_schain_' | awk '{print $1}' | xargs)"
- for CONTAINER in $SCHAIN_CONTAINERS; do
- echo 'Stopping' $CONTAINER
- docker stop $CONTAINER -t 40
- echo 'Removing' $CONTAINER
- docker rm $CONTAINER
- done
-
- echo 'Removing ima containers...'
- IMA_CONTAINERS="$(docker ps -a --format '{{.Names}}' | grep '^skale_ima_' | awk '{print $1}' | xargs)"
- for CONTAINER in $IMA_CONTAINERS; do
- echo 'Stopping' $CONTAINER
- docker stop $CONTAINER -t 40
- echo 'Removing' $CONTAINER
- docker rm $CONTAINER
- done
-}
-
-remove_compose_containers () {
- echo 'Removing node containers ...'
- COMPOSE_PATH=$SKALE_DIR/config/docker-compose.yml
- echo 'Removing api, bounty, admin containers ...'
- DB_PORT=0 docker-compose -f $COMPOSE_PATH rm -s -f skale-api bounty skale-admin
- echo 'Api, bounty, admin containers were removed. Sleeping ...'
- sleep 7
- echo 'Removing transaction-manager, mysql and rest ...'
- DB_PORT=0 docker-compose -f $COMPOSE_PATH rm -s -f
- echo 'Done'
-}
-
-download_contracts () {
- echo "Downloading contracts ABI ..."
- curl -L $MANAGER_CONTRACTS_ABI_URL > $CONTRACTS_DIR/manager.json
- curl -L $IMA_CONTRACTS_ABI_URL > $CONTRACTS_DIR/ima.json
-}
-
-download_filestorage_artifacts () {
- echo "Downloading filestorage artifacts ..."
- FS_ARTIFACTS_URL=$(cat $FILESTORAGE_INFO_FILE | sed -n 's/^[[:space:]]*"artifacts_url"[[:space:]]*:[[:space:]]*//p')
- FS_ARTIFACTS_URL=$(echo "$FS_ARTIFACTS_URL" | sed -r 's/["]+//g')
- curl -L $FS_ARTIFACTS_URL > $FILESTORAGE_ARTIFACTS_FILE
-}
-
-backup_old_contracts () {
- echo "Copying old contracts ABI ..."
- cp -R $CONTRACTS_DIR $BACKUP_CONTRACTS_DIR
-}
-
-
-update_docker_lvmpy_sources () {
- echo 'Updating docker-lvmpy sources ...'
- cd docker-lvmpy
- echo "Fetching changes ..."
- git fetch
- echo "Checkouting to $DOCKER_LVMPY_STREAM ..."
- git checkout $DOCKER_LVMPY_STREAM
- is_branch="$(git show-ref --verify refs/heads/$DOCKER_LVMPY_STREAM > /dev/null 2>&1; echo $?)"
- if [[ $is_branch -eq 0 ]] ; then
- echo "Pulling recent changes from $DOCKER_LVMPY_STREAM ..."
- git pull
- fi
-}
-
-docker_lvmpy_install () {
- echo 'Installing docker-lvmpy ...'
- if [[ ! -d docker-lvmpy ]]; then
- git clone "https://github.com/skalenetwork/docker-lvmpy.git"
- fi
- update_docker_lvmpy_sources
- echo "Running install.sh script ..."
- sudo -H PHYSICAL_VOLUME=$DISK_MOUNTPOINT VOLUME_GROUP=schains PATH=$PATH scripts/install.sh
- cd -
-}
-
-docker_lvmpy_update () {
- update_docker_lvmpy_sources
- echo "Running update.sh script ..."
- sudo -H PHYSICAL_VOLUME=$DISK_MOUNTPOINT VOLUME_GROUP=schains PATH=$PATH scripts/update.sh
- cd -
-}
-
-iptables_configure() {
- echo "Configuring iptables ..."
- mkdir -p /etc/iptables/
- # Base policies (drop all incoming, allow all outcoming, drop all forwarding)
- sudo iptables -P INPUT ACCEPT
- sudo iptables -P OUTPUT ACCEPT
- sudo iptables -P FORWARD DROP
- # Allow conntrack established connections
- sudo iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
- # Allow local loopback services
- sudo iptables -A INPUT -i lo -j ACCEPT
- # Allow ssh
- sudo iptables -A INPUT -p tcp --dport 22 -j ACCEPT
- # Allow http
- sudo iptables -A INPUT -p tcp --dport 8080 -j ACCEPT
- # Allow https
- sudo iptables -A INPUT -p tcp --dport 443 -j ACCEPT
- # Allow dns
- sudo iptables -A INPUT -p tcp --dport 53 -j ACCEPT
- sudo iptables -A INPUT -p udp --dport 53 -j ACCEPT
- # Allow watchdog
- sudo iptables -A INPUT -p tcp --dport 3009 -j ACCEPT
- # Allow monitor node exporter
- sudo iptables -A INPUT -p tcp --dport 9100 -j ACCEPT
- # Drop all the rest
- sudo iptables -A INPUT -p tcp -j DROP
- sudo iptables -A INPUT -p udp -j DROP
- # Allow pings
- sudo iptables -I INPUT -p icmp --icmp-type destination-unreachable -j ACCEPT
- sudo iptables -I INPUT -p icmp --icmp-type source-quench -j ACCEPT
- sudo iptables -I INPUT -p icmp --icmp-type time-exceeded -j ACCEPT
- sudo bash -c 'iptables-save > /etc/iptables/rules.v4'
-}
-
-configure_flask () {
- echo "Configuring flask secret key ..."
- if [ -e $FLASK_SECRET_KEY_FILE ]; then
- echo "File $FLASK_SECRET_KEY_FILE already exists!"
- else
- FLASK_SECRET_KEY=$(openssl rand -base64 32)
- echo $FLASK_SECRET_KEY >> $FLASK_SECRET_KEY_FILE
- fi
- export FLASK_SECRET_KEY=$FLASK_SECRET_KEY
-}
-
-configure_filebeat () {
- echo "Configuring filebeat ..."
- sudo cp $CONFIG_DIR/filebeat.yml $NODE_DATA_DIR/
- sudo chown root $NODE_DATA_DIR/filebeat.yml
- sudo chmod go-w $NODE_DATA_DIR/filebeat.yml
-}
-
-up_compose() {
- if [[ "$MONITORING_CONTAINERS" == "True" ]]; then
- echo "Running SKALE Node with monitoring containers..."
- SKALE_DIR="$SKALE_DIR" docker-compose -f docker-compose.yml up -d
- else
- echo "Running SKALE Node with base set of containers..."
- SKALE_DIR="$SKALE_DIR" docker-compose -f docker-compose.yml up -d $BASE_SERVICES
- fi
- if [[ ! -z "$TG_API_KEY" && ! -z "$TG_CHAT_ID" ]]; then
- SKALE_DIR="$SKALE_DIR" docker-compose -f docker-compose.yml up -d $NOTIFICATION_SERVICES
- echo "Running containers for telegram notifications..."
- fi
-}
diff --git a/datafiles/install.sh b/datafiles/install.sh
deleted file mode 100644
index ee239d40..00000000
--- a/datafiles/install.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-CONFIG_DIR="$SKALE_DIR"/config
-CONTRACTS_DIR="$SKALE_DIR"/contracts_info
-NODE_DATA_DIR=$SKALE_DIR/node_data
-
-source "$DATAFILES_FOLDER"/helper.sh
-
-if [[ -z $CONTAINER_CONFIGS_DIR ]]; then
- cd $CONFIG_DIR
- if [[ ! -d .git ]]; then
- echo "Cloning container configs ..."
- git clone "https://github.com/skalenetwork/skale-node.git" "$CONFIG_DIR"
- fi
- echo "Fetching new branches and tags..."
- git fetch
- echo "Checkouting to container configs branch $CONTAINER_CONFIGS_STREAM ..."
- git checkout $CONTAINER_CONFIGS_STREAM
- is_branch="$(git show-ref --verify refs/heads/$CONTAINER_CONFIGS_STREAM >/dev/null 2>&1; echo $?)"
- if [[ $is_branch -eq 0 ]] ; then
- echo "Pulling recent changes from $CONTAINER_CONFIGS_STREAM ..."
- git pull
- fi
-else
- echo "Syncing container configs ..."
- rsync -r $CONTAINER_CONFIGS_DIR/* $CONFIG_DIR
- rsync -r $CONTAINER_CONFIGS_DIR/.git $CONFIG_DIR
-fi
-
-echo "Creating .env symlink to $CONFIG_DIR/.env ..."
-if [[ -f $CONFIG_DIR/.env ]]; then
- rm "$CONFIG_DIR/.env"
-fi
-ln -s $SKALE_DIR/.env $CONFIG_DIR/.env
-
-cd $SKALE_DIR
-
-download_contracts
-download_filestorage_artifacts
-configure_filebeat
-configure_flask
-iptables_configure
-
-if [[ -z $DRY_RUN ]]; then
- docker_lvmpy_install
- cd $CONFIG_DIR
- if [[ ! -z $CONTAINER_CONFIGS_DIR ]]; then
- echo "Building containers ..."
- SKALE_DIR=$SKALE_DIR docker-compose -f docker-compose.yml build
- fi
- up_compose
-fi
diff --git a/datafiles/turn-off.sh b/datafiles/turn-off.sh
deleted file mode 100644
index 5fd52584..00000000
--- a/datafiles/turn-off.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-source "$DATAFILES_FOLDER"/helper.sh
-
-remove_compose_containers
-remove_dynamic_containers
diff --git a/datafiles/turn-on.sh b/datafiles/turn-on.sh
deleted file mode 100644
index d520e65e..00000000
--- a/datafiles/turn-on.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-CONFIG_DIR="$SKALE_DIR"/config
-CONTRACTS_DIR="$SKALE_DIR"/contracts_info
-NODE_DATA_DIR=$SKALE_DIR/node_data
-
-source "$DATAFILES_FOLDER"/helper.sh
-
-cd $CONFIG_DIR
-up_compose
diff --git a/datafiles/uninstall.sh b/datafiles/uninstall.sh
deleted file mode 100644
index cac47792..00000000
--- a/datafiles/uninstall.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-export CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-source $CURRENT_DIR/helper.sh
-
-remove_compose_containers
-remove_dynamic_containers
-
-# todo: format disk setted in $NODE_DATA_DIR/disk_mountpoint.txt
-
-rm -rf /tmp/.skale
-cp -r $SKALE_DIR /tmp/.skale
-rm -rf $SKALE_DIR
diff --git a/datafiles/update.sh b/datafiles/update.sh
deleted file mode 100644
index 460261b7..00000000
--- a/datafiles/update.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-CONFIG_DIR="$SKALE_DIR"/config
-CONTRACTS_DIR="$SKALE_DIR"/contracts_info
-NODE_DATA_DIR=$SKALE_DIR/node_data
-
-source "$DATAFILES_FOLDER"/helper.sh
-
-cd $SKALE_DIR
-
-remove_compose_containers
-remove_dynamic_containers
-
-backup_old_contracts
-download_contracts
-docker_lvmpy_update
-
-cd $CONFIG_DIR
-if [[ -z $CONTAINER_CONFIGS_DIR ]]; then
- echo "Fetching new branches and tags..."
- git fetch
- echo "Checkouting to container configs branch $CONTAINER_CONFIGS_STREAM ..."
- git checkout $CONTAINER_CONFIGS_STREAM
- is_branch="$(git show-ref --verify refs/heads/$CONTAINER_CONFIGS_STREAM >/dev/null 2>&1; echo $?)"
- if [[ $is_branch -eq 0 ]] ; then
- echo "Pulling changes ..."
- git pull
- fi
- echo "Pulling new version of images ..."
- SKALE_DIR=$SKALE_DIR docker-compose -f docker-compose.yml pull
-else
- echo "Syncing configs with CONTAINER_CONFIGS_DIR"
- rsync -r "$CONTAINER_CONFIGS_DIR/" "$CONFIG_DIR"
- rsync -r "$CONTAINER_CONFIGS_DIR/.git" "$CONFIG_DIR"
- echo "Building containers ..."
- SKALE_DIR=$SKALE_DIR docker-compose -f docker-compose.yml build
-fi
-
-download_filestorage_artifacts
-
-up_compose
diff --git a/helper-scripts b/helper-scripts
index ced34747..dc21eb10 160000
--- a/helper-scripts
+++ b/helper-scripts
@@ -1 +1 @@
-Subproject commit ced34747acb3335f1ea858b4e847f3c21b4ccf7e
+Subproject commit dc21eb1005ac0d4b45d29d89a8a7783eba6ecf20
diff --git a/main.spec b/main.spec
index 6db9dbc4..3c776386 100644
--- a/main.spec
+++ b/main.spec
@@ -4,20 +4,19 @@
# if distutils.distutils_path.endswith('__init__.py'):
# distutils.distutils_path = os.path.dirname(distutils.distutils_path)
+import importlib.util
+
+libxtwrapper_path = importlib.util.find_spec('libxtwrapper').origin
+
+
block_cipher = None
a = Analysis(
- ['main.py'],
+ ['node_cli/main.py'],
pathex=['.'],
- binaries=[],
+ binaries=[(libxtwrapper_path, '.')],
datas=[
("./text.yml", "data"),
- ("./datafiles/install.sh", "data/datafiles"),
- ("./datafiles/backup-install.sh", "data/datafiles"),
- ("./datafiles/update.sh", "data/datafiles"),
- ("./datafiles/helper.sh", "data/datafiles"),
- ("./datafiles/turn-off.sh", "data/datafiles"),
- ("./datafiles/turn-on.sh", "data/datafiles"),
("./datafiles/skaled-ssl-test", "data/datafiles")
],
hiddenimports=[],
diff --git a/__init__.py b/node_cli/__init__.py
similarity index 100%
rename from __init__.py
rename to node_cli/__init__.py
diff --git a/cli/__init__.py b/node_cli/cli/__init__.py
similarity index 69%
rename from cli/__init__.py
rename to node_cli/cli/__init__.py
index d60dd6e2..4534a389 100644
--- a/cli/__init__.py
+++ b/node_cli/cli/__init__.py
@@ -1,4 +1,4 @@
-__version__ = '1.1.1'
+__version__ = '2.0.1'
if __name__ == "__main__":
print(__version__)
diff --git a/node_cli/cli/exit.py b/node_cli/cli/exit.py
new file mode 100644
index 00000000..1ef0223a
--- /dev/null
+++ b/node_cli/cli/exit.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2020 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import logging
+
+import click
+
+from node_cli.utils.print_formatters import print_exit_status
+from node_cli.utils.helper import error_exit, get_request, post_request, abort_if_false
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.utils.texts import Texts
+
+logger = logging.getLogger(__name__)
+TEXTS = Texts()
+BLUEPRINT_NAME = 'node'
+
+
+@click.group()
+def exit_cli():
+ pass
+
+
+@exit_cli.group('exit', help="Exit commands")
+def node_exit():
+ pass
+
+
+@node_exit.command('start', help="Start exiting process")
+@click.option('--yes', is_flag=True, callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to destroy your SKALE node?')
+def start():
+ status, payload = post_request(
+ blueprint=BLUEPRINT_NAME,
+ method='exit/start'
+ )
+ if status == 'ok':
+ msg = TEXTS['exit']['start']
+ logger.info(msg)
+ print(msg)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+@node_exit.command('status', help="Get exit process status")
+@click.option('--format', '-f', type=click.Choice(['json', 'text']))
+def status(format):
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='exit/status'
+ )
+ if status == 'ok':
+ exit_status = payload
+ if format == 'json':
+ print(exit_status)
+ else:
+ print_exit_status(exit_status)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+@node_exit.command('finalize', help="Finalize exit process")
+def finalize():
+ pass
diff --git a/node_cli/cli/health.py b/node_cli/cli/health.py
new file mode 100644
index 00000000..808082c8
--- /dev/null
+++ b/node_cli/cli/health.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2020 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import click
+from node_cli.utils.texts import Texts
+
+from node_cli.core.health import get_containers, get_schains_checks, get_sgx_info
+
+
+G_TEXTS = Texts()
+TEXTS = G_TEXTS['health']
+
+
+@click.group()
+def health_cli():
+ pass
+
+
+@health_cli.group('health', help=TEXTS['help'])
+def health():
+ pass
+
+
+@health.command(help=TEXTS['containers']['help'])
+@click.option('--all', '-a', is_flag=True)
+def containers(all):
+ get_containers(_all=all)
+
+
+@health.command(help=TEXTS['schains_checks']['help'])
+@click.option(
+ '--json',
+ 'json_format',
+ help=G_TEXTS['common']['json']['help'],
+ is_flag=True
+)
+def schains(json_format: bool) -> None:
+ get_schains_checks(json_format)
+
+
+@health.command(help=TEXTS['sgx']['help'])
+def sgx():
+ get_sgx_info()
diff --git a/cli/logs.py b/node_cli/cli/logs.py
similarity index 80%
rename from cli/logs.py
rename to node_cli/cli/logs.py
index 3d345e89..7472bcbf 100644
--- a/cli/logs.py
+++ b/node_cli/cli/logs.py
@@ -17,9 +17,12 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import sys
+
import click
-from core.helper import download_dump
-from configs.cli_logger import LOG_FILEPATH, DEBUG_LOG_FILEPATH
+from node_cli.core.logs import create_logs_dump
+from node_cli.configs.cli_logger import LOG_FILEPATH, DEBUG_LOG_FILEPATH
+from node_cli.utils.exit_codes import CLIExitCodes
@click.group()
@@ -49,6 +52,8 @@ def cli(debug):
)
@click.argument('path')
def dump(container, path):
- res = download_dump(path, container)
+ res = create_logs_dump(path, container)
if res:
- print(f'File {res} downloaded')
+ print(f'Logs dump created: {res}')
+ else:
+ sys.exit(CLIExitCodes.OPERATION_EXECUTION_ERROR)
diff --git a/cli/node.py b/node_cli/cli/node.py
similarity index 81%
rename from cli/node.py
rename to node_cli/cli/node.py
index 071038e0..d4708a56 100644
--- a/cli/node.py
+++ b/node_cli/cli/node.py
@@ -22,16 +22,20 @@
import click
-from core.core import get_node_info, get_node_about
-from core.node import (get_node_signature, init, restore,
- register_node as register, update, backup, set_maintenance_mode_on,
- set_maintenance_mode_off, turn_off, turn_on, set_domain_name)
-from core.helper import abort_if_false, safe_load_texts
-from configs import DEFAULT_NODE_BASE_PORT
-from tools.helper import session_config
+from node_cli.core.node import (
+ configure_firewall_rules,
+ get_node_signature, init, restore,
+ register_node as register,
+ update, backup,
+ set_maintenance_mode_on, set_maintenance_mode_off,
+ turn_off, turn_on, get_node_info,
+ set_domain_name, run_checks
+)
+from node_cli.configs import DEFAULT_NODE_BASE_PORT
+from node_cli.configs.env import ALLOWED_ENV_TYPES
+from node_cli.utils.helper import abort_if_false, safe_load_texts, streamed_cmd
-config = session_config()
TEXTS = safe_load_texts()
@@ -78,15 +82,7 @@ def node():
@node.command('info', help="Get info about SKALE node")
@click.option('--format', '-f', type=click.Choice(['json', 'text']))
def node_info(format):
- config = session_config()
- get_node_info(config, format)
-
-
-@node.command('about', help="Get service info about SKALE node")
-@click.option('--format', '-f', type=click.Choice(['json', 'text']))
-def node_about(format):
- config = session_config()
- get_node_about(config, format)
+ get_node_info(format)
@node.command('register', help="Register current node in the SKALE Manager")
@@ -132,20 +128,16 @@ def node_about(format):
default=False,
help='Skip dry run for registration transaction'
)
+@streamed_cmd
def register_node(name, ip, port, domain, gas_limit, gas_price, skip_dry_run):
- config = session_config()
- register(config, name, ip, ip, port, domain, gas_limit, gas_price, skip_dry_run)
+ register(name, ip, ip, port, domain, gas_limit, gas_price, skip_dry_run)
@node.command('init', help="Initialize SKALE node")
@click.argument('env_file')
-@click.option(
- '--dry-run',
- is_flag=True,
- help="Dry run node init (don't setup containers)"
-)
-def init_node(env_file, dry_run):
- init(env_file, dry_run)
+@streamed_cmd
+def init_node(env_file):
+ init(env_file)
@node.command('update', help='Update node from .env file')
@@ -153,6 +145,7 @@ def init_node(env_file, dry_run):
expose_value=False,
prompt='Are you sure you want to update SKALE node software?')
@click.argument('env_file')
+@streamed_cmd
def update_node(env_file):
update(env_file)
@@ -166,17 +159,15 @@ def signature(validator_id):
@node.command('backup', help="Generate backup file to restore SKALE node on another machine")
@click.argument('backup_folder_path')
-@click.argument('env_file')
-@click.option('--no-database', is_flag=True,
- help="Skip mysql backup")
-def backup_node(backup_folder_path, env_file, no_database):
- backup_mysql = True if not no_database else False
- backup(backup_folder_path, env_file, backup_mysql)
+@streamed_cmd
+def backup_node(backup_folder_path):
+ backup(backup_folder_path)
@node.command('restore', help="Restore SKALE node on another machine")
@click.argument('backup_path')
@click.argument('env_file')
+@streamed_cmd
def restore_node(backup_path, env_file):
restore(backup_path, env_file)
@@ -185,11 +176,13 @@ def restore_node(backup_path, env_file):
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to set SKALE node into maintenance mode?')
+@streamed_cmd
def set_node_in_maintenance():
set_maintenance_mode_on()
@node.command('maintenance-off', help="Remove SKALE node from maintenance mode")
+@streamed_cmd
def remove_node_from_maintenance():
set_maintenance_mode_off()
@@ -203,6 +196,7 @@ def remove_node_from_maintenance():
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to turn off the node?')
+@streamed_cmd
def _turn_off(maintenance_on):
turn_off(maintenance_on)
@@ -223,6 +217,7 @@ def _turn_off(maintenance_on):
expose_value=False,
prompt='Are you sure you want to turn on the node?')
@click.argument('env_file')
+@streamed_cmd
def _turn_on(maintenance_off, sync_schains, env_file):
turn_on(maintenance_off, sync_schains, env_file)
@@ -237,5 +232,25 @@ def _turn_on(maintenance_off, sync_schains, env_file):
@click.option('--yes', is_flag=True, callback=abort_if_false,
expose_value=False,
prompt='Are you sure you want to set domain name?')
+@streamed_cmd
def _set_domain_name(domain):
set_domain_name(domain)
+
+
+@node.command(help='Check if node meet network requirements')
+@click.option(
+ '--network', '-n',
+ type=click.Choice(ALLOWED_ENV_TYPES),
+ default='mainnet',
+ help='Network to check'
+)
+def check(network):
+ run_checks(network)
+
+
+@node.command(help='Reconfigure iptables rules')
+@click.option('--yes', is_flag=True, callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to reconfigure firewall rules?')
+def configure_firewall():
+ configure_firewall_rules()
diff --git a/cli/resources_allocation.py b/node_cli/cli/resources_allocation.py
similarity index 88%
rename from cli/resources_allocation.py
rename to node_cli/cli/resources_allocation.py
index f2d8208b..c8ed758c 100644
--- a/cli/resources_allocation.py
+++ b/node_cli/cli/resources_allocation.py
@@ -20,12 +20,12 @@
import json
import click
-from core.resources import (get_resource_allocation_info,
- generate_resource_allocation_config)
-from core.helper import abort_if_false, safe_load_texts
-from tools.helper import session_config
+from node_cli.core.resources import (
+ get_resource_allocation_info,
+ generate_resource_allocation_config
+)
+from node_cli.utils.helper import abort_if_false, safe_load_texts
-config = session_config()
TEXTS = safe_load_texts()
diff --git a/cli/schains.py b/node_cli/cli/schains.py
similarity index 87%
rename from cli/schains.py
rename to node_cli/cli/schains.py
index ea7643f1..db70a49e 100644
--- a/cli/schains.py
+++ b/node_cli/cli/schains.py
@@ -19,11 +19,10 @@
import click
-from core.helper import abort_if_false
-from core.schains import (
+from node_cli.utils.helper import abort_if_false
+from node_cli.core.schains import (
describe,
get_schain_firewall_rules,
- show_checks,
show_config,
show_dkg_info,
show_schains,
@@ -68,17 +67,6 @@ def show_rules(schain_name: str) -> None:
get_schain_firewall_rules(schain_name)
-@schains.command(help="List of healthchecks for sChains served by connected node")
-@click.option(
- '--json',
- 'json_format',
- help='Show data in JSON format',
- is_flag=True
-)
-def checks(json_format: bool) -> None:
- show_checks(json_format)
-
-
@schains.command('repair', help='Toggle schain repair mode')
@click.argument('schain_name')
@click.option('--yes', is_flag=True, callback=abort_if_false,
diff --git a/cli/ssl.py b/node_cli/cli/ssl.py
similarity index 83%
rename from cli/ssl.py
rename to node_cli/cli/ssl.py
index 9e9298b8..2d0bfdfa 100644
--- a/cli/ssl.py
+++ b/node_cli/cli/ssl.py
@@ -20,16 +20,14 @@
import click
from terminaltables import SingleTable
-from configs import DEFAULT_SSL_CHECK_PORT, SSL_CERT_FILEPATH, SSL_KEY_FILEPATH
-from core.helper import (
- get_request,
- print_err_response,
- safe_load_texts
-)
-from core.ssl import check_cert, upload_cert
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.utils.helper import get_request, safe_load_texts, error_exit
+from node_cli.configs import DEFAULT_SSL_CHECK_PORT, SSL_CERT_FILEPATH, SSL_KEY_FILEPATH
+from node_cli.core.ssl import check_cert, upload_cert
TEXTS = safe_load_texts()
+BLUEPRINT_NAME = 'ssl'
@click.group()
@@ -44,7 +42,10 @@ def ssl():
@ssl.command(help="Status of the SSL certificates on the node")
def status():
- status, payload = get_request('ssl_status')
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='status'
+ )
if status == 'ok':
if payload.get('is_empty'):
print(TEXTS['ssl']['no_cert'])
@@ -57,7 +58,7 @@ def status():
print('SSL certificates status:')
print(table.table)
else:
- print_err_response(payload)
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
@ssl.command(help="Upload new SSL certificates")
@@ -78,7 +79,7 @@ def upload(key_path, cert_path, force):
if status == 'ok':
print(TEXTS['ssl']['uploaded'])
else:
- print_err_response(payload)
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
@ssl.command(help="Check certificates")
@@ -127,4 +128,4 @@ def check(key_path, cert_path, port, no_client, type_, no_wss):
if status == 'ok':
print(TEXTS['ssl']['check_passed'])
else:
- print_err_response(payload)
+ error_exit(payload, exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR)
diff --git a/cli/validate.py b/node_cli/cli/validate.py
similarity index 95%
rename from cli/validate.py
rename to node_cli/cli/validate.py
index f3377a55..f8134df9 100644
--- a/cli/validate.py
+++ b/node_cli/cli/validate.py
@@ -19,7 +19,7 @@
import click
-from core.host import validate_abi_files
+from node_cli.core.host import validate_abi_files
@click.group()
diff --git a/cli/wallet.py b/node_cli/cli/wallet.py
similarity index 94%
rename from cli/wallet.py
rename to node_cli/cli/wallet.py
index deed279f..543e1071 100644
--- a/cli/wallet.py
+++ b/node_cli/cli/wallet.py
@@ -21,8 +21,8 @@
import click
-from core.helper import abort_if_false
-from core.wallet import get_wallet_info, send_eth
+from node_cli.utils.helper import abort_if_false
+from node_cli.core.wallet import get_wallet_info, send_eth
logger = logging.getLogger(__name__)
diff --git a/configs/__init__.py b/node_cli/configs/__init__.py
similarity index 69%
rename from configs/__init__.py
rename to node_cli/configs/__init__.py
index 6681558b..0161c84d 100644
--- a/configs/__init__.py
+++ b/node_cli/configs/__init__.py
@@ -19,24 +19,39 @@
import os
import sys
-from pathlib import Path
-from configs.routes import ROUTES # noqa: F401
+from node_cli.utils.global_config import read_g_config
-HOME_DIR = os.getenv('HOME_DIR') or str(Path.home())
-SKALE_DIR = os.path.join(HOME_DIR, '.skale')
+
+GLOBAL_SKALE_DIR = os.getenv('GLOBAL_SKALE_DIR') or '/etc/skale'
+GLOBAL_SKALE_CONF_FILENAME = 'conf.json'
+GLOBAL_SKALE_CONF_FILEPATH = os.path.join(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILENAME)
+GLOBAL_CONFIG = read_g_config(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+
+G_CONF_USER = GLOBAL_CONFIG['user']
+G_CONF_HOME = os.getenv('TEST_HOME_DIR') or GLOBAL_CONFIG['home_dir']
+
+SKALE_STATE_DIR = '/var/lib/skale'
+FILESTORAGE_MAPPING = os.path.join(SKALE_STATE_DIR, 'filestorage')
+SNAPSHOTS_SHARED_VOLUME = 'shared-space'
+SCHAINS_MNT_DIR = '/mnt'
+
+SKALE_DIR = os.path.join(G_CONF_HOME, '.skale')
NODE_DATA_PATH = os.path.join(SKALE_DIR, 'node_data')
CONTAINER_CONFIG_PATH = os.path.join(SKALE_DIR, 'config')
CONTRACTS_PATH = os.path.join(SKALE_DIR, 'contracts_info')
BACKUP_CONTRACTS_PATH = os.path.join(SKALE_DIR, '.old_contracts_info')
INIT_ENV_FILEPATH = os.path.join(SKALE_DIR, '.env')
+SKALE_TMP_DIR = os.path.join(SKALE_DIR, '.tmp')
SGX_CERTIFICATES_DIR_NAME = 'sgx_certs'
COMPOSE_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'docker-compose.yml')
FILESTORAGE_INFO_FILE = os.path.join(CONTAINER_CONFIG_PATH, 'filestorage_info.json')
FILESTORAGE_ARTIFACTS_FILE = os.path.join(NODE_DATA_PATH, 'filestorage_artifacts.json')
-CONFIGS_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, 'configs.yml')
+ENVIRONMENT_PARAMS_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, 'environment_params.yaml')
+NGINX_TEMPLATE_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, 'nginx.conf.j2')
+NGINX_CONFIG_FILEPATH = os.path.join(NODE_DATA_PATH, 'nginx.conf')
LOG_PATH = os.path.join(NODE_DATA_PATH, 'log')
REMOVED_CONTAINERS_FOLDER_NAME = '.removed_containers'
@@ -48,16 +63,23 @@
SGX_CERTS_PATH = os.path.join(NODE_DATA_PATH, 'sgx_certs')
SCHAINS_DATA_PATH = os.path.join(NODE_DATA_PATH, 'schains')
-CONFIG_FILEPATH = os.environ.get('CONFIG_FILEPATH') or \
- os.path.join(SKALE_DIR, '.skale-cli.yaml')
-
-TOKENS_FILEPATH = os.path.join(NODE_DATA_PATH, 'tokens.json')
-
CURRENT_FILE_LOCATION = os.path.dirname(os.path.realpath(__file__))
DOTENV_FILEPATH = os.path.join(os.path.dirname(CURRENT_FILE_LOCATION), '.env')
+SRC_FILEBEAT_CONFIG_PATH = os.path.join(CONTAINER_CONFIG_PATH, 'filebeat.yml')
+FILEBEAT_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'filebeat.yml')
+
DOCKER_LVMPY_PATH = os.path.join(SKALE_DIR, 'docker-lvmpy')
+IPTABLES_DIR = '/etc/iptables/'
+IPTABLES_RULES_STATE_FILEPATH = os.path.join(IPTABLES_DIR, 'rules.v4')
+
+FLASK_SECRET_KEY_FILENAME = 'flask_db_key.txt'
+FLASK_SECRET_KEY_FILE = os.path.join(NODE_DATA_PATH, FLASK_SECRET_KEY_FILENAME)
+
+DOCKER_CONFIG_FILEPATH = '/etc/docker/daemon.json'
+HIDE_STREAM_LOG = os.getenv('HIDE_STREAM_LOG')
+
def _get_env():
try:
@@ -72,26 +94,21 @@ def _get_env():
if ENV == 'dev':
PARDIR = os.path.join(CURRENT_FILE_LOCATION, os.pardir)
+ PROJECT_DIR = os.path.join(PARDIR, os.pardir)
else:
PARDIR = os.path.join(sys._MEIPASS, 'data')
+ PROJECT_DIR = PARDIR
-TEXT_FILE = os.path.join(PARDIR, 'text.yml')
+TEXT_FILE = os.path.join(PROJECT_DIR, 'text.yml')
DATAFILES_FOLDER = os.path.join(PARDIR, 'datafiles')
-THIRDPARTY_FOLDER_PATH = os.path.join(DATAFILES_FOLDER, 'third_party')
-
-INSTALL_SCRIPT = os.path.join(DATAFILES_FOLDER, 'install.sh')
-BACKUP_INSTALL_SCRIPT = os.path.join(DATAFILES_FOLDER, 'backup-install.sh')
-UNINSTALL_SCRIPT = os.path.join(DATAFILES_FOLDER, 'uninstall.sh')
-UPDATE_SCRIPT = os.path.join(DATAFILES_FOLDER, 'update.sh')
-TURN_OFF_SCRIPT = os.path.join(DATAFILES_FOLDER, 'turn-off.sh')
-TURN_ON_SCRIPT = os.path.join(DATAFILES_FOLDER, 'turn-on.sh')
-REDIS_DATA_PATH = os.path.join(NODE_DATA_PATH, 'redis-data')
SKALED_SSL_TEST_SCRIPT = os.path.join(DATAFILES_FOLDER, 'skaled-ssl-test')
ALLOCATION_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH,
'schain_allocation.yml')
+REDIS_DATA_PATH = os.path.join(NODE_DATA_PATH, 'redis-data')
+
LONG_LINE = '-' * 50
ADMIN_PORT = 3007
@@ -101,12 +118,7 @@ def _get_env():
DEFAULT_NODE_BASE_PORT = 10000
BACKUP_ARCHIVE_NAME = 'skale-node-backup'
-MYSQL_BACKUP_FILE_NAME = 'backup.sql'
-MYSQL_BACKUP_FOLDER = os.path.join(SKALE_DIR, NODE_DATA_PATH, '.mysql-backup')
-MYSQL_BACKUP_CONTAINER_FOLDER = '/mysql-backup'
-MYSQL_BACKUP_PATH = os.path.join(MYSQL_BACKUP_FOLDER, MYSQL_BACKUP_FILE_NAME)
-MYSQL_BACKUP_CONTAINER_PATH = os.path.join(MYSQL_BACKUP_CONTAINER_FOLDER,
- MYSQL_BACKUP_FILE_NAME)
+
SSL_FOLDER_PATH = os.path.join(NODE_DATA_PATH, 'ssl')
SSL_CERT_FILEPATH = os.path.join(SSL_FOLDER_PATH, 'ssl_cert')
SSL_KEY_FILEPATH = os.path.join(SSL_FOLDER_PATH, 'ssl_key')
@@ -119,6 +131,7 @@ def _get_env():
IMA_CONTRACTS_FILEPATH = os.path.join(CONTRACTS_PATH, 'ima.json')
META_FILEPATH = os.path.join(NODE_DATA_PATH, 'meta.json')
+
DEFAULT_SSL_CHECK_PORT = 4536
SKALE_NODE_REPO_URL = 'https://github.com/skalenetwork/skale-node.git'
diff --git a/configs/cli_logger.py b/node_cli/configs/cli_logger.py
similarity index 84%
rename from configs/cli_logger.py
rename to node_cli/configs/cli_logger.py
index dceb90d0..f0e4dff5 100644
--- a/configs/cli_logger.py
+++ b/node_cli/configs/cli_logger.py
@@ -18,10 +18,11 @@
# along with this program. If not, see .
import os
-from configs import SKALE_DIR
+from node_cli.configs import SKALE_DIR
-FILE_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
+LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
STREAM_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
+FILE_LOG_FORMAT = '[%(asctime)s %(levelname)s] %(name)s:%(lineno)d - %(threadName)s - %(message)s' # noqa
LOG_FILE_SIZE_MB = 300
LOG_FILE_SIZE_BYTES = LOG_FILE_SIZE_MB * 1000000
diff --git a/configs/env.py b/node_cli/configs/env.py
similarity index 89%
rename from configs/env.py
rename to node_cli/configs/env.py
index 46a0232c..91215c52 100644
--- a/configs/env.py
+++ b/node_cli/configs/env.py
@@ -1,5 +1,10 @@
import os
from dotenv import load_dotenv
+from node_cli.configs import SKALE_DIR, CONTAINER_CONFIG_PATH
+
+
+SKALE_DIR_ENV_FILEPATH = os.path.join(SKALE_DIR, '.env')
+CONFIGS_ENV_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, '.env')
ALLOWED_ENV_TYPES = ['mainnet', 'testnet', 'qanet', 'devnet']
@@ -9,10 +14,6 @@
'IMA_ENDPOINT': '',
'CONTAINER_CONFIGS_STREAM': '',
'ENDPOINT': '',
- 'DB_USER': 'root',
- 'DB_PASSWORD': '',
- 'DB_ROOT_PASSWORD': '',
- 'DB_PORT': '3306',
'MANAGER_CONTRACTS_ABI_URL': '',
'IMA_CONTRACTS_ABI_URL': '',
'FILEBEAT_HOST': '',
diff --git a/configs/resource_allocation.py b/node_cli/configs/resource_allocation.py
similarity index 94%
rename from configs/resource_allocation.py
rename to node_cli/configs/resource_allocation.py
index d59bc508..e51d6ec9 100644
--- a/configs/resource_allocation.py
+++ b/node_cli/configs/resource_allocation.py
@@ -18,11 +18,11 @@
# along with this program. If not, see .
import os
-from configs import NODE_DATA_PATH
+from node_cli.configs import NODE_DATA_PATH
LARGE_DIVIDER = 1
-MEDIUM_DIVIDER = 32
-TEST_DIVIDER = 32
+MEDIUM_DIVIDER = 8
+TEST_DIVIDER = 8
SMALL_DIVIDER = 128
TIMES = 1
diff --git a/node_cli/configs/routes.py b/node_cli/configs/routes.py
new file mode 100644
index 00000000..19e7847e
--- /dev/null
+++ b/node_cli/configs/routes.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2020 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import os
+
+
+CURRENT_API_VERSION = 'v1'
+API_PREFIX = '/api'
+
+ROUTES = {
+ 'v1': {
+ 'node': ['info', 'register', 'maintenance-on', 'maintenance-off', 'signature',
+ 'send-tg-notification', 'exit/start', 'exit/status', 'set-domain-name'],
+ 'health': ['containers', 'schains', 'sgx'],
+ 'schains': ['config', 'list', 'dkg-statuses', 'firewall-rules', 'repair', 'get'],
+ 'ssl': ['status', 'upload'],
+ 'wallet': ['info', 'send-eth']
+ }
+}
+
+
+class RouteNotFoundException(Exception):
+ """Raised when requested route is not found in provided API version"""
+
+
+def route_exists(blueprint, method, api_version):
+ return ROUTES.get(api_version) and ROUTES[api_version].get(blueprint) and \
+ method in ROUTES[api_version][blueprint]
+
+
+def get_route(blueprint, method, api_version=CURRENT_API_VERSION, check=True):
+ route = os.path.join(API_PREFIX, api_version, blueprint, method)
+ if check and not route_exists(blueprint, method, api_version):
+ raise RouteNotFoundException(route)
+ return route
+
+
+def get_all_available_routes(api_version=CURRENT_API_VERSION):
+ routes = ROUTES[api_version]
+ return [get_route(blueprint, method, api_version) for blueprint in routes
+ for method in routes[blueprint]]
diff --git a/core/__init__.py b/node_cli/core/__init__.py
similarity index 100%
rename from core/__init__.py
rename to node_cli/core/__init__.py
diff --git a/node_cli/core/checks.py b/node_cli/core/checks.py
new file mode 100644
index 00000000..56d760db
--- /dev/null
+++ b/node_cli/core/checks.py
@@ -0,0 +1,336 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2019 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+
+import inspect
+import json
+import logging
+import os
+import psutil
+import shutil
+import socket
+from collections import namedtuple
+from functools import wraps
+from typing import Dict, List
+
+import docker
+import yaml
+from debian import debian_support
+from packaging.version import parse as version_parse
+
+from node_cli.configs import (
+ DOCKER_CONFIG_FILEPATH, ENVIRONMENT_PARAMS_FILEPATH
+)
+from node_cli.utils.helper import run_cmd
+
+logger = logging.getLogger(__name__)
+
+
+CheckResult = namedtuple('CheckResult', ['name', 'status', 'info'])
+ListChecks = List[CheckResult]
+
+
+NETWORK_CHECK_TIMEOUT = 4
+CLOUDFLARE_DNS_HOST = '1.1.1.1'
+CLOUDFLARE_DNS_HOST_PORT = 443
+
+
+def get_env_params(network: str = 'mainnet'):
+ with open(ENVIRONMENT_PARAMS_FILEPATH) as requirements_file:
+ ydata = yaml.load(requirements_file, Loader=yaml.Loader)
+ return ydata['envs'][network]
+
+
+def node_check(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception as err:
+            logger.exception('%s check errored', func.__name__)
+ return CheckResult(
+ name=func.__name__,
+ status='error',
+ info=repr(err)
+ )
+ return wrapper
+
+
+class BaseChecker:
+ def _ok(self, name: str, info=None) -> CheckResult:
+ return CheckResult(name=name, status='ok', info=info)
+
+ def _failed(self, name: str, info=None) -> CheckResult:
+ return CheckResult(name=name, status='failed', info=info)
+
+ def check(self) -> ListChecks:
+ myself = inspect.stack()[0][3]
+ check_methods = inspect.getmembers(
+ type(self),
+ predicate=lambda m: inspect.isfunction(m) and
+ not m.__name__.startswith('_') and not m.__name__ == myself
+ )
+ return [cm[1](self) for cm in check_methods]
+
+
+class MachineChecker(BaseChecker):
+ def __init__(self, requirements: Dict) -> None:
+ self.requirements = requirements
+
+ @node_check
+ def cpu_total(self) -> CheckResult:
+ name = 'cpu-total'
+ actual = psutil.cpu_count(logical=True)
+ expected = self.requirements['cpu_total']
+ info = f'Expected {expected} logical cores, actual {actual} cores'
+ if actual < expected:
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ @node_check
+ def cpu_physical(self) -> CheckResult:
+ name = 'cpu-physical'
+ actual = psutil.cpu_count(logical=False)
+ expected = self.requirements['cpu_physical']
+ info = f'Expected {expected} physical cores, actual {actual} cores'
+ if actual < expected:
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ @node_check
+ def memory(self) -> CheckResult:
+ name = 'memory'
+        # total physical memory in bytes
+        actual = psutil.virtual_memory().total
+ expected = self.requirements['memory']
+ actual_gb = round(actual / 1024 ** 3, 2)
+ expected_gb = round(expected / 1024 ** 3, 2)
+ info = f'Expected RAM {expected_gb} GB, actual {actual_gb} GB'
+ if actual < expected:
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ @node_check
+ def swap(self) -> CheckResult:
+ name = 'swap'
+ actual = psutil.swap_memory().total
+ expected = self.requirements['swap']
+ actual_gb = round(actual / 1024 ** 3, 2)
+ expected_gb = round(expected / 1024 ** 3, 2)
+ info = f'Expected swap memory {expected_gb} GB, actual {actual_gb} GB'
+ if actual < expected:
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ @node_check
+ def network(self) -> CheckResult:
+ name = 'network'
+ try:
+ socket.setdefaulttimeout(NETWORK_CHECK_TIMEOUT)
+ socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
+ (CLOUDFLARE_DNS_HOST, CLOUDFLARE_DNS_HOST_PORT))
+ return self._ok(name=name)
+ except socket.error as err:
+ info = f'Network checking returned error: {err}'
+ return self._failed(name=name, info=info)
+
+
+class PackagesChecker(BaseChecker):
+ def __init__(self, requirements: dict) -> None:
+ self.requirements = requirements
+
+ @node_check
+ def iptables_persistent(self) -> CheckResult:
+ return self._check_apt_package('iptables-persistent')
+
+ @node_check
+ def lvm2(self) -> CheckResult:
+ return self._check_apt_package('lvm2')
+
+ @node_check
+ def btrfs_progs(self) -> CheckResult:
+ return self._check_apt_package('btrfs-progs')
+
+ @node_check
+ def lsof(self) -> CheckResult:
+ return self._check_apt_package('lsof')
+
+ @node_check
+ def psmisc(self) -> CheckResult:
+ return self._check_apt_package('psmisc')
+
+ def _version_from_dpkg_output(self, output: str) -> str:
+ info_lines = map(lambda s: s.strip(), output.split('\n'))
+ v_line = next(filter(
+ lambda s: s.startswith('Version'),
+ info_lines
+ ))
+ return v_line.split()[1]
+
+ def _check_apt_package(self, package_name: str,
+ version: str = None) -> CheckResult:
+ # TODO: check versions
+ dpkg_cmd_result = run_cmd(
+ ['dpkg', '-s', package_name], check_code=False)
+ output = dpkg_cmd_result.stdout.decode('utf-8').strip()
+ if dpkg_cmd_result.returncode != 0:
+ return self._failed(name=package_name, info=output)
+
+ actual_version = self._version_from_dpkg_output(output)
+ expected_version = self.requirements[package_name]
+ info = {
+ 'expected_version': expected_version,
+ 'actual_version': actual_version
+ }
+ compare_result = debian_support.version_compare(
+ actual_version, expected_version
+ )
+ if compare_result == -1:
+ return self._failed(name=package_name, info=info)
+ else:
+ return self._ok(name=package_name, info=info)
+
+
+class DockerChecker(BaseChecker):
+ def __init__(self, requirements: dict) -> None:
+ self.docker_client = docker.from_env()
+ self.requirements = requirements
+
+ def _check_docker_command(self) -> str:
+ return shutil.which('docker')
+
+ def _get_docker_version_info(self) -> dict:
+ try:
+ return self.docker_client.version()
+ except Exception as err:
+ logger.error(f'Request to docker api failed {err}')
+
+ @node_check
+ def docker_engine(self) -> CheckResult:
+ name = 'docker-engine'
+ if self._check_docker_command() is None:
+ return self._failed(name=name, info='No such command: "docker"')
+
+ version_info = self._get_docker_version_info()
+ if not version_info:
+ return self._failed(
+ name=name,
+ info='Docker api request failed. Is docker installed?'
+ )
+ logger.info('Docker version info %s', version_info)
+ actual_version = self.docker_client.version()['Version']
+ expected_version = self.requirements['docker-engine']
+ info = {
+ 'expected_version': expected_version,
+ 'actual_version': actual_version
+ }
+ if version_parse(actual_version) < version_parse(expected_version):
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ @node_check
+ def docker_api(self) -> CheckResult:
+ name = 'docker-api'
+ if self._check_docker_command() is None:
+ return self._failed(name=name, info='No such command: "docker"')
+
+ version_info = self._get_docker_version_info()
+ if not version_info:
+ return self._failed(
+ name=name,
+ info='Docker api request failed. Is docker installed?'
+ )
+ logger.info('Docker version info %s', version_info)
+ actual_version = version_info['ApiVersion']
+ expected_version = self.requirements['docker-api']
+ info = {
+ 'expected_version': expected_version,
+ 'actual_version': actual_version
+ }
+ if version_parse(actual_version) < version_parse(expected_version):
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ @node_check
+ def docker_compose(self) -> CheckResult:
+ name = 'docker-compose'
+ cmd = shutil.which('docker-compose')
+ if cmd is None:
+ info = 'No such command: "docker-compose"'
+ return self._failed(name=name, info=info)
+
+ v_cmd_result = run_cmd(['docker-compose', '-v'], check_code=False)
+ output = v_cmd_result.stdout.decode('utf-8').rstrip()
+ if v_cmd_result.returncode != 0:
+ output = v_cmd_result.stdout.decode('utf-8')
+ info = f'Checking docker-compose version failed with: {output}'
+            return self._failed(name=name, info=info)
+
+ actual_version = output.split(',')[0].split()[-1].strip()
+ expected_version = self.requirements['docker-compose']
+
+ info = {
+ 'expected_version': expected_version,
+ 'actual_version': actual_version
+ }
+ info = f'Expected docker-compose version {expected_version}, actual {actual_version}' # noqa
+ if version_parse(actual_version) < version_parse(expected_version):
+ return self._failed(name=name, info=info)
+ else:
+ return self._ok(name=name, info=info)
+
+ def _get_docker_config(self) -> dict:
+ if not os.path.isfile(DOCKER_CONFIG_FILEPATH):
+ logger.error(f'No such file {DOCKER_CONFIG_FILEPATH}')
+ return {}
+ with open(DOCKER_CONFIG_FILEPATH) as docker_config_file:
+ try:
+ docker_config = json.load(docker_config_file)
+ except json.decoder.JSONDecodeError as err:
+ logger.error(f'Loading docker config json failed with {err}')
+ return {}
+ return docker_config
+
+ def _check_docker_alive_option(self, config: dict) -> tuple:
+ actual_value = config.get('live-restore', None)
+ if actual_value is not True:
+ info = (
+ 'Docker daemon live-restore option '
+ 'should be set as "true"'
+ )
+ return False, info
+ else:
+ info = 'Docker daemon live-restore option is set as "true"'
+ return True, info
+
+ @node_check
+ def keeping_containers_alive(self) -> CheckResult:
+ name = 'live-restore'
+ config = self._get_docker_config()
+ is_ok, info = self._check_docker_alive_option(config)
+ if is_ok:
+ return self._ok(name=name, info=info)
+ else:
+ return self._failed(name=name, info=info)
diff --git a/node_cli/core/health.py b/node_cli/core/health.py
new file mode 100644
index 00000000..623c20d7
--- /dev/null
+++ b/node_cli/core/health.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2020 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import json
+from terminaltables import SingleTable
+
+from node_cli.utils.print_formatters import (
+ print_containers,
+ print_schains_healthchecks
+)
+from node_cli.utils.helper import error_exit, get_request
+from node_cli.utils.exit_codes import CLIExitCodes
+
+
+BLUEPRINT_NAME = 'health'
+
+
+def get_containers(_all):
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='containers',
+ params={'all': _all}
+ )
+ if status == 'ok':
+ print_containers(payload)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def get_schains_checks(json_format: bool = False) -> None:
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='schains'
+ )
+ if status == 'ok':
+ if not payload:
+ print('No sChains found')
+ return
+ if json_format:
+ print(json.dumps(payload))
+ else:
+ print_schains_healthchecks(payload)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def get_sgx_info():
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='sgx'
+ )
+ if status == 'ok':
+ data = payload
+ table_data = [
+ ['SGX info', ''],
+ ['Server URL', data['sgx_server_url']],
+ ['SGXWallet Version', data['sgx_wallet_version']],
+ ['Node SGX keyname', data['sgx_keyname']],
+ ['Status', data['status_name']]
+ ]
+ table = SingleTable(table_data)
+ print(table.table)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
diff --git a/core/host.py b/node_cli/core/host.py
similarity index 62%
rename from core/host.py
rename to node_cli/core/host.py
index 935ab9e3..4c748599 100644
--- a/core/host.py
+++ b/node_cli/core/host.py
@@ -23,23 +23,29 @@
from shutil import copyfile
from urllib.parse import urlparse
-from core.helper import validate_abi
-from core.resources import update_resource_allocation
-
-from configs import (ADMIN_PORT,
- DEFAULT_URL_SCHEME, NODE_DATA_PATH,
- SKALE_DIR, CONTAINER_CONFIG_PATH, CONTRACTS_PATH,
- ETH_STATE_PATH, NODE_CERTS_PATH, SGX_CERTS_PATH,
- REDIS_DATA_PATH, SCHAINS_DATA_PATH,
- MYSQL_BACKUP_FOLDER, REMOVED_CONTAINERS_FOLDER_PATH,
- IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH)
-from configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
-from configs.cli_logger import LOG_DATA_PATH
-from core.print_formatters import print_abi_validation_errors
-from configs.resource_allocation import (DISK_MOUNTPOINT_FILEPATH,
- SGX_SERVER_URL_FILEPATH)
-
-from core.helper import safe_load_texts
+from node_cli.core.resources import update_resource_allocation
+from node_cli.core.checks import (
+ get_env_params, DockerChecker, ListChecks,
+ MachineChecker, PackagesChecker
+)
+
+from node_cli.configs import (
+ ADMIN_PORT, DEFAULT_URL_SCHEME, NODE_DATA_PATH,
+ SKALE_DIR, CONTAINER_CONFIG_PATH, CONTRACTS_PATH,
+ ETH_STATE_PATH, NODE_CERTS_PATH, SGX_CERTS_PATH,
+ REDIS_DATA_PATH, SCHAINS_DATA_PATH, LOG_PATH,
+ REMOVED_CONTAINERS_FOLDER_PATH,
+ IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH,
+ SKALE_TMP_DIR
+)
+from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
+from node_cli.configs.cli_logger import LOG_DATA_PATH
+from node_cli.configs.env import SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH
+from node_cli.utils.print_formatters import print_abi_validation_errors
+from node_cli.configs.resource_allocation import (DISK_MOUNTPOINT_FILEPATH,
+ SGX_SERVER_URL_FILEPATH)
+
+from node_cli.utils.helper import safe_load_texts, validate_abi
TEXTS = safe_load_texts()
@@ -75,6 +81,22 @@ def prepare_host(env_filepath, disk_mountpoint, sgx_server_url, env_type,
update_resource_allocation(env_type)
+def run_preinstall_checks(env_type: str = 'mainnet') -> ListChecks:
+ logger.info('Checking that host meets requirements ...')
+ requirements = get_env_params(env_type)
+ checkers = [
+ MachineChecker(requirements['server']),
+ PackagesChecker(requirements['package']),
+ DockerChecker(requirements['docker'])
+ ]
+ result = []
+ for checker in checkers:
+ result.extend(filter(lambda r: r.status != 'ok', checker.check()))
+ if result:
+        logger.info('Host does not fully meet the requirements')
+ return result
+
+
def is_node_inited():
return os.path.isfile(RESOURCE_ALLOCATION_FILEPATH)
@@ -83,8 +105,9 @@ def make_dirs():
for dir_path in (
SKALE_DIR, NODE_DATA_PATH, CONTAINER_CONFIG_PATH,
CONTRACTS_PATH, ETH_STATE_PATH, NODE_CERTS_PATH,
- MYSQL_BACKUP_FOLDER, REMOVED_CONTAINERS_FOLDER_PATH,
- SGX_CERTS_PATH, SCHAINS_DATA_PATH, REDIS_DATA_PATH
+ REMOVED_CONTAINERS_FOLDER_PATH,
+ SGX_CERTS_PATH, SCHAINS_DATA_PATH, LOG_PATH, REDIS_DATA_PATH,
+ SKALE_TMP_DIR
):
safe_mk_dirs(dir_path)
@@ -102,7 +125,13 @@ def save_sgx_server_url(sgx_server_url):
def save_env_params(env_filepath):
- copyfile(env_filepath, os.path.join(SKALE_DIR, '.env'))
+ copyfile(env_filepath, SKALE_DIR_ENV_FILEPATH)
+
+
+def link_env_file():
+ if not (os.path.islink(CONFIGS_ENV_FILEPATH) or os.path.isfile(CONFIGS_ENV_FILEPATH)):
+ logger.info(f'Creating symlink {SKALE_DIR_ENV_FILEPATH} → {CONFIGS_ENV_FILEPATH}')
+ os.symlink(SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH)
def init_logs_dir():
diff --git a/node_cli/core/iptables.py b/node_cli/core/iptables.py
new file mode 100644
index 00000000..796f9492
--- /dev/null
+++ b/node_cli/core/iptables.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import logging
+import sys
+from pathlib import Path
+from node_cli.configs import IPTABLES_DIR, IPTABLES_RULES_STATE_FILEPATH
+from node_cli.utils.helper import run_cmd
+
+
+logger = logging.getLogger(__name__)
+
+try:
+ import iptc
+except (FileNotFoundError, AttributeError) as err:
+ if "pytest" in sys.modules:
+ from collections import namedtuple # hotfix for tests
+ iptc = namedtuple('iptc', ['Chain', 'Rule'])
+ else:
+ logger.error(f'Unable to import iptc due to an error {err}')
+
+
+ALLOWED_INCOMING_TCP_PORTS = [
+ '80', # filestorage
+ '22', # ssh
+ '311', # watchdog https
+ '8080', # http
+ '443', # https
+ '53', # dns
+ '3009', # watchdog http
+ '9100' # node exporter
+]
+
+ALLOWED_INCOMING_UDP_PORTS = [
+ '53' # dns
+]
+
+
+def configure_iptables():
+ """
+ This is the main function used for the initial setup of the firewall rules on the SKALE Node
+ host machine
+ """
+ logger.info('Configuring iptables...')
+    if 'iptc' not in globals():
+        raise ImportError('Unable to import iptc package')
+ Path(IPTABLES_DIR).mkdir(parents=True, exist_ok=True)
+
+ tb = iptc.Table(iptc.Table.FILTER)
+ input_chain = iptc.Chain(tb, 'INPUT')
+
+ set_base_policies()
+ allow_loopback(input_chain)
+ accept_icmp(input_chain)
+ allow_conntrack(input_chain)
+ allow_base_ports(input_chain)
+ drop_all_tcp(input_chain)
+ drop_all_udp(input_chain)
+ save_iptables_rules_state()
+
+
+def save_iptables_rules_state():
+ res = run_cmd(['iptables-save'])
+ plain_rules = res.stdout.decode('utf-8').rstrip()
+ with open(IPTABLES_RULES_STATE_FILEPATH, 'w') as state_file:
+ state_file.write(plain_rules)
+
+
+def set_base_policies() -> None:
+ """Drop all incoming, allow all outcoming, drop all forwarding"""
+ logger.debug('Setting base policies...')
+ iptc.easy.set_policy(iptc.Table.FILTER, 'INPUT', 'ACCEPT')
+ iptc.easy.set_policy(iptc.Table.FILTER, 'OUTPUT', 'ACCEPT')
+ iptc.easy.set_policy(iptc.Table.FILTER, 'FORWARD', 'DROP')
+
+
+def allow_loopback(chain: iptc.Chain) -> None:
+ """Allow local loopback services"""
+ logger.debug('Allowing loopback packages...')
+ rule = iptc.Rule()
+ rule.target = iptc.Target(rule, 'ACCEPT')
+ rule.in_interface = 'lo'
+ ensure_rule(chain, rule)
+
+
+def allow_conntrack(chain: iptc.Chain) -> None:
+ """Allow conntrack established connections"""
+ logger.debug('Allowing conntrack...')
+ rule = iptc.Rule()
+ rule.target = iptc.Target(rule, 'ACCEPT')
+ match = iptc.Match(rule, 'conntrack')
+ chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'INPUT')
+ match.ctstate = 'RELATED,ESTABLISHED'
+ rule.add_match(match)
+ ensure_rule(chain, rule)
+
+
+def drop_all_tcp(chain: iptc.Chain) -> None:
+ """Drop the rest of tcp connections"""
+ logger.debug('Adding drop tcp rule ...')
+ r = iptc.Rule()
+ t = iptc.Target(r, 'DROP')
+ r.target = t
+ r.protocol = 'tcp'
+ ensure_rule(chain, r)
+
+
+def drop_all_udp(chain: iptc.Chain) -> None:
+ """Drop the rest of udp connections """
+ logger.debug('Adding drop udp rule ...')
+ r = iptc.Rule()
+ t = iptc.Target(r, 'DROP')
+ r.target = t
+ r.protocol = 'udp'
+ ensure_rule(chain, r)
+
+
+def allow_base_ports(chain: iptc.Chain) -> None:
+ logger.debug('Allowing base ports...')
+ for port in ALLOWED_INCOMING_TCP_PORTS:
+ accept_incoming(chain, port, 'tcp')
+ for port in ALLOWED_INCOMING_UDP_PORTS:
+ accept_incoming(chain, port, 'udp')
+
+
+def accept_incoming(chain, port, protocol) -> None:
+ rule = iptc.Rule()
+ rule.protocol = protocol
+ match = iptc.Match(rule, protocol)
+ match.dport = port
+ t = iptc.Target(rule, 'ACCEPT')
+ rule.target = t
+ rule.add_match(match)
+ ensure_rule(chain, rule, insert=True)
+
+
+def accept_icmp(chain: iptc.Chain) -> None:
+ add_icmp_rule(chain, 'destination-unreachable')
+ add_icmp_rule(chain, 'source-quench')
+ add_icmp_rule(chain, 'time-exceeded')
+
+
+def add_icmp_rule(chain: iptc.Chain, icmp_type: str) -> None:
+ rule = iptc.Rule()
+ rule.protocol = 'icmp'
+ match = iptc.Match(rule, 'icmp')
+ match.icmp_type = icmp_type
+ t = iptc.Target(rule, 'ACCEPT')
+ rule.target = t
+ rule.add_match(match)
+ ensure_rule(chain, rule)
+
+
+def ensure_rule(chain: iptc.Chain, rule: iptc.Rule, insert=False) -> None:
+ if rule not in chain.rules:
+ logger.debug(f'Adding rule: {rule_to_dict(rule)}, chain: {chain.name}')
+ if insert:
+ chain.insert_rule(rule)
+ else:
+ chain.append_rule(rule)
+ else:
+ logger.debug(f'Rule already present: {rule_to_dict(rule)}, chain: {chain.name}')
+
+
+def rule_to_dict(rule):
+ return {
+ 'proto': rule.protocol,
+ 'src': rule.src,
+ 'dst': rule.dst,
+ 'in_interface': rule.in_interface,
+ 'out': rule.out_interface,
+ 'target': rule.target.name,
+ }
diff --git a/node_cli/core/logs.py b/node_cli/core/logs.py
new file mode 100644
index 00000000..d76a2207
--- /dev/null
+++ b/node_cli/core/logs.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import os
+import shutil
+import logging
+import datetime
+
+from node_cli.core.host import safe_mk_dirs
+from node_cli.utils.helper import run_cmd
+from node_cli.utils.docker_utils import (
+ save_container_logs, get_containers
+)
+from node_cli.configs import REMOVED_CONTAINERS_FOLDER_PATH, SKALE_TMP_DIR
+from node_cli.configs.cli_logger import LOG_DATA_PATH
+
+
+logger = logging.getLogger(__name__)
+
+
+def create_logs_dump(path, filter_container=None):
+ dump_folder_path, dump_folder_name = create_dump_dir()
+
+ containers_logs_path = os.path.join(dump_folder_path, 'containers')
+ cli_logs_path = os.path.join(dump_folder_path, 'cli')
+ removed_containers_logs_path = os.path.join(dump_folder_path, 'removed_containers')
+ archive_path = os.path.join(path, f'{dump_folder_name}.tar.gz')
+
+ if filter_container:
+ containers = get_containers(filter_container)
+ else:
+ containers = get_containers('skale')
+
+ for container in containers:
+ log_filepath = os.path.join(containers_logs_path, f'{container.name}.log')
+ save_container_logs(container, log_filepath, tail='all')
+
+ shutil.copytree(LOG_DATA_PATH, cli_logs_path)
+ shutil.copytree(REMOVED_CONTAINERS_FOLDER_PATH, removed_containers_logs_path)
+ create_archive(archive_path, dump_folder_path)
+ rm_dump_dir(dump_folder_path)
+ if not os.path.isfile(archive_path):
+ return None
+ return archive_path
+
+
+def create_dump_dir():
+ time = datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S")
+ folder_name = f'skale-logs-dump-{time}'
+ folder_path = os.path.join(SKALE_TMP_DIR, folder_name)
+ containers_path = os.path.join(folder_path, 'containers')
+ logger.debug(f'Creating tmp dir for logs dump: {folder_path}')
+ safe_mk_dirs(containers_path)
+ return folder_path, folder_name
+
+
+def rm_dump_dir(dump_folder_path: str) -> None:
+ logger.debug(f'Going to remove tmp dir with logs dump: {dump_folder_path}')
+ shutil.rmtree(dump_folder_path)
+
+
+def create_archive(archive_path, source_path):
+ run_cmd(['tar', '-czvf', archive_path, '-C', source_path, '.'])
diff --git a/node_cli/core/nginx.py b/node_cli/core/nginx.py
new file mode 100644
index 00000000..d3d9b60f
--- /dev/null
+++ b/node_cli/core/nginx.py
@@ -0,0 +1,26 @@
+import logging
+import os.path
+
+from node_cli.configs import NODE_CERTS_PATH, NGINX_TEMPLATE_FILEPATH, NGINX_CONFIG_FILEPATH
+from node_cli.utils.helper import process_template
+
+
+logger = logging.getLogger(__name__)
+
+SSL_KEY_NAME = 'ssl_key'
+SSL_CRT_NAME = 'ssl_cert'
+
+
+def generate_nginx_config():
+ ssl_on = check_ssl_certs()
+ template_data = {
+ 'ssl': ssl_on,
+ }
+ logger.info(f'Processing nginx template. ssl: {ssl_on}')
+ process_template(NGINX_TEMPLATE_FILEPATH, NGINX_CONFIG_FILEPATH, template_data)
+
+
+def check_ssl_certs():
+ crt_path = os.path.join(NODE_CERTS_PATH, SSL_CRT_NAME)
+ key_path = os.path.join(NODE_CERTS_PATH, SSL_KEY_NAME)
+ return os.path.exists(crt_path) and os.path.exists(key_path)
diff --git a/node_cli/core/node.py b/node_cli/core/node.py
new file mode 100644
index 00000000..d474858d
--- /dev/null
+++ b/node_cli/core/node.py
@@ -0,0 +1,397 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2019 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import datetime
+import logging
+import os
+import tarfile
+import time
+from contextlib import contextmanager
+from enum import Enum
+from pathlib import Path
+from typing import Tuple
+
+import docker
+
+from node_cli.configs import (
+ FILESTORAGE_MAPPING,
+ SKALE_DIR,
+ INIT_ENV_FILEPATH,
+ BACKUP_ARCHIVE_NAME,
+ RESTORE_SLEEP_TIMEOUT,
+ SCHAINS_MNT_DIR,
+ TM_INIT_TIMEOUT,
+ LOG_PATH
+)
+from node_cli.configs.cli_logger import LOG_DATA_PATH as CLI_LOG_DATA_PATH
+
+from node_cli.core.iptables import configure_iptables
+from node_cli.core.host import (
+ is_node_inited, save_env_params,
+ get_flask_secret_key, run_preinstall_checks
+)
+from node_cli.core.resources import update_resource_allocation
+from node_cli.operations import (
+ update_op,
+ init_op, turn_off_op, turn_on_op, restore_op
+)
+from node_cli.utils.print_formatters import (
+ print_failed_requirements_checks, print_node_cmd_error, print_node_info
+)
+from node_cli.utils.helper import error_exit, get_request, post_request
+from node_cli.utils.helper import extract_env_params
+from node_cli.utils.texts import Texts
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.utils.decorators import (
+ check_not_inited,
+ check_inited,
+ check_user
+)
+
+
+logger = logging.getLogger(__name__)
+TEXTS = Texts()
+
+BASE_CONTAINERS_AMOUNT = 5
+BLUEPRINT_NAME = 'node'
+
+
+class NodeStatuses(Enum):
+ """This class contains possible node statuses"""
+ ACTIVE = 0
+ LEAVING = 1
+ FROZEN = 2
+ IN_MAINTENANCE = 3
+ LEFT = 4
+ NOT_CREATED = 5
+
+
+@check_inited
+@check_user
+def register_node(name, p2p_ip,
+ public_ip, port, domain_name,
+ gas_limit=None,
+ gas_price=None,
+ skip_dry_run=False):
+
+ if not is_node_inited():
+ print(TEXTS['node']['not_inited'])
+ return
+
+ # todo: add name, ips and port checks
+ json_data = {
+ 'name': name,
+ 'ip': p2p_ip,
+ 'publicIP': public_ip,
+ 'port': port,
+ 'domain_name': domain_name,
+ 'gas_limit': gas_limit,
+ 'gas_price': gas_price,
+ 'skip_dry_run': skip_dry_run
+ }
+ status, payload = post_request(
+ blueprint=BLUEPRINT_NAME,
+ method='register',
+ json=json_data
+ )
+ if status == 'ok':
+ msg = TEXTS['node']['registered']
+ logger.info(msg)
+ print(msg)
+ else:
+ error_msg = payload
+ logger.error(f'Registration error {error_msg}')
+ error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+@check_not_inited
+def init(env_filepath):
+ env = get_node_env(env_filepath)
+ if env is None:
+ return
+ init_result = init_op(env_filepath, env)
+ if not init_result:
+ error_exit(
+ 'Init operation failed',
+ exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
+ )
+ return
+ logger.info('Waiting for containers initialization')
+ time.sleep(TM_INIT_TIMEOUT)
+ if not is_base_containers_alive():
+ error_exit(
+ 'Containers are not running',
+ exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
+ )
+ return
+ logger.info('Generating resource allocation file ...')
+ update_resource_allocation(env['ENV_TYPE'])
+ logger.info('Init procedure finished')
+
+
+@check_not_inited
+def restore(backup_path, env_filepath):
+ env = get_node_env(env_filepath)
+ if env is None:
+ return
+ save_env_params(env_filepath)
+ env['SKALE_DIR'] = SKALE_DIR
+ env['BACKUP_RUN'] = 'True' # should be str
+ restore_op(env, backup_path)
+ time.sleep(RESTORE_SLEEP_TIMEOUT)
+ logger.info('Generating resource allocation file ...')
+ update_resource_allocation(env['ENV_TYPE'])
+ print('Node is restored from backup')
+
+
+def get_node_env(env_filepath, inited_node=False, sync_schains=None):
+ if env_filepath is not None:
+ env_params = extract_env_params(env_filepath)
+ if env_params is None:
+ return
+ save_env_params(env_filepath)
+ else:
+ env_params = extract_env_params(INIT_ENV_FILEPATH)
+ env = {
+ 'SKALE_DIR': SKALE_DIR,
+ 'SCHAINS_MNT_DIR': SCHAINS_MNT_DIR,
+ 'FILESTORAGE_MAPPING': FILESTORAGE_MAPPING,
+ **env_params
+ }
+ if inited_node:
+ flask_secret_key = get_flask_secret_key()
+ env['FLASK_SECRET_KEY'] = flask_secret_key
+ if sync_schains:
+ env['BACKUP_RUN'] = 'True'
+ return {k: v for k, v in env.items() if v != ''}
+
+
+@check_inited
+@check_user
+def update(env_filepath):
+ logger.info('Node update started')
+ configure_firewall_rules()
+ env = get_node_env(env_filepath, inited_node=True, sync_schains=False)
+ update_op(env_filepath, env)
+ logger.info('Waiting for containers initialization')
+ time.sleep(TM_INIT_TIMEOUT)
+ if not is_base_containers_alive():
+ print_node_cmd_error()
+ return
+ logger.info('Node update finished')
+
+
+def get_node_signature(validator_id):
+ params = {'validator_id': validator_id}
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='signature',
+ params=params
+ )
+ if status == 'ok':
+ return payload['signature']
+ else:
+ return payload
+
+
+def backup(path):
+ backup_filepath = get_backup_filepath(path)
+ create_backup_archive(backup_filepath)
+
+
+def get_backup_filename():
+ time = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
+ return f'{BACKUP_ARCHIVE_NAME}-{time}.tar.gz'
+
+
+def get_backup_filepath(base_path):
+ return os.path.abspath(os.path.join(base_path, get_backup_filename()))
+
+
+@contextmanager
+def chdir(dest):
+ old = os.getcwd()
+ try:
+ os.chdir(dest)
+ yield
+ finally:
+ os.chdir(old)
+
+
+def pack_dir(source: str, dest: str, exclude: Tuple[str, ...] = ()):
+ logger.info('Packing dir %s to %s excluding %s', source, dest, exclude)
+
+ source, dest = Path(source), Path(dest)
+ exclude = [Path(e).relative_to(source.parent) for e in exclude]
+
+ def logfilter(tarinfo):
+ path = Path(tarinfo.name)
+ for e in exclude:
+            logger.debug('Checking if %s is parent of %s', e, tarinfo.name)
+ try:
+ path.relative_to(e)
+ except ValueError:
+ pass
+ else:
+ logger.debug('Excluding %s', tarinfo.name)
+ return None
+ return tarinfo
+
+ with chdir(source.parent):
+ with tarfile.open(dest, 'w:gz') as tar:
+ tar.add(source.name, filter=logfilter)
+ logger.info('Packing finished %s', source)
+
+
+def create_backup_archive(backup_filepath):
+ print('Creating backup archive...')
+ cli_log_path = CLI_LOG_DATA_PATH
+ container_log_path = LOG_PATH
+ pack_dir(
+ SKALE_DIR,
+ backup_filepath,
+ exclude=(cli_log_path, container_log_path)
+ )
+    print(f'Backup archive successfully created {backup_filepath}')
+
+
+def set_maintenance_mode_on():
+ print('Setting maintenance mode on...')
+ status, payload = post_request(
+ blueprint=BLUEPRINT_NAME,
+ method='maintenance-on'
+ )
+ if status == 'ok':
+ msg = TEXTS['node']['maintenance_on']
+ logger.info(msg)
+ print(msg)
+ else:
+ error_msg = payload
+ logger.error(f'Set maintenance mode error {error_msg}')
+ error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def set_maintenance_mode_off():
+ print('Setting maintenance mode off...')
+ status, payload = post_request(
+ blueprint=BLUEPRINT_NAME,
+ method='maintenance-off'
+ )
+ if status == 'ok':
+ msg = TEXTS['node']['maintenance_off']
+ logger.info(msg)
+ print(msg)
+ else:
+ error_msg = payload
+ logger.error(f'Remove from maintenance mode error {error_msg}')
+ error_exit(error_msg, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+@check_inited
+@check_user
+def turn_off(maintenance_on):
+ if maintenance_on:
+ set_maintenance_mode_on()
+ turn_off_op()
+
+
+@check_inited
+@check_user
+def turn_on(maintenance_off, sync_schains, env_file):
+ env = get_node_env(env_file, inited_node=True, sync_schains=sync_schains)
+ turn_on_op(env)
+ logger.info('Waiting for containers initialization')
+ time.sleep(TM_INIT_TIMEOUT)
+ if not is_base_containers_alive():
+ print_node_cmd_error()
+ return
+ logger.info('Node turned on')
+ if maintenance_off:
+ set_maintenance_mode_off()
+
+
+def is_base_containers_alive():
+ dclient = docker.from_env()
+ containers = dclient.containers.list()
+ skale_containers = list(filter(
+ lambda c: c.name.startswith('skale_'), containers
+ ))
+ return len(skale_containers) >= BASE_CONTAINERS_AMOUNT
+
+
+def get_node_info(format):
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='info'
+ )
+ if status == 'ok':
+ node_info = payload['node_info']
+ if format == 'json':
+ print(node_info)
+ elif node_info['status'] == NodeStatuses.NOT_CREATED.value:
+ print(TEXTS['service']['node_not_registered'])
+ else:
+ print_node_info(
+ node_info,
+ get_node_status(int(node_info['status']))
+ )
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def get_node_status(status):
+ node_status = NodeStatuses(status).name
+ return TEXTS['node']['status'][node_status]
+
+
+@check_inited
+def set_domain_name(domain_name):
+ print(f'Setting new domain name: {domain_name}')
+ status, payload = post_request(
+ blueprint=BLUEPRINT_NAME,
+ method='set-domain-name',
+ json={
+ 'domain_name': domain_name
+ }
+ )
+ if status == 'ok':
+ msg = TEXTS['node']['domain_name_changed']
+ logger.info(msg)
+ print(msg)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def run_checks(network: str) -> None:
+ if not is_node_inited():
+ print(TEXTS['node']['not_inited'])
+ return
+
+ failed_checks = run_preinstall_checks(network)
+ if not failed_checks:
+        print('Requirements checking successfully finished!')
+ else:
+        print('Node does not fully meet the requirements!')
+ print_failed_requirements_checks(failed_checks)
+
+
+def configure_firewall_rules() -> None:
+ print('Configuring firewall ...')
+ configure_iptables()
+ print('Done')
diff --git a/core/validators.py b/node_cli/core/node_config.py
similarity index 63%
rename from core/validators.py
rename to node_cli/core/node_config.py
index 66f1feb9..c7050918 100644
--- a/core/validators.py
+++ b/node_cli/core/node_config.py
@@ -2,7 +2,7 @@
#
# This file is part of node-cli
#
-# Copyright (C) 2019 SKALE Labs
+# Copyright (C) 2021 SKALE Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -17,17 +17,19 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import json
-from core.helper import get_request
-from core.print_formatters import print_err_response
+class NodeConfig:
+ def __init__(self, config_filepath, env_filepath=None):
+ pass
-def get_validators_info(config, format):
- status, payload = get_request('validators_info')
- if status == 'ok':
- if format == 'json':
- print(json.dumps({'validators_info': payload}))
- else:
- print(payload)
- else:
- print_err_response(payload)
+ def load_env(self):
+ pass
+
+ def validate_env(self):
+ pass
+
+ def load_config(self):
+ pass
+
+ def validate_config(self):
+ pass
diff --git a/core/resources.py b/node_cli/core/resources.py
similarity index 72%
rename from core/resources.py
rename to node_cli/core/resources.py
index 10d6350c..b2fb517c 100644
--- a/core/resources.py
+++ b/node_cli/core/resources.py
@@ -19,16 +19,20 @@
import os
import logging
-import subprocess
from time import sleep
import psutil
-from tools.schain_types import SchainTypes
-from tools.helper import write_json, read_json, run_cmd, format_output, extract_env_params
-from core.helper import safe_load_yml
-from configs import ALLOCATION_FILEPATH, CONFIGS_FILEPATH
-from configs.resource_allocation import (
+from node_cli.utils.docker_utils import ensure_volume
+from node_cli.utils.schain_types import SchainTypes
+from node_cli.utils.helper import (
+ write_json, read_json, run_cmd, extract_env_params, safe_load_yml
+)
+from node_cli.configs import (
+ ALLOCATION_FILEPATH, ENVIRONMENT_PARAMS_FILEPATH,
+ SNAPSHOTS_SHARED_VOLUME
+)
+from node_cli.configs.resource_allocation import (
RESOURCE_ALLOCATION_FILEPATH, TIMES, TIMEOUT,
TEST_DIVIDER, SMALL_DIVIDER, MEDIUM_DIVIDER, LARGE_DIVIDER,
MEMORY_FACTOR, DISK_MOUNTPOINT_FILEPATH, MAX_CPU_SHARES
@@ -37,6 +41,10 @@
logger = logging.getLogger(__name__)
+class NotEnoughDiskSpaceError(Exception):
+ pass
+
+
class ResourceAlloc:
def __init__(self, value, fractional=False):
self.values = {
@@ -62,21 +70,21 @@ def get_resource_allocation_info():
def compose_resource_allocation_config(env_type):
- net_configs = safe_load_yml(CONFIGS_FILEPATH)
+ env_configs = safe_load_yml(ENVIRONMENT_PARAMS_FILEPATH)
schain_allocation_data = safe_load_yml(ALLOCATION_FILEPATH)
- schain_cpu_alloc, ima_cpu_alloc = get_cpu_alloc(net_configs)
- schain_mem_alloc, ima_mem_alloc = get_memory_alloc(net_configs)
+ schain_cpu_alloc, ima_cpu_alloc = get_cpu_alloc(env_configs)
+ schain_mem_alloc, ima_mem_alloc = get_memory_alloc(env_configs)
- verify_disk_size(net_configs, env_type)
+ verify_disk_size(env_configs, env_type)
return {
'schain': {
'cpu_shares': schain_cpu_alloc.dict(),
'mem': schain_mem_alloc.dict(),
'disk': schain_allocation_data[env_type]['disk'],
- 'volume_limits': schain_allocation_data[env_type]['volume'],
- 'storage_limit': compose_storage_limit(schain_allocation_data[env_type]['leveldb'])
+ 'volume_limits': schain_allocation_data[env_type]['volume_limits'], # noqa
+ 'leveldb_limits': schain_allocation_data[env_type]['leveldb_limits'] # noqa
},
'ima': {
'cpu_shares': ima_cpu_alloc.dict(),
@@ -130,16 +138,16 @@ def get_total_memory():
return sum(memory) / TIMES * MEMORY_FACTOR
-def get_memory_alloc(net_configs):
- mem_proportions = net_configs['common']['schain']['mem']
+def get_memory_alloc(env_configs):
+ mem_proportions = env_configs['common']['schain']['mem']
available_memory = get_total_memory()
schain_memory = mem_proportions['skaled'] * available_memory
ima_memory = mem_proportions['ima'] * available_memory
return ResourceAlloc(schain_memory), ResourceAlloc(ima_memory)
-def get_cpu_alloc(net_configs):
- cpu_proportions = net_configs['common']['schain']['cpu']
+def get_cpu_alloc(env_configs):
+ cpu_proportions = env_configs['common']['schain']['cpu']
schain_max_cpu_shares = int(cpu_proportions['skaled'] * MAX_CPU_SHARES)
ima_max_cpu_shares = int(cpu_proportions['ima'] * MAX_CPU_SHARES)
return (
@@ -148,32 +156,28 @@ def get_cpu_alloc(net_configs):
)
-def verify_disk_size(net_configs: dict, env_type: str) -> dict:
+def verify_disk_size(env_configs: dict, env_type: str) -> dict:
disk_size = get_disk_size()
- env_disk_size = net_configs['envs'][env_type]['server']['disk_size_bytes']
+ env_disk_size = env_configs['envs'][env_type]['server']['disk']
check_disk_size(disk_size, env_disk_size)
def check_disk_size(disk_size: int, env_disk_size: int):
if env_disk_size > disk_size:
- raise Exception(f'Disk size: {disk_size}, required disk size: {env_disk_size}')
+ raise NotEnoughDiskSpaceError(
+ f'Disk size: {disk_size}, required disk size: {env_disk_size}'
+ )
-def get_disk_size():
+def get_disk_size() -> int:
disk_path = get_disk_path()
disk_size_cmd = construct_disk_size_cmd(disk_path)
- try:
- res = run_cmd(disk_size_cmd, shell=True)
- stdout, _ = format_output(res)
- return int(stdout)
- except subprocess.CalledProcessError:
- raise Exception(
- "Couldn't get disk size, check disk mountpoint option."
- )
+ output = run_cmd(disk_size_cmd).stdout.decode('utf-8')
+ return int(output)
-def construct_disk_size_cmd(disk_path):
- return f'sudo blockdev --getsize64 {disk_path}'
+def construct_disk_size_cmd(disk_path: str) -> list:
+ return ['blockdev', '--getsize64', disk_path]
def check_is_partition(disk_path):
@@ -190,10 +194,12 @@ def get_allocation_option_name(schain):
def get_disk_path():
- f = open(DISK_MOUNTPOINT_FILEPATH, "r")
- return f.read()
+ with open(DISK_MOUNTPOINT_FILEPATH) as f:
+ return f.read().strip()
-def compose_storage_limit(leveldb_lim):
- """Helper function was the backward compatibility with old skale-admin"""
- return {k: leveldb_lim[k]['evm_storage_part'] for k in leveldb_lim.keys()}
+def init_shared_space_volume(env_type):
+ logger.info('Configuring shared space volume')
+ schain_allocation_data = safe_load_yml(ALLOCATION_FILEPATH)
+ size = schain_allocation_data[env_type]['shared_space']
+ ensure_volume(SNAPSHOTS_SHARED_VOLUME, size)
diff --git a/node_cli/core/schains.py b/node_cli/core/schains.py
new file mode 100644
index 00000000..4904def1
--- /dev/null
+++ b/node_cli/core/schains.py
@@ -0,0 +1,92 @@
+import logging
+import pprint
+
+from node_cli.utils.helper import get_request, post_request, error_exit
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.utils.print_formatters import (
+ print_dkg_statuses,
+ print_firewall_rules,
+ print_schain_info,
+ print_schains
+)
+
+
+logger = logging.getLogger(__name__)
+
+BLUEPRINT_NAME = 'schains'
+
+
+def get_schain_firewall_rules(schain: str) -> None:
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='firewall-rules',
+ params={'schain_name': schain}
+ )
+ if status == 'ok':
+ print_firewall_rules(payload['endpoints'])
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def show_schains() -> None:
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='list'
+ )
+ if status == 'ok':
+ schains = payload
+ if not schains:
+ print('No sChains found')
+ return
+ print_schains(schains)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def show_dkg_info(all_: bool = False) -> None:
+ params = {'all': all_}
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='dkg-statuses',
+ params=params
+ )
+ if status == 'ok':
+ print_dkg_statuses(payload)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def show_config(name: str) -> None:
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='config',
+ params={'schain_name': name}
+ )
+ if status == 'ok':
+ pprint.pprint(payload)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def toggle_schain_repair_mode(schain: str) -> None:
+ status, payload = post_request(
+ blueprint=BLUEPRINT_NAME,
+ method='repair',
+ json={'schain_name': schain}
+ )
+ if status == 'ok':
+ print('Schain has been set for repair')
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
+
+
+def describe(schain: str, raw=False) -> None:
+ status, payload = get_request(
+ blueprint=BLUEPRINT_NAME,
+ method='get',
+ params={'schain_name': schain}
+ )
+ if status == 'ok':
+ print_schain_info(payload, raw=raw)
+ else:
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
diff --git a/core/ssl.py b/node_cli/core/ssl.py
similarity index 93%
rename from core/ssl.py
rename to node_cli/core/ssl.py
index 177c59ef..38bfc73d 100644
--- a/core/ssl.py
+++ b/node_cli/core/ssl.py
@@ -7,26 +7,31 @@
import logging
-from configs import (
+from node_cli.configs import (
DEFAULT_SSL_CHECK_PORT,
SKALED_SSL_TEST_SCRIPT,
SSL_CERT_FILEPATH,
SSL_KEY_FILEPATH
)
-from core.helper import post_request, read_file
-from tools.helper import run_cmd
+from node_cli.utils.helper import post_request
+from node_cli.utils.helper import run_cmd
logger = logging.getLogger(__name__)
COMMUNICATION_TIMEOUT = 3
+def read_file_bytes(path, mode='rb'):
+ with open(path, mode) as f:
+        return f.read()
+
+
def load_ssl_files(key_path, cert_path):
return {
'ssl_key': (os.path.basename(key_path),
- read_file(key_path), 'application/octet-stream'),
+ read_file_bytes(key_path), 'application/octet-stream'),
'ssl_cert': (os.path.basename(cert_path),
- read_file(cert_path), 'application/octet-stream')
+ read_file_bytes(cert_path), 'application/octet-stream')
}
@@ -214,7 +219,11 @@ def send_saving_cert_request(key_path, cert_path, force):
None, json.dumps({'force': force}),
'application/json'
)
- return post_request('ssl_upload', files=files_data)
+ return post_request(
+ blueprint='ssl',
+ method='upload',
+ files=files_data
+ )
def upload_cert(cert_path, key_path, force, no_client=False):
diff --git a/core/wallet.py b/node_cli/core/wallet.py
similarity index 70%
rename from core/wallet.py
rename to node_cli/core/wallet.py
index e55badd8..dd74d6eb 100644
--- a/core/wallet.py
+++ b/node_cli/core/wallet.py
@@ -19,19 +19,23 @@
import json
-from core.helper import get_request, post_request, logger
-from core.print_formatters import print_err_response, print_wallet_info, TEXTS
+from node_cli.utils.print_formatters import print_wallet_info, TEXTS
+from node_cli.utils.helper import error_exit, get_request, post_request, logger
+from node_cli.utils.exit_codes import CLIExitCodes
+
+
+BLUEPRINT_NAME = 'wallet'
def get_wallet_info(_format):
- status, payload = get_request('wallet_info')
+ status, payload = get_request(BLUEPRINT_NAME, 'info')
if status == 'ok':
if _format == 'json':
print(json.dumps(payload))
else:
print_wallet_info(payload)
else:
- print_err_response(payload)
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
def send_eth(address: str, amount: float, gas_limit: int, gas_price: int):
@@ -41,13 +45,11 @@ def send_eth(address: str, amount: float, gas_limit: int, gas_price: int):
'gas_limit': gas_limit,
'gas_price': gas_price
}
- status, payload = post_request('send_eth',
- json=json_data)
+ status, payload = post_request(BLUEPRINT_NAME, 'send-eth', json=json_data)
if status == 'ok':
msg = TEXTS['wallet']['successful_transfer']
logger.info(msg)
print(msg)
else:
- error_msg = payload
- logger.error(f'Sending error {error_msg}')
- print_err_response(error_msg)
+ logger.error(f'Sending error {payload}')
+ error_exit(payload, exit_code=CLIExitCodes.BAD_API_RESPONSE)
diff --git a/main.py b/node_cli/main.py
similarity index 75%
rename from main.py
rename to node_cli/main.py
index 9297a157..6a0abe83 100644
--- a/main.py
+++ b/node_cli/main.py
@@ -25,22 +25,20 @@
import click
-from cli import __version__
-from cli.containers import containers_cli
-from cli.info import BUILD_DATETIME, COMMIT, BRANCH, OS, VERSION
-from cli.logs import logs_cli
-from cli.node import node_cli
-from cli.schains import schains_cli
-from cli.wallet import wallet_cli
-from cli.ssl import ssl_cli
-from cli.sgx import sgx_cli
-from cli.exit import exit_cli
-from cli.validate import validate_cli
-from cli.resources_allocation import resources_allocation_cli
-
-from core.helper import (safe_load_texts, init_default_logger)
-from configs import LONG_LINE
-from core.host import init_logs_dir
+from node_cli.cli import __version__
+from node_cli.cli.health import health_cli
+from node_cli.cli.info import BUILD_DATETIME, COMMIT, BRANCH, OS, VERSION
+from node_cli.cli.logs import logs_cli
+from node_cli.cli.node import node_cli
+from node_cli.cli.schains import schains_cli
+from node_cli.cli.wallet import wallet_cli
+from node_cli.cli.ssl import ssl_cli
+from node_cli.cli.exit import exit_cli
+from node_cli.cli.validate import validate_cli
+from node_cli.cli.resources_allocation import resources_allocation_cli
+from node_cli.utils.helper import safe_load_texts, init_default_logger
+from node_cli.configs import LONG_LINE
+from node_cli.core.host import init_logs_dir
TEXTS = safe_load_texts()
@@ -96,14 +94,13 @@ def handle_exception(exc_type, exc_value, exc_traceback):
cmd_collection = click.CommandCollection(
sources=[
cli,
+ health_cli,
schains_cli,
- containers_cli,
logs_cli,
resources_allocation_cli,
node_cli,
wallet_cli,
ssl_cli,
- sgx_cli,
exit_cli,
validate_cli
])
@@ -112,5 +109,6 @@ def handle_exception(exc_type, exc_value, exc_traceback):
except Exception as err:
print(f'Command execution failed with {err}. Recheck your inputs')
traceback.print_exc()
- logger.error(err)
- logger.debug(f'execution time: {time.time() - start_time} seconds')
+ logger.exception(f'Command failed with {err}')
+ finally:
+ logger.debug(f'execution time: {time.time() - start_time} seconds')
diff --git a/core/operations/__init__.py b/node_cli/operations/__init__.py
similarity index 81%
rename from core/operations/__init__.py
rename to node_cli/operations/__init__.py
index 92a9f1dd..e511af44 100644
--- a/core/operations/__init__.py
+++ b/node_cli/operations/__init__.py
@@ -17,4 +17,10 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-from core.operations.base import update as update_op # noqa
+from node_cli.operations.base import ( # noqa
+ update as update_op,
+ init as init_op,
+ turn_off as turn_off_op,
+ turn_on as turn_on_op,
+ restore as restore_op
+)
diff --git a/node_cli/operations/base.py b/node_cli/operations/base.py
new file mode 100644
index 00000000..5de3cbdc
--- /dev/null
+++ b/node_cli/operations/base.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+from typing import Dict
+
+from node_cli.cli.info import VERSION
+from node_cli.core.host import ( # noqa
+ prepare_host, link_env_file, run_preinstall_checks
+)
+from node_cli.core.nginx import generate_nginx_config
+from node_cli.core.resources import update_resource_allocation, init_shared_space_volume
+
+from node_cli.operations.common import (
+ backup_old_contracts, download_contracts, download_filestorage_artifacts, configure_filebeat,
+ configure_flask, unpack_backup_archive
+)
+from node_cli.operations.docker_lvmpy import docker_lvmpy_update, docker_lvmpy_install
+from node_cli.operations.skale_node import sync_skale_node, update_images
+
+from node_cli.core.iptables import configure_iptables
+from node_cli.utils.docker_utils import compose_rm, compose_up, remove_dynamic_containers
+from node_cli.utils.meta import update_meta
+from node_cli.utils.print_formatters import print_failed_requirements_checks
+
+
+logger = logging.getLogger(__name__)
+
+
+def update(env_filepath: str, env: Dict) -> None:
+ compose_rm(env)
+ remove_dynamic_containers()
+
+ backup_old_contracts()
+ download_contracts(env)
+ sync_skale_node(env)
+
+ # failed_checks = run_preinstall_checks(env['ENV_TYPE'])
+ # if failed_checks:
+ # print_failed_requirements_checks(failed_checks)
+ # return False
+
+ download_filestorage_artifacts()
+ docker_lvmpy_update(env)
+ generate_nginx_config()
+
+ prepare_host(
+ env_filepath,
+ env['DISK_MOUNTPOINT'],
+ env['SGX_SERVER_URL'],
+ env['ENV_TYPE'],
+ allocation=True
+ )
+ init_shared_space_volume(env['ENV_TYPE'])
+
+ update_meta(
+ VERSION,
+ env['CONTAINER_CONFIGS_STREAM'],
+ env['DOCKER_LVMPY_STREAM']
+ )
+ update_images(env)
+ compose_up(env)
+
+
+def init(env_filepath: str, env: str) -> bool:
+ sync_skale_node(env)
+
+ failed_checks = run_preinstall_checks(env['ENV_TYPE'])
+ if failed_checks:
+ print_failed_requirements_checks(failed_checks)
+ return False
+
+ prepare_host(
+ env_filepath,
+ env['DISK_MOUNTPOINT'],
+ env['SGX_SERVER_URL'],
+ env_type=env['ENV_TYPE'],
+ )
+ link_env_file()
+ download_contracts(env)
+ download_filestorage_artifacts()
+
+ configure_filebeat()
+ configure_flask()
+ configure_iptables()
+ generate_nginx_config()
+
+ docker_lvmpy_install(env)
+ init_shared_space_volume(env['ENV_TYPE'])
+
+ update_meta(
+ VERSION,
+ env['CONTAINER_CONFIGS_STREAM'],
+ env['DOCKER_LVMPY_STREAM']
+ )
+ update_resource_allocation(env_type=env['ENV_TYPE'])
+ update_images(env)
+ compose_up(env)
+ return True
+
+
+def turn_off():
+ logger.info('Turning off the node...')
+ compose_rm()
+ remove_dynamic_containers()
+ logger.info('Node was successfully turned off')
+
+
+def turn_on(env):
+ logger.info('Turning on the node...')
+ compose_up(env)
+
+
+def restore(env, backup_path):
+ unpack_backup_archive(backup_path)
+
+ failed_checks = run_preinstall_checks(env['ENV_TYPE'])
+ if failed_checks:
+ print_failed_requirements_checks(failed_checks)
+ return False
+
+ link_env_file()
+ configure_iptables()
+ docker_lvmpy_install(env)
+ init_shared_space_volume(env['ENV_TYPE'])
+
+ update_meta(
+ VERSION,
+ env['CONTAINER_CONFIGS_STREAM'],
+ env['DOCKER_LVMPY_STREAM']
+ )
+ update_resource_allocation(env_type=env['ENV_TYPE'])
+ compose_up(env)
diff --git a/node_cli/operations/common.py b/node_cli/operations/common.py
new file mode 100644
index 00000000..5fd6bef7
--- /dev/null
+++ b/node_cli/operations/common.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import os
+import stat
+import tarfile
+import logging
+import shutil
+import secrets
+
+import urllib.request
+from shutil import copyfile
+from distutils.dir_util import copy_tree
+
+from node_cli.configs import (
+ CONTRACTS_PATH, BACKUP_CONTRACTS_PATH,
+ MANAGER_CONTRACTS_FILEPATH, IMA_CONTRACTS_FILEPATH, SRC_FILEBEAT_CONFIG_PATH, G_CONF_HOME,
+ FILESTORAGE_INFO_FILE, FILESTORAGE_ARTIFACTS_FILE, FILEBEAT_CONFIG_PATH, FLASK_SECRET_KEY_FILE
+)
+from node_cli.utils.helper import read_json
+
+
+logger = logging.getLogger(__name__)
+
+
+def backup_old_contracts():
+ logging.info('Copying old contracts ABIs')
+ copy_tree(CONTRACTS_PATH, BACKUP_CONTRACTS_PATH)
+
+
+def download_contracts(env):
+ urllib.request.urlretrieve(env['MANAGER_CONTRACTS_ABI_URL'], MANAGER_CONTRACTS_FILEPATH)
+ urllib.request.urlretrieve(env['IMA_CONTRACTS_ABI_URL'], IMA_CONTRACTS_FILEPATH)
+
+
+def download_filestorage_artifacts():
+ logger.info('Updating filestorage artifacts')
+ fs_artifacts_url = read_json(FILESTORAGE_INFO_FILE)['artifacts_url']
+ logger.debug(f'Downloading {fs_artifacts_url} to {FILESTORAGE_ARTIFACTS_FILE}')
+ urllib.request.urlretrieve(fs_artifacts_url, FILESTORAGE_ARTIFACTS_FILE)
+
+
+def configure_filebeat():
+ logger.info('Configuring filebeat...')
+ copyfile(SRC_FILEBEAT_CONFIG_PATH, FILEBEAT_CONFIG_PATH)
+ shutil.chown(FILEBEAT_CONFIG_PATH, user='root')
+ os.chmod(
+ FILEBEAT_CONFIG_PATH,
+ stat.S_IREAD |
+ stat.S_IWRITE |
+ stat.S_IEXEC
+ )
+ logger.info('Filebeat configured')
+
+
+def configure_flask():
+ if os.path.isfile(FLASK_SECRET_KEY_FILE):
+ logger.info('Flask secret key already exists')
+ else:
+ logger.info('Generating Flask secret key...')
+ flask_secret_key = secrets.token_urlsafe(16)
+ with open(FLASK_SECRET_KEY_FILE, 'w') as f:
+ f.write(flask_secret_key)
+ logger.info('Flask secret key generated and saved')
+
+
+def unpack_backup_archive(backup_path: str) -> None:
+ logger.info('Unpacking backup archive...')
+ with tarfile.open(backup_path) as tar:
+ tar.extractall(path=G_CONF_HOME)
diff --git a/node_cli/operations/docker_lvmpy.py b/node_cli/operations/docker_lvmpy.py
new file mode 100644
index 00000000..e2b3a288
--- /dev/null
+++ b/node_cli/operations/docker_lvmpy.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import os
+import logging
+
+from node_cli.utils.helper import run_cmd
+from node_cli.utils.git_utils import sync_repo, rm_local_repo
+from node_cli.configs import (DOCKER_LVMPY_PATH, DOCKER_LVMPY_REPO_URL,
+ FILESTORAGE_MAPPING, SCHAINS_MNT_DIR)
+
+logger = logging.getLogger(__name__)
+
+
+def update_docker_lvmpy_env(env):
+ env['PHYSICAL_VOLUME'] = env['DISK_MOUNTPOINT']
+ env['VOLUME_GROUP'] = 'schains'
+ env['FILESTORAGE_MAPPING'] = FILESTORAGE_MAPPING
+ env['SCHAINS_MNT_DIR'] = SCHAINS_MNT_DIR
+ env['PATH'] = os.environ.get('PATH', None)
+ return env
+
+
+def ensure_filestorage_mapping(mapping_dir=FILESTORAGE_MAPPING):
+ if not os.path.isdir(FILESTORAGE_MAPPING):
+ os.makedirs(FILESTORAGE_MAPPING)
+
+
+def sync_docker_lvmpy_repo(env):
+ rm_local_repo(DOCKER_LVMPY_PATH)
+ sync_repo(
+ DOCKER_LVMPY_REPO_URL,
+ DOCKER_LVMPY_PATH,
+ env["DOCKER_LVMPY_STREAM"]
+ )
+
+
+def docker_lvmpy_update(env):
+ sync_docker_lvmpy_repo(env)
+ ensure_filestorage_mapping()
+ logger.info('Running docker-lvmpy update script')
+ update_docker_lvmpy_env(env)
+ run_cmd(
+ cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/update.sh'.split(),
+ env=env
+ )
+ logger.info('docker-lvmpy update done')
+
+
+def docker_lvmpy_install(env):
+ sync_docker_lvmpy_repo(env)
+ ensure_filestorage_mapping()
+ update_docker_lvmpy_env(env)
+ run_cmd(
+ cmd=f'sudo -H -E {DOCKER_LVMPY_PATH}/scripts/install.sh'.split(),
+ env=env
+ )
+ logger.info('docker-lvmpy installed')
diff --git a/node_cli/operations/skale_node.py b/node_cli/operations/skale_node.py
new file mode 100644
index 00000000..547b5848
--- /dev/null
+++ b/node_cli/operations/skale_node.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+
+from node_cli.utils.helper import run_cmd
+from node_cli.utils.git_utils import sync_repo, rm_local_repo
+from node_cli.utils.docker_utils import compose_pull, compose_build
+from node_cli.configs import CONTAINER_CONFIG_PATH, SKALE_NODE_REPO_URL
+
+
+logger = logging.getLogger(__name__)
+
+
+def update_images(env: dict) -> None:
+ if 'CONTAINER_CONFIGS_DIR' in env:
+ compose_build()
+ else:
+ compose_pull()
+
+
+def sync_skale_node(env: dict) -> None:
+ if 'CONTAINER_CONFIGS_DIR' in env:
+ sync_skale_node_dev(env)
+ else:
+ sync_skale_node_git(env)
+
+
+def sync_skale_node_git(env: dict) -> None:
+ rm_local_repo(CONTAINER_CONFIG_PATH)
+ sync_repo(
+ SKALE_NODE_REPO_URL,
+ CONTAINER_CONFIG_PATH,
+ env["CONTAINER_CONFIGS_STREAM"]
+ )
+
+
+def sync_skale_node_dev(env: dict) -> None:
+ logger.info(f'Syncing {CONTAINER_CONFIG_PATH} with {env["CONTAINER_CONFIGS_DIR"]}')
+ run_cmd(f'rsync -r {env["CONTAINER_CONFIGS_DIR"]}/ {CONTAINER_CONFIG_PATH}'.split())
+ run_cmd(f'rsync -r {env["CONTAINER_CONFIGS_DIR"]}/.git {CONTAINER_CONFIG_PATH}'.split())
diff --git a/tools/__init__.py b/node_cli/utils/__init__.py
similarity index 100%
rename from tools/__init__.py
rename to node_cli/utils/__init__.py
diff --git a/node_cli/utils/decorators.py b/node_cli/utils/decorators.py
new file mode 100644
index 00000000..95c822e8
--- /dev/null
+++ b/node_cli/utils/decorators.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from functools import wraps
+
+from node_cli.core.host import is_node_inited
+from node_cli.utils.global_config import get_system_user
+from node_cli.utils.helper import error_exit, is_user_valid, get_g_conf_user
+from node_cli.utils.texts import Texts
+from node_cli.utils.exit_codes import CLIExitCodes
+
+
+TEXTS = Texts()
+
+
+def check_not_inited(f):
+ @wraps(f)
+ def inner(*args, **kwargs):
+ if is_node_inited():
+ error_exit(TEXTS['node']['already_inited'], exit_code=CLIExitCodes.NODE_STATE_ERROR)
+ return f(*args, **kwargs)
+ return inner
+
+
+def check_inited(f):
+ @wraps(f)
+ def inner(*args, **kwargs):
+ if not is_node_inited():
+ error_exit(TEXTS['node']['not_inited'], exit_code=CLIExitCodes.NODE_STATE_ERROR)
+ return f(*args, **kwargs)
+ return inner
+
+
+def check_user(f):
+ @wraps(f)
+ def inner(*args, **kwargs):
+ if not is_user_valid():
+ g_conf_user = get_g_conf_user()
+ current_user = get_system_user()
+ error_msg = f'You couldn\'t execute this command from user {current_user}. \
+Allowed: {g_conf_user} or root.'
+ error_exit(error_msg, exit_code=CLIExitCodes.BAD_USER_ERROR)
+ return f(*args, **kwargs)
+ return inner
diff --git a/tools/docker_utils.py b/node_cli/utils/docker_utils.py
similarity index 61%
rename from tools/docker_utils.py
rename to node_cli/utils/docker_utils.py
index fa6d52c4..5ea27aba 100644
--- a/tools/docker_utils.py
+++ b/node_cli/utils/docker_utils.py
@@ -25,9 +25,13 @@
from docker.client import DockerClient
from docker.models.containers import Container
-from tools.helper import run_cmd, str_to_bool
-from configs import (COMPOSE_PATH, SKALE_DIR, SGX_CERTIFICATES_DIR_NAME,
- REMOVED_CONTAINERS_FOLDER_PATH)
+from node_cli.utils.helper import run_cmd, str_to_bool
+from node_cli.configs import (
+ COMPOSE_PATH,
+ REMOVED_CONTAINERS_FOLDER_PATH,
+ SGX_CERTIFICATES_DIR_NAME,
+ SKALE_DIR
+)
logger = logging.getLogger(__name__)
@@ -35,10 +39,19 @@
SCHAIN_REMOVE_TIMEOUT = 60
IMA_REMOVE_TIMEOUT = 20
-MAIN_COMPOSE_CONTAINERS = 'skale-api bounty skale-admin'
-BASE_COMPOSE_SERVICES = 'transaction-manager skale-admin skale-api mysql bounty nginx watchdog filebeat' # noqa
-MONITORING_COMPOSE_SERVICES = 'node-exporter advisor'
-NOTIFICATION_COMPOSE_SERVICES = 'celery redis'
+MAIN_COMPOSE_CONTAINERS = ('skale-api', 'bounty', 'skale-admin')
+BASE_COMPOSE_SERVICES = (
+ 'transaction-manager',
+ 'skale-admin',
+ 'skale-api',
+ 'bounty',
+ 'nginx',
+ 'redis',
+ 'watchdog',
+ 'filebeat'
+)
+MONITORING_COMPOSE_SERVICES = ('node-exporter', 'advisor')
+NOTIFICATION_COMPOSE_SERVICES = ('celery',)
COMPOSE_TIMEOUT = 10
DOCKER_DEFAULT_STOP_TIMEOUT = 20
@@ -49,6 +62,14 @@ def docker_client() -> DockerClient:
return docker.from_env()
+def get_sanitized_container_name(container_info: dict) -> str:
+ return container_info['Names'][0].replace('/', '', 1)
+
+
+def get_containers(container_name_filter=None, _all=True) -> list:
+ return docker_client().containers.list(all=_all, filters={'name': container_name_filter})
+
+
def get_all_schain_containers(_all=True) -> list:
return docker_client().containers.list(all=_all, filters={'name': 'skale_schain_*'})
@@ -57,6 +78,13 @@ def get_all_ima_containers(_all=True) -> list:
return docker_client().containers.list(all=_all, filters={'name': 'skale_ima_*'})
+def remove_dynamic_containers():
+ logger.info('Removing sChains containers')
+ rm_all_schain_containers()
+ logger.info('Removing IMA containers')
+ rm_all_ima_containers()
+
+
def rm_all_schain_containers():
schain_containers = get_all_schain_containers()
remove_containers(schain_containers, stop_timeout=SCHAIN_REMOVE_TIMEOUT)
@@ -90,11 +118,20 @@ def safe_rm(container: Container, stop_timeout=DOCKER_DEFAULT_STOP_TIMEOUT, **kw
def backup_container_logs(container: Container, tail=DOCKER_DEFAULT_TAIL_LINES) -> None:
logger.info(f'Going to backup container logs: {container.name}')
logs_backup_filepath = get_logs_backup_filepath(container)
- with open(logs_backup_filepath, "wb") as out:
- out.write(container.logs(tail=tail))
+ save_container_logs(container, logs_backup_filepath, tail)
logger.debug(f'Old container logs saved to {logs_backup_filepath}, tail: {tail}')
+def save_container_logs(
+ container: Container,
+ log_filepath: str,
+ tail=DOCKER_DEFAULT_TAIL_LINES
+ ) -> None:
+ with open(log_filepath, "wb") as out:
+ out.write(container.logs(tail=tail))
+ logger.debug(f'Logs from {container.name} saved to {log_filepath}')
+
+
def get_logs_backup_filepath(container: Container) -> str:
container_index = sum(1 for f in os.listdir(REMOVED_CONTAINERS_FOLDER_PATH)
if f.startswith(f'{container.name}-'))
@@ -102,17 +139,45 @@ def get_logs_backup_filepath(container: Container) -> str:
return os.path.join(REMOVED_CONTAINERS_FOLDER_PATH, log_file_name)
-def compose_rm(env):
+def ensure_volume(name: str, size: int, dutils=None):
+ dutils = dutils or docker_client()
+ if is_volume_exists(name, dutils=dutils):
+ logger.info(f'Volume with name {name} already exits')
+ return
+ logger.info(f'Creating volume - size: {size}, name: {name}')
+ driver_opts = {'size': str(size)}
+ volume = dutils.volumes.create(
+ name=name,
+ driver='lvmpy',
+ driver_opts=driver_opts
+ )
+ return volume
+
+
+def is_volume_exists(name: str, dutils=None):
+ dutils = dutils or docker_client()
+ try:
+ dutils.volumes.get(name)
+ except docker.errors.NotFound:
+ return False
+ return True
+
+
+def compose_rm(env={}):
logger.info(f'Removing {MAIN_COMPOSE_CONTAINERS} containers')
run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} rm -s -f {MAIN_COMPOSE_CONTAINERS}'.split(),
+ cmd=(
+ 'docker-compose',
+ '-f', COMPOSE_PATH,
+ 'rm', '-s', '-f', *MAIN_COMPOSE_CONTAINERS
+ ),
env=env
)
logger.info(f'Sleeping for {COMPOSE_TIMEOUT} seconds')
sleep(COMPOSE_TIMEOUT)
logger.info('Removing all compose containers')
run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} rm -s -f'.split(),
+ cmd=('docker-compose', '-f', COMPOSE_PATH, 'rm', '-s', '-f'),
env=env
)
logger.info('Compose containers removed')
@@ -121,7 +186,7 @@ def compose_rm(env):
def compose_pull():
logger.info('Pulling compose containers')
run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} pull'.split(),
+ cmd=('docker-compose', '-f', COMPOSE_PATH, 'pull'),
env={
'SKALE_DIR': SKALE_DIR
}
@@ -131,32 +196,27 @@ def compose_pull():
def compose_build():
logger.info('Building compose containers')
run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} build'.split(),
+ cmd=('docker-compose', '-f', COMPOSE_PATH, 'build'),
env={
'SKALE_DIR': SKALE_DIR
}
)
+def get_up_compose_cmd(services):
+ return ('docker-compose', '-f', COMPOSE_PATH, 'up', '-d', *services)
+
+
def compose_up(env):
logger.info('Running base set of containers')
if 'SGX_CERTIFICATES_DIR_NAME' not in env:
env['SGX_CERTIFICATES_DIR_NAME'] = SGX_CERTIFICATES_DIR_NAME
- run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} up -d {BASE_COMPOSE_SERVICES}'.split(),
- env=env
- )
+ run_cmd(cmd=get_up_compose_cmd(BASE_COMPOSE_SERVICES), env=env)
if str_to_bool(env.get('MONITORING_CONTAINERS', '')):
logger.info('Running monitoring containers')
- run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} up -d {MONITORING_COMPOSE_SERVICES}'.split(),
- env=env
- )
+ run_cmd(cmd=get_up_compose_cmd(MONITORING_COMPOSE_SERVICES), env=env)
if 'TG_API_KEY' in env and 'TG_CHAT_ID' in env:
logger.info('Running containers for Telegram notifications')
- run_cmd(
- cmd=f'docker-compose -f {COMPOSE_PATH} up -d {NOTIFICATION_COMPOSE_SERVICES}'.split(),
- env=env
- )
+ run_cmd(cmd=get_up_compose_cmd(NOTIFICATION_COMPOSE_SERVICES), env=env)
diff --git a/core/texts.py b/node_cli/utils/exit_codes.py
similarity index 63%
rename from core/texts.py
rename to node_cli/utils/exit_codes.py
index 5b8ff4be..1173aad0 100644
--- a/core/texts.py
+++ b/node_cli/utils/exit_codes.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# This file is part of validator-cli
+# This file is part of node-cli
#
# Copyright (C) 2020 SKALE Labs
#
@@ -17,20 +17,16 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import yaml
-from configs import TEXT_FILE
+from enum import IntEnum
-class Texts():
- def __init__(self):
- self._texts = self._load()
-
- def __getitem__(self, key):
- return self._texts.get(key)
-
- def _load(self):
- with open(TEXT_FILE, 'r') as stream:
- try:
- return yaml.safe_load(stream)
- except yaml.YAMLError as exc:
- print(exc)
+class CLIExitCodes(IntEnum):
+ """This class contains exit codes for SKALE CLI tools"""
+ SUCCESS = 0
+ FAILURE = 1
+ BAD_API_RESPONSE = 3
+ OPERATION_EXECUTION_ERROR = 4
+ TRANSACTION_ERROR = 5
+ REVERT_ERROR = 6
+ BAD_USER_ERROR = 7
+ NODE_STATE_ERROR = 8
diff --git a/core/operations/git_helper.py b/node_cli/utils/git_utils.py
similarity index 100%
rename from core/operations/git_helper.py
rename to node_cli/utils/git_utils.py
diff --git a/node_cli/utils/global_config.py b/node_cli/utils/global_config.py
new file mode 100644
index 00000000..4a1c4eaa
--- /dev/null
+++ b/node_cli/utils/global_config.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of node-cli
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import os
+import sys
+import json
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def get_system_user() -> str:
+ return 'root' if get_home_dir() == '/root' else os.getenv('SUDO_USER', os.getenv('USER'))
+
+
+def get_home_dir() -> str:
+ return os.path.expanduser('~')
+
+
+def read_g_config(g_skale_dir: str, g_skale_conf_filepath: str) -> dict:
+ """Read global SKALE config file, init if not exists"""
+ if not os.path.isfile(g_skale_conf_filepath):
+ return generate_g_config_file(g_skale_dir, g_skale_conf_filepath)
+ with open(g_skale_conf_filepath, encoding='utf-8') as data_file:
+ return json.loads(data_file.read())
+
+
+def generate_g_config_file(g_skale_dir: str, g_skale_conf_filepath: str) -> dict:
+ """Init global SKALE config file"""
+ print('Generating global SKALE config file...')
+ os.makedirs(g_skale_dir, exist_ok=True)
+ g_config = {
+ 'user': get_system_user(),
+ 'home_dir': get_home_dir()
+ }
+ print(f'{g_skale_conf_filepath} content: {g_config}')
+ try:
+ with open(g_skale_conf_filepath, 'w') as outfile:
+ json.dump(g_config, outfile, indent=4)
+ except PermissionError as e:
+ logger.exception(e)
+ print('No permissions to write into /etc directory')
+ sys.exit(7)
+ return g_config
diff --git a/core/helper.py b/node_cli/utils/helper.py
similarity index 52%
rename from core/helper.py
rename to node_cli/utils/helper.py
index 84c6c6a0..0eae5198 100644
--- a/core/helper.py
+++ b/node_cli/utils/helper.py
@@ -20,31 +20,46 @@
import json
import os
import re
-import shutil
import sys
+
+import yaml
+import shutil
+import requests
+import subprocess
+import urllib.request
+
import urllib.parse
from functools import wraps
import logging
-import logging.handlers as py_handlers
from logging import Formatter, StreamHandler
+import logging.handlers as py_handlers
-import requests
-import yaml
+import distutils
+import distutils.util
+
+import click
-from configs import (
- ADMIN_HOST, ADMIN_PORT,
- ROUTES,
- TEXT_FILE
+from jinja2 import Environment
+
+from node_cli.utils.print_formatters import print_err_response
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.configs.env import (
+ absent_params as absent_env_params,
+ get_params as get_env_params
+)
+from node_cli.configs import (
+ TEXT_FILE, ADMIN_HOST, ADMIN_PORT, HIDE_STREAM_LOG, GLOBAL_SKALE_DIR,
+ GLOBAL_SKALE_CONF_FILEPATH
)
-from configs.cli_logger import (
+from node_cli.configs.routes import get_route
+from node_cli.utils.global_config import read_g_config, get_system_user
+
+from node_cli.configs.cli_logger import (
FILE_LOG_FORMAT, LOG_BACKUP_COUNT, LOG_FILE_SIZE_BYTES,
LOG_FILEPATH, STREAM_LOG_FORMAT, DEBUG_LOG_FILEPATH)
-from core.print_formatters import print_err_response
-from tools.helper import session_config
-config = session_config()
logger = logging.getLogger(__name__)
@@ -56,24 +71,91 @@
}
+def read_json(path):
+ with open(path, encoding='utf-8') as data_file:
+ return json.loads(data_file.read())
+
+
+def write_json(path, content):
+ with open(path, 'w') as outfile:
+ json.dump(content, outfile, indent=4)
+
+
+def run_cmd(cmd, env={}, shell=False, secure=False, check_code=True):
+ if not secure:
+ logger.debug(f'Running: {cmd}')
+ else:
+ logger.debug('Running some secure command')
+ res = subprocess.run(cmd, shell=shell,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, env={**env, **os.environ})
+ if check_code:
+ output = res.stdout.decode('utf-8')
+ if res.returncode:
+ logger.error(f'Error during shell execution: {output}')
+ res.check_returncode()
+ else:
+ logger.debug('Command is executed successfully. Command log:')
+ logger.debug(res.stdout.decode('UTF-8').rstrip())
+ return res
+
+
+def format_output(res: subprocess.CompletedProcess) -> str:
+ return res.stdout.decode('UTF-8').rstrip()
+
+
+def download_file(url, filepath):
+ return urllib.request.urlretrieve(url, filepath)
+
+
+def process_template(source, destination, data):
+ """
+ :param source: j2 template source path
+ :param destination: out file path
+ :param data: dictionary with fields for template
+ :return: Nothing
+ """
+ template = read_file(source)
+ processed_template = Environment().from_string(template).render(data)
+ with open(destination, "w") as f:
+ f.write(processed_template)
+
+
+def get_username():
+ return os.environ.get('USERNAME') or os.environ.get('USER')
+
+
+def extract_env_params(env_filepath):
+ env_params = get_env_params(env_filepath)
+
+ absent_params = ', '.join(absent_env_params(env_params))
+ if absent_params:
+ click.echo(f"Your env file({env_filepath}) have some absent params: "
+ f"{absent_params}.\n"
+ f"You should specify them to make sure that "
+ f"all services are working",
+ err=True)
+ return None
+ return env_params
+
+
+def str_to_bool(val):
+ return bool(distutils.util.strtobool(val))
+
+
+def error_exit(error_payload, exit_code=CLIExitCodes.FAILURE):
+ print_err_response(error_payload)
+ sys.exit(exit_code.value)
+
+
def safe_get_config(config, key):
try:
return config[key]
except KeyError as e:
logger.error(e)
- # print(f'No such key in config: {key}')
return None
-def no_node(f):
- @wraps(f)
- def inner(*args, **kwargs):
- # todo: check that node is not installed yet!
- return f(*args, **kwargs)
-
- return inner
-
-
def safe_load_texts():
with open(TEXT_FILE, 'r') as stream:
try:
@@ -99,8 +181,9 @@ def abort_if_false(ctx, param, value):
ctx.abort()
-def post_request(url_name, json=None, files=None):
- url = construct_url(ROUTES[url_name])
+def post_request(blueprint, method, json=None, files=None):
+ route = get_route(blueprint, method)
+ url = construct_url(route)
try:
response = requests.post(url, json=json, files=files)
data = response.json()
@@ -112,8 +195,9 @@ def post_request(url_name, json=None, files=None):
return status, payload
-def get_request(url_name, params=None):
- url = construct_url(ROUTES[url_name])
+def get_request(blueprint, method, params=None):
+ route = get_route(blueprint, method)
+ url = construct_url(route)
try:
response = requests.get(url, params=params)
data = response.json()
@@ -127,7 +211,8 @@ def get_request(url_name, params=None):
def download_dump(path, container_name=None):
- url = construct_url(ROUTES['logs_dump'])
+ route = get_route('logs', 'dump')
+ url = construct_url(route)
params = {}
if container_name:
params['container_name'] = container_name
@@ -136,7 +221,7 @@ def download_dump(path, container_name=None):
return None
if r.status_code != requests.codes.ok: # pylint: disable=no-member
print('Request failed, status code:', r.status_code)
- print_err_response(r.json())
+ error_exit(r.json())
return None
d = r.headers['Content-Disposition']
fname_q = re.findall("filename=(.+)", d)[0]
@@ -150,9 +235,16 @@ def download_dump(path, container_name=None):
def init_default_logger():
f_handler = get_file_handler(LOG_FILEPATH, logging.INFO)
debug_f_handler = get_file_handler(DEBUG_LOG_FILEPATH, logging.DEBUG)
- stream_handler = get_stream_handler()
- logging.basicConfig(level=logging.DEBUG, handlers=[
- f_handler, debug_f_handler, stream_handler])
+ logging.basicConfig(
+ level=logging.DEBUG, handlers=[f_handler, debug_f_handler])
+
+
+def get_stream_handler():
+ formatter = Formatter(STREAM_LOG_FORMAT)
+ stream_handler = StreamHandler(sys.stderr)
+ stream_handler.setFormatter(formatter)
+ stream_handler.setLevel(logging.INFO)
+ return stream_handler
def get_file_handler(log_filepath, log_level):
@@ -166,17 +258,9 @@ def get_file_handler(log_filepath, log_level):
return f_handler
-def get_stream_handler():
- formatter = Formatter(STREAM_LOG_FORMAT)
- stream_handler = StreamHandler(sys.stderr)
- stream_handler.setFormatter(formatter)
- stream_handler.setLevel(logging.INFO)
- return stream_handler
-
-
-def read_file(path, mode='rb'):
- with open(path, mode) as f:
- return f
+def read_file(path):
+ with open(path, 'r') as file:
+ return file.read()
def to_camel_case(snake_str):
@@ -196,3 +280,33 @@ def validate_abi(abi_filepath: str) -> dict:
return {'filepath': abi_filepath, 'status': 'error',
'msg': 'Failed to load abi file as json'}
return {'filepath': abi_filepath, 'status': 'ok', 'msg': ''}
+
+
+def streamed_cmd(func):
+ """ Decorator that allow function to print logs into stderr """
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if HIDE_STREAM_LOG is None:
+ logging.getLogger('').addHandler(get_stream_handler())
+ return func(*args, **kwargs)
+ return wrapper
+
+
+def is_user_valid(allow_root=True):
+ current_user = get_system_user()
+ if current_user == 'root' and allow_root:
+ return True
+ g_conf_user = get_g_conf_user()
+ return current_user == g_conf_user
+
+
+def get_g_conf():
+ return read_g_config(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+
+
+def get_g_conf_user():
+ return get_g_conf()['user']
+
+
+def get_g_conf_home():
+ return get_g_conf()['home_dir']
diff --git a/tools/meta.py b/node_cli/utils/meta.py
similarity index 97%
rename from tools/meta.py
rename to node_cli/utils/meta.py
index 2079c43c..c48c5e35 100644
--- a/tools/meta.py
+++ b/node_cli/utils/meta.py
@@ -1,7 +1,7 @@
import json
import os
from collections import namedtuple
-from configs import META_FILEPATH
+from node_cli.configs import META_FILEPATH
DEFAULT_VERSION = '1.0.0'
DEFAULT_CONFIG_STREAM = '1.1.0'
diff --git a/core/print_formatters.py b/node_cli/utils/print_formatters.py
similarity index 87%
rename from core/print_formatters.py
rename to node_cli/utils/print_formatters.py
index 9d3ccf6f..a1b89101 100644
--- a/core/print_formatters.py
+++ b/node_cli/utils/print_formatters.py
@@ -17,34 +17,21 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import json
import os
+import json
import datetime
import texttable
from dateutil import parser
import inspect
-from configs import LONG_LINE
-from configs.cli_logger import DEBUG_LOG_FILEPATH
-from tools.texts import Texts
+from node_cli.configs import LONG_LINE
+from node_cli.configs.cli_logger import DEBUG_LOG_FILEPATH
+from node_cli.utils.texts import Texts
TEXTS = Texts()
-def print_err_response(error_payload):
- if isinstance(error_payload, list):
- error_msg = '\n'.join(error_payload)
- else:
- error_msg = error_payload
-
- print('Command failed with following errors:')
- print(LONG_LINE)
- print(error_msg)
- print(LONG_LINE)
- print(f'You can find more info in {DEBUG_LOG_FILEPATH}')
-
-
def print_wallet_info(wallet):
print(inspect.cleandoc(f'''
{LONG_LINE}
@@ -63,7 +50,7 @@ def get_tty_width():
return int(width)
-class Formatter(object):
+class Formatter:
def table(self, headers, rows):
table = texttable.Texttable(max_width=get_tty_width())
table.set_cols_dtype(['t' for h in headers])
@@ -272,4 +259,44 @@ def print_abi_validation_errors(info: list, raw: bool = False) -> None:
def print_node_cmd_error():
- print(TEXTS['node']['cmd_failed'])
+ print(TEXTS['node']['cmd_failed'].format(DEBUG_LOG_FILEPATH))
+
+
+def print_node_info(node, node_status):
+ print(inspect.cleandoc(f'''
+ {LONG_LINE}
+ Node info
+ Name: {node['name']}
+ ID: {node['id']}
+ IP: {node['ip']}
+ Public IP: {node['publicIP']}
+ Port: {node['port']}
+ Domain name: {node['domain_name']}
+ Status: {node_status}
+ {LONG_LINE}
+ '''))
+
+
+def print_err_response(error_payload):
+ if isinstance(error_payload, list):
+ error_msg = '\n'.join(error_payload)
+ else:
+ error_msg = error_payload
+
+ print('Command failed with following errors:')
+ print(LONG_LINE)
+ print(error_msg)
+ print(LONG_LINE)
+ print(f'You can find more info in {DEBUG_LOG_FILEPATH}')
+
+
+def print_failed_requirements_checks(failed_checks: list) -> None:
+ headers = ['Check', 'Info']
+ rows = [[r.name, r.info] for r in failed_checks]
+ table = texttable.Texttable()
+ table.add_rows([headers, *rows])
+ drawing = table.draw()
+ main_header = ' Failed checks '
+ block_len = (len(drawing.split()[0]) - len(main_header)) // 2
+ print('=' * block_len + main_header + '=' * block_len)
+ print(drawing)
diff --git a/tools/schain_types.py b/node_cli/utils/schain_types.py
similarity index 100%
rename from tools/schain_types.py
rename to node_cli/utils/schain_types.py
diff --git a/tools/texts.py b/node_cli/utils/texts.py
similarity index 92%
rename from tools/texts.py
rename to node_cli/utils/texts.py
index 5352f1af..d4813d27 100644
--- a/tools/texts.py
+++ b/node_cli/utils/texts.py
@@ -8,10 +8,10 @@
# along with this program. If not, see .
import yaml
-from configs import TEXT_FILE
+from node_cli.configs import TEXT_FILE
-class Texts():
+class Texts:
def __init__(self):
self._texts = self._load()
diff --git a/scripts/build.sh b/scripts/build.sh
index 87dae043..432eb7d2 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -28,7 +28,7 @@ OS=`uname -s`-`uname -m`
#CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
LATEST_COMMIT=$(git rev-parse HEAD)
CURRENT_DATETIME="`date "+%Y-%m-%d %H:%M:%S"`";
-DIST_INFO_FILEPATH=$PARENT_DIR/cli/info.py
+DIST_INFO_FILEPATH=$PARENT_DIR/node_cli/cli/info.py
touch $DIST_INFO_FILEPATH
@@ -40,7 +40,7 @@ echo "VERSION = '$VERSION'" >> $DIST_INFO_FILEPATH
EXECUTABLE_NAME=skale-$VERSION-$OS
-pyinstaller --onefile main.spec --hidden-import=eth_hash.backends.pysha3
+pyinstaller --onefile main.spec
mv $PARENT_DIR/dist/main $PARENT_DIR/dist/$EXECUTABLE_NAME
diff --git a/scripts/run_tests.sh b/scripts/run_tests.sh
index 5ccc89b2..12cce17a 100755
--- a/scripts/run_tests.sh
+++ b/scripts/run_tests.sh
@@ -3,4 +3,4 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
PROJECT_DIR=$(dirname $DIR)
-HOME_DIR='tests/' ENV=dev CONFIG_FILEPATH='tests/test-skalecli.yaml' DOTENV_FILEPATH='tests/test-env' py.test tests/ --ignore=tests/operations/ $@
+HIDE_STREAM_LOG=true TEST_HOME_DIR="$PROJECT_DIR/tests/" GLOBAL_SKALE_DIR="$PROJECT_DIR/tests/etc/skale" DOTENV_FILEPATH='tests/test-env' py.test --cov=$PROJECT_DIR/ tests/ --ignore=tests/operations/ $@
diff --git a/setup.py b/setup.py
index 5964e8fe..be5de1fb 100644
--- a/setup.py
+++ b/setup.py
@@ -21,16 +21,15 @@ def find_version(*file_paths):
extras_require = {
'linter': [
"flake8==3.7.9",
- "isort>=4.2.15,<5.4.3",
+ "isort>=4.2.15,<5.8.1",
],
'dev': [
- "boto3==1.13.19",
"bumpversion==0.6.0",
- "pytest==5.4.3",
+ "pytest==6.2.3",
"pytest-cov==2.9.0",
"twine==2.0.0",
"mock==4.0.2",
- "when-changed"
+ "freezegun==0.3.15"
]
}
@@ -43,7 +42,7 @@ def find_version(*file_paths):
name='node-cli',
# *IMPORTANT*: Don't manually change the version here.
# Use the 'bumpversion' utility instead.
- version=find_version("cli", "__init__.py"),
+ version=find_version("node_cli", "cli", "__init__.py"),
include_package_data=True,
description='SKALE client tools',
long_description_markdown_filename='README.md',
@@ -52,19 +51,20 @@ def find_version(*file_paths):
url='https://github.com/skalenetwork/node-cli',
install_requires=[
"click==7.1.2",
- "confuse",
"docker==4.2.2",
- "readsettings==3.4.5",
"PyInstaller==3.6",
"texttable==1.6.2",
"python-dateutil==2.8.1",
"Jinja2==2.11.2",
"psutil==5.7.0",
- "pycryptodome==3.9.7",
"python-dotenv==0.13.0",
"terminaltables==3.1.0",
"requests==2.23.0",
- "GitPython==3.1.12"
+ "GitPython==3.1.14",
+ "PyYAML==5.4.1",
+ "packaging==20.9",
+ "python-debian==0.1.39",
+ "python-iptables==1.0.0"
],
python_requires='>=3.6,<4',
extras_require=extras_require,
diff --git a/tests/.skale/.skale-cli-log/.gitkeep b/tests/.skale/.skale-cli-log/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/.skale/.tmp/.keep b/tests/.skale/.tmp/.keep
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/.skale/config/configs.yml b/tests/.skale/config/configs.yml
deleted file mode 100644
index 7febcef7..00000000
--- a/tests/.skale/config/configs.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-common:
- schain:
- cpu:
- skaled: 0.7
- ima: 0.3
- mem:
- skaled: 0.7
- ima: 0.3
- volume:
- max_consensus_storage_bytes: 0.3
- max_skaled_leveldb_storage_bytes: 0.3
- max_file_storage_bytes: 0.3
- max_reserved_storage_bytes: 0.1
- leveldb_storage:
- evm_storage_part: 0.6
- logs_storage_part: 0.4
-envs:
- mainnet:
- server:
- disk_size_bytes: 2000000000000
- testnet:
- server:
- disk_size_bytes: 200000000000
- qanet:
- server:
- disk_size_bytes: 200000000000
- devnet:
- server:
- disk_size_bytes: 80000000000
diff --git a/tests/.skale/config/environment_params.yaml b/tests/.skale/config/environment_params.yaml
new file mode 100644
index 00000000..2f6fdef1
--- /dev/null
+++ b/tests/.skale/config/environment_params.yaml
@@ -0,0 +1,98 @@
+common:
+ schain:
+ cpu:
+ skaled: 0.8
+ ima: 0.2
+ mem:
+ skaled: 0.8
+ ima: 0.2
+ volume_limits:
+ max_consensus_storage_bytes: 0.3
+ max_skaled_leveldb_storage_bytes: 0.3
+ max_file_storage_bytes: 0.3
+ max_reserved_storage_bytes: 0.1
+ leveldb_limits:
+ contract_storage: 0.6
+ db_storage: 0.4
+ base_rotate_after_block_divider: 61035.1
+ shared_space_coefficient: 1
+envs:
+ mainnet:
+ server:
+ cpu_total: 8
+ cpu_physical: 1
+ memory: 32000000000
+ swap: 16000000000
+ disk: 1900000000000
+
+ package:
+ iptables-persistent: 1.0.4
+ lvm2: 2.02.0
+ btrfs-progs: 4.15.1
+ lsof: "4.89"
+ psmisc: 23.1-1
+
+ docker:
+ docker-api: 1.41.0
+ docker-engine: 20.10.7
+ docker-compose: 1.27.4
+
+ testnet:
+ server:
+ cpu_total: 8
+ cpu_physical: 1
+ memory: 32000000000
+ swap: 16000000000
+ disk: 200000000000
+
+ package:
+ iptables-persistent: 1.0.4
+ lvm2: 2.02.0
+ btrfs-progs: 4.15.1
+ lsof: "4.89"
+ psmisc: 23.1-1
+
+ docker:
+ docker-api: 1.41.0
+ docker-engine: 20.10.7
+ docker-compose: 1.27.4
+
+ qanet:
+ server:
+ cpu_total: 8
+ cpu_physical: 1
+ memory: 32000000000
+ swap: 16000000000
+ disk: 200000000000
+
+ package:
+ iptables-persistent: 1.0.4
+ lvm2: 2.02.0
+ btrfs-progs: 4.15.1
+ lsof: "4.89"
+ psmisc: 23.1-1
+
+ docker:
+ docker-api: 1.41.0
+ docker-engine: 20.10.7
+ docker-compose: 1.27.4
+
+ devnet:
+ server:
+ cpu_total: 1
+ cpu_physical: 1
+ memory: 2000000000
+ swap: 2000000000
+ disk: 80000000000
+
+ package:
+ iptables-persistent: 1.0.4
+ lvm2: 2.02.0
+ btrfs-progs: 4.15.1
+ lsof: "4.89"
+ psmisc: 23.1-1
+
+ docker:
+ docker-api: 1.41.0
+ docker-engine: 20.10.7
+ docker-compose: 1.27.4
diff --git a/tests/.skale/config/schain_allocation.yml b/tests/.skale/config/schain_allocation.yml
index b56a0e15..0aebc880 100644
--- a/tests/.skale/config/schain_allocation.yml
+++ b/tests/.skale/config/schain_allocation.yml
@@ -3,197 +3,225 @@
devnet:
disk:
- large: 75999936512
- medium: 2374998016
- small: 593749504
- test: 2374998016
- test4: 2374998016
- leveldb:
+ large: 71039975424
+ medium: 8879996928
+ small: 554999808
+ test: 8879996928
+ test4: 8879996928
+ leveldb_limits:
large:
- evm_storage_part: 13679988571
- logs_storage_part: 9119992381
+ contract_storage: 12787195576
+ db_storage: 8524797050
medium:
- evm_storage_part: 427499642
- logs_storage_part: 284999761
+ contract_storage: 1598399446
+ db_storage: 1065599631
small:
- evm_storage_part: 106874910
- logs_storage_part: 71249940
+ contract_storage: 99899965
+ db_storage: 66599976
test:
- evm_storage_part: 427499642
- logs_storage_part: 284999761
+ contract_storage: 1598399446
+ db_storage: 1065599631
test4:
- evm_storage_part: 427499642
- logs_storage_part: 284999761
- volume:
+ contract_storage: 1598399446
+ db_storage: 1065599631
+ rotate_after_block:
+ large: 1310721
+ medium: 163840
+ small: 10240
+ test: 163840
+ test4: 163840
+ shared_space: 8959950848
+ volume_limits:
large:
- max_consensus_storage_bytes: 22799980953
- max_file_storage_bytes: 22799980953
- max_reserved_storage_bytes: 7599993651
- max_skaled_leveldb_storage_bytes: 22799980953
+ max_consensus_storage_bytes: 21311992627
+ max_file_storage_bytes: 21311992627
+ max_reserved_storage_bytes: 7103997542
+ max_skaled_leveldb_storage_bytes: 21311992627
medium:
- max_consensus_storage_bytes: 712499404
- max_file_storage_bytes: 712499404
- max_reserved_storage_bytes: 237499801
- max_skaled_leveldb_storage_bytes: 712499404
+ max_consensus_storage_bytes: 2663999078
+ max_file_storage_bytes: 2663999078
+ max_reserved_storage_bytes: 887999692
+ max_skaled_leveldb_storage_bytes: 2663999078
small:
- max_consensus_storage_bytes: 178124851
- max_file_storage_bytes: 178124851
- max_reserved_storage_bytes: 59374950
- max_skaled_leveldb_storage_bytes: 178124851
+ max_consensus_storage_bytes: 166499942
+ max_file_storage_bytes: 166499942
+ max_reserved_storage_bytes: 55499980
+ max_skaled_leveldb_storage_bytes: 166499942
test:
- max_consensus_storage_bytes: 712499404
- max_file_storage_bytes: 712499404
- max_reserved_storage_bytes: 237499801
- max_skaled_leveldb_storage_bytes: 712499404
+ max_consensus_storage_bytes: 2663999078
+ max_file_storage_bytes: 2663999078
+ max_reserved_storage_bytes: 887999692
+ max_skaled_leveldb_storage_bytes: 2663999078
test4:
- max_consensus_storage_bytes: 712499404
- max_file_storage_bytes: 712499404
- max_reserved_storage_bytes: 237499801
- max_skaled_leveldb_storage_bytes: 712499404
+ max_consensus_storage_bytes: 2663999078
+ max_file_storage_bytes: 2663999078
+ max_reserved_storage_bytes: 887999692
+ max_skaled_leveldb_storage_bytes: 2663999078
mainnet:
disk:
- large: 1899999985664
- medium: 59374999552
- small: 14843749888
- test: 59374999552
- test4: 59374999552
- leveldb:
+ large: 1687199940608
+ medium: 210899992576
+ small: 13181249536
+ test: 210899992576
+ test4: 210899992576
+ leveldb_limits:
large:
- evm_storage_part: 341999997419
- logs_storage_part: 227999998279
+ contract_storage: 303695989309
+ db_storage: 202463992872
medium:
- evm_storage_part: 10687499919
- logs_storage_part: 7124999946
+ contract_storage: 37961998663
+ db_storage: 25307999108
small:
- evm_storage_part: 2671874979
- logs_storage_part: 1781249986
+ contract_storage: 2372624916
+ db_storage: 1581749944
test:
- evm_storage_part: 10687499919
- logs_storage_part: 7124999946
+ contract_storage: 37961998663
+ db_storage: 25307999108
test4:
- evm_storage_part: 10687499919
- logs_storage_part: 7124999946
- volume:
+ contract_storage: 37961998663
+ db_storage: 25307999108
+ rotate_after_block:
+ large: 31129628
+ medium: 3891203
+ small: 243200
+ test: 3891203
+ test4: 3891203
+ shared_space: 212799979520
+ volume_limits:
large:
- max_consensus_storage_bytes: 569999995699
- max_file_storage_bytes: 569999995699
- max_reserved_storage_bytes: 189999998566
- max_skaled_leveldb_storage_bytes: 569999995699
+ max_consensus_storage_bytes: 506159982182
+ max_file_storage_bytes: 506159982182
+ max_reserved_storage_bytes: 168719994060
+ max_skaled_leveldb_storage_bytes: 506159982182
medium:
- max_consensus_storage_bytes: 17812499865
- max_file_storage_bytes: 17812499865
- max_reserved_storage_bytes: 5937499955
- max_skaled_leveldb_storage_bytes: 17812499865
+ max_consensus_storage_bytes: 63269997772
+ max_file_storage_bytes: 63269997772
+ max_reserved_storage_bytes: 21089999257
+ max_skaled_leveldb_storage_bytes: 63269997772
small:
- max_consensus_storage_bytes: 4453124966
- max_file_storage_bytes: 4453124966
- max_reserved_storage_bytes: 1484374988
- max_skaled_leveldb_storage_bytes: 4453124966
+ max_consensus_storage_bytes: 3954374860
+ max_file_storage_bytes: 3954374860
+ max_reserved_storage_bytes: 1318124953
+ max_skaled_leveldb_storage_bytes: 3954374860
test:
- max_consensus_storage_bytes: 17812499865
- max_file_storage_bytes: 17812499865
- max_reserved_storage_bytes: 5937499955
- max_skaled_leveldb_storage_bytes: 17812499865
+ max_consensus_storage_bytes: 63269997772
+ max_file_storage_bytes: 63269997772
+ max_reserved_storage_bytes: 21089999257
+ max_skaled_leveldb_storage_bytes: 63269997772
test4:
- max_consensus_storage_bytes: 17812499865
- max_file_storage_bytes: 17812499865
- max_reserved_storage_bytes: 5937499955
- max_skaled_leveldb_storage_bytes: 17812499865
+ max_consensus_storage_bytes: 63269997772
+ max_file_storage_bytes: 63269997772
+ max_reserved_storage_bytes: 21089999257
+ max_skaled_leveldb_storage_bytes: 63269997772
qanet:
disk:
- large: 189999939584
- medium: 5937498112
- small: 1484374528
- test: 5937498112
- test4: 5937498112
- leveldb:
+ large: 177599938560
+ medium: 22199992320
+ small: 1387499520
+ test: 22199992320
+ test4: 22199992320
+ leveldb_limits:
large:
- evm_storage_part: 34199989125
- logs_storage_part: 22799992750
+ contract_storage: 31967988940
+ db_storage: 21311992627
medium:
- evm_storage_part: 1068749659
- logs_storage_part: 712499773
+ contract_storage: 3995998617
+ db_storage: 2663999078
small:
- evm_storage_part: 267187414
- logs_storage_part: 178124943
+ contract_storage: 249749913
+ db_storage: 166499942
test:
- evm_storage_part: 1068749659
- logs_storage_part: 712499773
+ contract_storage: 3995998617
+ db_storage: 2663999078
test4:
- evm_storage_part: 1068749659
- logs_storage_part: 712499773
- volume:
+ contract_storage: 3995998617
+ db_storage: 2663999078
+ rotate_after_block:
+ large: 3276803
+ medium: 409600
+ small: 25600
+ test: 409600
+ test4: 409600
+ shared_space: 22399942656
+ volume_limits:
large:
- max_consensus_storage_bytes: 56999981875
- max_file_storage_bytes: 56999981875
- max_reserved_storage_bytes: 18999993958
- max_skaled_leveldb_storage_bytes: 56999981875
+ max_consensus_storage_bytes: 53279981568
+ max_file_storage_bytes: 53279981568
+ max_reserved_storage_bytes: 17759993856
+ max_skaled_leveldb_storage_bytes: 53279981568
medium:
- max_consensus_storage_bytes: 1781249433
- max_file_storage_bytes: 1781249433
- max_reserved_storage_bytes: 593749811
- max_skaled_leveldb_storage_bytes: 1781249433
+ max_consensus_storage_bytes: 6659997696
+ max_file_storage_bytes: 6659997696
+ max_reserved_storage_bytes: 2219999232
+ max_skaled_leveldb_storage_bytes: 6659997696
small:
- max_consensus_storage_bytes: 445312358
- max_file_storage_bytes: 445312358
- max_reserved_storage_bytes: 148437452
- max_skaled_leveldb_storage_bytes: 445312358
+ max_consensus_storage_bytes: 416249856
+ max_file_storage_bytes: 416249856
+ max_reserved_storage_bytes: 138749952
+ max_skaled_leveldb_storage_bytes: 416249856
test:
- max_consensus_storage_bytes: 1781249433
- max_file_storage_bytes: 1781249433
- max_reserved_storage_bytes: 593749811
- max_skaled_leveldb_storage_bytes: 1781249433
+ max_consensus_storage_bytes: 6659997696
+ max_file_storage_bytes: 6659997696
+ max_reserved_storage_bytes: 2219999232
+ max_skaled_leveldb_storage_bytes: 6659997696
test4:
- max_consensus_storage_bytes: 1781249433
- max_file_storage_bytes: 1781249433
- max_reserved_storage_bytes: 593749811
- max_skaled_leveldb_storage_bytes: 1781249433
+ max_consensus_storage_bytes: 6659997696
+ max_file_storage_bytes: 6659997696
+ max_reserved_storage_bytes: 2219999232
+ max_skaled_leveldb_storage_bytes: 6659997696
testnet:
disk:
- large: 189999939584
- medium: 5937498112
- small: 1484374528
- test: 5937498112
- test4: 5937498112
- leveldb:
+ large: 177599938560
+ medium: 22199992320
+ small: 1387499520
+ test: 22199992320
+ test4: 22199992320
+ leveldb_limits:
large:
- evm_storage_part: 34199989125
- logs_storage_part: 22799992750
+ contract_storage: 31967988940
+ db_storage: 21311992627
medium:
- evm_storage_part: 1068749659
- logs_storage_part: 712499773
+ contract_storage: 3995998617
+ db_storage: 2663999078
small:
- evm_storage_part: 267187414
- logs_storage_part: 178124943
+ contract_storage: 249749913
+ db_storage: 166499942
test:
- evm_storage_part: 1068749659
- logs_storage_part: 712499773
+ contract_storage: 3995998617
+ db_storage: 2663999078
test4:
- evm_storage_part: 1068749659
- logs_storage_part: 712499773
- volume:
+ contract_storage: 3995998617
+ db_storage: 2663999078
+ rotate_after_block:
+ large: 3276803
+ medium: 409600
+ small: 25600
+ test: 409600
+ test4: 409600
+ shared_space: 22399942656
+ volume_limits:
large:
- max_consensus_storage_bytes: 56999981875
- max_file_storage_bytes: 56999981875
- max_reserved_storage_bytes: 18999993958
- max_skaled_leveldb_storage_bytes: 56999981875
+ max_consensus_storage_bytes: 53279981568
+ max_file_storage_bytes: 53279981568
+ max_reserved_storage_bytes: 17759993856
+ max_skaled_leveldb_storage_bytes: 53279981568
medium:
- max_consensus_storage_bytes: 1781249433
- max_file_storage_bytes: 1781249433
- max_reserved_storage_bytes: 593749811
- max_skaled_leveldb_storage_bytes: 1781249433
+ max_consensus_storage_bytes: 6659997696
+ max_file_storage_bytes: 6659997696
+ max_reserved_storage_bytes: 2219999232
+ max_skaled_leveldb_storage_bytes: 6659997696
small:
- max_consensus_storage_bytes: 445312358
- max_file_storage_bytes: 445312358
- max_reserved_storage_bytes: 148437452
- max_skaled_leveldb_storage_bytes: 445312358
+ max_consensus_storage_bytes: 416249856
+ max_file_storage_bytes: 416249856
+ max_reserved_storage_bytes: 138749952
+ max_skaled_leveldb_storage_bytes: 416249856
test:
- max_consensus_storage_bytes: 1781249433
- max_file_storage_bytes: 1781249433
- max_reserved_storage_bytes: 593749811
- max_skaled_leveldb_storage_bytes: 1781249433
+ max_consensus_storage_bytes: 6659997696
+ max_file_storage_bytes: 6659997696
+ max_reserved_storage_bytes: 2219999232
+ max_skaled_leveldb_storage_bytes: 6659997696
test4:
- max_consensus_storage_bytes: 1781249433
- max_file_storage_bytes: 1781249433
- max_reserved_storage_bytes: 593749811
- max_skaled_leveldb_storage_bytes: 1781249433
+ max_consensus_storage_bytes: 6659997696
+ max_file_storage_bytes: 6659997696
+ max_reserved_storage_bytes: 2219999232
+ max_skaled_leveldb_storage_bytes: 6659997696
diff --git a/tests/.skale/node_data/log/.removed_containers/.keep b/tests/.skale/node_data/log/.removed_containers/.keep
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/.skale/node_data/meta.json b/tests/.skale/node_data/meta.json
deleted file mode 100644
index 93ebc927..00000000
--- a/tests/.skale/node_data/meta.json
+++ /dev/null
@@ -1 +0,0 @@
-{"version": "0.0.0", "config_stream": "master", "docker_lvmpy_stream": "master"}
diff --git a/tests/cli/containers_test.py b/tests/cli/containers_test.py
deleted file mode 100644
index ce056bd3..00000000
--- a/tests/cli/containers_test.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import requests
-
-from tests.helper import response_mock, run_command_mock
-from cli.containers import ls, schains
-
-
-OK_RESPONSE_DATA = {
- 'status': 'ok',
- 'payload': [{
- 'image': 'image-skale', 'name': 'skale_schain_test',
- 'state': {
- 'Status': 'running', 'Running': True, 'Paused': False,
- 'Restarting': False, 'OOMKilled': False, 'Dead': False,
- 'Pid': 123, 'ExitCode': 0, 'Error': '',
- 'StartedAt': '2019-10-08T13:59:54.52368097Z',
- 'FinishedAt': '0001-01-01T00:00:00Z'
- }
- }, {
- 'image': 'image-skale', 'name': 'skale_schain_test2',
- 'state': {
- 'Status': 'running', 'Running': False, 'Paused': True,
- 'Restarting': False, 'OOMKilled': False, 'Dead': False,
- 'Pid': 124, 'ExitCode': 0, 'Error': '',
- 'StartedAt': '2019-10-08T13:59:54.52368099Z',
- 'FinishedAt': '0001-01-01T00:00:00Z'
- }
- }
- ]}
-
-
-OK_LS_RESPONSE_DATA = {
- 'status': 'ok',
- 'payload':
- {'containers': [{'image': 'skalenetwork/schain:1.46-develop.21',
- 'name': 'skale_schain_shapely-alfecca-meridiana',
- 'state': {
- 'Status': 'running', 'Running': True,
- 'Paused': False, 'Restarting': False,
- 'OOMKilled': False, 'Dead': False,
- 'Pid': 232, 'ExitCode': 0,
- 'Error': '',
- 'StartedAt': '2020-07-31T11:56:35.732888232Z',
- 'FinishedAt': '0001-01-01T00:00:00Z'}
- },
- {'image': 'skale-admin:latest', 'name': 'skale_api',
- 'state': {
- 'Status': 'running',
- 'Running': True, 'Paused': False,
- 'Restarting': False, 'OOMKilled': False,
- 'Dead': False, 'Pid': 6710, 'ExitCode': 0,
- 'Error': '',
- 'StartedAt': '2020-07-31T11:55:17.28700307Z',
- 'FinishedAt': '0001-01-01T00:00:00Z'}}]
- }}
-
-
-def test_schains_ok_response(config):
- resp_mock = response_mock(
- requests.codes.ok,
- json_data=OK_RESPONSE_DATA
- )
- result = run_command_mock('core.helper.requests.get',
- resp_mock, schains)
- assert result.exit_code == 0
-
- output_list = result.output.splitlines()
- assert output_list[0] == ' Name Status Started At Image ' # noqa
- assert output_list[1] == '-------------------------------------------------------------------------------------' # noqa
- assert output_list[2] == 'skale_schain_test Running Oct 08 2019 13:59:54 image-skale' # noqa
- assert output_list[3] == 'skale_schain_test2 Running (Jan 01 1 00:00:00) Oct 08 2019 13:59:54 image-skale' # noqa
-
-
-def test_schain_error_response(config):
- resp_mock = response_mock(
- requests.codes.bad_request,
- json_data={'status': 'error', 'payload': 'Operation failed'}
- )
- result = run_command_mock('core.helper.requests.get',
- resp_mock, schains)
- assert result.exit_code == 0
- print(repr(result.output))
- assert result.output == ('Command failed with following errors:\n'
- '-----------------------------------------'
- '---------\nOperation failed\n--------------------'
- '------------------------------\n'
- 'You can find more info in tests/.skale/.skale-cli-log/debug-node-cli.log\n') # noqa
-
-
-def test_schain_empty_response(config):
- resp_mock = response_mock(
- requests.codes.ok,
- {'status': 'ok', 'payload': None}
- )
- result = run_command_mock('core.helper.requests.get',
- resp_mock, schains)
- assert result.exit_code == 1
- assert result.output == ''
-
-
-def test_schain_multi_error_response(config):
- resp_mock = response_mock(
- -1,
- {'payload': ['Error test', 'Error test2'], 'status': 'error'}
- )
- result = run_command_mock('core.helper.requests.get',
- resp_mock, schains)
- assert result.exit_code == 0
- print(repr(result.output))
- assert result.output == 'Command failed with following errors:\n--------------------------------------------------\nError test\nError test2\n--------------------------------------------------\nYou can find more info in tests/.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
-
-
-def test_ls():
- resp_mock = response_mock(
- requests.codes.ok,
- json_data=OK_LS_RESPONSE_DATA
- )
- result = run_command_mock('core.helper.requests.get',
- resp_mock, ls)
- assert result.exit_code == 0
- assert result.output == ' Name Status Started At Image \n-------------------------------------------------------------------------------------------------------------\nskale_schain_shapely-alfecca-meridiana Running Jul 31 2020 11:56:35 skalenetwork/schain:1.46-develop.21\nskale_api Running Jul 31 2020 11:55:17 skale-admin:latest \n' # noqa
diff --git a/tests/cli/exit_test.py b/tests/cli/exit_test.py
index 25ebd9b2..ae6e9530 100644
--- a/tests/cli/exit_test.py
+++ b/tests/cli/exit_test.py
@@ -1,10 +1,10 @@
import requests
-from cli.exit import status
+from node_cli.cli.exit import status
from tests.helper import response_mock, run_command_mock
-def test_exit_status(config):
+def test_exit_status():
payload = {
'status': 'ACTIVE',
'data': [{'name': 'test', 'status': 'ACTIVE'}],
@@ -15,6 +15,7 @@ def test_exit_status(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, status, ['--format', 'json'])
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.get', resp_mock, status, ['--format', 'json'])
assert result.exit_code == 0
assert result.output == "{'status': 'ACTIVE', 'data': [{'name': 'test', 'status': 'ACTIVE'}], 'exit_time': 0}\n" # noqa
diff --git a/tests/cli/health_test.py b/tests/cli/health_test.py
new file mode 100644
index 00000000..fbd69e41
--- /dev/null
+++ b/tests/cli/health_test.py
@@ -0,0 +1,103 @@
+import requests
+
+from tests.helper import response_mock, run_command_mock
+from node_cli.cli.health import containers, schains, sgx
+
+
+OK_LS_RESPONSE_DATA = {
+ 'status': 'ok',
+ 'payload':
+ [
+ {
+ 'image': 'skalenetwork/schain:1.46-develop.21',
+ 'name': 'skale_schain_shapely-alfecca-meridiana',
+ 'state': {
+ 'Status': 'running', 'Running': True,
+ 'Paused': False, 'Restarting': False,
+ 'OOMKilled': False, 'Dead': False,
+ 'Pid': 232, 'ExitCode': 0,
+ 'Error': '',
+ 'StartedAt': '2020-07-31T11:56:35.732888232Z',
+ 'FinishedAt': '0001-01-01T00:00:00Z'
+ }
+ },
+ {
+ 'image': 'skale-admin:latest', 'name': 'skale_api',
+ 'state': {
+ 'Status': 'running',
+ 'Running': True, 'Paused': False,
+ 'Restarting': False, 'OOMKilled': False,
+ 'Dead': False, 'Pid': 6710, 'ExitCode': 0,
+ 'Error': '',
+ 'StartedAt': '2020-07-31T11:55:17.28700307Z',
+ 'FinishedAt': '0001-01-01T00:00:00Z'
+ }
+ }
+ ]
+}
+
+
+def test_containers():
+ resp_mock = response_mock(
+ requests.codes.ok,
+ json_data=OK_LS_RESPONSE_DATA
+ )
+ result = run_command_mock('node_cli.utils.helper.requests.get',
+ resp_mock, containers)
+ assert result.exit_code == 0
+ assert result.output == ' Name Status Started At Image \n-------------------------------------------------------------------------------------------------------------\nskale_schain_shapely-alfecca-meridiana Running Jul 31 2020 11:56:35 skalenetwork/schain:1.46-develop.21\nskale_api Running Jul 31 2020 11:55:17 skale-admin:latest \n' # noqa
+
+
+def test_checks():
+ payload = [
+ {
+ "name": "test_schain",
+ "healthchecks": {
+ "data_dir": True,
+ "dkg": False,
+ "config": False,
+ "volume": False,
+ "container": False,
+ "ima_container": False,
+ "firewall_rules": False,
+ "rpc": False,
+ "blocks": False
+ }
+ }
+ ]
+ resp_mock = response_mock(
+ requests.codes.ok,
+ json_data={'payload': payload, 'status': 'ok'}
+ )
+ result = run_command_mock('node_cli.utils.helper.requests.get',
+ resp_mock, schains)
+
+ print(result)
+ print(result.output)
+
+ assert result.exit_code == 0
+ assert result.output == 'sChain Name Data directory DKG Config file Volume Container IMA Firewall RPC Blocks\n-----------------------------------------------------------------------------------------------------------\ntest_schain True False False False False False False False False \n' # noqa
+
+ result = run_command_mock('node_cli.utils.helper.requests.get',
+ resp_mock, schains, ['--json'])
+
+ assert result.exit_code == 0
+ assert result.output == '[{"name": "test_schain", "healthchecks": {"data_dir": true, "dkg": false, "config": false, "volume": false, "container": false, "ima_container": false, "firewall_rules": false, "rpc": false, "blocks": false}}]\n' # noqa
+
+
+def test_sgx_status():
+ payload = {
+ 'sgx_server_url': 'https://127.0.0.1:1026',
+ 'sgx_wallet_version': '1.50.1-stable.0',
+ 'sgx_keyname': 'test_keyname',
+ 'status_name': 'CONNECTED'
+ }
+ resp_mock = response_mock(
+ requests.codes.ok,
+ json_data={'payload': payload, 'status': 'ok'}
+ )
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.get', resp_mock, sgx)
+
+ assert result.exit_code == 0
+ assert result.output == '\x1b(0lqqqqqqqqqqqqqqqqqqqwqqqqqqqqqqqqqqqqqqqqqqqqk\x1b(B\n\x1b(0x\x1b(B SGX info \x1b(0x\x1b(B \x1b(0x\x1b(B\n\x1b(0tqqqqqqqqqqqqqqqqqqqnqqqqqqqqqqqqqqqqqqqqqqqqu\x1b(B\n\x1b(0x\x1b(B Server URL \x1b(0x\x1b(B https://127.0.0.1:1026 \x1b(0x\x1b(B\n\x1b(0x\x1b(B SGXWallet Version \x1b(0x\x1b(B 1.50.1-stable.0 \x1b(0x\x1b(B\n\x1b(0x\x1b(B Node SGX keyname \x1b(0x\x1b(B test_keyname \x1b(0x\x1b(B\n\x1b(0x\x1b(B Status \x1b(0x\x1b(B CONNECTED \x1b(0x\x1b(B\n\x1b(0mqqqqqqqqqqqqqqqqqqqvqqqqqqqqqqqqqqqqqqqqqqqqj\x1b(B\n' # noqa
diff --git a/tests/cli/logs_test.py b/tests/cli/logs_test.py
index 885263c8..3ba87431 100644
--- a/tests/cli/logs_test.py
+++ b/tests/cli/logs_test.py
@@ -17,31 +17,17 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import freezegun
-import os
+from node_cli.cli.logs import dump
+from node_cli.configs import G_CONF_HOME
-import mock
-import requests
+from tests.helper import run_command
+from tests.core.core_logs_test import backup_func, CURRENT_DATETIME, TEST_ARCHIVE_PATH # noqa
-from io import BytesIO
-from tests.helper import response_mock, run_command
-from cli.logs import dump
-
-def test_dump(config):
- archive_filename = 'skale-logs-dump-2019-10-08-17:40:00.tar.gz'
- resp_mock = response_mock(
- requests.codes.ok,
- headers={
- 'Content-Disposition': f'attachment; filename="{archive_filename}"'
- },
- raw=BytesIO()
- )
- with mock.patch('requests.get') as req_get_mock:
- req_get_mock.return_value.__enter__.return_value = resp_mock
- result = run_command(dump, ['.'])
- assert result.exit_code == 0
- assert result.output == f'File {archive_filename} downloaded\n'
-
- if os.path.exists(archive_filename):
- os.remove(archive_filename)
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_dump(backup_func): # noqa
+ result = run_command(dump, [G_CONF_HOME])
+ assert result.exit_code == 0
+ assert result.output == f'Logs dump created: {TEST_ARCHIVE_PATH}\n'
diff --git a/tests/cli/main_test.py b/tests/cli/main_test.py
index 6643cb4a..2703f0b5 100644
--- a/tests/cli/main_test.py
+++ b/tests/cli/main_test.py
@@ -18,12 +18,12 @@
# along with this program. If not, see .
-from cli import info
-from main import version
+from node_cli.cli import info
+from node_cli.main import version
from tests.helper import run_command
-def test_version(config):
+def test_version():
result = run_command(version, [])
expected = f'SKALE Node CLI version: {info.VERSION}\n'
assert result.output == expected
diff --git a/tests/cli/node_test.py b/tests/cli/node_test.py
index c4f2c183..8cf08f70 100644
--- a/tests/cli/node_test.py
+++ b/tests/cli/node_test.py
@@ -22,14 +22,14 @@
import mock
import requests
+import logging
-from configs import NODE_DATA_PATH, SKALE_DIR
-from core.resources import ResourceAlloc
-from cli.node import (init_node,
- node_about, node_info, register_node, signature,
- update_node, backup_node, restore_node,
- set_node_in_maintenance,
- remove_node_from_maintenance, _turn_off, _turn_on, _set_domain_name)
+from node_cli.configs import NODE_DATA_PATH, SKALE_DIR, G_CONF_HOME
+from node_cli.cli.node import (init_node, node_info, register_node, signature,
+ update_node, backup_node, restore_node,
+ set_node_in_maintenance,
+ remove_node_from_maintenance, _turn_off, _turn_on, _set_domain_name)
+from node_cli.utils.helper import init_default_logger
from tests.helper import (
response_mock, run_command_mock,
@@ -37,187 +37,151 @@
)
from tests.resources_test import BIG_DISK_SIZE
+logger = logging.getLogger(__name__)
+init_default_logger()
-def disk_alloc_mock(env_type):
- return ResourceAlloc(128)
-
-def test_register_node(resource_alloc, config):
+def test_register_node(resource_alloc, mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
- result = run_command_mock(
- 'core.helper.requests.post',
- resp_mock,
- register_node,
- ['--name', 'test-node', '--ip', '0.0.0.0', '--port', '8080', '-d', 'skale.test'])
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.post',
+ resp_mock,
+ register_node,
+ ['--name', 'test-node', '--ip', '0.0.0.0', '--port', '8080', '-d', 'skale.test'])
assert result.exit_code == 0
assert result.output == 'Node registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
-def test_register_node_with_error(resource_alloc, config):
+def test_register_node_with_error(resource_alloc, mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'error', 'payload': ['Strange error']},
)
- result = run_command_mock(
- 'core.helper.requests.post',
- resp_mock,
- register_node,
- ['--name', 'test-node2', '--ip', '0.0.0.0', '--port', '80', '-d', 'skale.test'])
- assert result.exit_code == 0
- assert result.output == 'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in tests/.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.post',
+ resp_mock,
+ register_node,
+ ['--name', 'test-node2', '--ip', '0.0.0.0', '--port', '80', '-d', 'skale.test'])
+ assert result.exit_code == 3
+ assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
-def test_register_node_with_prompted_ip(resource_alloc, config):
+def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
- result = run_command_mock(
- 'core.helper.requests.post',
- resp_mock,
- register_node,
- ['--name', 'test-node', '--port', '8080', '-d', 'skale.test'], input='0.0.0.0\n')
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.post',
+ resp_mock,
+ register_node,
+ ['--name', 'test-node', '--port', '8080', '-d', 'skale.test'], input='0.0.0.0\n')
assert result.exit_code == 0
assert result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
-def test_register_node_with_default_port(resource_alloc, config):
+def test_register_node_with_default_port(resource_alloc, mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
- result = run_command_mock(
- 'core.helper.requests.post',
- resp_mock,
- register_node,
- ['--name', 'test-node', '-d', 'skale.test'], input='0.0.0.0\n')
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.post',
+ resp_mock,
+ register_node,
+ ['--name', 'test-node', '-d', 'skale.test'], input='0.0.0.0\n')
assert result.exit_code == 0
assert result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
-def test_register_with_no_alloc(config):
+def test_register_with_no_alloc(mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
register_node,
['--name', 'test-node', '-d', 'skale.test'], input='0.0.0.0\n')
- assert result.exit_code == 0
+ assert result.exit_code == 8
print(repr(result.output))
- assert result.output == "Enter node public IP: 0.0.0.0\nNode hasn't been inited before.\nYou should run < skale node init >\n" # noqa
+ assert result.output == f'Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn\'t been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
-def test_init_node(config):
+def test_init_node(caplog): # todo: write new init node test
resp_mock = response_mock(requests.codes.created)
- with mock.patch('subprocess.run', new=subprocess_run_mock), \
- mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE), \
- mock.patch('core.node.prepare_host'), \
- mock.patch('core.host.init_data_dir'), \
- mock.patch('core.node.is_base_containers_alive',
- return_value=True), \
- mock.patch('core.node.is_node_inited', return_value=False):
- result = run_command_mock(
- 'core.helper.post_request',
- resp_mock,
- init_node,
- ['./tests/test-env'])
- assert result.output == 'Waiting for transaction manager initialization ...\nInit procedure finished\n' # noqa
- assert result.exit_code == 0
-
-
-# def test_purge(config):
-# params = ['--yes']
-# resp_mock = response_mock(requests.codes.created)
-# with mock.patch('core.node.subprocess.run'):
-# result = run_command_mock(
-# 'core.node.post',
-# resp_mock,
-# purge_node,
-# params
-# )
-# assert result.exit_code == 0
-# assert result.output == '' # noqa
-
-
-def test_update_node(config):
+ with caplog.at_level(logging.INFO):
+ with mock.patch('subprocess.run', new=subprocess_run_mock), \
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), \
+ mock.patch('node_cli.core.host.prepare_host'), \
+ mock.patch('node_cli.core.host.init_data_dir'), \
+ mock.patch('node_cli.core.node.init_op'), \
+ mock.patch('node_cli.core.node.is_base_containers_alive',
+ return_value=True), \
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False):
+ result = run_command_mock(
+ 'node_cli.utils.helper.post_request',
+ resp_mock,
+ init_node,
+ ['./tests/test-env'])
+ assert 'Init procedure finished' in caplog.text
+ assert result.exit_code == 0
+
+
+def test_update_node(mocked_g_config):
os.makedirs(NODE_DATA_PATH, exist_ok=True)
params = ['./tests/test-env', '--yes']
resp_mock = response_mock(requests.codes.created)
with mock.patch('subprocess.run', new=subprocess_run_mock), \
- mock.patch('core.node.update_op'), \
- mock.patch('core.node.get_flask_secret_key'), \
- mock.patch('core.node.save_env_params'), \
- mock.patch('core.node.prepare_host'), \
- mock.patch('core.node.is_base_containers_alive',
+ mock.patch('node_cli.core.node.update_op'), \
+ mock.patch('node_cli.core.node.get_flask_secret_key'), \
+ mock.patch('node_cli.core.node.save_env_params'), \
+ mock.patch('node_cli.core.node.configure_firewall_rules'), \
+ mock.patch('node_cli.core.host.prepare_host'), \
+ mock.patch('node_cli.core.node.is_base_containers_alive',
return_value=True), \
- mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE), \
- mock.patch('core.host.init_data_dir'):
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), \
+ mock.patch('node_cli.core.host.init_data_dir'):
result = run_command_mock(
- 'core.helper.post_request',
+ 'node_cli.utils.helper.post_request',
resp_mock,
update_node,
params,
input='/dev/sdp')
assert result.exit_code == 0
- # assert result.output == 'Updating the node...\nWaiting for transaction manager initialization ...\nUpdate procedure finished\n' # noqa
+ # assert result.output == 'Updating the node...\nWaiting for containers initialization ...\nUpdate procedure finished\n' # noqa
-def test_update_node_without_init(config):
+def test_update_node_without_init():
params = ['./tests/test-env', '--yes']
resp_mock = response_mock(requests.codes.created)
with mock.patch('subprocess.run', new=subprocess_run_mock), \
- mock.patch('core.node.get_flask_secret_key'), \
- mock.patch('core.node.save_env_params'), \
- mock.patch('core.node.prepare_host'), \
- mock.patch('core.host.init_data_dir'), \
- mock.patch('core.node.is_base_containers_alive',
+ mock.patch('node_cli.core.node.get_flask_secret_key'), \
+ mock.patch('node_cli.core.node.save_env_params'), \
+ mock.patch('node_cli.core.host.prepare_host'), \
+ mock.patch('node_cli.core.host.init_data_dir'), \
+ mock.patch('node_cli.core.node.is_base_containers_alive',
return_value=True), \
- mock.patch('core.node.is_node_inited', return_value=False):
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False):
result = run_command_mock(
- 'core.helper.post_request',
+ 'node_cli.utils.helper.post_request',
resp_mock,
update_node,
params,
input='/dev/sdp')
- assert result.exit_code == 0
- assert result.output == "Node hasn't been inited before.\nYou should run < skale node init >\n" # noqa
+ assert result.exit_code == 8
+ assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nNode hasn\'t been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
-def test_node_info_node_about(config):
- payload = {
- 'libraries': {
- 'javascript': 'N/A', 'python': '0.89.0'},
- 'contracts': {
- 'token': '0x3',
- 'manager': '0x23'
- },
- 'network': {
- 'endpoint': 'ws://0.0.0.0:8080'
- },
- 'local_wallet': {
- 'address': '0xf',
- 'eth_balance_wei': '15',
- 'skale_balance_wei': '84312304',
- 'eth_balance': '2.424',
- 'skale_balance': '323.123'
- }
- }
- resp_mock = response_mock(
- requests.codes.ok,
- {'status': 'ok', 'payload': payload}
- )
- result = run_command_mock('core.helper.requests.get', resp_mock, node_about)
- assert result.exit_code == 0
- assert result.output == "{'libraries': {'javascript': 'N/A', 'python': '0.89.0'}, 'contracts': {'token': '0x3', 'manager': '0x23'}, 'network': {'endpoint': 'ws://0.0.0.0:8080'}, 'local_wallet': {'address': '0xf', 'eth_balance_wei': '15', 'skale_balance_wei': '84312304', 'eth_balance': '2.424', 'skale_balance': '323.123'}}\n" # noqa
-
-
-def test_node_info_node_info(config):
+def test_node_info_node_info():
payload = {
'node_info': {
'name': 'test', 'ip': '0.0.0.0',
@@ -236,12 +200,12 @@ def test_node_info_node_info(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, node_info)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n' # noqa
-def test_node_info_node_info_not_created(config):
+def test_node_info_node_info_not_created():
payload = {
'node_info': {
'name': 'test', 'ip': '0.0.0.0',
@@ -260,12 +224,12 @@ def test_node_info_node_info_not_created(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, node_info)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
assert result.output == 'This SKALE node is not registered on SKALE Manager yet\n'
-def test_node_info_node_info_frozen(config):
+def test_node_info_node_info_frozen():
payload = {
'node_info': {
'name': 'test', 'ip': '0.0.0.0',
@@ -284,12 +248,12 @@ def test_node_info_node_info_frozen(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, node_info)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n' # noqa
-def test_node_info_node_info_left(config):
+def test_node_info_node_info_left():
payload = {
'node_info': {
'name': 'test', 'ip': '0.0.0.0',
@@ -308,12 +272,12 @@ def test_node_info_node_info_left(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, node_info)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n' # noqa
-def test_node_info_node_info_leaving(config):
+def test_node_info_node_info_leaving():
payload = {
'node_info': {
'name': 'test', 'ip': '0.0.0.0',
@@ -332,12 +296,12 @@ def test_node_info_node_info_leaving(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, node_info)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n' # noqa
-def test_node_info_node_info_in_maintenance(config):
+def test_node_info_node_info_in_maintenance():
payload = {
'node_info': {
'name': 'test', 'ip': '0.0.0.0',
@@ -356,7 +320,7 @@ def test_node_info_node_info_in_maintenance(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, node_info)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
assert result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n' # noqa
@@ -368,7 +332,7 @@ def test_node_signature():
'payload': {'signature': signature_sample}
}
resp_mock = response_mock(requests.codes.ok, json_data=response_data)
- result = run_command_mock('core.helper.requests.get',
+ result = run_command_mock('node_cli.utils.helper.requests.get',
resp_mock, signature, ['1'])
assert result.exit_code == 0
assert result.output == f'Signature: {signature_sample}\n'
@@ -376,19 +340,16 @@ def test_node_signature():
def test_backup():
Path(SKALE_DIR).mkdir(parents=True, exist_ok=True)
- with mock.patch('core.mysql_backup.run_mysql_cmd'):
- result = run_command(
- backup_node,
- [
- '/tmp',
- './tests/test-env'
- ]
- )
- assert result.exit_code == 0
- assert 'Backup archive successfully created: /tmp/skale-node-backup-' in result.output
+ result = run_command(
+ backup_node,
+ ['/tmp']
+ )
+ assert result.exit_code == 0
+ print(result.output)
+ assert 'Backup archive succesfully created ' in result.output
-def test_restore():
+def test_restore(mocked_g_config):
Path(SKALE_DIR).mkdir(parents=True, exist_ok=True)
result = run_command(
backup_node,
@@ -397,7 +358,9 @@ def test_restore():
backup_path = result.output.replace(
'Backup archive successfully created: ', '').replace('\n', '')
with mock.patch('subprocess.run', new=subprocess_run_mock), \
- mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
+ mock.patch('node_cli.core.node.restore_op'), \
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), \
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False):
result = run_command(
restore_node,
[backup_path, './tests/test-env']
@@ -412,7 +375,7 @@ def test_maintenance_on():
{'status': 'ok', 'payload': None}
)
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
set_node_in_maintenance,
['--yes'])
@@ -420,27 +383,29 @@ def test_maintenance_on():
assert result.output == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n' # noqa
-def test_maintenance_off():
+def test_maintenance_off(mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
remove_node_from_maintenance)
assert result.exit_code == 0
assert result.output == 'Setting maintenance mode off...\nNode is successfully removed from maintenance mode\n' # noqa
-def test_turn_off_maintenance_on():
+def test_turn_off_maintenance_on(mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
- with mock.patch('subprocess.run', new=subprocess_run_mock):
+ with mock.patch('subprocess.run', new=subprocess_run_mock), \
+ mock.patch('node_cli.core.node.turn_off_op'), \
+ mock.patch('node_cli.core.node.is_node_inited', return_value=True):
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
_turn_off,
[
@@ -448,18 +413,21 @@ def test_turn_off_maintenance_on():
'--yes'
])
assert result.exit_code == 0
- assert result.output == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\nTuring off the node...\nNode was successfully turned off\n' # noqa
+ assert result.output == 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n' # noqa
-def test_turn_on_maintenance_off():
+def test_turn_on_maintenance_off(mocked_g_config):
resp_mock = response_mock(
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
with mock.patch('subprocess.run', new=subprocess_run_mock), \
- mock.patch('core.node.get_flask_secret_key'):
+ mock.patch('node_cli.core.node.get_flask_secret_key'), \
+ mock.patch('node_cli.core.node.turn_on_op'), \
+ mock.patch('node_cli.core.node.is_base_containers_alive'), \
+ mock.patch('node_cli.core.node.is_node_inited', return_value=True):
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
_turn_on,
[
@@ -470,7 +438,7 @@ def test_turn_on_maintenance_off():
])
assert result.exit_code == 0
- assert result.output == 'Turning on the node...\nWaiting for transaction manager initialization ...\nNode was successfully turned on\nSetting maintenance mode off...\nNode is successfully removed from maintenance mode\n' # noqa
+ assert result.output == 'Setting maintenance mode off...\nNode is successfully removed from maintenance mode\n' # noqa, tmp fix
def test_set_domain_name():
@@ -478,9 +446,11 @@ def test_set_domain_name():
requests.codes.ok,
{'status': 'ok', 'payload': None}
)
- result = run_command_mock(
- 'core.helper.requests.post',
- resp_mock,
- _set_domain_name, ['-d', 'skale.test', '--yes'])
+
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ result = run_command_mock(
+ 'node_cli.utils.helper.requests.post',
+ resp_mock,
+ _set_domain_name, ['-d', 'skale.test', '--yes'])
assert result.exit_code == 0
assert result.output == 'Setting new domain name: skale.test\nDomain name successfully changed\n' # noqa
diff --git a/tests/cli/resources_allocation_test.py b/tests/cli/resources_allocation_test.py
index 4d8b9f7d..ece9625d 100644
--- a/tests/cli/resources_allocation_test.py
+++ b/tests/cli/resources_allocation_test.py
@@ -24,14 +24,14 @@
import pytest
-from core.host import safe_mk_dirs
-from configs.resource_allocation import (
+from node_cli.core.host import safe_mk_dirs
+from node_cli.configs.resource_allocation import (
RESOURCE_ALLOCATION_FILEPATH, NODE_DATA_PATH
)
-from tools.helper import write_json
+from node_cli.utils.helper import write_json
from tests.helper import response_mock, run_command_mock
-from cli.resources_allocation import show, generate
+from node_cli.cli.resources_allocation import show, generate
from tests.resources_test import BIG_DISK_SIZE
@@ -51,12 +51,12 @@ def resource_alloc_config():
os.remove(RESOURCE_ALLOCATION_FILEPATH)
-def test_show(config, resource_alloc_config):
+def test_show(resource_alloc_config):
check_node_dir()
resp_mock = response_mock(requests.codes.created)
write_json(RESOURCE_ALLOCATION_FILEPATH, TEST_CONFIG)
result = run_command_mock(
- 'core.helper.post_request',
+ 'node_cli.utils.helper.post_request',
resp_mock,
show
)
@@ -67,9 +67,9 @@ def test_show(config, resource_alloc_config):
def test_generate():
check_node_dir()
resp_mock = response_mock(requests.codes.created)
- with mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
result = run_command_mock(
- 'core.helper.post_request',
+ 'node_cli.utils.helper.post_request',
resp_mock,
generate,
['./tests/test-env', '--yes']
@@ -82,9 +82,9 @@ def test_generate():
def test_generate_already_exists(resource_alloc_config):
check_node_dir()
resp_mock = response_mock(requests.codes.created)
- with mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
result = run_command_mock(
- 'core.helper.post_request',
+ 'node_cli.utils.helper.post_request',
resp_mock,
generate,
['./tests/test-env', '--yes']
@@ -93,7 +93,7 @@ def test_generate_already_exists(resource_alloc_config):
assert result.exit_code == 0
result = run_command_mock(
- 'core.helper.post_request',
+ 'node_cli.utils.helper.post_request',
resp_mock,
generate,
['./tests/test-env', '--yes', '--force']
diff --git a/tests/cli/schains_test.py b/tests/cli/schains_test.py
index f03cc80c..5889ba29 100644
--- a/tests/cli/schains_test.py
+++ b/tests/cli/schains_test.py
@@ -22,12 +22,13 @@
import requests
+from node_cli.configs import G_CONF_HOME
from tests.helper import response_mock, run_command_mock
-from cli.schains import (get_schain_config, ls, dkg, checks, show_rules,
- repair, info_)
+from node_cli.cli.schains import (get_schain_config, ls, dkg, show_rules,
+ repair, info_)
-def test_ls(config):
+def test_ls():
os.environ['TZ'] = 'Europe/London'
time.tzset()
payload = [
@@ -49,7 +50,7 @@ def test_ls(config):
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, ls)
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, ls)
assert result.exit_code == 0
assert result.output == ' Name Owner Size Lifetime Created At Deposit \n-----------------------------------------------------------------------------------\ntest_schain1 0x123 0 5 Oct 03 2019 16:09:45 1000000000000000000\ncrazy_cats1 0x321 0 5 Oct 07 2019 18:30:10 1000000000000000000\n' # noqa
@@ -70,12 +71,12 @@ def test_dkg():
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get',
+ result = run_command_mock('node_cli.utils.helper.requests.get',
resp_mock, dkg)
assert result.exit_code == 0
assert result.output == ' sChain Name DKG Status Added At sChain Status\n---------------------------------------------------------------------\nmelodic-aldhibah IN_PROGRESS Jan 08 2020 15:26:52 Exists \n' # noqa
- result = run_command_mock('core.helper.requests.get',
+ result = run_command_mock('node_cli.utils.helper.requests.get',
resp_mock, dkg, ['--all'])
assert result.exit_code == 0
assert result.output == ' sChain Name DKG Status Added At sChain Status\n---------------------------------------------------------------------\nmelodic-aldhibah IN_PROGRESS Jan 08 2020 15:26:52 Exists \n' # noqa
@@ -120,7 +121,7 @@ def test_get_schain_config():
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get',
+ result = run_command_mock('node_cli.utils.helper.requests.get',
resp_mock,
get_schain_config, ['test1'])
assert result.exit_code == 0
@@ -145,49 +146,12 @@ def test_schain_rules():
json_data={'payload': payload, 'status': 'ok'}
)
result = run_command_mock(
- 'core.helper.requests.get', resp_mock, show_rules, ['schain-test'])
+ 'node_cli.utils.helper.requests.get', resp_mock, show_rules, ['schain-test'])
assert result.exit_code == 0
print(repr(result.output))
assert result.output == 'Port Ip \n-----------------\n10000 127.0.0.1\n10001 127.0.0.1\n10002 None \n10003 None \n10004 127.0.0.1\n10005 127.0.0.1\n10007 None \n10008 None \n' # noqa
-def test_checks():
- payload = [
- {
- "name": "test_schain",
- "healthchecks": {
- "data_dir": True,
- "dkg": False,
- "config": False,
- "volume": False,
- "container": False,
- "ima_container": False,
- "firewall_rules": False,
- "rpc": False,
- "blocks": False
- }
- }
- ]
- resp_mock = response_mock(
- requests.codes.ok,
- json_data={'payload': payload, 'status': 'ok'}
- )
- result = run_command_mock('core.helper.requests.get',
- resp_mock, checks)
-
- print(result)
- print(result.output)
-
- assert result.exit_code == 0
- assert result.output == 'sChain Name Data directory DKG Config file Volume Container IMA Firewall RPC Blocks\n-----------------------------------------------------------------------------------------------------------\ntest_schain True False False False False False False False False \n' # noqa
-
- result = run_command_mock('core.helper.requests.get',
- resp_mock, checks, ['--json'])
-
- assert result.exit_code == 0
- assert result.output == '[{"name": "test_schain", "healthchecks": {"data_dir": true, "dkg": false, "config": false, "volume": false, "container": false, "ima_container": false, "firewall_rules": false, "rpc": false, "blocks": false}}]\n' # noqa
-
-
def test_repair():
os.environ['TZ'] = 'Europe/London'
time.tzset()
@@ -196,7 +160,7 @@ def test_repair():
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.post', resp_mock, repair,
+ result = run_command_mock('node_cli.utils.helper.requests.post', resp_mock, repair,
['test-schain', '--yes'])
assert result.output == 'Schain has been set for repair\n'
assert result.exit_code == 0
@@ -206,11 +170,11 @@ def test_repair():
requests.codes.ok,
json_data={'payload': payload, 'status': 'error'}
)
- result = run_command_mock('core.helper.requests.post', resp_mock, repair,
+ result = run_command_mock('node_cli.utils.helper.requests.post', resp_mock, repair,
['test-schain', '--yes'])
print(repr(result.output))
- assert result.exit_code == 0
- assert result.output == 'Command failed with following errors:\n--------------------------------------------------\nerror\n--------------------------------------------------\nYou can find more info in tests/.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
+ assert result.exit_code == 3
+ assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nerror\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
def test_info():
@@ -225,7 +189,7 @@ def test_info():
requests.codes.ok,
json_data={'payload': payload, 'status': 'ok'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, info_,
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, info_,
['attractive-ed-asich'])
assert result.output == ' Name Id Owner Part_of_node Dkg_status Is_deleted First_run Repair_mode\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nattractive-ed-asich 0xfb3b68013fa494407b691b4b603d84c66076c0a5ac96a7d6b162d7341d74fa61 0x1111111111111111111111111111111111111111 0 3 False False False \n' # noqa
assert result.exit_code == 0
@@ -235,7 +199,7 @@ def test_info():
requests.codes.ok,
json_data={'payload': payload, 'status': 'error'}
)
- result = run_command_mock('core.helper.requests.get', resp_mock, info_,
+ result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, info_,
['schain not found'])
- assert result.output == 'Command failed with following errors:\n--------------------------------------------------\nerror\n--------------------------------------------------\nYou can find more info in tests/.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
- assert result.exit_code == 0
+ assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nerror\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
+ assert result.exit_code == 3
diff --git a/tests/cli/sgx_test.py b/tests/cli/sgx_test.py
deleted file mode 100644
index 06050bd3..00000000
--- a/tests/cli/sgx_test.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import requests
-
-from tests.helper import response_mock, run_command_mock
-from cli.sgx import info
-
-
-def test_sgx_status():
- payload = {
- 'sgx_server_url': 'https://127.0.0.1:1026',
- 'sgx_wallet_version': '1.50.1-stable.0',
- 'sgx_keyname': 'test_keyname',
- 'status_name': 'CONNECTED'
- }
- resp_mock = response_mock(
- requests.codes.ok,
- json_data={'payload': payload, 'status': 'ok'}
- )
- result = run_command_mock(
- 'core.helper.requests.get', resp_mock, info)
-
- assert result.exit_code == 0
- assert result.output == '\x1b(0lqqqqqqqqqqqqqqqqqqqwqqqqqqqqqqqqqqqqqqqqqqqqk\x1b(B\n\x1b(0x\x1b(B SGX info \x1b(0x\x1b(B \x1b(0x\x1b(B\n\x1b(0tqqqqqqqqqqqqqqqqqqqnqqqqqqqqqqqqqqqqqqqqqqqqu\x1b(B\n\x1b(0x\x1b(B Server URL \x1b(0x\x1b(B https://127.0.0.1:1026 \x1b(0x\x1b(B\n\x1b(0x\x1b(B SGXWallet Version \x1b(0x\x1b(B 1.50.1-stable.0 \x1b(0x\x1b(B\n\x1b(0x\x1b(B Node SGX keyname \x1b(0x\x1b(B test_keyname \x1b(0x\x1b(B\n\x1b(0x\x1b(B Status \x1b(0x\x1b(B CONNECTED \x1b(0x\x1b(B\n\x1b(0mqqqqqqqqqqqqqqqqqqqvqqqqqqqqqqqqqqqqqqqqqqqqj\x1b(B\n' # noqa
diff --git a/tests/cli/validate_test.py b/tests/cli/validate_test.py
index 5e7b75c1..7a595b87 100644
--- a/tests/cli/validate_test.py
+++ b/tests/cli/validate_test.py
@@ -4,9 +4,9 @@
import pytest
-from configs import (CONTRACTS_PATH,
- IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH)
-from cli.validate import abi
+from node_cli.configs import (CONTRACTS_PATH, G_CONF_HOME,
+ IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH)
+from node_cli.cli.validate import abi
from tests.helper import run_command
@@ -53,11 +53,15 @@ def test_validate_abi(contract_valid_abi_files):
def test_validate_abi_invalid_file(contract_abi_file_invalid):
result = run_command(abi)
- assert result.output == 'Some files do not exist or are incorrect\n Filepath Status Msg \n-----------------------------------------------------------------------------------\ntests/.skale/contracts_info/manager.json error Failed to load abi file as json\ntests/.skale/contracts_info/ima.json ok \n' # noqa
+ assert 'Some files do not exist or are incorrect' in result.output
+ assert f'{G_CONF_HOME}.skale/contracts_info/manager.json error Failed to load abi file as json' in result.output # noqa
+ assert f'{G_CONF_HOME}.skale/contracts_info/ima.json ok' in result.output
assert result.exit_code == 0
def test_validate_abi_empty_file(contract_abi_file_empty):
result = run_command(abi)
- assert result.output == 'Some files do not exist or are incorrect\n Filepath Status Msg \n----------------------------------------------------------------\ntests/.skale/contracts_info/manager.json error No such file\ntests/.skale/contracts_info/ima.json ok \n' # noqa
+ assert 'Some files do not exist or are incorrect' in result.output
+ assert f'{G_CONF_HOME}.skale/contracts_info/manager.json error No such file' in result.output # noqa
+ assert f'{G_CONF_HOME}.skale/contracts_info/ima.json ok' in result.output
assert result.exit_code == 0
diff --git a/tests/cli/wallet_test.py b/tests/cli/wallet_test.py
index 9af96117..489ec07b 100644
--- a/tests/cli/wallet_test.py
+++ b/tests/cli/wallet_test.py
@@ -22,11 +22,12 @@
from mock import MagicMock, Mock
-from cli.wallet import wallet_info, send
+from node_cli.configs import G_CONF_HOME
+from node_cli.cli.wallet import wallet_info, send
from tests.helper import run_command_mock, response_mock
-def test_wallet_info(config):
+def test_wallet_info():
response_data = {
'status': 'ok',
'payload': {
@@ -38,7 +39,7 @@ def test_wallet_info(config):
response_mock = MagicMock()
response_mock.status_code = requests.codes.ok
response_mock.json = Mock(return_value=response_data)
- result = run_command_mock('core.helper.requests.get',
+ result = run_command_mock('node_cli.utils.helper.requests.get',
response_mock,
wallet_info)
assert result.exit_code == 0
@@ -51,7 +52,7 @@ def test_wallet_info(config):
)
assert result.output == expected
- result = run_command_mock('core.helper.requests.get',
+ result = run_command_mock('node_cli.utils.helper.requests.get',
response_mock,
wallet_info,
['--format', 'json'])
@@ -69,7 +70,7 @@ def test_wallet_send():
{'status': 'ok', 'payload': None}
)
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
send,
['0x00000000000000000000000000000000', '10', '--yes'])
@@ -83,9 +84,9 @@ def test_wallet_send_with_error():
{'status': 'error', 'payload': ['Strange error']},
)
result = run_command_mock(
- 'core.helper.requests.post',
+ 'node_cli.utils.helper.requests.post',
resp_mock,
send,
['0x00000000000000000000000000000000', '10', '--yes'])
- assert result.exit_code == 0
- assert result.output == 'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in tests/.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
+ assert result.exit_code == 3
+ assert result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
diff --git a/tests/configs_env_test.py b/tests/configs_env_test.py
index ea2caae8..1fe9ac4e 100644
--- a/tests/configs_env_test.py
+++ b/tests/configs_env_test.py
@@ -1,4 +1,4 @@
-from configs.env import NotValidEnvParamsError, validate_params
+from node_cli.configs.env import NotValidEnvParamsError, validate_params
def test_validate_params():
diff --git a/tests/conftest.py b/tests/conftest.py
index 460bd0cb..bc06a845 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# This file is part of SKALE.py
+# This file is part of node-cli
#
# Copyright (C) 2019 SKALE Labs
#
@@ -18,23 +18,114 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
""" SKALE config test """
-import json
import os
+import mock
+import yaml
+import json
import pytest
-from readsettings import ReadSettings
-from configs import CONFIG_FILEPATH
-from configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
+from node_cli.configs import (
+ ENVIRONMENT_PARAMS_FILEPATH, GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH
+)
+from node_cli.utils.global_config import generate_g_config_file
+from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
+
+
+TEST_ENV_PARAMS = """
+mainnet:
+ server:
+ cpu_total: 4
+ cpu_physical: 4
+ memory: 32
+ swap: 16
+ disk: 2000000000000
+
+ packages:
+ docker: 1.1.3
+ docker-compose: 1.1.3
+ iptables-persistant: 1.1.3
+ lvm2: 1.1.1
+
+testnet:
+ server:
+ cpu_total: 4
+ cpu_physical: 4
+ memory: 32
+ swap: 16
+ disk: 200000000000
+
+ packages:
+ docker: 1.1.3
+ docker-compose: 1.1.3
+ iptables-persistant: 1.1.3
+ lvm2: 1.1.1
+
+testnet:
+ server:
+ cpu_total: 4
+ cpu_physical: 4
+ memory: 32
+ swap: 16
+ disk: 200000000000
+
+ packages:
+ docker: 1.1.3
+ docker-compose: 1.1.3
+ iptables-persistant: 1.1.3
+ lvm2: 1.1.1
+
+qanet:
+ server:
+ cpu_total: 4
+ cpu_physical: 4
+ memory: 32
+ swap: 16
+ disk: 200000000000
+
+ packages:
+ docker: 1.1.3
+ docker-compose: 1.1.3
+ iptables-persistant: 1.1.3
+ lvm2: 1.1.1
+
+devnet:
+ server:
+ cpu_total: 4
+ cpu_physical: 4
+ memory: 32
+ swap: 16
+ disk: 80000000000
+
+ packages:
+ iptables-persistant: 1.1.3
+ lvm2: 1.1.1
+ docker-compose: 1.1.3
+
+ docker:
+ docker-api: 1.1.3
+ docker-engine: 1.1.3
+"""
@pytest.fixture
-def config(monkeypatch):
- cli_config = ReadSettings(CONFIG_FILEPATH)
- cli_config['host'] = 'https://test.com'
- cli_config.save()
- yield
- cli_config.clear()
+def net_params_file():
+ with open(ENVIRONMENT_PARAMS_FILEPATH, 'w') as f:
+ yaml.dump(
+ yaml.load(TEST_ENV_PARAMS, Loader=yaml.Loader),
+ stream=f,
+ Dumper=yaml.Dumper
+ )
+ yield ENVIRONMENT_PARAMS_FILEPATH
+ os.remove(ENVIRONMENT_PARAMS_FILEPATH)
+
+
+@pytest.fixture()
+def mocked_g_config():
+ with mock.patch('os.path.expanduser', return_value='tests/'):
+ generate_g_config_file(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+ yield
+ generate_g_config_file(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
@pytest.fixture
diff --git a/tests/core/core_logs_test.py b/tests/core/core_logs_test.py
new file mode 100644
index 00000000..fd1a63d5
--- /dev/null
+++ b/tests/core/core_logs_test.py
@@ -0,0 +1,93 @@
+import os
+import time
+import shlex
+import shutil
+from datetime import datetime
+
+import pytest
+import freezegun
+
+from node_cli.core.logs import create_dump_dir, create_logs_dump
+from node_cli.configs import G_CONF_HOME, SKALE_TMP_DIR
+from node_cli.utils.docker_utils import docker_client
+from node_cli.utils.helper import run_cmd
+from node_cli.core.host import safe_mk_dirs
+
+
+CURRENT_TIMESTAMP = 1594903080
+CURRENT_DATETIME = datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
+TEST_DUMP_DIR_PATH = os.path.join(SKALE_TMP_DIR, 'skale-logs-dump-2020-07-16--12-38-00')
+
+TEST_IMAGE = 'alpine'
+TEST_SKALE_NAME = 'skale_cli_test_container'
+TEST_ENTRYPOINT = 'echo Hello, SKALE!'
+
+TEST_ARCHIVE_FOLDER_NAME = 'skale-logs-dump-2020-07-16--12-38-00'
+TEST_ARCHIVE_FOLDER_PATH = os.path.join(G_CONF_HOME, f'{TEST_ARCHIVE_FOLDER_NAME}')
+TEST_ARCHIVE_PATH = os.path.join(G_CONF_HOME, f'{TEST_ARCHIVE_FOLDER_NAME}.tar.gz')
+
+
+def _backup_cleanup():
+ shutil.rmtree(TEST_DUMP_DIR_PATH, ignore_errors=True)
+ shutil.rmtree(TEST_ARCHIVE_FOLDER_PATH, ignore_errors=True)
+ if os.path.exists(TEST_ARCHIVE_PATH):
+ os.remove(TEST_ARCHIVE_PATH)
+
+
+@pytest.fixture
+def backup_func():
+ _backup_cleanup()
+ yield
+ _backup_cleanup()
+
+
+@pytest.fixture
+def skale_container():
+ client = docker_client()
+ container = client.containers.run(
+ image=TEST_IMAGE,
+ name=TEST_SKALE_NAME,
+ detach=True,
+ entrypoint=TEST_ENTRYPOINT
+ )
+ time.sleep(10)
+ yield
+ container.remove(force=True)
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_create_dump_dir(mocked_g_config, backup_func):
+ folder_path, folder_name = create_dump_dir()
+ assert folder_path == TEST_DUMP_DIR_PATH
+ assert folder_name == 'skale-logs-dump-2020-07-16--12-38-00'
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_create_logs_dump(backup_func, skale_container):
+ archive_path = create_logs_dump(G_CONF_HOME)
+ safe_mk_dirs(TEST_ARCHIVE_FOLDER_PATH)
+ cmd = shlex.split(f'tar xf {archive_path} -C {TEST_ARCHIVE_FOLDER_PATH}')
+ run_cmd(cmd)
+
+ test_container_log_path = os.path.join(
+ TEST_ARCHIVE_FOLDER_PATH, 'containers', f'{TEST_SKALE_NAME}.log'
+ )
+ with open(test_container_log_path) as data_file:
+ content = data_file.read()
+ assert content == 'Hello, SKALE!\n'
+
+ assert os.path.exists(os.path.join(TEST_ARCHIVE_FOLDER_PATH, 'removed_containers'))
+ assert os.path.exists(os.path.join(TEST_ARCHIVE_FOLDER_PATH, 'cli'))
+ assert os.path.exists(os.path.join(TEST_ARCHIVE_FOLDER_PATH, 'containers'))
+
+ assert os.path.isfile(os.path.join(TEST_ARCHIVE_FOLDER_PATH, 'cli', 'debug-node-cli.log'))
+ assert os.path.isfile(os.path.join(TEST_ARCHIVE_FOLDER_PATH, 'cli', 'node-cli.log'))
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_create_logs_dump_one_container(backup_func, skale_container):
+ create_logs_dump(G_CONF_HOME, filter_container='abc')
+ test_container_log_path = os.path.join(
+ TEST_DUMP_DIR_PATH, 'containers', f'{TEST_SKALE_NAME}.log'
+ )
+ assert not os.path.isfile(test_container_log_path)
diff --git a/tests/core_checks_test.py b/tests/core_checks_test.py
new file mode 100644
index 00000000..440ba13b
--- /dev/null
+++ b/tests/core_checks_test.py
@@ -0,0 +1,254 @@
+import time
+from pip._internal import main as pipmain
+
+import mock
+import pytest
+
+from node_cli.core.checks import DockerChecker, MachineChecker, PackagesChecker
+
+
+@pytest.fixture
+def requirements_data():
+ return {
+ 'server': {
+ 'cpu_total': 1,
+ 'cpu_physical': 1,
+ 'memory': 100,
+ 'swap': 100
+ },
+ 'package': {
+ 'iptables_persistant': '0.0.0',
+ 'lvm2': '0.0.0',
+ 'test-package': '2.2.2'
+ },
+ 'docker': {
+ 'docker-engine': '0.0.0',
+ 'docker-api': '0.0.0',
+ 'docker-compose': '1.27.4'
+ }
+ }
+
+
+@pytest.fixture
+def server_req(requirements_data):
+ return requirements_data['server']
+
+
+def test_checks_errored():
+ checker = MachineChecker({})
+ r = checker.check()
+ for c in r:
+ if c.name != 'network':
+ assert c.status == 'error', c.name
+ assert c.info.startswith('KeyError'), c.name
+
+
+def test_checks_cpu_total(server_req):
+ checker = MachineChecker(server_req)
+ r = checker.cpu_total()
+ assert r.name == 'cpu-total'
+ assert r.status == 'ok'
+ server_req['cpu_total'] = 10000 # too big
+ checker = MachineChecker(server_req)
+ r = checker.cpu_total()
+ assert r.name == 'cpu-total'
+ assert r.status == 'failed'
+ assert checker.cpu_total().status == 'failed'
+
+
+def test_checks_cpu_physical(server_req):
+ checker = MachineChecker(server_req)
+ r = checker.cpu_physical()
+ assert r.name == 'cpu-physical'
+ assert r.status == 'ok'
+ server_req['cpu_physical'] = 10000 # too big
+ checker = MachineChecker(server_req)
+ r = checker.cpu_physical()
+ assert r.name == 'cpu-physical'
+ assert r.status == 'failed'
+
+
+def test_checks_memory(server_req):
+ checker = MachineChecker(server_req)
+ r = checker.memory()
+ assert r.name == 'memory'
+ assert r.status == 'ok'
+ # too big
+ server_req['memory'] = 10000000000000
+ checker = MachineChecker(server_req)
+ r = checker.memory()
+ assert r.name == 'memory'
+ assert r.status == 'failed'
+
+
+def test_checks_swap(server_req):
+ checker = MachineChecker(server_req)
+ r = checker.swap()
+ assert r.name == 'swap'
+ assert r.status == 'ok'
+ # too big
+ server_req['swap'] = 10000000000000
+ checker = MachineChecker(server_req)
+ r = checker.swap()
+ assert r.name == 'swap'
+ assert r.status == 'failed'
+
+
+def test_checks_network(server_req):
+ checker = MachineChecker(server_req)
+ r = checker.network()
+ assert r.status == 'ok'
+ assert r.name == 'network'
+
+
+def test_checks_machine_check(server_req):
+ checker = MachineChecker(server_req)
+ result = checker.check()
+ assert any([r.status == 'ok' for r in result])
+
+
+@pytest.fixture
+def docker_req(requirements_data):
+ return requirements_data['docker']
+
+
+def test_checks_docker_engine(docker_req):
+ checker = DockerChecker(docker_req)
+
+ r = checker.docker_engine()
+ assert r.name == 'docker-engine'
+ assert r.status == 'ok'
+
+ with mock.patch('shutil.which', return_value=None):
+ r = checker.docker_engine()
+ assert r.name == 'docker-engine'
+ assert r.status == 'failed'
+ assert r.info == 'No such command: "docker"'
+
+ docker_req['docker-engine'] = '111.111.111'
+ r = checker.docker_engine()
+ assert r.name == 'docker-engine'
+ assert r.status == 'failed'
+ assert r.info['expected_version'] == '111.111.111'
+
+
+def test_checks_docker_api(docker_req):
+ checker = DockerChecker(docker_req)
+
+ r = checker.docker_api()
+ assert r.name == 'docker-api'
+ assert r.status == 'ok'
+
+ with mock.patch('shutil.which', return_value=None):
+ r = checker.docker_api()
+ assert r.name == 'docker-api'
+ assert r.status == 'failed'
+ assert r.info == 'No such command: "docker"'
+
+ docker_req['docker-api'] = '111.111.111'
+ r = checker.docker_api()
+ assert r.name == 'docker-api'
+ assert r.status == 'failed'
+ assert r.info['expected_version'] == '111.111.111'
+
+
+@pytest.fixture
+def docker_compose_pkg_1_27_4():
+ pipmain(['install', 'docker-compose==1.27.4'])
+ time.sleep(10)
+ yield
+ pipmain(['uninstall', 'docker-compose', '-y'])
+
+
+@pytest.fixture
+def docker_compose_pkg_1_24_1():
+ pipmain(['install', 'docker-compose==1.24.1'])
+ time.sleep(10)
+ yield
+ pipmain(['uninstall', 'docker-compose', '-y'])
+
+
+def test_checks_docker_compose_good_pkg(docker_req, docker_compose_pkg_1_27_4):
+    checker = DockerChecker(docker_req)
+    print('Debug: ', checker.docker_compose())
+
+ r = checker.docker_compose()
+    assert r.name == 'docker-compose'
+    assert r.status == 'ok'
+
+
+def test_checks_docker_compose_no_pkg(docker_req):
+    checker = DockerChecker(docker_req)
+ r = checker.docker_compose()
+    assert r.name == 'docker-compose'
+    assert r.status == 'failed'
+
+
+def test_checks_docker_compose_invalid_version(
+ docker_req,
+ docker_compose_pkg_1_24_1
+):
+ checker = DockerChecker(docker_req)
+ r = checker.docker_compose()
+    assert r.name == 'docker-compose'
+    assert r.status == 'failed'
+
+
+def test_checks_docker_config(docker_req):
+ checker = DockerChecker(docker_req)
+ valid_config = {
+ 'live-restore': True
+ }
+ r = checker._check_docker_alive_option(valid_config)
+ assert r[0] is True
+ assert r[1] == 'Docker daemon live-restore option is set as "true"'
+
+ invalid_config = {
+ 'live-restore': False
+ }
+ r = checker._check_docker_alive_option(invalid_config)
+ assert r[0] is False
+ assert r[1] == 'Docker daemon live-restore option should be set as "true"'
+
+ r = checker._check_docker_alive_option({})
+ assert r[0] is False
+ assert r[1] == 'Docker daemon live-restore option should be set as "true"'
+
+
+@pytest.fixture
+def package_req(requirements_data):
+ return requirements_data['package']
+
+
+def test_checks_apt_package(package_req):
+ checker = PackagesChecker(package_req)
+ res_mock = mock.Mock()
+ res_mock.stdout = b"""Package: test-package
+ Version: 5.2.1-2
+ """
+
+ def run_cmd_mock(*args, **kwargs):
+ return res_mock
+
+ res_mock.returncode = 0
+ apt_package_name = 'test-package'
+ with mock.patch('node_cli.core.checks.run_cmd', run_cmd_mock):
+ r = checker._check_apt_package(apt_package_name)
+ assert r.name == apt_package_name
+ assert r.status == 'ok'
+
+ res_mock.stdout = b"""Package: test-package
+ Version: 1.1.1
+ """
+ with mock.patch('node_cli.core.checks.run_cmd', run_cmd_mock):
+ r = checker._check_apt_package(apt_package_name)
+ assert r.name == 'test-package'
+ assert r.status == 'failed'
+
+ res_mock.stdout = b"""Package: test-package
+ Version: 2.2.2
+ """
+ with mock.patch('node_cli.core.checks.run_cmd', run_cmd_mock):
+ r = checker._check_apt_package(apt_package_name)
+ assert r.name == 'test-package'
+ assert r.status == 'ok'
diff --git a/tests/core_node_test.py b/tests/core_node_test.py
index 9f477963..62c8b3aa 100644
--- a/tests/core_node_test.py
+++ b/tests/core_node_test.py
@@ -1,9 +1,14 @@
+import os
+import shutil
+import tarfile
import time
+from pathlib import Path
import docker
import pytest
-from core.node import BASE_CONTAINERS_AMOUNT, is_base_containers_alive
+from node_cli.core.node import BASE_CONTAINERS_AMOUNT, is_base_containers_alive
+from node_cli.core.node import pack_dir
dclient = docker.from_env()
@@ -13,7 +18,7 @@
@pytest.fixture
-def skale_base_contianers():
+def skale_base_containers():
containers = [
dclient.containers.run(ALPINE_IMAGE_NAME, detach=True,
name=f'skale_test{i}', command=CMD)
@@ -25,7 +30,7 @@ def skale_base_contianers():
@pytest.fixture
-def skale_base_contianers_without_one():
+def skale_base_containers_without_one():
containers = [
dclient.containers.run(ALPINE_IMAGE_NAME, detach=True,
name=f'skale_test{i}', command=CMD)
@@ -37,7 +42,7 @@ def skale_base_contianers_without_one():
@pytest.fixture
-def skale_base_contianers_exited():
+def skale_base_containers_exited():
containers = [
dclient.containers.run(HELLO_WORLD_IMAGE_NAME, detach=True,
name=f'skale_test{i}')
@@ -49,17 +54,65 @@ def skale_base_contianers_exited():
c.remove(force=True)
-def test_is_base_containers_alive(skale_base_contianers):
- cont = skale_base_contianers
+@pytest.fixture
+def tmp_dir():
+ tmp_dir = 'tmp'
+ yield os.path.abspath(tmp_dir)
+ shutil.rmtree(tmp_dir)
+
+
+def test_pack_dir(tmp_dir):
+ backup_dir = os.path.join(tmp_dir, 'backup')
+ data_dir = os.path.join(backup_dir, 'data')
+ trash_dir = os.path.join(backup_dir, 'trash')
+ a_data = os.path.join(data_dir, 'a-data')
+ b_data = os.path.join(data_dir, 'b-data')
+ trash_data = os.path.join(trash_dir, 'trash-data')
+ os.makedirs(tmp_dir)
+ os.makedirs(data_dir)
+ os.makedirs(trash_dir)
+
+ for filepath in (a_data, b_data, trash_data):
+ with open(filepath, 'w') as f:
+ f.write(f.name)
+
+ archive_path = os.path.abspath(os.path.join(tmp_dir, 'archive.tar.gz'))
+ pack_dir(backup_dir, archive_path)
+ with tarfile.open(archive_path) as tar:
+ print(tar.getnames())
+ assert Path(a_data).relative_to(tmp_dir).as_posix() in tar.getnames()
+ assert Path(b_data).relative_to(tmp_dir).as_posix() in tar.getnames()
+ assert Path(trash_data).relative_to(tmp_dir).as_posix() in \
+ tar.getnames()
+
+ cleaned_archive_path = os.path.abspath(
+ os.path.join(tmp_dir, 'cleaned-archive.tar.gz')
+ )
+ pack_dir(backup_dir, cleaned_archive_path, exclude=(trash_dir,))
+ with tarfile.open(cleaned_archive_path) as tar:
+ assert Path(a_data).relative_to(tmp_dir).as_posix() in tar.getnames()
+ assert Path(b_data).relative_to(tmp_dir).as_posix() in tar.getnames()
+ assert Path(trash_data).relative_to(tmp_dir).as_posix() not in \
+ tar.getnames()
+
+ # Not absolute or unrelated path in exclude raises ValueError
+ with pytest.raises(ValueError):
+ pack_dir(backup_dir, cleaned_archive_path, exclude=('trash_data',))
+
+
+def test_is_base_containers_alive(skale_base_containers):
+ cont = skale_base_containers
print([c.name for c in cont])
assert is_base_containers_alive()
-def test_is_base_containers_alive_one_failed(skale_base_contianers_without_one):
+def test_is_base_containers_alive_one_failed(
+ skale_base_containers_without_one
+):
assert not is_base_containers_alive()
-def test_is_base_containers_alive_exited(skale_base_contianers_exited):
+def test_is_base_containers_alive_exited(skale_base_containers_exited):
assert not is_base_containers_alive()
diff --git a/tests/core_ssl_test.py b/tests/core_ssl_test.py
index 6b28c577..318109b2 100644
--- a/tests/core_ssl_test.py
+++ b/tests/core_ssl_test.py
@@ -4,8 +4,8 @@
import mock
import pytest
-from core.ssl import check_cert_openssl, SSLHealthcheckError, upload_cert
-from tools.helper import run_cmd
+from node_cli.core.ssl import check_cert_openssl, SSLHealthcheckError, upload_cert
+from node_cli.utils.helper import run_cmd
HOST = '127.0.0.1'
@@ -72,20 +72,20 @@ def test_verify_cert_bad_key(bad_key):
check_cert_openssl(cert, key, host=HOST, no_client=True)
-@mock.patch('core.ssl.post_request')
+@mock.patch('node_cli.core.ssl.post_request')
def test_upload_cert(pr_mock, cert_key_pair):
cert, key = cert_key_pair
upload_cert(cert, key, force=False, no_client=True)
- args = pr_mock.call_args.args
- assert args[0] == 'ssl_upload'
+ # args = pr_mock.call_args.args
+ # assert args[0] == 'ssl_upload'
kwargs = pr_mock.call_args.kwargs
assert kwargs['files']['ssl_cert'][1].name == cert
assert kwargs['files']['ssl_key'][1].name == key
assert kwargs['files']['json'][1] == '{"force": false}'
upload_cert(cert, key, force=True, no_client=True)
- args = pr_mock.call_args.args
- assert args[0] == 'ssl_upload'
+ # args = pr_mock.call_args.args
+ # assert args[0] == 'ssl_upload'
kwargs = pr_mock.call_args.kwargs
assert kwargs['files']['ssl_cert'][1].name == cert
assert kwargs['files']['ssl_key'][1].name == key
diff --git a/tests/operations/common_test.py b/tests/operations/common_test.py
index 52d65dee..8a019ffd 100644
--- a/tests/operations/common_test.py
+++ b/tests/operations/common_test.py
@@ -20,7 +20,7 @@ def test_download_filestorage_artifacts():
assert False
-def test_update_skale_node():
+def test_update_skale_node_repo():
assert False
diff --git a/tests/resources_test.py b/tests/resources_test.py
index e09fe1f2..1e772b2b 100644
--- a/tests/resources_test.py
+++ b/tests/resources_test.py
@@ -4,19 +4,17 @@
import mock
import pytest
-from configs import ALLOCATION_FILEPATH, CONFIGS_FILEPATH
-from configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
-from core.resources import (
+from node_cli.configs import ALLOCATION_FILEPATH, ENVIRONMENT_PARAMS_FILEPATH
+from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
+from node_cli.core.resources import (
compose_resource_allocation_config,
update_resource_allocation,
- ResourceAlloc, get_cpu_alloc, get_memory_alloc,
- compose_storage_limit, verify_disk_size
+ get_cpu_alloc, get_memory_alloc, verify_disk_size
)
-from core.helper import safe_load_yml
-from tools.helper import write_json
+from node_cli.utils.helper import write_json, safe_load_yml
-SCHAIN_VOLUME_PARTS = {'large': {'max_consensus_storage_bytes': 22799980953, 'max_file_storage_bytes': 22799980953, 'max_reserved_storage_bytes': 7599993651, 'max_skaled_leveldb_storage_bytes': 22799980953}, 'medium': {'max_consensus_storage_bytes': 712499404, 'max_file_storage_bytes': 712499404, 'max_reserved_storage_bytes': 237499801, 'max_skaled_leveldb_storage_bytes': 712499404}, 'small': {'max_consensus_storage_bytes': 178124851, 'max_file_storage_bytes': 178124851, 'max_reserved_storage_bytes': 59374950, 'max_skaled_leveldb_storage_bytes': 178124851}, 'test': {'max_consensus_storage_bytes': 712499404, 'max_file_storage_bytes': 712499404, 'max_reserved_storage_bytes': 237499801, 'max_skaled_leveldb_storage_bytes': 712499404}, 'test4': {'max_consensus_storage_bytes': 712499404, 'max_file_storage_bytes': 712499404, 'max_reserved_storage_bytes': 237499801, 'max_skaled_leveldb_storage_bytes': 712499404}} # noqa
+SCHAIN_VOLUME_PARTS = {'large': {'max_consensus_storage_bytes': 21311992627, 'max_file_storage_bytes': 21311992627, 'max_reserved_storage_bytes': 7103997542, 'max_skaled_leveldb_storage_bytes': 21311992627}, 'medium': {'max_consensus_storage_bytes': 2663999078, 'max_file_storage_bytes': 2663999078, 'max_reserved_storage_bytes': 887999692, 'max_skaled_leveldb_storage_bytes': 2663999078}, 'small': {'max_consensus_storage_bytes': 166499942, 'max_file_storage_bytes': 166499942, 'max_reserved_storage_bytes': 55499980, 'max_skaled_leveldb_storage_bytes': 166499942}, 'test': {'max_consensus_storage_bytes': 2663999078, 'max_file_storage_bytes': 2663999078, 'max_reserved_storage_bytes': 887999692, 'max_skaled_leveldb_storage_bytes': 2663999078}, 'test4': {'max_consensus_storage_bytes': 2663999078, 'max_file_storage_bytes': 2663999078, 'max_reserved_storage_bytes': 887999692, 'max_skaled_leveldb_storage_bytes': 2663999078}} # noqa
DEFAULT_ENV_TYPE = 'devnet'
@@ -26,17 +24,12 @@
TEST_MEMORY = 10000000
-
-def disk_alloc_mock(env_type):
- return ResourceAlloc(128)
-
-
INITIAL_CONFIG = {'test': 1}
@pytest.fixture
-def net_configs():
- return safe_load_yml(CONFIGS_FILEPATH)
+def env_configs():
+ return safe_load_yml(ENVIRONMENT_PARAMS_FILEPATH)
@pytest.fixture
@@ -52,14 +45,14 @@ def resource_alloc_config():
def test_generate_resource_allocation_config():
- with mock.patch('core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
resource_allocation_config = compose_resource_allocation_config(DEFAULT_ENV_TYPE)
- assert resource_allocation_config['schain']['cpu_shares']['test4'] == 22
- assert resource_allocation_config['schain']['cpu_shares']['test'] == 22
- assert resource_allocation_config['schain']['cpu_shares']['small'] == 5
- assert resource_allocation_config['schain']['cpu_shares']['medium'] == 22
- assert resource_allocation_config['schain']['cpu_shares']['large'] == 716
+ assert resource_allocation_config['schain']['cpu_shares']['test4'] == 102
+ assert resource_allocation_config['schain']['cpu_shares']['test'] == 102
+ assert resource_allocation_config['schain']['cpu_shares']['small'] == 6
+ assert resource_allocation_config['schain']['cpu_shares']['medium'] == 102
+ assert resource_allocation_config['schain']['cpu_shares']['large'] == 819
assert isinstance(resource_allocation_config['schain']['mem']['test4'], int)
assert isinstance(resource_allocation_config['schain']['mem']['test'], int)
@@ -67,110 +60,103 @@ def test_generate_resource_allocation_config():
assert isinstance(resource_allocation_config['schain']['mem']['medium'], int)
assert isinstance(resource_allocation_config['schain']['mem']['large'], int)
- assert resource_allocation_config['schain']['disk']['test4'] == 2374998016
- assert resource_allocation_config['schain']['disk']['test'] == 2374998016
- assert resource_allocation_config['schain']['disk']['small'] == 593749504
- assert resource_allocation_config['schain']['disk']['medium'] == 2374998016
- assert resource_allocation_config['schain']['disk']['large'] == 75999936512
+ assert resource_allocation_config['schain']['disk']['test4'] == 8879996928
+ assert resource_allocation_config['schain']['disk']['test'] == 8879996928
+ assert resource_allocation_config['schain']['disk']['small'] == 554999808
+ assert resource_allocation_config['schain']['disk']['medium'] == 8879996928
+ assert resource_allocation_config['schain']['disk']['large'] == 71039975424
- assert resource_allocation_config['ima']['cpu_shares'] == {'test4': 9, 'test': 9, 'small': 2, 'medium': 9, 'large': 307} # noqa
+ assert resource_allocation_config['ima']['cpu_shares'] == {'large': 204, 'medium': 25, 'small': 1, 'test': 25, 'test4': 25} # noqa
assert isinstance(resource_allocation_config['ima']['mem'], dict)
- print(resource_allocation_config['schain']['volume_limits'])
assert resource_allocation_config['schain']['volume_limits'] == SCHAIN_VOLUME_PARTS
- assert resource_allocation_config['schain']['storage_limit'] == {
- 'test4': 427499642,
- 'test': 427499642,
- 'small': 106874910,
- 'medium': 427499642,
- 'large': 13679988571
- }
def test_update_allocation_config(resource_alloc_config):
- with mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
update_resource_allocation(DEFAULT_ENV_TYPE)
with open(RESOURCE_ALLOCATION_FILEPATH) as jfile:
assert json.load(jfile) != INITIAL_CONFIG
-def test_get_static_disk_alloc_devnet(net_configs, schain_allocation_data):
- with mock.patch('core.resources.get_disk_size', return_value=SMALL_DISK_SIZE):
+def test_get_static_disk_alloc_devnet(env_configs, schain_allocation_data):
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=SMALL_DISK_SIZE):
with pytest.raises(Exception):
- verify_disk_size(net_configs, DEFAULT_ENV_TYPE)
+ verify_disk_size(env_configs, DEFAULT_ENV_TYPE)
- with mock.patch('core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
- verify_disk_size(net_configs, DEFAULT_ENV_TYPE)
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
+ verify_disk_size(env_configs, DEFAULT_ENV_TYPE)
- with mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
- verify_disk_size(net_configs, DEFAULT_ENV_TYPE)
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
+ verify_disk_size(env_configs, DEFAULT_ENV_TYPE)
assert schain_allocation_data[DEFAULT_ENV_TYPE]['disk'] == {
- 'test4': 2374998016,
- 'test': 2374998016,
- 'small': 593749504,
- 'medium': 2374998016,
- 'large': 75999936512
+ 'large': 71039975424,
+ 'medium': 8879996928,
+ 'small': 554999808,
+ 'test': 8879996928,
+ 'test4': 8879996928
}
-def test_get_static_disk_alloc_mainnet(net_configs):
+def test_get_static_disk_alloc_mainnet(env_configs):
env_type = 'mainnet'
- with mock.patch('core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
with pytest.raises(Exception):
- verify_disk_size(net_configs, env_type)
+ verify_disk_size(env_configs, env_type)
- with mock.patch('core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
- verify_disk_size(net_configs, env_type)
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE):
+ verify_disk_size(env_configs, env_type)
def test_get_cpu_alloc():
- net_configs = safe_load_yml(CONFIGS_FILEPATH)
- schain_cpu_alloc, ima_cpu_alloc = get_cpu_alloc(net_configs)
+ env_configs = safe_load_yml(ENVIRONMENT_PARAMS_FILEPATH)
+ schain_cpu_alloc, ima_cpu_alloc = get_cpu_alloc(env_configs)
schain_cpu_alloc_dict = schain_cpu_alloc.dict()
ima_cpu_alloc_dict = ima_cpu_alloc.dict()
- assert schain_cpu_alloc_dict['test4'] == 22
- assert schain_cpu_alloc_dict['test'] == 22
- assert schain_cpu_alloc_dict['small'] == 5
- assert schain_cpu_alloc_dict['medium'] == 22
- assert schain_cpu_alloc_dict['large'] == 716
+ assert schain_cpu_alloc_dict['test4'] == 102
+ assert schain_cpu_alloc_dict['test'] == 102
+ assert schain_cpu_alloc_dict['small'] == 6
+ assert schain_cpu_alloc_dict['medium'] == 102
+ assert schain_cpu_alloc_dict['large'] == 819
- assert ima_cpu_alloc_dict['test4'] == 9
- assert ima_cpu_alloc_dict['test'] == 9
- assert ima_cpu_alloc_dict['small'] == 2
- assert ima_cpu_alloc_dict['medium'] == 9
- assert ima_cpu_alloc_dict['large'] == 307
+ assert ima_cpu_alloc_dict['test4'] == 25
+ assert ima_cpu_alloc_dict['test'] == 25
+ assert ima_cpu_alloc_dict['small'] == 1
+ assert ima_cpu_alloc_dict['medium'] == 25
+ assert ima_cpu_alloc_dict['large'] == 204
def test_get_memory_alloc():
- net_configs = safe_load_yml(CONFIGS_FILEPATH)
- with mock.patch('core.resources.get_total_memory', return_value=TEST_MEMORY):
- schain_mem_alloc, ima_mem_alloc = get_memory_alloc(net_configs)
+ env_configs = safe_load_yml(ENVIRONMENT_PARAMS_FILEPATH)
+ with mock.patch('node_cli.core.resources.get_total_memory', return_value=TEST_MEMORY):
+ schain_mem_alloc, ima_mem_alloc = get_memory_alloc(env_configs)
schain_mem_alloc_dict = schain_mem_alloc.dict()
ima_mem_alloc_dict = ima_mem_alloc.dict()
- assert schain_mem_alloc_dict['test4'] == 218750
- assert schain_mem_alloc_dict['test'] == 218750
- assert schain_mem_alloc_dict['small'] == 54687
- assert schain_mem_alloc_dict['medium'] == 218750
- assert schain_mem_alloc_dict['large'] == 7000000
-
- assert ima_mem_alloc_dict['test4'] == 93750
- assert ima_mem_alloc_dict['test'] == 93750
- assert ima_mem_alloc_dict['small'] == 23437
- assert ima_mem_alloc_dict['medium'] == 93750
- assert ima_mem_alloc_dict['large'] == 3000000
-
-
-def test_compose_storage_limit():
- schain_allocation_data = safe_load_yml(ALLOCATION_FILEPATH)
- storage_limit = compose_storage_limit(schain_allocation_data['mainnet']['leveldb'])
- assert storage_limit == {
- 'large': 341999997419,
- 'medium': 10687499919,
- 'small': 2671874979,
- 'test': 10687499919,
- 'test4': 10687499919
+ assert schain_mem_alloc_dict['test4'] == 1000000
+ assert schain_mem_alloc_dict['test'] == 1000000
+ assert schain_mem_alloc_dict['small'] == 62500
+ assert schain_mem_alloc_dict['medium'] == 1000000
+ assert schain_mem_alloc_dict['large'] == 8000000
+
+ assert ima_mem_alloc_dict['test4'] == 250000
+ assert ima_mem_alloc_dict['test'] == 250000
+ assert ima_mem_alloc_dict['small'] == 15625
+ assert ima_mem_alloc_dict['medium'] == 250000
+ assert ima_mem_alloc_dict['large'] == 2000000
+
+
+def test_leveldb_limits():
+ with mock.patch('node_cli.core.resources.get_disk_size', return_value=NORMAL_DISK_SIZE):
+ resource_allocation_config = compose_resource_allocation_config(DEFAULT_ENV_TYPE)
+
+ assert resource_allocation_config['schain']['leveldb_limits'] == {
+ 'large': {'contract_storage': 12787195576, 'db_storage': 8524797050},
+ 'medium': {'contract_storage': 1598399446, 'db_storage': 1065599631},
+ 'small': {'contract_storage': 99899965, 'db_storage': 66599976},
+ 'test': {'contract_storage': 1598399446, 'db_storage': 1065599631},
+ 'test4': {'contract_storage': 1598399446, 'db_storage': 1065599631}
}
diff --git a/tests/routes_test.py b/tests/routes_test.py
new file mode 100644
index 00000000..3cd2416e
--- /dev/null
+++ b/tests/routes_test.py
@@ -0,0 +1,50 @@
+import pytest
+from node_cli.configs.routes import (route_exists, get_route, get_all_available_routes,
+ RouteNotFoundException)
+
+
+ALL_V1_ROUTES = [
+ '/api/v1/node/info',
+ '/api/v1/node/register',
+ '/api/v1/node/maintenance-on',
+ '/api/v1/node/maintenance-off',
+ '/api/v1/node/signature',
+ '/api/v1/node/send-tg-notification',
+ '/api/v1/node/exit/start',
+ '/api/v1/node/exit/status',
+ '/api/v1/node/set-domain-name',
+
+ '/api/v1/health/containers',
+ '/api/v1/health/schains',
+ '/api/v1/health/sgx',
+
+ '/api/v1/schains/config',
+ '/api/v1/schains/list',
+ '/api/v1/schains/dkg-statuses',
+ '/api/v1/schains/firewall-rules',
+ '/api/v1/schains/repair',
+ '/api/v1/schains/get',
+
+ '/api/v1/ssl/status',
+ '/api/v1/ssl/upload',
+
+ '/api/v1/wallet/info',
+ '/api/v1/wallet/send-eth'
+]
+
+
+def test_route_exists():
+ assert route_exists('node', 'signature', 'v1')
+ assert not route_exists('snode', 'mignature', 'v1')
+
+
+def test_get_route():
+ repair_route = get_route('schains', 'repair')
+ assert repair_route == '/api/v1/schains/repair'
+
+ with pytest.raises(RouteNotFoundException):
+ get_route('schains', 'refair')
+
+
+def test_get_all_available_routes():
+ assert get_all_available_routes() == ALL_V1_ROUTES
diff --git a/tests/test-skale-cli.yaml b/tests/test-skale-cli.yaml
deleted file mode 100644
index 03d160ba..00000000
--- a/tests/test-skale-cli.yaml
+++ /dev/null
@@ -1 +0,0 @@
-host: https://test.com
diff --git a/tests/test-skalecli.yaml b/tests/test-skalecli.yaml
deleted file mode 100644
index 03d160ba..00000000
--- a/tests/test-skalecli.yaml
+++ /dev/null
@@ -1 +0,0 @@
-host: https://test.com
diff --git a/tests/tools_meta_test.py b/tests/tools_meta_test.py
index a81b5801..184defde 100644
--- a/tests/tools_meta_test.py
+++ b/tests/tools_meta_test.py
@@ -3,8 +3,8 @@
import pytest
-from configs import META_FILEPATH
-from tools.meta import (
+from node_cli.configs import META_FILEPATH
+from node_cli.utils.meta import (
CliMeta, compose_default_meta,
DEFAULT_CONFIG_STREAM, DEFAULT_VERSION,
ensure_meta, get_meta_info,
diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/utils/decorators_test.py b/tests/utils/decorators_test.py
new file mode 100644
index 00000000..e67e1f04
--- /dev/null
+++ b/tests/utils/decorators_test.py
@@ -0,0 +1,40 @@
+import mock
+import pytest
+
+from node_cli.utils.global_config import generate_g_config_file
+from node_cli.utils.decorators import check_not_inited, check_inited, check_user
+from node_cli.utils.helper import write_json
+from node_cli.configs import GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH
+
+
+def test_check_not_inited():
+ @check_not_inited
+ def requires_not_inited_node():
+ pass
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False):
+ requires_not_inited_node()
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ with pytest.raises(SystemExit):
+ requires_not_inited_node()
+
+
+def test_check_inited():
+ @check_inited
+ def requires_inited_node():
+ pass
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ requires_inited_node()
+ with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False):
+ with pytest.raises(SystemExit):
+ requires_inited_node()
+
+
+def test_check_user(mocked_g_config):
+ @check_user
+ def this_checks_user():
+ pass
+ generate_g_config_file(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+ this_checks_user()
+ write_json(GLOBAL_SKALE_CONF_FILEPATH, {'user': 'skaletest'})
+ with pytest.raises(SystemExit):
+ this_checks_user()
diff --git a/tests/utils/global_config_test.py b/tests/utils/global_config_test.py
new file mode 100644
index 00000000..e159d51f
--- /dev/null
+++ b/tests/utils/global_config_test.py
@@ -0,0 +1,56 @@
+
+import os
+import mock
+from node_cli.utils.global_config import read_g_config, generate_g_config_file
+from node_cli.utils.helper import write_json, get_system_user, is_user_valid, get_g_conf_user
+from node_cli.configs import GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH
+
+
+def test_read_g_config(mocked_g_config):
+ write_json(GLOBAL_SKALE_CONF_FILEPATH, {'test': 1})
+ g_config = read_g_config(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+ assert g_config['test'] == 1
+
+
+def test_generate_g_config_file(mocked_g_config):
+ try:
+ os.remove(GLOBAL_SKALE_CONF_FILEPATH)
+ except OSError:
+ pass
+
+ assert not os.path.exists(GLOBAL_SKALE_CONF_FILEPATH)
+ generate_g_config_file(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+ assert os.path.exists(GLOBAL_SKALE_CONF_FILEPATH)
+
+ g_config = read_g_config(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+ assert g_config['user'] == get_system_user()
+ assert g_config['home_dir'] == os.path.expanduser('~')
+
+
+def test_get_system_user():
+ with mock.patch('os.path.expanduser', return_value='/root'):
+ assert get_system_user() == 'root'
+ sudo_user = os.environ.get('SUDO_USER')
+ if sudo_user:
+ del os.environ['SUDO_USER']
+ os.environ['USER'] = 'test'
+ assert get_system_user() == 'test'
+ if sudo_user:
+ os.environ['SUDO_USER'] = sudo_user
+
+
+def test_is_user_valid(mocked_g_config):
+ generate_g_config_file(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
+ assert is_user_valid()
+
+ write_json(GLOBAL_SKALE_CONF_FILEPATH, {'user': 'skaletest'})
+ assert not is_user_valid()
+
+ with mock.patch('os.path.expanduser', return_value='/root'):
+ assert is_user_valid()
+ assert not is_user_valid(allow_root=False)
+
+
+def test_get_g_conf_user(mocked_g_config):
+ write_json(GLOBAL_SKALE_CONF_FILEPATH, {'user': 'test_get_g_conf_user'})
+ assert get_g_conf_user() == 'test_get_g_conf_user'
diff --git a/text.yml b/text.yml
index 896b895c..862635c7 100644
--- a/text.yml
+++ b/text.yml
@@ -1,4 +1,16 @@
-login: "Login user in a SKALE node"
+health:
+ help: Node health commands
+ containers:
+ help: List of SKALE containers running on connected node
+ schains_checks:
+ help: List of health checks for sChains served by the node
+ sgx:
+ help: Info about connected SGX server
+
+common:
+ json:
+ help: Show data in JSON format
+
node:
base: "SKALE node commands"
info: "Info about SKALE node"
@@ -25,11 +37,7 @@ wallet:
successful_transfer: "Funds were successfully transferred"
service:
- unauthorized: 'You should login first: skale user login'
- no_node_host: 'You should set host first: skale attach [HOST]'
- node_host_not_valid: Provided SKALE node host is not valid
node_not_registered: This SKALE node is not registered on SKALE Manager yet
- empty_response: Your request returned nothing. Something went wrong.
ssl:
no_cert: |-
diff --git a/tools/helper.py b/tools/helper.py
deleted file mode 100644
index f9161f9b..00000000
--- a/tools/helper.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import os
-import json
-import logging
-import subprocess
-import urllib.request
-from subprocess import PIPE
-
-import distutils
-import distutils.util
-
-import click
-
-from jinja2 import Environment
-from readsettings import ReadSettings
-
-from configs.env import (absent_params as absent_env_params,
- get_params as get_env_params)
-from configs import CONFIG_FILEPATH
-
-logger = logging.getLogger(__name__)
-
-
-def read_json(path):
- with open(path, encoding='utf-8') as data_file:
- return json.loads(data_file.read())
-
-
-def write_json(path, content):
- with open(path, 'w') as outfile:
- json.dump(content, outfile, indent=4)
-
-
-def run_cmd(cmd, env={}, shell=False, secure=False, raise_for_status=True):
- if not secure:
- logger.debug(f'Running: {cmd}')
- else:
- logger.debug('Running some secure command')
- res = subprocess.run(
- cmd, shell=shell, stdout=PIPE, stderr=PIPE, env={**env, **os.environ}
- )
- if res.returncode:
- logger.debug(res.stdout.decode('UTF-8').rstrip())
- logger.error('Error during shell execution:')
- logger.error(res.stderr.decode('UTF-8').rstrip())
- print(res.stderr.decode('UTF-8').rstrip())
- if raise_for_status:
- res.check_returncode()
- else:
- logger.debug('Command is executed successfully. Command log:')
- logger.debug(res.stdout.decode('UTF-8').rstrip())
- return res
-
-
-def format_output(res):
- return res.stdout.decode('UTF-8').rstrip(), \
- res.stderr.decode('UTF-8').rstrip()
-
-
-def download_file(url, filepath):
- return urllib.request.urlretrieve(url, filepath)
-
-
-def process_template(source, destination, data):
- """
- :param source: j2 template source path
- :param destination: out file path
- :param data: dictionary with fields for template
- :return: Nothing
- """
- template = read_file(source)
- processed_template = Environment().from_string(template).render(data)
- with open(destination, "w") as f:
- f.write(processed_template)
-
-
-def read_file(path):
- file = open(path, 'r')
- text = file.read()
- file.close()
- return text
-
-
-def get_username():
- return os.environ.get('USERNAME') or os.environ.get('USER')
-
-
-def session_config():
- return ReadSettings(CONFIG_FILEPATH)
-
-
-def extract_env_params(env_filepath):
- env_params = get_env_params(env_filepath)
- if not env_params.get('DB_ROOT_PASSWORD'):
- env_params['DB_ROOT_PASSWORD'] = env_params['DB_PASSWORD']
-
- absent_params = ', '.join(absent_env_params(env_params))
- if absent_params:
- click.echo(f"Your env file({env_filepath}) have some absent params: "
- f"{absent_params}.\n"
- f"You should specify them to make sure that "
- f"all services are working",
- err=True)
- return None
- return env_params
-
-
-def str_to_bool(val):
- return bool(distutils.util.strtobool(val))