From fe10e73eefb1991fe7e7946a4be91f9ec883fffc Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Tue, 25 Apr 2023 08:20:35 -0700 Subject: [PATCH 01/48] Implement test orchestrator (#4) * Initial work on test-orchestrator * Ignore runtime folder * Update runtime directory for test modules * Fix logging Add initial framework for running tests * logging and misc cleanup * logging changes * Add a stop hook after all tests complete * Refactor test_orc code * Add arg passing Add option to use locally cloned via install or remote via main project network orchestrator * Fix baseline module Fix orchestrator exiting only after timeout * Add result file to baseline test module Change result format to match closer to design doc * Refactor pylint * Skip test module if it failed to start * Refactor * Check for valid log level --------- Co-authored-by: Jacob Boddey --- .gitignore | 134 ++++++++++- cmd/install | 3 +- cmd/start | 8 +- conf/system.json.example | 10 +- framework/logger.py | 45 +++- framework/run.py | 45 +++- framework/testrun.py | 217 +++++++++-------- test_orc/modules/base/base.Dockerfile | 23 ++ test_orc/modules/base/bin/capture | 20 ++ test_orc/modules/base/bin/setup_binaries | 10 + test_orc/modules/base/bin/start_grpc | 17 ++ test_orc/modules/base/bin/start_module | 76 ++++++ test_orc/modules/base/bin/wait_for_interface | 10 + test_orc/modules/base/conf/module_config.json | 12 + test_orc/modules/base/python/requirements.txt | 2 + .../base/python/src/grpc/start_server.py | 34 +++ test_orc/modules/base/python/src/logger.py | 45 ++++ test_orc/modules/baseline/baseline.Dockerfile | 11 + .../modules/baseline/bin/start_test_module | 40 ++++ .../modules/baseline/conf/module_config.json | 21 ++ .../modules/baseline/python/src/logger.py | 46 ++++ test_orc/modules/baseline/python/src/run.py | 50 ++++ .../baseline/python/src/test_module.py | 63 +++++ test_orc/python/requirements.txt | 0 test_orc/python/src/test_orchestrator.py | 221 ++++++++++++++++++ 25 files changed, 1042 insertions(+), 121 deletions(-) create mode 100644 test_orc/modules/base/base.Dockerfile create mode 100644 test_orc/modules/base/bin/capture create mode 100644 test_orc/modules/base/bin/setup_binaries create mode 100644 test_orc/modules/base/bin/start_grpc create mode 100644 test_orc/modules/base/bin/start_module create mode 100644 test_orc/modules/base/bin/wait_for_interface create mode 100644 test_orc/modules/base/conf/module_config.json create mode 100644 test_orc/modules/base/python/requirements.txt create mode 100644 test_orc/modules/base/python/src/grpc/start_server.py create mode 100644 test_orc/modules/base/python/src/logger.py create mode 100644 test_orc/modules/baseline/baseline.Dockerfile create mode 100644 test_orc/modules/baseline/bin/start_test_module create mode 100644 test_orc/modules/baseline/conf/module_config.json create mode 100644 test_orc/modules/baseline/python/src/logger.py create mode 100644 test_orc/modules/baseline/python/src/run.py create mode 100644 test_orc/modules/baseline/python/src/test_module.py create mode 100644 test_orc/python/requirements.txt create mode 100644 test_orc/python/src/test_orchestrator.py diff --git a/.gitignore b/.gitignore index 93fe84e64..4016b6901 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,135 @@ +# Runtime folder +runtime/ venv/ net_orc/ -.vscode/ \ No newline at end of file +.vscode/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/cmd/install b/cmd/install index 351eb4129..61722e273 100755 --- a/cmd/install +++ b/cmd/install @@ -2,6 +2,7 @@ GIT_URL=https://github.com/auto-iot NET_ORC_DIR=net_orc +NET_ORC_VERSION="dev" python3 -m venv venv @@ -10,7 +11,7 @@ source venv/bin/activate pip3 install -r etc/requirements.txt rm -rf $NET_ORC_DIR -git clone $GIT_URL/network-orchestrator $NET_ORC_DIR +git clone -b $NET_ORC_VERSION $GIT_URL/network-orchestrator $NET_ORC_DIR chown -R $USER $NET_ORC_DIR pip3 install -r $NET_ORC_DIR/python/requirements.txt diff --git a/cmd/start b/cmd/start index 43a295338..fa6bbc1e1 100755 --- a/cmd/start +++ b/cmd/start @@ -5,6 +5,12 @@ if [[ "$EUID" -ne 0 ]]; then exit 1 fi +# Ensure that /var/run/netns folder exists +mkdir -p /var/run/netns + +# Clear up existing runtime files +rm -rf runtime + # Check if python modules exist. Install if not [ ! -d "venv" ] && cmd/install @@ -12,6 +18,6 @@ fi source venv/bin/activate # TODO: Execute python code -python -u framework/run.py +python -u framework/run.py $@ deactivate \ No newline at end of file diff --git a/conf/system.json.example b/conf/system.json.example index 379545ad6..2d4b737d0 100644 --- a/conf/system.json.example +++ b/conf/system.json.example @@ -1,7 +1,7 @@ { - "network": { - "device_intf": "enx123456789123", - "internet_intf": "enx123456789124" - }, - "log_level": "INFO" + "network": { + "device_intf": "enx123456789123", + "internet_intf": "enx123456789124" + }, + "log_level": "INFO" } \ No newline at end of file diff --git a/framework/logger.py b/framework/logger.py index 25970bd21..64d8fdb97 100644 --- a/framework/logger.py +++ b/framework/logger.py @@ -1,4 +1,4 @@ -"""Manages all things logging.""" +"""Manages stream and file loggers.""" import json import logging import os @@ -6,18 +6,43 @@ LOGGERS = {} _LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" _DATE_FORMAT = '%b %02d %H:%M:%S' -_CONF_DIR="conf" -_CONF_FILE_NAME="system.json" +_DEFAULT_LOG_LEVEL = logging.INFO +_LOG_LEVEL = logging.INFO +_CONF_DIR = "conf" +_CONF_FILE_NAME = "system.json" +_LOG_DIR = "runtime/testing/" -with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8') as config_file: - system_conf_json = json.load(config_file) - log_level_str = system_conf_json['log_level'] - log_level = logging.getLevelName(log_level_str) +# Set log level +with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8') as system_conf_file: + system_conf_json = json.load(system_conf_file) +log_level_str = system_conf_json['log_level'] -logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=log_level) +temp_log = logging.getLogger('temp') +try: + temp_log.setLevel(logging.getLevelName(log_level_str)) + _LOG_LEVEL = logging.getLevelName(log_level_str) +except ValueError: + print('Invalid log level set in ' + _CONF_DIR + '/' + _CONF_FILE_NAME + + '. Using INFO as log level') + _LOG_LEVEL = _DEFAULT_LOG_LEVEL -def get_logger(name): - """Returns the logger belonging to the class calling the method.""" +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR + log_file + ".log") + handler.setFormatter(log_format) + log.addHandler(handler) + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + +def get_logger(name, log_file=None): if name not in LOGGERS: LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(_LOG_LEVEL) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) return LOGGERS[name] diff --git a/framework/run.py b/framework/run.py index ad7c038ee..d2643d956 100644 --- a/framework/run.py +++ b/framework/run.py @@ -1,5 +1,40 @@ -"""Starts Test Run.""" - -from testrun import TestRun - -testrun = TestRun() +"""Starts Test Run.""" + +import argparse +import sys +from testrun import TestRun +import logger + +LOGGER = logger.get_logger('runner') + +class TestRunner: + + def __init__(self, local_net=True): + + LOGGER.info('Starting Test Run') + + testrun = TestRun(local_net) + + testrun.load_config() + + testrun.start_network() + + testrun.run_tests() + + testrun.stop_network() + + +def run(argv): + parser = argparse.ArgumentParser(description="Test Run", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-r", "--remote-net", action="store_false", + help='''Use the network orchestrator from the parent directory instead + of the one downloaded locally from the install script.''') + + args, unknown = parser.parse_known_args() + + TestRunner(args.remote_net) + + +if __name__ == "__main__": + run(sys.argv) diff --git a/framework/testrun.py b/framework/testrun.py index 225bed853..22fa0295a 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -1,98 +1,119 @@ -"""The overall control of the Test Run application. - -This file provides the integration between all of the -Test Run components, such as net_orc, test_orc and test_ui. - -Run using the provided command scripts in the cmd folder. -E.g sudo cmd/start -""" - -import os -import sys -import json -import signal -import time -import logger - -# Locate parent directory -current_dir = os.path.dirname(os.path.realpath(__file__)) -parent_dir = os.path.dirname(current_dir) - -# Add net_orc to Python path -net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') -sys.path.append(net_orc_dir) - -import network_orchestrator as net_orc # pylint: disable=wrong-import-position - -LOGGER = logger.get_logger('test_run') -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME = 300 - -class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. - - Creates an instance of the network orchestrator, test - orchestrator and user interface. - """ - - def __init__(self): - LOGGER.info("Starting Test Run") - - # Catch any exit signals - self._register_exits() - - self._start_network() - - # Keep application running - time.sleep(RUNTIME) - - self._stop_network() - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - self._stop_network() - - def _load_config(self): - """Loads all settings from the config file into memory.""" - if not os.path.isfile(CONFIG_FILE): - LOGGER.error("Configuration file is not present at " + CONFIG_FILE) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - with open(CONFIG_FILE, 'r', encoding='UTF-8') as config_file_open: - config_json = json.load(config_file_open) - self._net_orc.import_config(config_json) - - def _start_network(self): - # Create an instance of the network orchestrator - self._net_orc = net_orc.NetworkOrchestrator() - - # Load config file and pass to other components - self._load_config() - - # Load and build any unbuilt network containers - self._net_orc.load_network_modules() - self._net_orc.build_network_modules() - - # Create baseline network - self._net_orc.create_net() - - # Launch network service containers - self._net_orc.start_network_services() - - LOGGER.info("Network is ready.") - - def _stop_network(self): - LOGGER.info("Stopping Test Run") - self._net_orc.stop_networking_services(kill=True) - self._net_orc.restore_net() - sys.exit(0) +"""The overall control of the Test Run application. + +This file provides the integration between all of the +Test Run components, such as net_orc, test_orc and test_ui. + +Run using the provided command scripts in the cmd folder. +E.g sudo cmd/start +""" + +import os +import sys +import json +import signal +import logger + +# Locate parent directory +current_dir = os.path.dirname(os.path.realpath(__file__)) +parent_dir = os.path.dirname(current_dir) + +LOGGER = logger.get_logger('test_run') +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" +RUNTIME = 300 + +class TestRun: # pylint: disable=too-few-public-methods + """Test Run controller. + + Creates an instance of the network orchestrator, test + orchestrator and user interface. + """ + + def __init__(self,local_net=True): + + # Catch any exit signals + self._register_exits() + + # Import the correct net orchestrator + self.import_orchestrators(local_net) + + self._net_orc = net_orc.NetworkOrchestrator() + self._test_orc = test_orc.TestOrchestrator() + + def import_orchestrators(self,local_net=True): + if local_net: + # Add local net_orc to Python path + net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') + else: + # Resolve the path to the test-run parent folder + root_dir = os.path.abspath(os.path.join(parent_dir, os.pardir)) + # Add manually cloned network orchestrator from parent folder + net_orc_dir = os.path.join(root_dir, 'network-orchestrator', 'python', 'src') + # Add net_orc to Python path + sys.path.append(net_orc_dir) + # Import the network orchestrator + global net_orc + import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel + + # Add test_orc to Python path + test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') + sys.path.append(test_orc_dir) + global test_orc + import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + self.stop_network() + + def load_config(self): + """Loads all settings from the config file into memory.""" + if not os.path.isfile(CONFIG_FILE): + LOGGER.error("Configuration file is not present at " + CONFIG_FILE) + LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + with open(CONFIG_FILE, 'r', encoding='UTF-8') as config_file_open: + config_json = json.load(config_file_open) + self._net_orc.import_config(config_json) + self._test_orc.import_config(config_json) + + def start_network(self): + """Starts the network orchestrator and network services.""" + + # Load and build any unbuilt network containers + self._net_orc.load_network_modules() + self._net_orc.build_network_modules() + + self._net_orc.stop_networking_services(kill=True) + self._net_orc.restore_net() + + # Create baseline network + self._net_orc.create_net() + + # Launch network service containers + self._net_orc.start_network_services() + + LOGGER.info("Network is ready.") + + def run_tests(self): + """Iterate through and start all test modules.""" + + self._test_orc.load_test_modules() + self._test_orc.build_test_modules() + + # Begin testing + self._test_orc.run_test_modules() + + def stop_network(self): + """Commands the net_orc to stop the network and clean up.""" + self._net_orc.stop_networking_services(kill=True) + self._net_orc.restore_net() + sys.exit(0) diff --git a/test_orc/modules/base/base.Dockerfile b/test_orc/modules/base/base.Dockerfile new file mode 100644 index 000000000..b5f35326a --- /dev/null +++ b/test_orc/modules/base/base.Dockerfile @@ -0,0 +1,23 @@ +# Image name: test-run/base-test +FROM ubuntu:jammy + +# Install common software +RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix + +# Setup the base python requirements +COPY modules/base/python /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + +# Add the bin files +COPY modules/base/bin /testrun/bin + +# Remove incorrect line endings +RUN dos2unix /testrun/bin/* + +# Make sure all the bin files are executable +RUN chmod u+x /testrun/bin/* + +# Start the test module +ENTRYPOINT [ "/testrun/bin/start_module" ] \ No newline at end of file diff --git a/test_orc/modules/base/bin/capture b/test_orc/modules/base/bin/capture new file mode 100644 index 000000000..dccafb0c5 --- /dev/null +++ b/test_orc/modules/base/bin/capture @@ -0,0 +1,20 @@ +#!/bin/bash -e + +# Fetch module name +MODULE_NAME=$1 + +# Define the local file location for the capture to be saved +PCAP_DIR="/runtime/output/" +PCAP_FILE=$MODULE_NAME.pcap + +# Allow a user to define an interface by passing it into this script +INTERFACE=$2 + +# Create the output directory and start the capture +mkdir -p $PCAP_DIR +chown $HOST_USER:$HOST_USER $PCAP_DIR +echo "PCAP Dir: $PCAP_DIR/$PCAP_FILE" +tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & + +# Small pause to let the capture to start +sleep 1 \ No newline at end of file diff --git a/test_orc/modules/base/bin/setup_binaries b/test_orc/modules/base/bin/setup_binaries new file mode 100644 index 000000000..3535ead3c --- /dev/null +++ b/test_orc/modules/base/bin/setup_binaries @@ -0,0 +1,10 @@ +#!/bin/bash -e + +# Directory where all binaries will be loaded +BIN_DIR=$1 + +# Remove incorrect line endings +dos2unix $BIN_DIR/* + +# Make sure all the bin files are executable +chmod u+x $BIN_DIR/* \ No newline at end of file diff --git a/test_orc/modules/base/bin/start_grpc b/test_orc/modules/base/bin/start_grpc new file mode 100644 index 000000000..917381e89 --- /dev/null +++ b/test_orc/modules/base/bin/start_grpc @@ -0,0 +1,17 @@ +#!/bin/bash -e + +GRPC_DIR="/testrun/python/src/grpc" +GRPC_PROTO_DIR="proto" +GRPC_PROTO_FILE="grpc.proto" + +# Move into the grpc directory +pushd $GRPC_DIR >/dev/null 2>&1 + +# Build the grpc proto file every time before starting server +python3 -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. + +popd >/dev/null 2>&1 + +# Start the grpc server +python3 -u $GRPC_DIR/start_server.py $@ + diff --git a/test_orc/modules/base/bin/start_module b/test_orc/modules/base/bin/start_module new file mode 100644 index 000000000..a9f5402f4 --- /dev/null +++ b/test_orc/modules/base/bin/start_module @@ -0,0 +1,76 @@ +#!/bin/bash + +# Directory where all binaries will be loaded +BIN_DIR="/testrun/bin" + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Create a local user that matches the same as the host +# to be used for correct file ownership for various logs +# HOST_USER mapped in via docker container environemnt variables +useradd $HOST_USER + +# Enable IPv6 for all containers +sysctl net.ipv6.conf.all.disable_ipv6=0 +sysctl -p + +# Read in the config file +CONF_FILE="/testrun/conf/module_config.json" +CONF=`cat $CONF_FILE` + +if [[ -z $CONF ]] +then + echo "No config file present at $CONF_FILE. Exiting startup." + exit 1 +fi + +# Extract the necessary config parameters +MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name') +DEFINED_IFACE=$(echo "$CONF" | jq -r '.config.network.interface') +GRPC=$(echo "$CONF" | jq -r '.config.grpc') + +# Validate the module name is present +if [[ -z "$MODULE_NAME" || "$MODULE_NAME" == "null" ]] +then + echo "No module name present in $CONF_FILE. Exiting startup." + exit 1 +fi + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No Interface Defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +echo "Starting module $MODULE_NAME..." + +$BIN_DIR/setup_binaries $BIN_DIR + +# Wait for interface to become ready +$BIN_DIR/wait_for_interface $INTF + +# Start network capture +$BIN_DIR/capture $MODULE_NAME $INTF + +# Start the grpc server +if [[ ! -z $GRPC && ! $GRPC == "null" ]] +then + GRPC_PORT=$(echo "$GRPC" | jq -r '.port') + if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]] + then + echo "gRPC port resolved from config: $GRPC_PORT" + $BIN_DIR/start_grpc "-p $GRPC_PORT" & + else + $BIN_DIR/start_grpc & + fi +fi + +# Small pause to let all core services stabalize +sleep 3 + +# Start the networking service +$BIN_DIR/start_test_module $MODULE_NAME $INTF \ No newline at end of file diff --git a/test_orc/modules/base/bin/wait_for_interface b/test_orc/modules/base/bin/wait_for_interface new file mode 100644 index 000000000..c9c1682f0 --- /dev/null +++ b/test_orc/modules/base/bin/wait_for_interface @@ -0,0 +1,10 @@ +#!/bin/bash + +# Allow a user to define an interface by passing it into this script +INTF=$1 + +# Wait for local interface to be ready +while ! ip link show $INTF; do + echo $INTF is not yet ready. Waiting 3 seconds + sleep 3 +done \ No newline at end of file diff --git a/test_orc/modules/base/conf/module_config.json b/test_orc/modules/base/conf/module_config.json new file mode 100644 index 000000000..1f3a47ba2 --- /dev/null +++ b/test_orc/modules/base/conf/module_config.json @@ -0,0 +1,12 @@ +{ + "config": { + "meta": { + "name": "base", + "display_name": "Base", + "description": "Base image" + }, + "docker": { + "enable_container": false + } + } +} \ No newline at end of file diff --git a/test_orc/modules/base/python/requirements.txt b/test_orc/modules/base/python/requirements.txt new file mode 100644 index 000000000..9c4e2b056 --- /dev/null +++ b/test_orc/modules/base/python/requirements.txt @@ -0,0 +1,2 @@ +grpcio +grpcio-tools \ No newline at end of file diff --git a/test_orc/modules/base/python/src/grpc/start_server.py b/test_orc/modules/base/python/src/grpc/start_server.py new file mode 100644 index 000000000..9ed31ffcf --- /dev/null +++ b/test_orc/modules/base/python/src/grpc/start_server.py @@ -0,0 +1,34 @@ +from concurrent import futures +import grpc +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 +from network_service import NetworkService +import logging +import sys +import argparse + +DEFAULT_PORT = '5001' + +def serve(PORT): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) + server.add_insecure_port('[::]:' + PORT) + server.start() + server.wait_for_termination() + +def run(argv): + parser = argparse.ArgumentParser(description="GRPC Server for Network Module", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-p", "--port", default=DEFAULT_PORT, + help="Define the default port to run the server on.") + + args = parser.parse_args() + + PORT = args.port + + print("gRPC server starting on port " + PORT) + serve(PORT) + + +if __name__ == "__main__": + run(sys.argv) \ No newline at end of file diff --git a/test_orc/modules/base/python/src/logger.py b/test_orc/modules/base/python/src/logger.py new file mode 100644 index 000000000..0eb7b9ccf --- /dev/null +++ b/test_orc/modules/base/python/src/logger.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR = "conf" +_CONF_FILE_NAME = "system.json" +_LOG_DIR = "/runtime/network/" + +# Set log level +try: + system_conf_json = json.load( + open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8')) + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except: + # TODO: Print out warning that log level is incorrect or missing + log_level = _DEFAULT_LEVEL + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + + +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR+log_file+".log") + handler.setFormatter(log_format) + log.addHandler(handler) + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + +def get_logger(name, log_file=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/test_orc/modules/baseline/baseline.Dockerfile b/test_orc/modules/baseline/baseline.Dockerfile new file mode 100644 index 000000000..5b634e6ee --- /dev/null +++ b/test_orc/modules/baseline/baseline.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/baseline/conf /testrun/conf + +# Load device binary files +COPY modules/baseline/bin /testrun/bin + +# Copy over all python files +COPY modules/baseline/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/baseline/bin/start_test_module b/test_orc/modules/baseline/bin/start_test_module new file mode 100644 index 000000000..292b57de2 --- /dev/null +++ b/test_orc/modules/baseline/bin/start_test_module @@ -0,0 +1,40 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a pyhon script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. + +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" \ No newline at end of file diff --git a/test_orc/modules/baseline/conf/module_config.json b/test_orc/modules/baseline/conf/module_config.json new file mode 100644 index 000000000..1b8b7b9ba --- /dev/null +++ b/test_orc/modules/baseline/conf/module_config.json @@ -0,0 +1,21 @@ +{ + "config": { + "meta": { + "name": "baseline", + "display_name": "Baseline", + "description": "Baseline test" + }, + "network": { + "interface": "eth0", + "enable_wan": false, + "ip_index": 9 + }, + "grpc": { + "port": 50001 + }, + "docker": { + "enable_container": true, + "timeout": 30 + } + } +} \ No newline at end of file diff --git a/test_orc/modules/baseline/python/src/logger.py b/test_orc/modules/baseline/python/src/logger.py new file mode 100644 index 000000000..641aa16b4 --- /dev/null +++ b/test_orc/modules/baseline/python/src/logger.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR = "conf" +_CONF_FILE_NAME = "system.json" +_LOG_DIR = "/runtime/output/" + +# Set log level +try: + system_conf_json = json.load( + open(os.path.join(_CONF_DIR, _CONF_FILE_NAME))) + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except: + # TODO: Print out warning that log level is incorrect or missing + log_level = _DEFAULT_LEVEL + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + +def add_file_handler(log, logFile): + handler = logging.FileHandler(_LOG_DIR+logFile+".log") + handler.setFormatter(log_format) + log.addHandler(handler) + + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, logFile=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if logFile is not None: + add_file_handler(LOGGERS[name], logFile) + return LOGGERS[name] diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py new file mode 100644 index 000000000..7ff11559f --- /dev/null +++ b/test_orc/modules/baseline/python/src/run.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 + +import argparse +import signal +import sys +import logger + +from test_module import TestModule + +LOGGER = logger.get_logger('test_module') +RUNTIME = 300 + +class TestModuleRunner: + + def __init__(self,module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + LOGGER.info("Starting Test Module Template") + + self._test_module = TestModule(module) + self._test_module.run_tests() + self._test_module.generate_results() + + def _handler(self, signum, *other): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) + +def run(argv): + parser = argparse.ArgumentParser(description="Test Module Template", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", "--module", help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + TestModuleRunner(args.module.strip()) + +if __name__ == "__main__": + run(sys.argv) diff --git a/test_orc/modules/baseline/python/src/test_module.py b/test_orc/modules/baseline/python/src/test_module.py new file mode 100644 index 000000000..440b87f7f --- /dev/null +++ b/test_orc/modules/baseline/python/src/test_module.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 + +import json +import time +import logger + +LOG_NAME = "test_baseline" +RESULTS_DIR = "/runtime/output/" +LOGGER = logger.get_logger(LOG_NAME) + +class TestModule: + + def __init__(self, module): + + self.module_test1 = None + self.module_test2 = None + self.module_test3 = None + self.module = module + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + # Make up some fake test results + def run_tests(self): + LOGGER.info("Running test 1...") + self.module_test1 = True + LOGGER.info("Test 1 complete.") + + LOGGER.info("Running test 2...") + self.module_test2 = False + LOGGER.info("Test 2 complete.") + + time.sleep(10) + + def generate_results(self): + results = [] + results.append(self.generate_result("Test 1", self.module_test1)) + results.append(self.generate_result("Test 2", self.module_test2)) + results.append(self.generate_result("Test 3", self.module_test3)) + json_results = json.dumps({"results":results}, indent=2) + self.write_results(json_results) + + def write_results(self,results): + results_file=RESULTS_DIR+self.module+"-result.json" + LOGGER.info("Writing results to " + results_file) + f = open(results_file, "w", encoding="utf-8") + f.write(results) + f.close() + + def generate_result(self, test_name, test_result): + if test_result is not None: + result = "compliant" if test_result else "non-compliant" + else: + result = "skipped" + LOGGER.info(test_name + ": " + result) + res_dict = { + "name": test_name, + "result": result, + "description": "The device is " + result + } + return res_dict diff --git a/test_orc/python/requirements.txt b/test_orc/python/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py new file mode 100644 index 000000000..396f533fa --- /dev/null +++ b/test_orc/python/src/test_orchestrator.py @@ -0,0 +1,221 @@ +"""Provides high level management of the test orchestrator.""" +import os +import json +import time +import shutil +import docker +from docker.types import Mount +import logger + +LOG_NAME = "test_orc" +LOGGER = logger.get_logger('test_orc') +RUNTIME_DIR = "runtime" +TEST_MODULES_DIR = "modules" +MODULE_CONFIG = "conf/module_config.json" + +class TestOrchestrator: + """Manages and controls the test modules.""" + + def __init__(self): + self._test_modules = [] + self._module_config = None + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + # Resolve the path to the test-run folder + self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) + + shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), ignore_errors=True) + os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) + + def import_config(self, json_config): + """Load settings from JSON object into memory.""" + + # No relevant config options in system.json as of yet + + def get_test_module(self, name): + """Returns a test module by the module name.""" + for module in self._test_modules: + if name == module.name: + return module + return None + + def run_test_modules(self): + """Iterates through each test module and starts the container.""" + LOGGER.info("Running test modules...") + for module in self._test_modules: + self.run_test_module(module) + LOGGER.info("All tests complete") + + def run_test_module(self, module): + """Start the test container and extract the results.""" + + if module is None or not module.enable_container: + return + + LOGGER.info("Running test module " + module.display_name) + try: + + container_runtime_dir = os.path.join(self._root_path, "runtime/test/" + module.name) + os.makedirs(container_runtime_dir) + + client = docker.from_env() + + module.container = client.containers.run( + module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=module.container_name, + hostname=module.container_name, + privileged=True, + detach=True, + mounts=[Mount( + target="/runtime/output", + source=container_runtime_dir, + type='bind' + )], + environment={"HOST_USER": os.getlogin()} + ) + except (docker.errors.APIError, docker.errors.ContainerError) as container_error: + LOGGER.error("Test module " + module.display_name + " has failed to start") + LOGGER.debug(container_error) + return + + # Determine the module timeout time + test_module_timeout = time.time() + module.timeout + status = self._get_module_status(module) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._get_module_status(module) + + LOGGER.info("Test module " + module.display_name + " has finished") + + def _get_module_status(self,module): + container = self._get_module_container(module) + if container is not None: + return container.status + return None + + def _get_module_container(self, module): + container = None + try: + client = docker.from_env() + container = client.containers.get(module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + module.container_name + " not found") + except docker.errors.APIError as error: + LOGGER.error("Failed to resolve container") + LOGGER.error(error) + return container + + def load_test_modules(self): + """Import module configuration from module_config.json.""" + + modules_dir = os.path.join(self._path, TEST_MODULES_DIR) + + LOGGER.debug("Loading test modules from /" + modules_dir) + loaded_modules = "Loaded the following test modules: " + + for module_dir in os.listdir(modules_dir): + + LOGGER.debug("Loading module from: " + module_dir) + + # Load basic module information + module = TestModule() + with open(os.path.join( + self._path, + modules_dir, + module_dir, + MODULE_CONFIG), + encoding='UTF-8') as module_config_file: + module_json = json.load(module_config_file) + + module.name = module_json['config']['meta']['name'] + module.display_name = module_json['config']['meta']['display_name'] + module.description = module_json['config']['meta']['description'] + module.dir = os.path.join(self._path, modules_dir, module_dir) + module.dir_name = module_dir + module.build_file = module_dir + ".Dockerfile" + module.container_name = "tr-ct-" + module.dir_name + "-test" + module.image_name = "test-run/" + module.dir_name + "-test" + + if 'timeout' in module_json['config']['docker']: + module.timeout = module_json['config']['docker']['timeout'] + + # Determine if this is a container or just an image/template + if "enable_container" in module_json['config']['docker']: + module.enable_container = module_json['config']['docker']['enable_container'] + + self._test_modules.append(module) + + loaded_modules += module.dir_name + " " + + LOGGER.info(loaded_modules) + + def build_test_modules(self): + """Build all test modules.""" + LOGGER.info("Building test modules...") + for module in self._test_modules: + self._build_test_module(module) + + def _build_test_module(self, module): + LOGGER.debug("Building docker image for module " + module.dir_name) + client = docker.from_env() + try: + client.images.build( + dockerfile=os.path.join(module.dir, module.build_file), + path=self._path, + forcerm=True, # Cleans up intermediate containers during build + tag=module.image_name + ) + except docker.errors.BuildError as error: + LOGGER.error(error) + + def _stop_modules(self, kill=False): + LOGGER.debug("Stopping test modules") + for module in self._test_modules: + # Test modules may just be Docker images, so we do not want to stop them + if not module.enable_container: + continue + self._stop_module(module, kill) + + def _stop_module(self, module, kill=False): + LOGGER.debug("Stopping test module " + module.container_name) + try: + container = module.container + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + module.container_name) + except Exception as error: + LOGGER.error("Container stop error") + LOGGER.error(error) + +class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represents a test module.""" + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.build_file = None + self.container = None + self.container_name = None + self.image_name = None + self.enable_container = True + + self.timeout = 60 + + # Absolute path + self.dir = None + self.dir_name = None From 6f3a7fedd198d584fd217579dda66f30d02fad1b Mon Sep 17 00:00:00 2001 From: J Boddey Date: Wed, 26 Apr 2023 11:29:55 +0100 Subject: [PATCH 02/48] Add issue report templates (#7) * Add issue templates * Update README.md --- .github/ISSUE_TEMPLATE/bug_report.md | 32 +++++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.md | 17 ++++++++++++ README.md | 3 +++ 3 files changed, 52 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..852476aeb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a report to help us identify and resolve bugs +title: '' +labels: bug +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Error logs** +If applicable, provide a log from https://gist.github.com/ + +**Environment (please provide the following information about your setup):** + - OS: [e.g. Ubuntu] + - Version [e.g. 22.04] + - Additional hardware (network adapters) + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..9fd0ca896 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest a new feature or change request +title: '' +labels: request +assignees: '' + +--- + +**What is the problem your feature is trying to solve?** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you think would solve the problem** +A clear and concise description of what you want to happen. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/README.md b/README.md index b374bdbf5..41c559499 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,9 @@ Test Run cannot automate everything, and so additional manual testing may be req ## Roadmap :chart_with_upwards_trend: Test Run will constantly evolve to further support end-users by automating device network behaviour against industry standards. +## Issue reporting :triangular_flag_on_post: +If the application has come across a problem at any point during setup or use, please raise an issue under the [issues tab](https://github.com/auto-iot/test-run/issues). Issue templates exist for both bug reports and feature requests. If neither of these are appropriate for your issue, raise a blank issue instead. + ## Contributing :keyboard: The contributing requirements can be found in [CONTRIBUTING.md](CONTRIBUTING.md). In short, checkout the [Google CLA](https://cla.developers.google.com/) site to get started. From e05c383fe65b5468d58bb6ae4b8747319c9635c8 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Wed, 26 Apr 2023 12:13:34 +0100 Subject: [PATCH 03/48] Discover devices on the network (#5) --- .pylintrc | 429 ++++++++++++++++++ etc/requirements.txt | 3 +- framework/device.py | 10 + framework/run.py | 2 +- framework/testrun.py | 255 +++++++---- .../Teltonika TRB140/device_config.json | 5 + 6 files changed, 605 insertions(+), 99 deletions(-) create mode 100644 .pylintrc create mode 100644 framework/device.py create mode 100644 local/devices/Teltonika TRB140/device_config.json diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 000000000..4e89b0c10 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,429 @@ +# This Pylint rcfile contains a best-effort configuration to uphold the +# best-practices and style described in the Google Python style guide: +# https://google.github.io/styleguide/pyguide.html +# +# Its canonical open-source location is: +# https://google.github.io/styleguide/pylintrc + +[MASTER] + +# Files or directories to be skipped. They should be base names, not paths. +ignore=third_party + +# Files or directories matching the regex patterns are skipped. The regex +# matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. +persistent=no + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=4 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=abstract-method, + apply-builtin, + arguments-differ, + attribute-defined-outside-init, + backtick, + bad-option-value, + basestring-builtin, + buffer-builtin, + c-extension-no-member, + consider-using-enumerate, + cmp-builtin, + cmp-method, + coerce-builtin, + coerce-method, + delslice-method, + div-method, + duplicate-code, + eq-without-hash, + execfile-builtin, + file-builtin, + filter-builtin-not-iterating, + fixme, + getslice-method, + global-statement, + hex-method, + idiv-method, + implicit-str-concat, + import-error, + import-self, + import-star-module-level, + inconsistent-return-statements, + input-builtin, + intern-builtin, + invalid-str-codec, + locally-disabled, + long-builtin, + long-suffix, + map-builtin-not-iterating, + misplaced-comparison-constant, + missing-function-docstring, + metaclass-assignment, + next-method-called, + next-method-defined, + no-absolute-import, + no-else-break, + no-else-continue, + no-else-raise, + no-else-return, + no-init, # added + no-member, + no-name-in-module, + no-self-use, + nonzero-method, + oct-method, + old-division, + old-ne-operator, + old-octal-literal, + old-raise-syntax, + parameter-unpacking, + print-statement, + raising-string, + range-builtin-not-iterating, + raw_input-builtin, + rdiv-method, + reduce-builtin, + relative-import, + reload-builtin, + round-builtin, + setslice-method, + signature-differs, + standarderror-builtin, + suppressed-message, + sys-max-int, + too-few-public-methods, + too-many-ancestors, + too-many-arguments, + too-many-boolean-expressions, + too-many-branches, + too-many-instance-attributes, + too-many-locals, + too-many-nested-blocks, + too-many-public-methods, + too-many-return-statements, + too-many-statements, + trailing-newlines, + unichr-builtin, + unicode-builtin, + unnecessary-pass, + unpacking-in-except, + useless-else-on-loop, + useless-object-inheritance, + useless-suppression, + using-cmp-argument, + wrong-import-order, + xrange-builtin, + zip-builtin-not-iterating, + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=main,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl + +# Regular expression matching correct function names +function-rgx=^(?:(?PsetUp|tearDown|setUpModule|tearDownModule)|(?P_?[A-Z][a-zA-Z0-9]*)|(?P_?[a-z][a-z0-9_]*))$ + +# Regular expression matching correct variable names +variable-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct constant names +const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression matching correct attribute names +attr-rgx=^_{0,2}[a-z][a-z0-9_]*$ + +# Regular expression matching correct argument names +argument-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=^_?[A-Z][a-zA-Z0-9]*$ + +# Regular expression matching correct module names +module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$ + +# Regular expression matching correct method names +method-rgx=(?x)^(?:(?P_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P_{0,2}[a-z][a-z0-9_]*))$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=10 + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# TODO(https://github.com/PyCQA/pylint/issues/3352): Direct pylint to exempt +# lines made too long by directives to pytype. + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=(?x)( + ^\s*(\#\ )??$| + ^\s*(from\s+\S+\s+)?import\s+.+$) + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=yes + +# Maximum number of lines in a module +max-module-lines=99999 + +# String used as indentation unit. The internal Google style guide mandates 2 +# spaces. Google's externaly-published style guide says 4, consistent with +# PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google +# projects (like TensorFlow). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=TODO + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=yes + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_) + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging,absl.logging,tensorflow.io.logging + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub, + TERMIOS, + Bastion, + rexec, + sets + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant, absl + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls, + class_ + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=StandardError, + Exception, + BaseException \ No newline at end of file diff --git a/etc/requirements.txt b/etc/requirements.txt index 56b8f0f66..979b408bd 100644 --- a/etc/requirements.txt +++ b/etc/requirements.txt @@ -1 +1,2 @@ -netifaces \ No newline at end of file +netifaces +scapy \ No newline at end of file diff --git a/framework/device.py b/framework/device.py new file mode 100644 index 000000000..08014c127 --- /dev/null +++ b/framework/device.py @@ -0,0 +1,10 @@ +"""Track device object information.""" +from dataclasses import dataclass + +@dataclass +class Device: + """Represents a physical device and it's configuration.""" + + make: str + model: str + mac_addr: str diff --git a/framework/run.py b/framework/run.py index d2643d956..fc6c197e3 100644 --- a/framework/run.py +++ b/framework/run.py @@ -17,7 +17,7 @@ def __init__(self, local_net=True): testrun.load_config() - testrun.start_network() + testrun.start() testrun.run_tests() diff --git a/framework/testrun.py b/framework/testrun.py index 22fa0295a..372a64692 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -12,108 +12,169 @@ import json import signal import logger +from device import Device # Locate parent directory current_dir = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.dirname(current_dir) LOGGER = logger.get_logger('test_run') -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" +CONFIG_FILE = 'conf/system.json' +EXAMPLE_CONFIG_FILE = 'conf/system.json.example' RUNTIME = 300 -class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. - - Creates an instance of the network orchestrator, test - orchestrator and user interface. - """ - - def __init__(self,local_net=True): - - # Catch any exit signals - self._register_exits() - - # Import the correct net orchestrator - self.import_orchestrators(local_net) - - self._net_orc = net_orc.NetworkOrchestrator() - self._test_orc = test_orc.TestOrchestrator() - - def import_orchestrators(self,local_net=True): - if local_net: - # Add local net_orc to Python path - net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') - else: - # Resolve the path to the test-run parent folder - root_dir = os.path.abspath(os.path.join(parent_dir, os.pardir)) - # Add manually cloned network orchestrator from parent folder - net_orc_dir = os.path.join(root_dir, 'network-orchestrator', 'python', 'src') - # Add net_orc to Python path - sys.path.append(net_orc_dir) - # Import the network orchestrator - global net_orc - import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel - - # Add test_orc to Python path - test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') - sys.path.append(test_orc_dir) - global test_orc - import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - self.stop_network() - - def load_config(self): - """Loads all settings from the config file into memory.""" - if not os.path.isfile(CONFIG_FILE): - LOGGER.error("Configuration file is not present at " + CONFIG_FILE) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - with open(CONFIG_FILE, 'r', encoding='UTF-8') as config_file_open: - config_json = json.load(config_file_open) - self._net_orc.import_config(config_json) - self._test_orc.import_config(config_json) - - def start_network(self): - """Starts the network orchestrator and network services.""" - - # Load and build any unbuilt network containers - self._net_orc.load_network_modules() - self._net_orc.build_network_modules() - - self._net_orc.stop_networking_services(kill=True) - self._net_orc.restore_net() - - # Create baseline network - self._net_orc.create_net() - - # Launch network service containers - self._net_orc.start_network_services() - - LOGGER.info("Network is ready.") - - def run_tests(self): - """Iterate through and start all test modules.""" - - self._test_orc.load_test_modules() - self._test_orc.build_test_modules() - - # Begin testing - self._test_orc.run_test_modules() - - def stop_network(self): - """Commands the net_orc to stop the network and clean up.""" - self._net_orc.stop_networking_services(kill=True) - self._net_orc.restore_net() - sys.exit(0) +DEVICES_DIR = 'local/devices' +DEVICE_CONFIG = 'device_config.json' +DEVICE_MAKE = 'make' +DEVICE_MODEL = 'model' +DEVICE_MAC_ADDR = 'mac_addr' + + +class TestRun: # pylint: disable=too-few-public-methods + """Test Run controller. + + Creates an instance of the network orchestrator, test + orchestrator and user interface. + """ + + def __init__(self, local_net=True): + self._devices = [] + + # Catch any exit signals + self._register_exits() + + # Import the correct net orchestrator + self.import_dependencies(local_net) + + self._net_orc = net_orc.NetworkOrchestrator() + self._test_orc = test_orc.TestOrchestrator() + + def start(self): + + self._load_devices() + + self.start_network() + + # Register callbacks + self._net_orc.listener.register_callback( + self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED]) + + def import_dependencies(self, local_net=True): + """Imports both net and test orchestrators from relevant directories.""" + if local_net: + # Add local net_orc to Python path + net_orc_dir = os.path.join( + parent_dir, 'net_orc', 'python', 'src') + else: + # Resolve the path to the test-run parent folder + root_dir = os.path.abspath(os.path.join(parent_dir, os.pardir)) + # Add manually cloned network orchestrator from parent folder + net_orc_dir = os.path.join( + root_dir, 'network-orchestrator', 'python', 'src') + # Add net_orc to Python path + sys.path.append(net_orc_dir) + # Import the network orchestrator + global net_orc + import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel + + # Add test_orc to Python path + test_orc_dir = os.path.join( + parent_dir, 'test_orc', 'python', 'src') + sys.path.append(test_orc_dir) + global test_orc + import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel + + global NetworkEvent + from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug('Exit signal received: ' + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info('Exit signal received.') + self.stop_network() + + def load_config(self): + """Loads all settings from the config file into memory.""" + if not os.path.isfile(CONFIG_FILE): + LOGGER.error( + 'Configuration file is not present at ' + CONFIG_FILE) + LOGGER.info('An example is present in ' + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + with open(CONFIG_FILE, 'r', encoding='UTF-8') as config_file_open: + config_json = json.load(config_file_open) + self._net_orc.import_config(config_json) + self._test_orc.import_config(config_json) + + def start_network(self): + """Starts the network orchestrator and network services.""" + + # Load and build any unbuilt network containers + self._net_orc.load_network_modules() + self._net_orc.build_network_modules() + + self._net_orc.stop_networking_services(kill=True) + self._net_orc.restore_net() + + # Create baseline network + self._net_orc.create_net() + + # Launch network service containers + self._net_orc.start_network_services() + + LOGGER.info('Network is ready.') + + def run_tests(self): + """Iterate through and start all test modules.""" + self._test_orc.load_test_modules() + self._test_orc.build_test_modules() + + # Begin testing + self._test_orc.run_test_modules() + + def stop_network(self): + """Commands the net_orc to stop the network and clean up.""" + self._net_orc.stop_networking_services(kill=True) + self._net_orc.restore_net() + sys.exit(0) + + def _load_devices(self): + LOGGER.debug('Loading devices from ' + DEVICES_DIR) + + for device_folder in os.listdir(DEVICES_DIR): + with open(os.path.join(DEVICES_DIR, device_folder, DEVICE_CONFIG), + encoding='utf-8') as device_config_file: + device_config_json = json.load(device_config_file) + + device_make = device_config_json.get(DEVICE_MAKE) + device_model = device_config_json.get(DEVICE_MODEL) + mac_addr = device_config_json.get(DEVICE_MAC_ADDR) + + device = Device(device_make, device_model, + mac_addr=mac_addr) + self._devices.append(device) + + LOGGER.info('Loaded ' + str(len(self._devices)) + ' devices') + + def get_device(self, mac_addr): + """Returns a loaded device object from the device mac address.""" + for device in self._devices: + if device.mac_addr == mac_addr: + return device + return None + + def _device_discovered(self, mac_addr): + device = self.get_device(mac_addr) + if device is not None: + LOGGER.info( + f'Discovered {device.make} {device.model} on the network') + else: + LOGGER.info( + f'A new device has been discovered with mac address {device.mac_addr}') diff --git a/local/devices/Teltonika TRB140/device_config.json b/local/devices/Teltonika TRB140/device_config.json new file mode 100644 index 000000000..759c1e9b4 --- /dev/null +++ b/local/devices/Teltonika TRB140/device_config.json @@ -0,0 +1,5 @@ +{ + "make": "Teltonika", + "model": "TRB140", + "mac_addr": "00:1e:42:35:73:c4" +} \ No newline at end of file From 823709e25338f48dfd9dee2004eced63965bac76 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Fri, 28 Apr 2023 05:45:33 -0700 Subject: [PATCH 04/48] Test run sync (#8) * Initial work on test-orchestrator * Ignore runtime folder * Update runtime directory for test modules * Fix logging Add initial framework for running tests * logging and misc cleanup * logging changes * Add a stop hook after all tests complete * Refactor test_orc code * Add arg passing Add option to use locally cloned via install or remote via main project network orchestrator * Fix baseline module Fix orchestrator exiting only after timeout * Add result file to baseline test module Change result format to match closer to design doc * Refactor pylint * Skip test module if it failed to start * Refactor * Check for valid log level * Add config file arg Misc changes to network start procedure * fix merge issues * Update runner and test orch procedure Add useful runtiem args * Restructure test run startup process Misc updates to work with net orch updates * Refactor --------- --- .gitignore | 270 ++++++++++----------- cmd/install | 2 +- cmd/start | 2 +- framework/run.py | 40 ---- framework/test_runner.py | 73 ++++++ framework/testrun.py | 288 +++++++++++------------ test_orc/python/src/module.py | 23 ++ test_orc/python/src/runner.py | 40 ++++ test_orc/python/src/test_orchestrator.py | 42 ++-- 9 files changed, 433 insertions(+), 347 deletions(-) delete mode 100644 framework/run.py create mode 100644 framework/test_runner.py create mode 100644 test_orc/python/src/module.py create mode 100644 test_orc/python/src/runner.py diff --git a/.gitignore b/.gitignore index 4016b6901..f79a6efcb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,135 +1,135 @@ -# Runtime folder -runtime/ -venv/ -net_orc/ -.vscode/ - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ +# Runtime folder +runtime/ +venv/ +net_orc/ +.vscode/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/cmd/install b/cmd/install index 61722e273..6dee1c635 100755 --- a/cmd/install +++ b/cmd/install @@ -2,7 +2,7 @@ GIT_URL=https://github.com/auto-iot NET_ORC_DIR=net_orc -NET_ORC_VERSION="dev" +NET_ORC_VERSION="main" python3 -m venv venv diff --git a/cmd/start b/cmd/start index fa6bbc1e1..113f14b3e 100755 --- a/cmd/start +++ b/cmd/start @@ -18,6 +18,6 @@ rm -rf runtime source venv/bin/activate # TODO: Execute python code -python -u framework/run.py $@ +python -u framework/test_runner.py $@ deactivate \ No newline at end of file diff --git a/framework/run.py b/framework/run.py deleted file mode 100644 index fc6c197e3..000000000 --- a/framework/run.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Starts Test Run.""" - -import argparse -import sys -from testrun import TestRun -import logger - -LOGGER = logger.get_logger('runner') - -class TestRunner: - - def __init__(self, local_net=True): - - LOGGER.info('Starting Test Run') - - testrun = TestRun(local_net) - - testrun.load_config() - - testrun.start() - - testrun.run_tests() - - testrun.stop_network() - - -def run(argv): - parser = argparse.ArgumentParser(description="Test Run", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-r", "--remote-net", action="store_false", - help='''Use the network orchestrator from the parent directory instead - of the one downloaded locally from the install script.''') - - args, unknown = parser.parse_known_args() - - TestRunner(args.remote_net) - - -if __name__ == "__main__": - run(sys.argv) diff --git a/framework/test_runner.py b/framework/test_runner.py new file mode 100644 index 000000000..91ff4cb1a --- /dev/null +++ b/framework/test_runner.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 + +"""Wrapper for the TestRun that simplifies +virtual testing procedure by allowing direct calling +from the command line. + +Run using the provided command scripts in the cmd folder. +E.g sudo cmd/start +""" + +import argparse +import sys +from testrun import TestRun +import logger +import signal + +LOGGER = logger.get_logger('runner') + + +class TestRunner: + + def __init__(self, local_net=True, config_file=None, validate=True, net_only=False): + self._register_exits() + self.test_run = TestRun(local_net=local_net, config_file=config_file, + validate=validate, net_only=net_only) + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + # Kill all container services quickly + # If we're here, we want everything to stop immediately + # and don't care about a gracefully shutdown + self._stop(True) + sys.exit(1) + + def stop(self, kill=False): + self.test_run.stop(kill) + + def start(self): + self.test_run.start() + LOGGER.info("Test Run has finished") + + +def parse_args(argv): + parser = argparse.ArgumentParser(description="Test Run", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-r", "--remote-net", action="store_false", + help='''Use the network orchestrator from the parent directory instead + of the one downloaded locally from the install script.''') + parser.add_argument("-f", "--config-file", default=None, + help="Define the configuration file for Test Run and Network Orchestrator") + parser.add_argument("--no-validate", action="store_true", + help="Turn off the validation of the network after network boot") + parser.add_argument("-net", "--net-only", action="store_true", + help="Run the network only, do not run tests") + args, unknown = parser.parse_known_args() + return args + + +if __name__ == "__main__": + args = parse_args(sys.argv) + runner = TestRunner(local_net=args.remote_net, + config_file=args.config_file, + validate=not args.no_validate, + net_only=args.net_only) + runner.start() diff --git a/framework/testrun.py b/framework/testrun.py index 372a64692..4a29b4e20 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -19,8 +19,8 @@ parent_dir = os.path.dirname(current_dir) LOGGER = logger.get_logger('test_run') -CONFIG_FILE = 'conf/system.json' -EXAMPLE_CONFIG_FILE = 'conf/system.json.example' +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" RUNTIME = 300 DEVICES_DIR = 'local/devices' @@ -31,150 +31,142 @@ class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. - - Creates an instance of the network orchestrator, test - orchestrator and user interface. - """ - - def __init__(self, local_net=True): - self._devices = [] - - # Catch any exit signals - self._register_exits() - - # Import the correct net orchestrator - self.import_dependencies(local_net) - - self._net_orc = net_orc.NetworkOrchestrator() - self._test_orc = test_orc.TestOrchestrator() - - def start(self): - - self._load_devices() - - self.start_network() - - # Register callbacks - self._net_orc.listener.register_callback( - self._device_discovered, - [NetworkEvent.DEVICE_DISCOVERED]) - - def import_dependencies(self, local_net=True): - """Imports both net and test orchestrators from relevant directories.""" - if local_net: - # Add local net_orc to Python path - net_orc_dir = os.path.join( - parent_dir, 'net_orc', 'python', 'src') - else: - # Resolve the path to the test-run parent folder - root_dir = os.path.abspath(os.path.join(parent_dir, os.pardir)) - # Add manually cloned network orchestrator from parent folder - net_orc_dir = os.path.join( - root_dir, 'network-orchestrator', 'python', 'src') - # Add net_orc to Python path - sys.path.append(net_orc_dir) - # Import the network orchestrator - global net_orc - import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel - - # Add test_orc to Python path - test_orc_dir = os.path.join( - parent_dir, 'test_orc', 'python', 'src') - sys.path.append(test_orc_dir) - global test_orc - import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel - - global NetworkEvent - from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug('Exit signal received: ' + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info('Exit signal received.') - self.stop_network() - - def load_config(self): - """Loads all settings from the config file into memory.""" - if not os.path.isfile(CONFIG_FILE): - LOGGER.error( - 'Configuration file is not present at ' + CONFIG_FILE) - LOGGER.info('An example is present in ' + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - with open(CONFIG_FILE, 'r', encoding='UTF-8') as config_file_open: - config_json = json.load(config_file_open) - self._net_orc.import_config(config_json) - self._test_orc.import_config(config_json) - - def start_network(self): - """Starts the network orchestrator and network services.""" - - # Load and build any unbuilt network containers - self._net_orc.load_network_modules() - self._net_orc.build_network_modules() - - self._net_orc.stop_networking_services(kill=True) - self._net_orc.restore_net() - - # Create baseline network - self._net_orc.create_net() - - # Launch network service containers - self._net_orc.start_network_services() - - LOGGER.info('Network is ready.') - - def run_tests(self): - """Iterate through and start all test modules.""" - self._test_orc.load_test_modules() - self._test_orc.build_test_modules() - - # Begin testing - self._test_orc.run_test_modules() - - def stop_network(self): - """Commands the net_orc to stop the network and clean up.""" - self._net_orc.stop_networking_services(kill=True) - self._net_orc.restore_net() - sys.exit(0) - - def _load_devices(self): - LOGGER.debug('Loading devices from ' + DEVICES_DIR) - - for device_folder in os.listdir(DEVICES_DIR): - with open(os.path.join(DEVICES_DIR, device_folder, DEVICE_CONFIG), - encoding='utf-8') as device_config_file: - device_config_json = json.load(device_config_file) - - device_make = device_config_json.get(DEVICE_MAKE) - device_model = device_config_json.get(DEVICE_MODEL) - mac_addr = device_config_json.get(DEVICE_MAC_ADDR) - - device = Device(device_make, device_model, - mac_addr=mac_addr) - self._devices.append(device) - - LOGGER.info('Loaded ' + str(len(self._devices)) + ' devices') - - def get_device(self, mac_addr): - """Returns a loaded device object from the device mac address.""" - for device in self._devices: - if device.mac_addr == mac_addr: + """Test Run controller. + + Creates an instance of the network orchestrator, test + orchestrator and user interface. + """ + + def __init__(self, local_net=True, config_file=CONFIG_FILE,validate=True, net_only=False): + self._devices = [] + self._net_only = net_only + + # Catch any exit signals + self._register_exits() + + # Import the correct net orchestrator + self.import_dependencies(local_net) + + # Expand the config file to absolute pathing + config_file_abs=self._get_config_abs(config_file=config_file) + + self._net_orc = net_orc.NetworkOrchestrator(config_file=config_file_abs,validate=validate,async_monitor=not self._net_only) + self._test_orc = test_orc.TestOrchestrator() + + def start(self): + + self._load_devices() + + if self._net_only: + LOGGER.info("Network only option configured, no tests will be run") + self._start_network() + else: + self._start_network() + self._start_tests() + + self.stop() + + # Register callbacks + # Disable for now as this is causing boot failures when no devices are discovered + # self._net_orc.listener.register_callback( + # self._device_discovered, + # [NetworkEvent.DEVICE_DISCOVERED]) + + def stop(self,kill=False): + self._stop_tests() + self._stop_network(kill=kill) + + def import_dependencies(self, local_net=True): + if local_net: + # Add local net_orc to Python path + net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') + else: + # Resolve the path to the test-run parent folder + root_dir = os.path.abspath(os.path.join(parent_dir, os.pardir)) + # Add manually cloned network orchestrator from parent folder + net_orc_dir = os.path.join( + root_dir, 'network-orchestrator', 'python', 'src') + # Add net_orc to Python path + sys.path.append(net_orc_dir) + # Import the network orchestrator + global net_orc + import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel + + # Add test_orc to Python path + test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') + sys.path.append(test_orc_dir) + global test_orc + import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel + + global NetworkEvent + from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + self.stop(kill=True) + sys.exit(1) + + def _get_config_abs(self,config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + config_file = os.path.join(parent_dir, CONFIG_FILE) + + # Expand the config file to absolute pathing + return os.path.abspath(config_file) + + def _start_network(self): + self._net_orc.start() + + def _start_tests(self): + """Iterate through and start all test modules.""" + + self._test_orc.start() + + def _stop_network(self,kill=False): + self._net_orc.stop(kill=kill) + + def _stop_tests(self): + self._test_orc.stop() + + def _load_devices(self): + LOGGER.debug('Loading devices from ' + DEVICES_DIR) + + for device_folder in os.listdir(DEVICES_DIR): + with open(os.path.join(DEVICES_DIR, device_folder, DEVICE_CONFIG), + encoding='utf-8') as device_config_file: + device_config_json = json.load(device_config_file) + + device_make = device_config_json.get(DEVICE_MAKE) + device_model = device_config_json.get(DEVICE_MODEL) + mac_addr = device_config_json.get(DEVICE_MAC_ADDR) + + device = Device(device_make, device_model, + mac_addr=mac_addr) + self._devices.append(device) + + LOGGER.info('Loaded ' + str(len(self._devices)) + ' devices') + + def get_device(self, mac_addr): + """Returns a loaded device object from the device mac address.""" + for device in self._devices: + if device.mac_addr == mac_addr: + return device + return None + + def _device_discovered(self, mac_addr): + device = self.get_device(mac_addr) + if device is not None: + LOGGER.info( + f'Discovered {device.make} {device.model} on the network') + else: + LOGGER.info( + f'A new device has been discovered with mac address {device.mac_addr}') return device - return None - - def _device_discovered(self, mac_addr): - device = self.get_device(mac_addr) - if device is not None: - LOGGER.info( - f'Discovered {device.make} {device.model} on the network') - else: - LOGGER.info( - f'A new device has been discovered with mac address {device.mac_addr}') diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py new file mode 100644 index 000000000..6d24d7e1e --- /dev/null +++ b/test_orc/python/src/module.py @@ -0,0 +1,23 @@ +"""Represemts a test module.""" +from dataclasses import dataclass +from docker.client.Container import Container + +@dataclass +class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represents a test module.""" + + name: str = None + display_name: str = None + description: str = None + + build_file: str = None + container: Container = None + container_name: str = None + image_name :str = None + enable_container: bool = True + + timeout: int = 60 + + # Absolute path + dir: str = None + dir_name: str = None diff --git a/test_orc/python/src/runner.py b/test_orc/python/src/runner.py new file mode 100644 index 000000000..cc495bf8d --- /dev/null +++ b/test_orc/python/src/runner.py @@ -0,0 +1,40 @@ +"""Provides high level management of the test orchestrator.""" +import time +import logger + +LOGGER = logger.get_logger('runner') + +class Runner: + """Holds the state of the testing for one device.""" + + def __init__(self, test_orc, device): + self._test_orc = test_orc + self._device = device + + def run(self): + self._run_test_modules() + + def _run_test_modules(self): + """Iterates through each test module and starts the container.""" + LOGGER.info('Running test modules...') + for module in self._test_modules: + self.run_test_module(module) + LOGGER.info('All tests complete') + + def run_test_module(self, module): + """Start the test container and extract the results.""" + + if module is None or not module.enable_container: + return + + self._test_orc.start_test_module(module) + + # Determine the module timeout time + test_module_timeout = time.time() + module.timeout + status = self._test_orc.get_module_status(module) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._test_orc.get_module_status(module) + + LOGGER.info(f'Test module {module.display_name} has finished') diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 396f533fa..77f73f407 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -29,32 +29,29 @@ def __init__(self): shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), ignore_errors=True) os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) - def import_config(self, json_config): - """Load settings from JSON object into memory.""" + def start(self): + LOGGER.info("Starting Test Orchestrator") + self._load_test_modules() + self._run_test_modules() - # No relevant config options in system.json as of yet + def stop(self): + """Stop any running tests""" + self._stop_modules() - def get_test_module(self, name): - """Returns a test module by the module name.""" - for module in self._test_modules: - if name == module.name: - return module - return None - - def run_test_modules(self): + def _run_test_modules(self): """Iterates through each test module and starts the container.""" LOGGER.info("Running test modules...") for module in self._test_modules: - self.run_test_module(module) + self._run_test_module(module) LOGGER.info("All tests complete") - def run_test_module(self, module): + def _run_test_module(self, module): """Start the test container and extract the results.""" if module is None or not module.enable_container: return - LOGGER.info("Running test module " + module.display_name) + LOGGER.info("Running test module " + module.name) try: container_runtime_dir = os.path.join(self._root_path, "runtime/test/" + module.name) @@ -78,7 +75,7 @@ def run_test_module(self, module): environment={"HOST_USER": os.getlogin()} ) except (docker.errors.APIError, docker.errors.ContainerError) as container_error: - LOGGER.error("Test module " + module.display_name + " has failed to start") + LOGGER.error("Test module " + module.name + " has failed to start") LOGGER.debug(container_error) return @@ -90,7 +87,7 @@ def run_test_module(self, module): time.sleep(1) status = self._get_module_status(module) - LOGGER.info("Test module " + module.display_name + " has finished") + LOGGER.info("Test module " + module.name + " has finished") def _get_module_status(self,module): container = self._get_module_container(module) @@ -111,7 +108,7 @@ def _get_module_container(self, module): LOGGER.error(error) return container - def load_test_modules(self): + def _load_test_modules(self): """Import module configuration from module_config.json.""" modules_dir = os.path.join(self._path, TEST_MODULES_DIR) @@ -151,7 +148,8 @@ def load_test_modules(self): self._test_modules.append(module) - loaded_modules += module.dir_name + " " + if module.enable_container: + loaded_modules += module.dir_name + " " LOGGER.info(loaded_modules) @@ -175,12 +173,13 @@ def _build_test_module(self, module): LOGGER.error(error) def _stop_modules(self, kill=False): - LOGGER.debug("Stopping test modules") + LOGGER.info("Stopping test modules") for module in self._test_modules: # Test modules may just be Docker images, so we do not want to stop them if not module.enable_container: continue self._stop_module(module, kill) + LOGGER.info("All test modules have been stopped") def _stop_module(self, module, kill=False): LOGGER.debug("Stopping test module " + module.container_name) @@ -196,9 +195,8 @@ def _stop_module(self, module, kill=False): module.container_name) container.stop() LOGGER.debug("Container stopped:" + module.container_name) - except Exception as error: - LOGGER.error("Container stop error") - LOGGER.error(error) + except docker.errors.NotFound: + pass class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes """Represents a test module.""" From ba6afc416717e883edb297572d97b03fd28ee171 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Fri, 28 Apr 2023 14:47:14 +0100 Subject: [PATCH 05/48] Quick refactor (#9) --- framework/testrun.py | 30 +++++++++++-------- .../modules/baseline/bin/start_test_module | 4 ++- .../baseline/python/src/test_module.py | 2 -- test_orc/python/src/module.py | 2 +- test_orc/python/src/test_orchestrator.py | 25 ++-------------- 5 files changed, 24 insertions(+), 39 deletions(-) diff --git a/framework/testrun.py b/framework/testrun.py index 4a29b4e20..df6006411 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -11,6 +11,7 @@ import sys import json import signal +import time import logger from device import Device @@ -57,21 +58,22 @@ def start(self): self._load_devices() + self._start_network() + if self._net_only: LOGGER.info("Network only option configured, no tests will be run") - self._start_network() + time.sleep(RUNTIME) else: - self._start_network() - self._start_tests() + self._net_orc.listener.register_callback( + self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED]) + + LOGGER.info("Waiting for devices on the network...") + # Check timeout and whether testing is currently in progress before stopping + time.sleep(RUNTIME) self.stop() - # Register callbacks - # Disable for now as this is causing boot failures when no devices are discovered - # self._net_orc.listener.register_callback( - # self._device_discovered, - # [NetworkEvent.DEVICE_DISCOVERED]) - def stop(self,kill=False): self._stop_tests() self._stop_network(kill=kill) @@ -125,9 +127,8 @@ def _get_config_abs(self,config_file=None): def _start_network(self): self._net_orc.start() - def _start_tests(self): + def _run_tests(self): """Iterate through and start all test modules.""" - self._test_orc.start() def _stop_network(self,kill=False): @@ -167,6 +168,9 @@ def _device_discovered(self, mac_addr): LOGGER.info( f'Discovered {device.make} {device.model} on the network') else: + device = Device(make=None, model=None, mac_addr=mac_addr) LOGGER.info( - f'A new device has been discovered with mac address {device.mac_addr}') - return device + f'A new device has been discovered with mac address {mac_addr}') + + # TODO: Pass device information to test orchestrator/runner + self._run_tests() diff --git a/test_orc/modules/baseline/bin/start_test_module b/test_orc/modules/baseline/bin/start_test_module index 292b57de2..2938eb0f8 100644 --- a/test_orc/modules/baseline/bin/start_test_module +++ b/test_orc/modules/baseline/bin/start_test_module @@ -37,4 +37,6 @@ chown $HOST_USER:$HOST_USER $RESULT_FILE # Run the python scrip that will execute the tests for this module # -u flag allows python print statements # to be logged by docker by running unbuffered -python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" \ No newline at end of file +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/baseline/python/src/test_module.py b/test_orc/modules/baseline/python/src/test_module.py index 440b87f7f..d4065cde3 100644 --- a/test_orc/modules/baseline/python/src/test_module.py +++ b/test_orc/modules/baseline/python/src/test_module.py @@ -32,8 +32,6 @@ def run_tests(self): self.module_test2 = False LOGGER.info("Test 2 complete.") - time.sleep(10) - def generate_results(self): results = [] results.append(self.generate_result("Test 1", self.module_test1)) diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py index 6d24d7e1e..8121c34db 100644 --- a/test_orc/python/src/module.py +++ b/test_orc/python/src/module.py @@ -1,6 +1,6 @@ """Represemts a test module.""" from dataclasses import dataclass -from docker.client.Container import Container +from docker.models.containers import Container @dataclass class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 77f73f407..f68a13579 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -6,9 +6,10 @@ import docker from docker.types import Mount import logger +from module import TestModule LOG_NAME = "test_orc" -LOGGER = logger.get_logger('test_orc') +LOGGER = logger.get_logger("test_orc") RUNTIME_DIR = "runtime" TEST_MODULES_DIR = "modules" MODULE_CONFIG = "conf/module_config.json" @@ -196,24 +197,4 @@ def _stop_module(self, module, kill=False): container.stop() LOGGER.debug("Container stopped:" + module.container_name) except docker.errors.NotFound: - pass - -class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes - """Represents a test module.""" - - def __init__(self): - self.name = None - self.display_name = None - self.description = None - - self.build_file = None - self.container = None - self.container_name = None - self.image_name = None - self.enable_container = True - - self.timeout = 60 - - # Absolute path - self.dir = None - self.dir_name = None + pass \ No newline at end of file From c87a976eeceb804aa9f0bd43a878210700b13bc0 Mon Sep 17 00:00:00 2001 From: jhughesbiot Date: Fri, 28 Apr 2023 10:56:50 -0600 Subject: [PATCH 06/48] Fix duplicate sleep calls --- framework/testrun.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/framework/testrun.py b/framework/testrun.py index df6006411..42534265a 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -58,20 +58,20 @@ def start(self): self._load_devices() - self._start_network() - if self._net_only: LOGGER.info("Network only option configured, no tests will be run") - time.sleep(RUNTIME) + self._start_network() else: + self._start_network() self._net_orc.listener.register_callback( self._device_discovered, [NetworkEvent.DEVICE_DISCOVERED]) LOGGER.info("Waiting for devices on the network...") - - # Check timeout and whether testing is currently in progress before stopping - time.sleep(RUNTIME) + + # Check timeout and whether testing is currently in progress before stopping + time.sleep(RUNTIME) + self.stop() def stop(self,kill=False): From 34ce2112fc7283d19e68037ee2075ad56d3993f9 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Tue, 2 May 2023 01:56:38 -0700 Subject: [PATCH 07/48] Add net orc (#11) * Add network orchestrator repository * cleanup duplicate start and install scripts * Temporary fix for python dependencies * Remove duplicate python requirements * remove duplicate conf files * remove remote-net option * cleanp unecessary files --- .gitignore | 1 - cmd/install | 14 +- cmd/start | 21 + etc/requirements.txt | 2 - framework/.gitignore | 1 - framework/test_runner.py | 10 +- framework/testrun.py | 16 +- net_orc/LICENSE | 201 ++++++ net_orc/README.md | 66 ++ net_orc/docker-compose.yml | 64 ++ .../devices/faux-dev/bin/get_default_gateway | 3 + .../devices/faux-dev/bin/start_dhcp_client | 16 + .../faux-dev/bin/start_network_service | 39 ++ .../devices/faux-dev/conf/module_config.json | 11 + .../devices/faux-dev/faux-dev.Dockerfile | 20 + .../devices/faux-dev/python/src/dhcp_check.py | 85 +++ .../devices/faux-dev/python/src/dns_check.py | 109 ++++ .../faux-dev/python/src/gateway_check.py | 40 ++ .../devices/faux-dev/python/src/logger.py | 43 ++ .../devices/faux-dev/python/src/ntp_check.py | 79 +++ .../devices/faux-dev/python/src/run.py | 114 ++++ .../devices/faux-dev/python/src/util.py | 28 + net_orc/network/modules/base/base.Dockerfile | 23 + net_orc/network/modules/base/bin/capture | 30 + .../network/modules/base/bin/setup_binaries | 10 + net_orc/network/modules/base/bin/start_grpc | 17 + net_orc/network/modules/base/bin/start_module | 79 +++ .../modules/base/bin/start_network_service | 10 + .../modules/base/bin/wait_for_interface | 21 + .../modules/base/conf/module_config.json | 12 + .../modules/base/python/requirements.txt | 2 + .../base/python/src/grpc/start_server.py | 34 + .../network/modules/base/python/src/logger.py | 47 ++ .../modules/dhcp-1/bin/start_network_service | 77 +++ .../network/modules/dhcp-1/conf/dhcpd.conf | 26 + .../modules/dhcp-1/conf/module_config.json | 25 + .../network/modules/dhcp-1/conf/radvd.conf | 12 + .../network/modules/dhcp-1/dhcp-1.Dockerfile | 14 + .../dhcp-1/python/src/grpc/__init__.py | 0 .../dhcp-1/python/src/grpc/dhcp_config.py | 267 ++++++++ .../dhcp-1/python/src/grpc/network_service.py | 44 ++ .../dhcp-1/python/src/grpc/proto/grpc.proto | 36 ++ .../network/modules/dhcp-1/python/src/run.py | 40 ++ .../modules/dhcp-2/bin/start_network_service | 77 +++ .../network/modules/dhcp-2/conf/dhcpd.conf | 24 + .../modules/dhcp-2/conf/module_config.json | 25 + .../network/modules/dhcp-2/conf/radvd.conf | 12 + .../network/modules/dhcp-2/dhcp-2.Dockerfile | 14 + .../dhcp-2/python/src/grpc/__init__.py | 0 .../dhcp-2/python/src/grpc/dhcp_config.py | 267 ++++++++ .../dhcp-2/python/src/grpc/network_service.py | 44 ++ .../dhcp-2/python/src/grpc/proto/grpc.proto | 36 ++ .../network/modules/dhcp-2/python/src/run.py | 40 ++ .../modules/dns/bin/start_network_service | 48 ++ net_orc/network/modules/dns/conf/dnsmasq.conf | 5 + .../modules/dns/conf/module_config.json | 22 + net_orc/network/modules/dns/dns.Dockerfile | 14 + .../modules/gateway/bin/start_network_service | 30 + .../modules/gateway/conf/module_config.json | 22 + .../modules/gateway/gateway.Dockerfile | 11 + .../modules/ntp/bin/start_network_service | 13 + .../modules/ntp/conf/module_config.json | 22 + net_orc/network/modules/ntp/ntp-server.py | 315 +++++++++ net_orc/network/modules/ntp/ntp.Dockerfile | 13 + .../modules/ntp/python/src/ntp_server.py | 315 +++++++++ .../modules/ovs/bin/start_network_service | 22 + .../modules/ovs/conf/module_config.json | 23 + net_orc/network/modules/ovs/ovs.Dockerfile | 20 + .../modules/ovs/python/requirements.txt | 0 .../network/modules/ovs/python/src/logger.py | 17 + .../modules/ovs/python/src/ovs_control.py | 107 ++++ net_orc/network/modules/ovs/python/src/run.py | 53 ++ .../network/modules/ovs/python/src/util.py | 19 + .../modules/radius/bin/start_network_service | 20 + net_orc/network/modules/radius/conf/ca.crt | 26 + net_orc/network/modules/radius/conf/eap | 602 ++++++++++++++++++ .../modules/radius/conf/module_config.json | 22 + .../modules/radius/python/requirements.txt | 3 + .../radius/python/src/authenticator.py | 31 + .../network/modules/radius/radius.Dockerfile | 26 + .../template/bin/start_network_service | 13 + .../modules/template/conf/module_config.json | 26 + .../template/python/src/template_main.py | 4 + .../modules/template/template.Dockerfile | 11 + net_orc/orchestrator.Dockerfile | 22 + net_orc/python/requirements.txt | 4 + net_orc/python/src/listener.py | 68 ++ net_orc/python/src/logger.py | 27 + net_orc/python/src/network_event.py | 10 + net_orc/python/src/network_orchestrator.py | 573 +++++++++++++++++ net_orc/python/src/network_runner.py | 68 ++ net_orc/python/src/network_validator.py | 274 ++++++++ net_orc/python/src/run_validator.py | 52 ++ net_orc/python/src/util.py | 30 + 94 files changed, 5318 insertions(+), 33 deletions(-) delete mode 100644 etc/requirements.txt delete mode 100644 framework/.gitignore create mode 100644 net_orc/LICENSE create mode 100644 net_orc/README.md create mode 100644 net_orc/docker-compose.yml create mode 100644 net_orc/network/devices/faux-dev/bin/get_default_gateway create mode 100644 net_orc/network/devices/faux-dev/bin/start_dhcp_client create mode 100644 net_orc/network/devices/faux-dev/bin/start_network_service create mode 100644 net_orc/network/devices/faux-dev/conf/module_config.json create mode 100644 net_orc/network/devices/faux-dev/faux-dev.Dockerfile create mode 100644 net_orc/network/devices/faux-dev/python/src/dhcp_check.py create mode 100644 net_orc/network/devices/faux-dev/python/src/dns_check.py create mode 100644 net_orc/network/devices/faux-dev/python/src/gateway_check.py create mode 100644 net_orc/network/devices/faux-dev/python/src/logger.py create mode 100644 net_orc/network/devices/faux-dev/python/src/ntp_check.py create mode 100644 net_orc/network/devices/faux-dev/python/src/run.py create mode 100644 net_orc/network/devices/faux-dev/python/src/util.py create mode 100644 net_orc/network/modules/base/base.Dockerfile create mode 100644 net_orc/network/modules/base/bin/capture create mode 100644 net_orc/network/modules/base/bin/setup_binaries create mode 100644 net_orc/network/modules/base/bin/start_grpc create mode 100644 net_orc/network/modules/base/bin/start_module create mode 100644 net_orc/network/modules/base/bin/start_network_service create mode 100644 net_orc/network/modules/base/bin/wait_for_interface create mode 100644 net_orc/network/modules/base/conf/module_config.json create mode 100644 net_orc/network/modules/base/python/requirements.txt create mode 100644 net_orc/network/modules/base/python/src/grpc/start_server.py create mode 100644 net_orc/network/modules/base/python/src/logger.py create mode 100644 net_orc/network/modules/dhcp-1/bin/start_network_service create mode 100644 net_orc/network/modules/dhcp-1/conf/dhcpd.conf create mode 100644 net_orc/network/modules/dhcp-1/conf/module_config.json create mode 100644 net_orc/network/modules/dhcp-1/conf/radvd.conf create mode 100644 net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile create mode 100644 net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py create mode 100644 net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py create mode 100644 net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py create mode 100644 net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto create mode 100644 net_orc/network/modules/dhcp-1/python/src/run.py create mode 100644 net_orc/network/modules/dhcp-2/bin/start_network_service create mode 100644 net_orc/network/modules/dhcp-2/conf/dhcpd.conf create mode 100644 net_orc/network/modules/dhcp-2/conf/module_config.json create mode 100644 net_orc/network/modules/dhcp-2/conf/radvd.conf create mode 100644 net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile create mode 100644 net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py create mode 100644 net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py create mode 100644 net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py create mode 100644 net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto create mode 100644 net_orc/network/modules/dhcp-2/python/src/run.py create mode 100644 net_orc/network/modules/dns/bin/start_network_service create mode 100644 net_orc/network/modules/dns/conf/dnsmasq.conf create mode 100644 net_orc/network/modules/dns/conf/module_config.json create mode 100644 net_orc/network/modules/dns/dns.Dockerfile create mode 100644 net_orc/network/modules/gateway/bin/start_network_service create mode 100644 net_orc/network/modules/gateway/conf/module_config.json create mode 100644 net_orc/network/modules/gateway/gateway.Dockerfile create mode 100644 net_orc/network/modules/ntp/bin/start_network_service create mode 100644 net_orc/network/modules/ntp/conf/module_config.json create mode 100644 net_orc/network/modules/ntp/ntp-server.py create mode 100644 net_orc/network/modules/ntp/ntp.Dockerfile create mode 100644 net_orc/network/modules/ntp/python/src/ntp_server.py create mode 100644 net_orc/network/modules/ovs/bin/start_network_service create mode 100644 net_orc/network/modules/ovs/conf/module_config.json create mode 100644 net_orc/network/modules/ovs/ovs.Dockerfile create mode 100644 net_orc/network/modules/ovs/python/requirements.txt create mode 100644 net_orc/network/modules/ovs/python/src/logger.py create mode 100644 net_orc/network/modules/ovs/python/src/ovs_control.py create mode 100644 net_orc/network/modules/ovs/python/src/run.py create mode 100644 net_orc/network/modules/ovs/python/src/util.py create mode 100644 net_orc/network/modules/radius/bin/start_network_service create mode 100644 net_orc/network/modules/radius/conf/ca.crt create mode 100644 net_orc/network/modules/radius/conf/eap create mode 100644 net_orc/network/modules/radius/conf/module_config.json create mode 100644 net_orc/network/modules/radius/python/requirements.txt create mode 100644 net_orc/network/modules/radius/python/src/authenticator.py create mode 100644 net_orc/network/modules/radius/radius.Dockerfile create mode 100644 net_orc/network/modules/template/bin/start_network_service create mode 100644 net_orc/network/modules/template/conf/module_config.json create mode 100644 net_orc/network/modules/template/python/src/template_main.py create mode 100644 net_orc/network/modules/template/template.Dockerfile create mode 100644 net_orc/orchestrator.Dockerfile create mode 100644 net_orc/python/requirements.txt create mode 100644 net_orc/python/src/listener.py create mode 100644 net_orc/python/src/logger.py create mode 100644 net_orc/python/src/network_event.py create mode 100644 net_orc/python/src/network_orchestrator.py create mode 100644 net_orc/python/src/network_runner.py create mode 100644 net_orc/python/src/network_validator.py create mode 100644 net_orc/python/src/run_validator.py create mode 100644 net_orc/python/src/util.py diff --git a/.gitignore b/.gitignore index f79a6efcb..15aae1278 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ # Runtime folder runtime/ venv/ -net_orc/ .vscode/ # Byte-compiled / optimized / DLL files diff --git a/cmd/install b/cmd/install index 6dee1c635..539234006 100755 --- a/cmd/install +++ b/cmd/install @@ -1,19 +1,13 @@ #!/bin/bash -e -GIT_URL=https://github.com/auto-iot -NET_ORC_DIR=net_orc -NET_ORC_VERSION="main" - python3 -m venv venv source venv/bin/activate -pip3 install -r etc/requirements.txt +pip3 install --upgrade requests -rm -rf $NET_ORC_DIR -git clone -b $NET_ORC_VERSION $GIT_URL/network-orchestrator $NET_ORC_DIR -chown -R $USER $NET_ORC_DIR +pip3 install -r net_orc/python/requirements.txt -pip3 install -r $NET_ORC_DIR/python/requirements.txt +pip3 install -r test_orc/python/requirements.txt -deactivate \ No newline at end of file +deactivate diff --git a/cmd/start b/cmd/start index 113f14b3e..d146f413d 100755 --- a/cmd/start +++ b/cmd/start @@ -20,4 +20,25 @@ source venv/bin/activate # TODO: Execute python code python -u framework/test_runner.py $@ +# TODO: Work in progress code for containerization of OVS module +# asyncRun() { +# "$@" & +# pid="$!" +# echo "PID Running: " $pid +# trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM + +# sleep 10 + +# # A signal emitted while waiting will make the wait command return code > 128 +# # Let's wrap it in a loop that doesn't end before the process is indeed stopped +# while kill -0 $pid > /dev/null 2>&1; do +# #while $(kill -0 $pid 2>/dev/null); do +# wait +# done +# } + +# # -u flag allows python print statements +# # to be logged by docker by running unbuffered +# asyncRun python3 -u python/src/run.py $@ + deactivate \ No newline at end of file diff --git a/etc/requirements.txt b/etc/requirements.txt deleted file mode 100644 index 979b408bd..000000000 --- a/etc/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -netifaces -scapy \ No newline at end of file diff --git a/framework/.gitignore b/framework/.gitignore deleted file mode 100644 index ba0430d26..000000000 --- a/framework/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__/ \ No newline at end of file diff --git a/framework/test_runner.py b/framework/test_runner.py index 91ff4cb1a..14cadf3e1 100644 --- a/framework/test_runner.py +++ b/framework/test_runner.py @@ -19,9 +19,9 @@ class TestRunner: - def __init__(self, local_net=True, config_file=None, validate=True, net_only=False): + def __init__(self, config_file=None, validate=True, net_only=False): self._register_exits() - self.test_run = TestRun(local_net=local_net, config_file=config_file, + self.test_run = TestRun(config_file=config_file, validate=validate, net_only=net_only) def _register_exits(self): @@ -51,9 +51,6 @@ def start(self): def parse_args(argv): parser = argparse.ArgumentParser(description="Test Run", formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-r", "--remote-net", action="store_false", - help='''Use the network orchestrator from the parent directory instead - of the one downloaded locally from the install script.''') parser.add_argument("-f", "--config-file", default=None, help="Define the configuration file for Test Run and Network Orchestrator") parser.add_argument("--no-validate", action="store_true", @@ -66,8 +63,7 @@ def parse_args(argv): if __name__ == "__main__": args = parse_args(sys.argv) - runner = TestRunner(local_net=args.remote_net, - config_file=args.config_file, + runner = TestRunner(config_file=args.config_file, validate=not args.no_validate, net_only=args.net_only) runner.start() diff --git a/framework/testrun.py b/framework/testrun.py index 42534265a..0561163ac 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -38,7 +38,7 @@ class TestRun: # pylint: disable=too-few-public-methods orchestrator and user interface. """ - def __init__(self, local_net=True, config_file=CONFIG_FILE,validate=True, net_only=False): + def __init__(self, config_file=CONFIG_FILE,validate=True, net_only=False): self._devices = [] self._net_only = net_only @@ -46,7 +46,7 @@ def __init__(self, local_net=True, config_file=CONFIG_FILE,validate=True, net_on self._register_exits() # Import the correct net orchestrator - self.import_dependencies(local_net) + self.import_dependencies() # Expand the config file to absolute pathing config_file_abs=self._get_config_abs(config_file=config_file) @@ -78,17 +78,9 @@ def stop(self,kill=False): self._stop_tests() self._stop_network(kill=kill) - def import_dependencies(self, local_net=True): - if local_net: - # Add local net_orc to Python path - net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') - else: - # Resolve the path to the test-run parent folder - root_dir = os.path.abspath(os.path.join(parent_dir, os.pardir)) - # Add manually cloned network orchestrator from parent folder - net_orc_dir = os.path.join( - root_dir, 'network-orchestrator', 'python', 'src') + def import_dependencies(self): # Add net_orc to Python path + net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') sys.path.append(net_orc_dir) # Import the network orchestrator global net_orc diff --git a/net_orc/LICENSE b/net_orc/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/net_orc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/net_orc/README.md b/net_orc/README.md new file mode 100644 index 000000000..9cb1eec1a --- /dev/null +++ b/net_orc/README.md @@ -0,0 +1,66 @@ +Testrun logo + +## Introduction :wave: +The network orchestrator is a tool to automate the management of a test lab network and provide essential services to begin device testing in just a few minutes. + +## Motivation :bulb: +Test labs may be maintaining a large and complex network using equipment such as: A managed layer 3 switch, an enterprise-grade network router, virtualized or physical servers to provide DNS, NTP, 802.1x etc. With this amount of moving parts, all with dynamic configuration files and constant software updates, more time is likely to be spent on preparation and clean up of functinality or penetration testing - not forgetting the number of software tools required to perform the testing. + +## How it works :triangular_ruler: +The network orchestrator creates an isolated and controlled network environment to fully simulate enterprise network deployments in your device testing lab. +This removes the necessity for complex hardware, advanced knowledge and networking experience whilst enabling semi-technical engineers to validate device +behaviour against industry cyber standards. + +The network orchestrator will provide the network and some tools to assist an engineer performing the additional testing. At the same time, packet captures of the device behaviour will be recorded, alongside logs for each network service, for further debugging. + +## Minimum Requirements :computer: +### Hardware + - PC running Ubuntu LTS (laptop or desktop) + - 2x USB ethernet adapter (One may be built in ethernet) + - Connect one adapter to your router (for internet access) + - Connect one adapter to your device under test + - Internet connection +### Software + - Python 3 with pip3 (Already available on Ubuntu LTS) + - Docker - [Install guide](https://docs.docker.com/engine/install/ubuntu/) + - Open vSwitch ``sudo apt-get install openvswitch-common openvswitch-switch`` + +An additional network interface (even wifi) with internet access can be used to maintain internet connection during use of the network orchestrator. + +## How to use :arrow_forward: +1) Ensure you have a device with the minimum hardware and software requirements setup +2) Clone the project using ```git clone https://github.com/auto-iot/network-orchestrator``` +3) Navigate into the project using ```cd network-orchestrator``` +4) Copy conf/system.json.example to conf/system.json (after setting the correct interfaces in the file) +5) Start the tool using ```sudo cmd/start``` + +## Issue reporting :triangular_flag_on_post: +If the application has come across a problem at any point during setup or use, please raise an issue under the [issues tab](https://github.com/auto-iot/network-orchestrator/issues). Issue templates exist for both bug reports and feature requests. If neither of these are appropriate for your issue, raise a blank issue instead. + +## Roadmap :chart_with_upwards_trend: + - Ability to modify configuration files of each network service during use (via GRPC) + - IPv6 internet routing + +## Contributing :keyboard: +The contributing requirements can be found in [CONTRIBUTING.md](CONTRIBUTING.md). In short, checkout the [Google CLA](https://cla.developers.google.com/) site to get started. + +## FAQ :raising_hand: +1) What services are provided on the virtual network? + + The following are network services that are containerized and accessible to the device under test though are likely to change over time: + - DHCP in failover configuration with internet connectivity + - IPv6 router advertisements + - DNS (and DNS over HTTPS) + - NTPv4 + - 802.1x Port Based Authentication + +2) Can I run the network orchestrator on a virtual machine? + + Probably. Provided that the required 2x USB ethernet adapters are passed to the virtual machine as USB devices rather than network adapters, the tool should + still work. We will look to test and approve the use of virtualisation in the future. + +3) Can I connect multiple devices to the Network Orchestrator? + + In short, Yes you can. The way in which multiple devices could be tested simultaneously is yet to be decided. However, if you simply want to add field/peer devices during runtime (even another laptop performing manual testing) then you may connect the USB ethernet adapter to an unmanaged switch. + +4) Raise an issue with the label 'question' if your question has not been answered in this readme. \ No newline at end of file diff --git a/net_orc/docker-compose.yml b/net_orc/docker-compose.yml new file mode 100644 index 000000000..8c50d766a --- /dev/null +++ b/net_orc/docker-compose.yml @@ -0,0 +1,64 @@ +version: "3.7" + +services: + + base: + build: + context: network/modules/base + dockerfile: base.Dockerfile + image: test-run/base + container_name: tr-ct-base + + ovs: + depends_on: + - base + build: + context: network/modules/ovs + dockerfile: ovs.Dockerfile + image: test-run/ovs + network_mode: host + container_name: tr-ct-ovs + stdin_open: true + privileged: true + volumes: + - $PWD/network/modules/ovs/python:/ovs/python + # Mount host open vswitch socket to allow container + # access to control open vswitch on the host + - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock + # Mount host network namespace to allow container + # access to assign proper namespaces to containers + - /var/run/netns:/var/run/netns + + netorch: + depends_on: + - base + build: + context: . + dockerfile: orchestrator.Dockerfile + image: test-run/orchestrator + network_mode: host + privileged: true + volumes: + - $PWD/cmd:/orchestrator/cmd + - $PWD/network:/orchestrator/network + - $PWD/python:/orchestrator/python + # Mount host docker socket to allow container access + # control docker containers on the host + - /var/run/docker.sock:/var/run/docker.sock + # Mount host open vswitch socket to allow container + # access to control open vswitch on the host + - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock + # Mount host network namespace to allow container + # access to assign proper namespaces to containers + - /var/run/netns:/var/run/netns + # Mount the host process information to allow container + # access to configure docker containers and namespaces properly + - /proc:/proc + container_name: network_orchestrator + stdin_open: true + working_dir: /orchestrator + #entrypoint: ["cmd/start"] + # Give more time for stopping so when we stop the container it has + # time to stop all network services gracefuly + stop_grace_period: 60s + entrypoint: ["python3","-u","python/src/run.py"] diff --git a/net_orc/network/devices/faux-dev/bin/get_default_gateway b/net_orc/network/devices/faux-dev/bin/get_default_gateway new file mode 100644 index 000000000..f6f1e2a0d --- /dev/null +++ b/net_orc/network/devices/faux-dev/bin/get_default_gateway @@ -0,0 +1,3 @@ +#!/bin/bash -e + +route | grep default | awk '{print $2}' \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/bin/start_dhcp_client b/net_orc/network/devices/faux-dev/bin/start_dhcp_client new file mode 100644 index 000000000..de9270c82 --- /dev/null +++ b/net_orc/network/devices/faux-dev/bin/start_dhcp_client @@ -0,0 +1,16 @@ +#!/bin/bash -e + +# Fetch the interface +INTF=$1 + +PID_FILE=/var/run/dhclient.pid + +echo "Starting DHCP Client on interface $INTF" + +#Kill any existing running dhclient process +if [ -f $PID_FILE ]; then + kill -9 $(cat $PID_FILE) || true + rm -f $PID_FILE +fi + +dhclient $INTF \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/bin/start_network_service b/net_orc/network/devices/faux-dev/bin/start_network_service new file mode 100644 index 000000000..b727d2091 --- /dev/null +++ b/net_orc/network/devices/faux-dev/bin/start_network_service @@ -0,0 +1,39 @@ +#!/bin/bash -e + +# Directory where all binaries will be loaded +BIN_DIR="/testrun/bin" + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +#Create and set permissions on the output files +LOG_FILE=/runtime/validation/$MODULE_NAME.log +RESULT_FILE=/runtime/validation/result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Start dhclient +$BIN_DIR/start_dhcp_client $INTF + +# -u flag allows python print statements +# to be logged by docker by running unbuffered +exec python3 -u /testrun/python/src/run.py "-m $MODULE_NAME" + +echo Network validator complete \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/conf/module_config.json b/net_orc/network/devices/faux-dev/conf/module_config.json new file mode 100644 index 000000000..afde8c629 --- /dev/null +++ b/net_orc/network/devices/faux-dev/conf/module_config.json @@ -0,0 +1,11 @@ +{ + "config": { + "meta": { + "name": "faux-dev", + "description": "Faux device to test network modules are functioning properly" + }, + "docker": { + "timeout": 60 + } + } +} \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/faux-dev.Dockerfile b/net_orc/network/devices/faux-dev/faux-dev.Dockerfile new file mode 100644 index 000000000..1686341b5 --- /dev/null +++ b/net_orc/network/devices/faux-dev/faux-dev.Dockerfile @@ -0,0 +1,20 @@ +# Image name: test-run/faux-dev +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update --fix-missing + +# NTP requireds interactive installation so we're going to turn that off +ARG DEBIAN_FRONTEND=noninteractive + +# Install dhcp client and ntp client +RUN apt-get install -y isc-dhcp-client ntp ntpdate + +# Copy over all configuration files +COPY network/devices/faux-dev/conf /testrun/conf + +# Load device binary files +COPY network/devices/faux-dev/bin /testrun/bin + +# Copy over all python files +COPY network/devices/faux-dev/python /testrun/python \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py new file mode 100644 index 000000000..ab7defc39 --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 + +import time +import logger + +LOGGER = None +LOG_NAME = "dhcp_validator" +DHCP_LEASE_FILE = "/var/lib/dhcp/dhclient.leases" +IP_ADDRESS_KEY = "fixed-address" +DNS_OPTION_KEY = "option domain-name-servers" +GATEWAY_OPTION_KEY = "option routers" +NTP_OPTION_KEY = "option ntp-servers" + + +class DHCPValidator: + def __init__(self, module): + self._dhcp_lease = None + self.dhcp_lease_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result("DHCP lease test", self.dhcp_lease_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + + def get_dhcp_lease(self): + """Returns the current DHCP lease.""" + return self._dhcp_lease + + def validate(self): + self._resolve_dhcp_lease() + LOGGER.info("IP Addr: " + self._dhcp_lease.ip_addr) + LOGGER.info("Gateway: " + self._dhcp_lease.gateway) + LOGGER.info("DNS Server: " + self._dhcp_lease.dns_server) + LOGGER.info("NTP Server: " + self._dhcp_lease.ntp_server) + + def _resolve_dhcp_lease(self): + LOGGER.info("Resolving DHCP lease...") + while self._dhcp_lease is None: + time.sleep(5) + try: + lease_file = open(DHCP_LEASE_FILE) + lines = lease_file.read() + LOGGER.debug("Lease file:\n" + lines) + leases = lines.split("lease ") + # Last lease is the current lease + cur_lease = leases[-1] + if cur_lease is not None: + LOGGER.debug("Current lease: " + cur_lease) + self._dhcp_lease = DHCPLease() + self.dhcp_lease_test = True + # Iterate over entire lease and pick the parts we care about + lease_parts = cur_lease.split("\n") + for part in lease_parts: + part_clean = part.strip() + if part_clean.startswith(IP_ADDRESS_KEY): + self._dhcp_lease.ip_addr = part_clean[len( + IP_ADDRESS_KEY):-1].strip() + elif part_clean.startswith(DNS_OPTION_KEY): + self._dhcp_lease.dns_server = part_clean[len( + DNS_OPTION_KEY):-1].strip() + elif part_clean.startswith(GATEWAY_OPTION_KEY): + self._dhcp_lease.gateway = part_clean[len( + GATEWAY_OPTION_KEY):-1].strip() + elif part_clean.startswith(NTP_OPTION_KEY): + self._dhcp_lease.ntp_server = part_clean[len( + NTP_OPTION_KEY):-1].strip() + except Exception: + LOGGER.error("DHCP Resolved Error") + LOGGER.info("DHCP lease resolved") + + +class DHCPLease: + """Stores information about a device's DHCP lease.""" + + def __init__(self): + self.ip_addr = None + self.gateway = None + self.dns_server = None + self.ntp_server = None diff --git a/net_orc/network/devices/faux-dev/python/src/dns_check.py b/net_orc/network/devices/faux-dev/python/src/dns_check.py new file mode 100644 index 000000000..d3d709d6e --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/dns_check.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 + +import logger +import time +import util +import subprocess + +from dhcp_check import DHCPLease + +LOGGER = None +LOG_NAME = "dns_validator" +HOST_PING = "google.com" +CAPTURE_FILE = "/runtime/network/faux-dev.pcap" +DNS_CONFIG_FILE = "/etc/resolv.conf" + + +class DNSValidator: + + def __init__(self, module): + self._dns_server = None + self._dns_resolution_test = False + self._dns_dhcp_server_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result( + "DNS resolution test", self._dns_resolution_test) + self.print_test_result( + "DNS DHCP server test", self._dns_dhcp_server_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + + def validate(self, dhcp_lease): + self._dns_server = dhcp_lease.dns_server + self._set_dns_server() + self._check_dns_traffic() + + def _check_dns_traffic(self): + LOGGER.info("Checking DNS traffic for DNS server: " + self._dns_server) + + # Ping a host to generate DNS traffic + if self._ping(HOST_PING)[0]: + LOGGER.info("Ping success") + self._dns_resolution_test = True + else: + LOGGER.info("Ping failed") + + # Some delay between pings and DNS traffic in the capture file + # so give some delay before we try to query again + time.sleep(5) + + # Check if the device has sent any DNS requests + filter_to_dns = 'dst port 53 and dst host {}'.format( + self._dns_server) + to_dns = self._exec_tcpdump(filter_to_dns) + num_query_dns = len(to_dns) + LOGGER.info("DNS queries found: " + str(num_query_dns)) + dns_traffic_detected = len(to_dns) > 0 + if dns_traffic_detected: + LOGGER.info("DNS traffic detected to configured DHCP DNS server") + self._dns_dhcp_server_test = True + else: + LOGGER.error("No DNS traffic detected") + + # Docker containeres resolve DNS servers from the host + # and do not play nice with normal networking methods + # so we need to set our DNS servers manually + def _set_dns_server(self): + f = open(DNS_CONFIG_FILE, "w", encoding="utf-8") + f.write("nameserver " + self._dns_server) + f.close() + + # Generate DNS traffic by doing a simple ping by hostname + def _ping(self, host): + cmd = "ping -c 5 " + host + success = util.run_command(cmd, LOGGER) + return success + + def _exec_tcpdump(self, tcpdump_filter): + """ + Args + tcpdump_filter: Filter to pass onto tcpdump file + capture_file: Optional capture file to look + Returns + List of packets matching the filter + """ + command = 'tcpdump -tttt -n -r {} {}'.format( + CAPTURE_FILE, tcpdump_filter) + + LOGGER.debug("tcpdump command: " + command) + + process = subprocess.Popen(command, + universal_newlines=True, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + text = str(process.stdout.read()).rstrip() + + LOGGER.debug("tcpdump response: " + text) + + if text: + return text.split("\n") + + return [] \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/python/src/gateway_check.py b/net_orc/network/devices/faux-dev/python/src/gateway_check.py new file mode 100644 index 000000000..17457874a --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/gateway_check.py @@ -0,0 +1,40 @@ +import logger +import util + +from dhcp_check import DHCPLease + +LOGGER = None +LOG_NAME = "gateway_validator" + + +class GatewayValidator: + + def __init__(self, module): + self._gateway = None + self._default_gateway_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result("Default gateway test", + self._default_gateway_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + + + def validate(self, dhcp_lease): + self._gateway = dhcp_lease.gateway + self.check_default_gateway() + + def check_default_gateway(self): + LOGGER.info( + "Checking default gateway matches DHCP gateway: " + self._gateway) + cmd = "/testrun/bin/get_default_gateway" + success, default_gateway, stderr = util.run_command(cmd, LOGGER) + LOGGER.info("Default gateway resolved: " + default_gateway) + if default_gateway == self._gateway: + self._default_gateway_test = True \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/python/src/logger.py b/net_orc/network/devices/faux-dev/python/src/logger.py new file mode 100644 index 000000000..bf692c85e --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/logger.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 + +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' +_CONF_DIR = "conf" +_CONF_FILE_NAME = "system.json" +_LOG_DIR = "/runtime/validation" + +# Set log level +with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8') as conf_file: + system_conf_json = json.load(conf_file) + +log_level_str = system_conf_json['log_level'] +log_level = logging.getLevelName(log_level_str) + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + +def add_file_handler(log, log_file): + """Add file handler to existing log.""" + handler = logging.FileHandler(os.path.join(_LOG_DIR, log_file + ".log")) + handler.setFormatter(log_format) + log.addHandler(handler) + +def add_stream_handler(log): + """Add stream handler to existing log.""" + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + +def get_logger(name, log_file=None): + """Return logger for requesting class.""" + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/net_orc/network/devices/faux-dev/python/src/ntp_check.py b/net_orc/network/devices/faux-dev/python/src/ntp_check.py new file mode 100644 index 000000000..a50bf337e --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/ntp_check.py @@ -0,0 +1,79 @@ +import time +import logger +import util + +LOGGER = None +LOG_NAME = "ntp_validator" +ATTEMPTS = 3 + + +class NTPValidator: + """Perform testing of the NTP server.""" + + def __init__(self, module): + self._ntp_server = None + self._ntp_sync_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + """Print all test results to log.""" + self.print_test_result("NTP sync test", + self._ntp_sync_test) + + def print_test_result(self, test_name, result): + """Output test result to log.""" + LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + + def validate(self, dhcp_lease): + """Call NTP sync test.""" + self._ntp_server = dhcp_lease.ntp_server + self.check_ntp() + + def check_ntp(self): + """Perform NTP sync test.""" + if self._ntp_server is not None: + attempt = 0 + LOGGER.info(f"Attempting to sync to NTP server: {self._ntp_server}") + LOGGER.info("Attempts allowed: " + str(ATTEMPTS)) + + # If we don't ping before syncing, this will fail. + while attempt < ATTEMPTS and not self._ntp_sync_test: + attempt += 1 + if self.ping_ntp_server(): + self.sync_ntp() + if not self._ntp_sync_test: + LOGGER.info("Waiting 5 seconds before next attempt") + time.sleep(5) + else: + LOGGER.info("No NTP server available from DHCP lease") + + def sync_ntp(self): + """Send NTP request to server.""" + LOGGER.info("Sending NTP Sync Request to: " + self._ntp_server) + cmd = "ntpdate " + self._ntp_server + ntp_response = util.run_command(cmd, LOGGER)[1] + LOGGER.info("NTP sync response: " + ntp_response) + if "adjust time server " + self._ntp_server in ntp_response: + LOGGER.info("NTP sync succesful") + self._ntp_sync_test = True + else: + LOGGER.info("NTP client failed to sync to server") + + def ping_ntp_server(self): + """Ping NTP server before sending a time request.""" + LOGGER.info("Pinging NTP server before syncing...") + if self.ping(self._ntp_server): + LOGGER.info("NTP server successfully pinged") + return True + LOGGER.info("NTP server did not respond to ping") + return False + + def ping(self, host): + """Send ping request to host.""" + cmd = "ping -c 1 " + host + success = util.run_command(cmd, LOGGER) + return success diff --git a/net_orc/network/devices/faux-dev/python/src/run.py b/net_orc/network/devices/faux-dev/python/src/run.py new file mode 100644 index 000000000..5891b8c4b --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/run.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 + +import argparse +import json +import os +import signal +import sys + +import logger +from dns_check import DNSValidator +from dhcp_check import DHCPValidator +from gateway_check import GatewayValidator +from ntp_check import NTPValidator + +RESULTS_DIR = '/runtime/validation/' +LOGGER = logger.get_logger('validator') + +class FauxDevice: + """Represents a virtual testing device.""" + + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + self.dhcp_validator = DHCPValidator(module) + self.dns_validator = DNSValidator(module) + self.gateway_validator = GatewayValidator(module) + self.ntp_validator = NTPValidator(module) + + self._module = module + self.run_tests() + results = self.generate_results() + self.write_results(results) + + def run_tests(self): + """Execute configured network tests.""" + + # Run DHCP tests first since everything hinges on basic DHCP compliance first + self.dhcp_validator.validate() + + dhcp_lease = self.dhcp_validator.get_dhcp_lease() + + # Use current lease from dhcp tests to validate DNS behaviors + self.dns_validator.validate(dhcp_lease) + + # Use current lease from dhcp tests to validate default gateway + self.gateway_validator.validate(dhcp_lease) + + # Use current lease from dhcp tests to validate ntp server + self.ntp_validator.validate(dhcp_lease) + + def print_test_results(self): + """Print test results to log.""" + self.dhcp_validator.print_test_results() + self.dns_validator.print_test_results() + self.gateway_validator.print_test_results() + self.ntp_validator.print_test_results() + + def generate_results(self): + """Transform test results into JSON format.""" + + results = [] + results.append(self.generate_result("dhcp_lease", self.dhcp_validator.dhcp_lease_test)) + results.append(self.generate_result("dns_from_dhcp", self.dns_validator._dns_dhcp_server_test)) + results.append(self.generate_result("dns_resolution", self.dns_validator._dns_resolution_test)) + results.append(self.generate_result("gateway_default", self.gateway_validator._default_gateway_test)) + results.append(self.generate_result("ntp_sync", self.ntp_validator._ntp_sync_test)) + json_results = json.dumps({"results":results}, indent=2) + + return json_results + + def write_results(self, results): + """Write test results to file.""" + results_file = os.path.join(RESULTS_DIR, "result.json") + LOGGER.info("Writing results to " + results_file) + f = open(results_file, "w", encoding="utf-8") + f.write(results) + f.close() + + def generate_result(self, test_name, test_result): + """Return JSON object for test result.""" + if test_result is not None: + result = "compliant" if test_result else "non-compliant" + else: + result = "skipped" + LOGGER.info(test_name + ": " + result) + res_dict = { + "name": test_name, + "result": result + } + return res_dict + + def _handler(self, signum, frame): # pylint: disable=unused-argument + if signum in (2, signal.SIGTERM): + sys.exit(1) + +def run(argv): # pylint: disable=unused-argument + """Run the network validator.""" + parser = argparse.ArgumentParser(description="Faux Device _validator", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-m","--module", + help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + FauxDevice(args.module.strip()) + +if __name__ == "__main__": + run(sys.argv) diff --git a/net_orc/network/devices/faux-dev/python/src/util.py b/net_orc/network/devices/faux-dev/python/src/util.py new file mode 100644 index 000000000..605af1132 --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/util.py @@ -0,0 +1,28 @@ +import subprocess +import shlex + +# Runs a process at the os level +# By default, returns the standard output and error output +# If the caller sets optional output parameter to False, +# will only return a boolean result indicating if it was +# succesful in running the command. Failure is indicated +# by any return code from the process other than zero. + + +def run_command(cmd, logger, output=True): + success = False + process = subprocess.Popen(shlex.split( + cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + + if process.returncode != 0: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + logger.error("Command Failed: " + cmd) + logger.error("Error: " + err_msg) + else: + success = True + + if output: + return success, stdout.strip().decode('utf-8'), stderr + else: + return success, None, stderr diff --git a/net_orc/network/modules/base/base.Dockerfile b/net_orc/network/modules/base/base.Dockerfile new file mode 100644 index 000000000..2400fd1c6 --- /dev/null +++ b/net_orc/network/modules/base/base.Dockerfile @@ -0,0 +1,23 @@ +# Image name: test-run/base +FROM ubuntu:jammy + +# Install common software +RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix + +#Setup the base python requirements +COPY network/modules/base/python /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + +# Add the bin files +COPY network/modules/base/bin /testrun/bin + +# Remove incorrect line endings +RUN dos2unix /testrun/bin/* + +# Make sure all the bin files are executable +RUN chmod u+x /testrun/bin/* + +#Start the network module +ENTRYPOINT [ "/testrun/bin/start_module" ] \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/capture b/net_orc/network/modules/base/bin/capture new file mode 100644 index 000000000..8a8430feb --- /dev/null +++ b/net_orc/network/modules/base/bin/capture @@ -0,0 +1,30 @@ +#!/bin/bash -e + +# Fetch module name +MODULE_NAME=$1 + +# Define the local file location for the capture to be saved +PCAP_DIR="/runtime/network/" +PCAP_FILE=$MODULE_NAME.pcap + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE ]] +then + INTERFACE=$DEFAULT_IFACE +else + INTERFACE=$DEFINED_IFACE +fi + +# Create the output directory and start the capture +mkdir -p $PCAP_DIR +chown $HOST_USER:$HOST_USER $PCAP_DIR +tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & + +#Small pause to let the capture to start +sleep 1 \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/setup_binaries b/net_orc/network/modules/base/bin/setup_binaries new file mode 100644 index 000000000..3535ead3c --- /dev/null +++ b/net_orc/network/modules/base/bin/setup_binaries @@ -0,0 +1,10 @@ +#!/bin/bash -e + +# Directory where all binaries will be loaded +BIN_DIR=$1 + +# Remove incorrect line endings +dos2unix $BIN_DIR/* + +# Make sure all the bin files are executable +chmod u+x $BIN_DIR/* \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/start_grpc b/net_orc/network/modules/base/bin/start_grpc new file mode 100644 index 000000000..9792b4bd4 --- /dev/null +++ b/net_orc/network/modules/base/bin/start_grpc @@ -0,0 +1,17 @@ +#!/bin/bash -e + +GRPC_DIR="/testrun/python/src/grpc" +GRPC_PROTO_DIR="proto" +GRPC_PROTO_FILE="grpc.proto" + +#Move into the grpc directory +pushd $GRPC_DIR >/dev/null 2>&1 + +#Build the grpc proto file every time before starting server +python3 -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. + +popd >/dev/null 2>&1 + +#Start the grpc server +python3 -u $GRPC_DIR/start_server.py $@ + diff --git a/net_orc/network/modules/base/bin/start_module b/net_orc/network/modules/base/bin/start_module new file mode 100644 index 000000000..7fdcbc404 --- /dev/null +++ b/net_orc/network/modules/base/bin/start_module @@ -0,0 +1,79 @@ +#!/bin/bash + +# Directory where all binaries will be loaded +BIN_DIR="/testrun/bin" + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Create a local user that matches the same as the host +# to be used for correct file ownership for various logs +# HOST_USER mapped in via docker container environemnt variables +useradd $HOST_USER + +# Enable IPv6 for all containers +sysctl net.ipv6.conf.all.disable_ipv6=0 +sysctl -p + +#Read in the config file +CONF_FILE="/testrun/conf/module_config.json" +CONF=`cat $CONF_FILE` + +if [[ -z $CONF ]] +then + echo "No config file present at $CONF_FILE. Exiting startup." + exit 1 +fi + +# Extract the necessary config parameters +MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name') +DEFINED_IFACE=$(echo "$CONF" | jq -r '.config.network.interface') +GRPC=$(echo "$CONF" | jq -r '.config.grpc') + +# Validate the module name is present +if [[ -z "$MODULE_NAME" || "$MODULE_NAME" == "null" ]] +then + echo "No module name present in $CONF_FILE. Exiting startup." + exit 1 +fi + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No Interface Defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +echo "Starting module $MODULE_NAME on local interface $INTF..." + +$BIN_DIR/setup_binaries $BIN_DIR + +# Wait for interface to become ready +$BIN_DIR/wait_for_interface $INTF + +# Small pause to let the interface stabalize before starting the capture +#sleep 1 + +# Start network capture +$BIN_DIR/capture $MODULE_NAME $INTF + +# Start the grpc server +if [[ ! -z $GRPC && ! $GRPC == "null" ]] +then + GRPC_PORT=$(echo "$GRPC" | jq -r '.port') + if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]] + then + echo "gRPC port resolved from config: $GRPC_PORT" + $BIN_DIR/start_grpc "-p $GRPC_PORT" & + else + $BIN_DIR/start_grpc & + fi +fi + +#Small pause to let all core services stabalize +sleep 3 + +#Start the networking service +$BIN_DIR/start_network_service $MODULE_NAME $INTF \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/start_network_service b/net_orc/network/modules/base/bin/start_network_service new file mode 100644 index 000000000..7d13750b8 --- /dev/null +++ b/net_orc/network/modules/base/bin/start_network_service @@ -0,0 +1,10 @@ +#!/bin/bash + +# Place holder function for testing and validation +# Each network module should include a start_networkig_service +# file that overwrites this one to boot all of the its specific +# requirements to run. + +echo "Starting network service..." +echo "This is not a real network service, just a test" +echo "Network service started" \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/wait_for_interface b/net_orc/network/modules/base/bin/wait_for_interface new file mode 100644 index 000000000..1377705d8 --- /dev/null +++ b/net_orc/network/modules/base/bin/wait_for_interface @@ -0,0 +1,21 @@ +#!/bin/bash + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$1 + +# Select which interace to use +if [[ -z $DEFINED_IFACE ]] +then + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Wait for local interface to be ready +while ! ip link show $INTF; do + echo $INTF is not yet ready. Waiting 3 seconds + sleep 3 +done \ No newline at end of file diff --git a/net_orc/network/modules/base/conf/module_config.json b/net_orc/network/modules/base/conf/module_config.json new file mode 100644 index 000000000..1f3a47ba2 --- /dev/null +++ b/net_orc/network/modules/base/conf/module_config.json @@ -0,0 +1,12 @@ +{ + "config": { + "meta": { + "name": "base", + "display_name": "Base", + "description": "Base image" + }, + "docker": { + "enable_container": false + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/base/python/requirements.txt b/net_orc/network/modules/base/python/requirements.txt new file mode 100644 index 000000000..9c4e2b056 --- /dev/null +++ b/net_orc/network/modules/base/python/requirements.txt @@ -0,0 +1,2 @@ +grpcio +grpcio-tools \ No newline at end of file diff --git a/net_orc/network/modules/base/python/src/grpc/start_server.py b/net_orc/network/modules/base/python/src/grpc/start_server.py new file mode 100644 index 000000000..9ed31ffcf --- /dev/null +++ b/net_orc/network/modules/base/python/src/grpc/start_server.py @@ -0,0 +1,34 @@ +from concurrent import futures +import grpc +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 +from network_service import NetworkService +import logging +import sys +import argparse + +DEFAULT_PORT = '5001' + +def serve(PORT): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) + server.add_insecure_port('[::]:' + PORT) + server.start() + server.wait_for_termination() + +def run(argv): + parser = argparse.ArgumentParser(description="GRPC Server for Network Module", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-p", "--port", default=DEFAULT_PORT, + help="Define the default port to run the server on.") + + args = parser.parse_args() + + PORT = args.port + + print("gRPC server starting on port " + PORT) + serve(PORT) + + +if __name__ == "__main__": + run(sys.argv) \ No newline at end of file diff --git a/net_orc/network/modules/base/python/src/logger.py b/net_orc/network/modules/base/python/src/logger.py new file mode 100644 index 000000000..4924512c6 --- /dev/null +++ b/net_orc/network/modules/base/python/src/logger.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 + +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR = "conf" +_CONF_FILE_NAME = "system.json" +_LOG_DIR = "/runtime/network/" + +# Set log level +try: + system_conf_json = json.load( + open(os.path.join(_CONF_DIR, _CONF_FILE_NAME))) + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except: + # TODO: Print out warning that log level is incorrect or missing + log_level = _DEFAULT_LEVEL + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + + +def add_file_handler(log, logFile): + handler = logging.FileHandler(_LOG_DIR+logFile+".log") + handler.setFormatter(log_format) + log.addHandler(handler) + + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, logFile=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if logFile is not None: + add_file_handler(LOGGERS[name], logFile) + return LOGGERS[name] diff --git a/net_orc/network/modules/dhcp-1/bin/start_network_service b/net_orc/network/modules/dhcp-1/bin/start_network_service new file mode 100644 index 000000000..e8e0ad06c --- /dev/null +++ b/net_orc/network/modules/dhcp-1/bin/start_network_service @@ -0,0 +1,77 @@ +#!/bin/bash + +CONFIG_FILE=/etc/dhcp/dhcpd.conf +DHCP_PID_FILE=/var/run/dhcpd.pid +DHCP_LOG_FILE=/runtime/network/dhcp1-dhcpd.log +RA_PID_FILE=/var/run/radvd/radvd.pid +RA_LOG_FILE=/runtime/network/dhcp1-radvd.log + +echo "Starrting Network Service..." + +#Enable IPv6 Forwarding +sysctl net.ipv6.conf.all.forwarding=1 +sysctl -p + +# Create leases file if needed +touch /var/lib/dhcp/dhcpd.leases + +#Create directory for radvd +mkdir /var/run/radvd + +#Create and set permissions on the log files +touch $DHCP_LOG_FILE +touch $RA_LOG_FILE +chown $HOST_USER:$HOST_USER $DHCP_LOG_FILE +chown $HOST_USER:$HOST_USER $RA_LOG_FILE + + +#Move the config files to the correct location +cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf +cp /testrun/conf/radvd.conf /etc/radvd.conf + +# Restart dhcp server when config changes +while true; do + + new_checksum=$(md5sum $CONFIG_FILE) + + if [ "$checksum" == "$new_checksum" ]; then + sleep 2 + continue + fi + + echo Config changed. Restarting dhcp server at $(date).. + + if [ -f $DHCP_PID_FILE ]; then + kill -9 $(cat $DHCP_PID_FILE) || true + rm -f $DHCP_PID_FILE + fi + + if [ -f $RA_PID_FILE ]; then + kill -9 $(cat $RA_PID_FILE) || true + rm -f $RA_PID_FILE + fi + + checksum=$new_checksum + + echo Starting isc-dhcp-server at $(date) + + radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE + dhcpd -d &> $DHCP_LOG_FILE & + + while [ ! -f $DHCP_PID_FILE ]; do + echo Waiting for $DHCP_PID_FILE... + sleep 2 + done + + echo $DHCP_PID_FILE now available + + while [ ! -f $RA_PID_FILE ]; do + echo Waiting for $RA_PID_FILE... + sleep 2 + done + + echo $RA_PID_FILE now available + + echo Server now stable + +done \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/conf/dhcpd.conf b/net_orc/network/modules/dhcp-1/conf/dhcpd.conf new file mode 100644 index 000000000..9f4fe1c28 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/conf/dhcpd.conf @@ -0,0 +1,26 @@ +default-lease-time 300; + +failover peer "failover-peer" { + primary; + address 10.10.10.2; + port 847; + peer address 10.10.10.3; + peer port 647; + max-response-delay 60; + max-unacked-updates 10; + mclt 3600; + split 128; + load balance max seconds 3; +} + +subnet 10.10.10.0 netmask 255.255.255.0 { + option ntp-servers 10.10.10.5; + option subnet-mask 255.255.255.0; + option broadcast-address 10.10.10.255; + option routers 10.10.10.1; + option domain-name-servers 10.10.10.4; + pool { + failover peer "failover-peer"; + range 10.10.10.10 10.10.10.20; + } +} diff --git a/net_orc/network/modules/dhcp-1/conf/module_config.json b/net_orc/network/modules/dhcp-1/conf/module_config.json new file mode 100644 index 000000000..56d9aa271 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/conf/module_config.json @@ -0,0 +1,25 @@ +{ + "config": { + "meta": { + "name": "dhcp-1", + "display_name": "DHCP Primary", + "description": "Primary DHCP server with IPv6 SLAAC" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 2 + }, + "grpc":{ + "port": 5001 + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/conf/radvd.conf b/net_orc/network/modules/dhcp-1/conf/radvd.conf new file mode 100644 index 000000000..f6d6f30d9 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/conf/radvd.conf @@ -0,0 +1,12 @@ +interface veth0 +{ + AdvSendAdvert on; + AdvManagedFlag off; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 60; + prefix fd10:77be:4186::/64 { + AdvOnLink on; + AdvAutonomous on; + AdvRouterAddr on; + }; +}; \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile b/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile new file mode 100644 index 000000000..99804e0e3 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/dhcp-primary +FROM test-run/base:latest + +# Install dhcp server +RUN apt-get install -y isc-dhcp-server radvd + +# Copy over all configuration files +COPY network/modules/dhcp-1/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/dhcp-1/bin /testrun/bin + +# Copy over all python files +COPY network/modules/dhcp-1/python /testrun/python diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py b/net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py new file mode 100644 index 000000000..f5445ca44 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py @@ -0,0 +1,267 @@ +import re + +CONFIG_FILE = "/etc/dhcp/dhcpd.conf" +CONFIG_FILE_TEST = "network/modules/dhcp-1/conf/dhcpd.conf" + +DEFAULT_LEASE_TIME_KEY = "default-lease-time" + + +class DHCPConfig: + + def __init__(self): + self._default_lease_time = 300 + self._subnets = [] + self._peer = None + + def write_config(self): + conf = str(self) + print("Writing config: \n" + conf) + f = open(CONFIG_FILE, "w") + f.write(conf) + + def resolve_config(self): + with open(CONFIG_FILE) as f: + conf = f.read() + self.resolve_subnets(conf) + self.peer = DHCPFailoverPeer(conf) + + def resolve_subnets(self, conf): + self._subnets = [] + regex = r"(subnet.*)" + subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets: + dhcp_subnet = DHCPSubnet(subnet) + self._subnets.append(dhcp_subnet) + + def set_range(self, start, end, subnet=0, pool=0): + print("Setting Range for pool ") + print(self._subnets[subnet]._pools[pool]) + self._subnets[subnet]._pools[pool]._range_start = start + self._subnets[subnet]._pools[pool]._range_end = end + + def resolve_settings(self, conf): + lines = conf.split("\n") + for line in lines: + if DEFAULT_LEASE_TIME_KEY in line: + self._default_lease_time = line.strip().split(DEFAULT_LEASE_TIME_KEY)[ + 1].strip().split(";")[0] + + self.peer = peer + + def __str__(self): + + config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, DEFAULT_LEASE_TIME=self._default_lease_time + ) + + config += "\n\n"+str(self.peer) + for subnet in self._subnets: + config += "\n\n"+str(subnet) + return str(config) + + +FAILOVER_PEER_KEY = "failover peer" +PRIMARY_KEY = "primary" +ADDRESS_KEY = "address" +PORT_KEY = "port" +PEER_ADDRESS_KEY = "peer address" +PEER_PORT_KEY = "peer port" +MAX_RESPONSE_DELAY_KEY = "max-response-delay" +MAX_UNACKED_UPDATES_KEY = "max-unacked-updates" +MCLT_KEY = "mclt" +SPLIT_KEY = "split" +LOAD_BALANCE_MAX_SECONDS_KEY = "load balance max seconds" + + +class DHCPFailoverPeer: + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + + self.resolve_peer(config) + + def __str__(self): + config = "{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n" + config += "\tprimary;" if self.primary else "secondary;" + config += """\n\t{ADDRESS_KEY} {ADDRESS}; + {PORT_KEY} {PORT}; + {PEER_ADDRESS_KEY} {PEER_ADDRESS}; + {PEER_PORT_KEY} {PEER_PORT}; + {MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY}; + {MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES}; + {MCLT_KEY} {MCLT}; + {SPLIT_KEY} {SPLIT}; + {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; + \r}}""" + + return config.format(length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, ADDRESS=self.address, + PORT_KEY=PORT_KEY, PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds + ) + + def resolve_peer(self, conf): + peer = "" + lines = conf.split("\n") + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if(len(peer) <= 0): + self.name = line.strip().split(FAILOVER_PEER_KEY)[ + 1].strip().split("{")[0].split("\"")[1] + peer += line+"\n" + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[ + 1].strip().split(";")[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[ + 1].strip().split(";")[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split(PEER_ADDRESS_KEY)[ + 1].strip().split(";")[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[ + 1].strip().split(";")[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = line.strip().split(MAX_RESPONSE_DELAY_KEY)[ + 1].strip().split(";")[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split(MAX_UNACKED_UPDATES_KEY)[ + 1].strip().split(";")[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[ + 1].strip().split(";")[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[ + 1].strip().split(";")[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split(LOAD_BALANCE_MAX_SECONDS_KEY)[ + 1].strip().split(";")[0] + if line.endswith("}") and len(peer) > 0: + break + self.peer = peer + + +NTP_OPTION_KEY = "option ntp-servers" +SUBNET_MASK_OPTION_KEY = "option subnet-mask" +BROADCAST_OPTION_KEY = "option broadcast-address" +ROUTER_OPTION_KEY = "option routers" +DNS_OPTION_KEY = "option domain-name-servers" + + +class DHCPSubnet: + def __init__(self, subnet): + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self._pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ + \r\t{NTP_OPTION_KEY} {NTP_OPTION}; + \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; + \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; + \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; + \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" + + config = config.format(length='multi-line', + NTP_OPTION_KEY=NTP_OPTION_KEY, NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, DNS_OPTION=self._dns_servers + ) + for pool in self._pools: + config += "\n\t"+str(pool) + + config += "\n\r}" + return config + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split("\n") + for part in subnet_parts: + if NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[ + 1].strip().split(";")[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split(SUBNET_MASK_OPTION_KEY)[ + 1].strip().split(";")[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split(BROADCAST_OPTION_KEY)[ + 1].strip().split(";")[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[ + 1].strip().split(";")[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[ + 1].strip().split(";")[0] + + def resolve_pools(self, subnet): + regex = r"(pool.*)\}" + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self._pools.append(dhcp_pool) + + +FAILOVER_KEY = "failover peer" +RANGE_KEY = "range" + + +class DHCPPool: + + def __init__(self, pool): + self._failover_peer = None + self._range_start = None + self._range_end = None + self.resolve_pool(pool) + + def __str__(self): + + config = """pool {{ + \r\t\t{FAILOVER_KEY} "{FAILOVER}"; + \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; + \r\t}}""" + + return config.format(length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, FAILOVER=self._failover_peer, + RANGE_KEY=RANGE_KEY, RANGE_START=self._range_start, RANGE_END=self._range_end, + ) + + def resolve_pool(self, pool): + pool_parts = pool.split("\n") + # pool_parts = pool.split("\n") + for part in pool_parts: + if FAILOVER_KEY in part: + self._failover_peer = part.strip().split( + FAILOVER_KEY)[1].strip().split(";")[0].replace("\"", "") + if RANGE_KEY in part: + range = part.strip().split(RANGE_KEY)[ + 1].strip().split(";")[0] + self._range_start = range.split(" ")[0].strip() + self._range_end = range.split(" ")[1].strip() diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py new file mode 100644 index 000000000..f90cb6b51 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py @@ -0,0 +1,44 @@ +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 + +from dhcp_config import DHCPConfig + + +class NetworkService(pb2_grpc.NetworkModule): + + def __init__(self): + self._dhcp_config = DHCPConfig() + + """ + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + + def GetDHCPRange(self, request, context): + self._dhcp_config.resolve_config() + pool = self._dhcp_config._subnets[0]._pools[0] + return pb2.DHCPRange(code=200, start=pool._range_start, end=pool._range_end) + + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration + """ + + def SetDHCPRange(self, request, context): + print("Setting DHCPRange") + print("Start: " + request.start) + print("End: " + request.end) + self._dhcp_config.resolve_config() + self._dhcp_config.set_range(request.start, request.end, 0, 0) + self._dhcp_config.write_config() + return pb2.Response(code=200, message="DHCP Range Set") + + """ + Return the current status of the network module + """ + + def GetStatus(self, request, context): + # ToDo: Figure out how to resolve the current DHCP status + dhcpStatus = True + message = str({"dhcpStatus":dhcpStatus}) + return pb2.Response(code=200, message=message) diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto b/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto new file mode 100644 index 000000000..8e2732620 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +service NetworkModule { + + rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; + + rpc SetDHCPRange(DHCPRange) returns (Response) {}; + + rpc GetStatus(GetStatusRequest) returns (Response) {}; + + rpc GetIPAddress(GetIPAddressRequest) returns (Response) {}; + + rpc SetLeaseAddress(SetLeaseAddressRequest) returns (Response) {}; + +} + +message Response { + int32 code = 1; + string message = 2; +} + +message DHCPRange { + int32 code = 1; + string start = 2; + string end = 3; +} + +message GetDHCPRangeRequest {} + +message GetIPAddressRequest {} + +message GetStatusRequest {} + +message SetLeaseAddressRequest { + string ipAddress = 1; +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/python/src/run.py b/net_orc/network/modules/dhcp-1/python/src/run.py new file mode 100644 index 000000000..830f048cf --- /dev/null +++ b/net_orc/network/modules/dhcp-1/python/src/run.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import signal +import sys +import argparse + +from grpc.dhcp_config import DHCPConfig + + +class DHCPServer: + + def __init__(self, module): + + signal.signal(signal.SIGINT, self.handler) + signal.signal(signal.SIGTERM, self.handler) + signal.signal(signal.SIGABRT, self.handler) + signal.signal(signal.SIGQUIT, self.handler) + + config = DHCPConfig() + config.resolve_config() + config.write_config() + + def handler(self, signum, frame): + if (signum == 2 or signal == signal.SIGTERM): + exit(1) + + +def run(argv): + parser = argparse.ArgumentParser(description="Faux Device Validator", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + "-m", "--module", help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + server = DHCPServer(args.module) + + +if __name__ == "__main__": + run(sys.argv) diff --git a/net_orc/network/modules/dhcp-2/bin/start_network_service b/net_orc/network/modules/dhcp-2/bin/start_network_service new file mode 100644 index 000000000..d58174695 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/bin/start_network_service @@ -0,0 +1,77 @@ +#!/bin/bash + +CONFIG_FILE=/etc/dhcp/dhcpd.conf +DHCP_PID_FILE=/var/run/dhcpd.pid +DHCP_LOG_FILE=/runtime/network/dhcp2-dhcpd.log +RA_PID_FILE=/var/run/radvd/radvd.pid +RA_LOG_FILE=/runtime/network/dhcp2-radvd.log + +echo "Starrting Network Service..." + +#Enable IPv6 Forwarding +sysctl net.ipv6.conf.all.forwarding=1 +sysctl -p + +# Create leases file if needed +touch /var/lib/dhcp/dhcpd.leases + +#Create directory for radvd +mkdir /var/run/radvd + +#Create and set permissions on the log files +touch $DHCP_LOG_FILE +touch $RA_LOG_FILE +chown $HOST_USER:$HOST_USER $DHCP_LOG_FILE +chown $HOST_USER:$HOST_USER $RA_LOG_FILE + + +#Move the config files to the correct location +cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf +cp /testrun/conf/radvd.conf /etc/radvd.conf + +# Restart dhcp server when config changes +while true; do + + new_checksum=$(md5sum $CONFIG_FILE) + + if [ "$checksum" == "$new_checksum" ]; then + sleep 2 + continue + fi + + echo Config changed. Restarting dhcp server at $(date).. + + if [ -f $DHCP_PID_FILE ]; then + kill -9 $(cat $DHCP_PID_FILE) || true + rm -f $DHCP_PID_FILE + fi + + if [ -f $RA_PID_FILE ]; then + kill -9 $(cat $RA_PID_FILE) || true + rm -f $RA_PID_FILE + fi + + checksum=$new_checksum + + echo Starting isc-dhcp-server at $(date) + + radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE + dhcpd -d &> $DHCP_LOG_FILE & + + while [ ! -f $DHCP_PID_FILE ]; do + echo Waiting for $DHCP_PID_FILE... + sleep 2 + done + + echo $DHCP_PID_FILE now available + + while [ ! -f $RA_PID_FILE ]; do + echo Waiting for $RA_PID_FILE... + sleep 2 + done + + echo $RA_PID_FILE now available + + echo Server now stable + +done \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/conf/dhcpd.conf b/net_orc/network/modules/dhcp-2/conf/dhcpd.conf new file mode 100644 index 000000000..e73a81441 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/conf/dhcpd.conf @@ -0,0 +1,24 @@ +default-lease-time 300; + +failover peer "failover-peer" { + secondary; + address 10.10.10.3; + port 647; + peer address 10.10.10.2; + peer port 847; + max-response-delay 60; + max-unacked-updates 10; + load balance max seconds 3; +} + +subnet 10.10.10.0 netmask 255.255.255.0 { + option ntp-servers 10.10.10.5; + option subnet-mask 255.255.255.0; + option broadcast-address 10.10.10.255; + option routers 10.10.10.1; + option domain-name-servers 10.10.10.4; + pool { + failover peer "failover-peer"; + range 10.10.10.10 10.10.10.20; + } +} diff --git a/net_orc/network/modules/dhcp-2/conf/module_config.json b/net_orc/network/modules/dhcp-2/conf/module_config.json new file mode 100644 index 000000000..2a978ca8c --- /dev/null +++ b/net_orc/network/modules/dhcp-2/conf/module_config.json @@ -0,0 +1,25 @@ +{ + "config": { + "meta": { + "name": "dhcp-2", + "display_name": "DHCP Secondary", + "description": "Secondary DHCP server with IPv6 SLAAC" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 3 + }, + "grpc":{ + "port": 5001 + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/conf/radvd.conf b/net_orc/network/modules/dhcp-2/conf/radvd.conf new file mode 100644 index 000000000..f6d6f30d9 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/conf/radvd.conf @@ -0,0 +1,12 @@ +interface veth0 +{ + AdvSendAdvert on; + AdvManagedFlag off; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 60; + prefix fd10:77be:4186::/64 { + AdvOnLink on; + AdvAutonomous on; + AdvRouterAddr on; + }; +}; \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile b/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile new file mode 100644 index 000000000..989992570 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/dhcp-primary +FROM test-run/base:latest + +# Install dhcp server +RUN apt-get install -y isc-dhcp-server radvd + +# Copy over all configuration files +COPY network/modules/dhcp-2/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/dhcp-2/bin /testrun/bin + +# Copy over all python files +COPY network/modules/dhcp-2/python /testrun/python diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py b/net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py new file mode 100644 index 000000000..f5445ca44 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py @@ -0,0 +1,267 @@ +import re + +CONFIG_FILE = "/etc/dhcp/dhcpd.conf" +CONFIG_FILE_TEST = "network/modules/dhcp-1/conf/dhcpd.conf" + +DEFAULT_LEASE_TIME_KEY = "default-lease-time" + + +class DHCPConfig: + + def __init__(self): + self._default_lease_time = 300 + self._subnets = [] + self._peer = None + + def write_config(self): + conf = str(self) + print("Writing config: \n" + conf) + f = open(CONFIG_FILE, "w") + f.write(conf) + + def resolve_config(self): + with open(CONFIG_FILE) as f: + conf = f.read() + self.resolve_subnets(conf) + self.peer = DHCPFailoverPeer(conf) + + def resolve_subnets(self, conf): + self._subnets = [] + regex = r"(subnet.*)" + subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets: + dhcp_subnet = DHCPSubnet(subnet) + self._subnets.append(dhcp_subnet) + + def set_range(self, start, end, subnet=0, pool=0): + print("Setting Range for pool ") + print(self._subnets[subnet]._pools[pool]) + self._subnets[subnet]._pools[pool]._range_start = start + self._subnets[subnet]._pools[pool]._range_end = end + + def resolve_settings(self, conf): + lines = conf.split("\n") + for line in lines: + if DEFAULT_LEASE_TIME_KEY in line: + self._default_lease_time = line.strip().split(DEFAULT_LEASE_TIME_KEY)[ + 1].strip().split(";")[0] + + self.peer = peer + + def __str__(self): + + config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, DEFAULT_LEASE_TIME=self._default_lease_time + ) + + config += "\n\n"+str(self.peer) + for subnet in self._subnets: + config += "\n\n"+str(subnet) + return str(config) + + +FAILOVER_PEER_KEY = "failover peer" +PRIMARY_KEY = "primary" +ADDRESS_KEY = "address" +PORT_KEY = "port" +PEER_ADDRESS_KEY = "peer address" +PEER_PORT_KEY = "peer port" +MAX_RESPONSE_DELAY_KEY = "max-response-delay" +MAX_UNACKED_UPDATES_KEY = "max-unacked-updates" +MCLT_KEY = "mclt" +SPLIT_KEY = "split" +LOAD_BALANCE_MAX_SECONDS_KEY = "load balance max seconds" + + +class DHCPFailoverPeer: + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + + self.resolve_peer(config) + + def __str__(self): + config = "{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n" + config += "\tprimary;" if self.primary else "secondary;" + config += """\n\t{ADDRESS_KEY} {ADDRESS}; + {PORT_KEY} {PORT}; + {PEER_ADDRESS_KEY} {PEER_ADDRESS}; + {PEER_PORT_KEY} {PEER_PORT}; + {MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY}; + {MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES}; + {MCLT_KEY} {MCLT}; + {SPLIT_KEY} {SPLIT}; + {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; + \r}}""" + + return config.format(length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, ADDRESS=self.address, + PORT_KEY=PORT_KEY, PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds + ) + + def resolve_peer(self, conf): + peer = "" + lines = conf.split("\n") + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if(len(peer) <= 0): + self.name = line.strip().split(FAILOVER_PEER_KEY)[ + 1].strip().split("{")[0].split("\"")[1] + peer += line+"\n" + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[ + 1].strip().split(";")[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[ + 1].strip().split(";")[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split(PEER_ADDRESS_KEY)[ + 1].strip().split(";")[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[ + 1].strip().split(";")[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = line.strip().split(MAX_RESPONSE_DELAY_KEY)[ + 1].strip().split(";")[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split(MAX_UNACKED_UPDATES_KEY)[ + 1].strip().split(";")[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[ + 1].strip().split(";")[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[ + 1].strip().split(";")[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split(LOAD_BALANCE_MAX_SECONDS_KEY)[ + 1].strip().split(";")[0] + if line.endswith("}") and len(peer) > 0: + break + self.peer = peer + + +NTP_OPTION_KEY = "option ntp-servers" +SUBNET_MASK_OPTION_KEY = "option subnet-mask" +BROADCAST_OPTION_KEY = "option broadcast-address" +ROUTER_OPTION_KEY = "option routers" +DNS_OPTION_KEY = "option domain-name-servers" + + +class DHCPSubnet: + def __init__(self, subnet): + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self._pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ + \r\t{NTP_OPTION_KEY} {NTP_OPTION}; + \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; + \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; + \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; + \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" + + config = config.format(length='multi-line', + NTP_OPTION_KEY=NTP_OPTION_KEY, NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, DNS_OPTION=self._dns_servers + ) + for pool in self._pools: + config += "\n\t"+str(pool) + + config += "\n\r}" + return config + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split("\n") + for part in subnet_parts: + if NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[ + 1].strip().split(";")[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split(SUBNET_MASK_OPTION_KEY)[ + 1].strip().split(";")[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split(BROADCAST_OPTION_KEY)[ + 1].strip().split(";")[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[ + 1].strip().split(";")[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[ + 1].strip().split(";")[0] + + def resolve_pools(self, subnet): + regex = r"(pool.*)\}" + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self._pools.append(dhcp_pool) + + +FAILOVER_KEY = "failover peer" +RANGE_KEY = "range" + + +class DHCPPool: + + def __init__(self, pool): + self._failover_peer = None + self._range_start = None + self._range_end = None + self.resolve_pool(pool) + + def __str__(self): + + config = """pool {{ + \r\t\t{FAILOVER_KEY} "{FAILOVER}"; + \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; + \r\t}}""" + + return config.format(length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, FAILOVER=self._failover_peer, + RANGE_KEY=RANGE_KEY, RANGE_START=self._range_start, RANGE_END=self._range_end, + ) + + def resolve_pool(self, pool): + pool_parts = pool.split("\n") + # pool_parts = pool.split("\n") + for part in pool_parts: + if FAILOVER_KEY in part: + self._failover_peer = part.strip().split( + FAILOVER_KEY)[1].strip().split(";")[0].replace("\"", "") + if RANGE_KEY in part: + range = part.strip().split(RANGE_KEY)[ + 1].strip().split(";")[0] + self._range_start = range.split(" ")[0].strip() + self._range_end = range.split(" ")[1].strip() diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py new file mode 100644 index 000000000..f90cb6b51 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py @@ -0,0 +1,44 @@ +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 + +from dhcp_config import DHCPConfig + + +class NetworkService(pb2_grpc.NetworkModule): + + def __init__(self): + self._dhcp_config = DHCPConfig() + + """ + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + + def GetDHCPRange(self, request, context): + self._dhcp_config.resolve_config() + pool = self._dhcp_config._subnets[0]._pools[0] + return pb2.DHCPRange(code=200, start=pool._range_start, end=pool._range_end) + + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration + """ + + def SetDHCPRange(self, request, context): + print("Setting DHCPRange") + print("Start: " + request.start) + print("End: " + request.end) + self._dhcp_config.resolve_config() + self._dhcp_config.set_range(request.start, request.end, 0, 0) + self._dhcp_config.write_config() + return pb2.Response(code=200, message="DHCP Range Set") + + """ + Return the current status of the network module + """ + + def GetStatus(self, request, context): + # ToDo: Figure out how to resolve the current DHCP status + dhcpStatus = True + message = str({"dhcpStatus":dhcpStatus}) + return pb2.Response(code=200, message=message) diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto b/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto new file mode 100644 index 000000000..8e2732620 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +service NetworkModule { + + rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; + + rpc SetDHCPRange(DHCPRange) returns (Response) {}; + + rpc GetStatus(GetStatusRequest) returns (Response) {}; + + rpc GetIPAddress(GetIPAddressRequest) returns (Response) {}; + + rpc SetLeaseAddress(SetLeaseAddressRequest) returns (Response) {}; + +} + +message Response { + int32 code = 1; + string message = 2; +} + +message DHCPRange { + int32 code = 1; + string start = 2; + string end = 3; +} + +message GetDHCPRangeRequest {} + +message GetIPAddressRequest {} + +message GetStatusRequest {} + +message SetLeaseAddressRequest { + string ipAddress = 1; +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/python/src/run.py b/net_orc/network/modules/dhcp-2/python/src/run.py new file mode 100644 index 000000000..830f048cf --- /dev/null +++ b/net_orc/network/modules/dhcp-2/python/src/run.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import signal +import sys +import argparse + +from grpc.dhcp_config import DHCPConfig + + +class DHCPServer: + + def __init__(self, module): + + signal.signal(signal.SIGINT, self.handler) + signal.signal(signal.SIGTERM, self.handler) + signal.signal(signal.SIGABRT, self.handler) + signal.signal(signal.SIGQUIT, self.handler) + + config = DHCPConfig() + config.resolve_config() + config.write_config() + + def handler(self, signum, frame): + if (signum == 2 or signal == signal.SIGTERM): + exit(1) + + +def run(argv): + parser = argparse.ArgumentParser(description="Faux Device Validator", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + "-m", "--module", help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + server = DHCPServer(args.module) + + +if __name__ == "__main__": + run(sys.argv) diff --git a/net_orc/network/modules/dns/bin/start_network_service b/net_orc/network/modules/dns/bin/start_network_service new file mode 100644 index 000000000..4537033c0 --- /dev/null +++ b/net_orc/network/modules/dns/bin/start_network_service @@ -0,0 +1,48 @@ +#!/bin/bash + +CONFIG_FILE=/etc/dnsmasq.conf +PID_FILE=/var/run/dnsmasq.pid +LOG_FILE=/runtime/network/dns.log + +echo Starting dns + +cp /testrun/conf/dnsmasq.conf /etc/dnsmasq.conf + +# Route internet traffic through gateway +ip route add default via 10.10.10.1 dev veth0 + +# Restart dnsmasq when config changes +while true; do + + new_checksum=$(md5sum $CONFIG_FILE) + + if [ "$checksum" == "$new_checksum" ]; then + sleep 2 + continue + fi + + echo Config changed. Restarting dnsmasq at $(date).. + + if [ -f $PID_FILE ]; then + kill -9 $(cat $PID_FILE) || true + rm -f $PID_FILE + fi + + checksum=$new_checksum + + echo Starting dnsmasq at $(date) + + dnsmasq --log-facility=$LOG_FILE -u $HOST_USER & + + while [ ! -f $PID_FILE ]; do + echo Waiting for $PID_FILE... + sleep 2 + done + + # Group flag doesn't work for some reason on dnsmasq + # so we'll manually change the group to the log file + chgrp $HOST_USER $LOG_FILE + + echo $PID_FILE now available + +done \ No newline at end of file diff --git a/net_orc/network/modules/dns/conf/dnsmasq.conf b/net_orc/network/modules/dns/conf/dnsmasq.conf new file mode 100644 index 000000000..5513a9220 --- /dev/null +++ b/net_orc/network/modules/dns/conf/dnsmasq.conf @@ -0,0 +1,5 @@ +server=8.8.8.8 + +interface=veth0 + +log-queries \ No newline at end of file diff --git a/net_orc/network/modules/dns/conf/module_config.json b/net_orc/network/modules/dns/conf/module_config.json new file mode 100644 index 000000000..73f890d28 --- /dev/null +++ b/net_orc/network/modules/dns/conf/module_config.json @@ -0,0 +1,22 @@ +{ + "config": { + "meta": { + "name": "dns", + "display_name": "DNS", + "description": "A DNS server" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 4 + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/dns/dns.Dockerfile b/net_orc/network/modules/dns/dns.Dockerfile new file mode 100644 index 000000000..84c1c7eb1 --- /dev/null +++ b/net_orc/network/modules/dns/dns.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/dns +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update --fix-missing + +#Install dnsmasq +RUN apt-get install -y dnsmasq + +# Copy over all configuration files +COPY network/modules/dns/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/dns/bin /testrun/bin diff --git a/net_orc/network/modules/gateway/bin/start_network_service b/net_orc/network/modules/gateway/bin/start_network_service new file mode 100644 index 000000000..b1b31d335 --- /dev/null +++ b/net_orc/network/modules/gateway/bin/start_network_service @@ -0,0 +1,30 @@ +#!/bin/bash + +LOCAL_INTF=veth0 +EXT_INTF=eth1 + +echo Starting gateway + +/testrun/bin/wait_for_interface $EXT_INT + +# Enable IPv6 forwarding +sysctl net.ipv6.conf.eth1.accept_ra=1 +sysctl net.ipv6.conf.default.forwarding=1 +sysctl -p + +# Start dhclient if external interface does not have IP +if ! ip addr show $EXT_INTF | fgrep 'inet '; then + echo No inet address for $EXT_INTF, initiating dhcp client... + dhclient $EXT_INTF +fi + +# Enable NAT to the outside world +echo 1 > /proc/sys/net/ipv4/ip_forward +iptables -t nat -A POSTROUTING -o $EXT_INTF -j MASQUERADE +iptables -A FORWARD -i $EXT_INTF -o $LOCAL_INTF -m state --state RELATED,ESTABLISHED -j ACCEPT +iptables -A FORWARD -i $LOCAL_INTF -o $EXT_INTF -j ACCEPT + +# Keep gateway running until killed by framework +while true; do + sleep 10 +done diff --git a/net_orc/network/modules/gateway/conf/module_config.json b/net_orc/network/modules/gateway/conf/module_config.json new file mode 100644 index 000000000..35bd34392 --- /dev/null +++ b/net_orc/network/modules/gateway/conf/module_config.json @@ -0,0 +1,22 @@ +{ + "config": { + "meta": { + "name": "gateway", + "display_name": "Gateway", + "description": "Enable internet connectivity on device bridge" + }, + "network": { + "interface": "veth0", + "enable_wan": true, + "ip_index": 1 + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/gateway/gateway.Dockerfile b/net_orc/network/modules/gateway/gateway.Dockerfile new file mode 100644 index 000000000..b7085ebac --- /dev/null +++ b/net_orc/network/modules/gateway/gateway.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/gateway +FROM test-run/base:latest + +# Install required packages +RUN apt-get install -y iptables isc-dhcp-client + +# Copy over all configuration files +COPY network/modules/gateway/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/gateway/bin /testrun/bin diff --git a/net_orc/network/modules/ntp/bin/start_network_service b/net_orc/network/modules/ntp/bin/start_network_service new file mode 100644 index 000000000..4c0c5dc74 --- /dev/null +++ b/net_orc/network/modules/ntp/bin/start_network_service @@ -0,0 +1,13 @@ +#!/bin/bash + +PYTHON_SRC_DIR=/testrun/python/src +LOG_FILE="/runtime/network/ntp.log" + +echo Starting ntp + +#Create and set permissions on the log file +touch $LOG_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE + +#Start the NTP server +python3 -u $PYTHON_SRC_DIR/ntp_server.py > $LOG_FILE diff --git a/net_orc/network/modules/ntp/conf/module_config.json b/net_orc/network/modules/ntp/conf/module_config.json new file mode 100644 index 000000000..781521263 --- /dev/null +++ b/net_orc/network/modules/ntp/conf/module_config.json @@ -0,0 +1,22 @@ +{ + "config": { + "meta": { + "name": "ntp", + "display_name": "NTP", + "description": "An NTP server" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 5 + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/ntp/ntp-server.py b/net_orc/network/modules/ntp/ntp-server.py new file mode 100644 index 000000000..ace3099b0 --- /dev/null +++ b/net_orc/network/modules/ntp/ntp-server.py @@ -0,0 +1,315 @@ +import datetime +import socket +import struct +import time +import queue + +import threading +import select + +taskQueue = queue.Queue() +stopFlag = False + +def system_to_ntp_time(timestamp): + """Convert a system time to a NTP time. + + Parameters: + timestamp -- timestamp in system time + + Returns: + corresponding NTP time + """ + return timestamp + NTP.NTP_DELTA + +def _to_int(timestamp): + """Return the integral part of a timestamp. + + Parameters: + timestamp -- NTP timestamp + + Retuns: + integral part + """ + return int(timestamp) + +def _to_frac(timestamp, n=32): + """Return the fractional part of a timestamp. + + Parameters: + timestamp -- NTP timestamp + n -- number of bits of the fractional part + + Retuns: + fractional part + """ + return int(abs(timestamp - _to_int(timestamp)) * 2**n) + +def _to_time(integ, frac, n=32): + """Return a timestamp from an integral and fractional part. + + Parameters: + integ -- integral part + frac -- fractional part + n -- number of bits of the fractional part + + Retuns: + timestamp + """ + return integ + float(frac)/2**n + + + +class NTPException(Exception): + """Exception raised by this module.""" + pass + + +class NTP: + """Helper class defining constants.""" + + _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) + """system epoch""" + _NTP_EPOCH = datetime.date(1900, 1, 1) + """NTP epoch""" + NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 + """delta between system and NTP time""" + + REF_ID_TABLE = { + 'DNC': "DNC routing protocol", + 'NIST': "NIST public modem", + 'TSP': "TSP time protocol", + 'DTS': "Digital Time Service", + 'ATOM': "Atomic clock (calibrated)", + 'VLF': "VLF radio (OMEGA, etc)", + 'callsign': "Generic radio", + 'LORC': "LORAN-C radionavidation", + 'GOES': "GOES UHF environment satellite", + 'GPS': "GPS UHF satellite positioning", + } + """reference identifier table""" + + STRATUM_TABLE = { + 0: "unspecified", + 1: "primary reference", + } + """stratum table""" + + MODE_TABLE = { + 0: "unspecified", + 1: "symmetric active", + 2: "symmetric passive", + 3: "client", + 4: "server", + 5: "broadcast", + 6: "reserved for NTP control messages", + 7: "reserved for private use", + } + """mode table""" + + LEAP_TABLE = { + 0: "no warning", + 1: "last minute has 61 seconds", + 2: "last minute has 59 seconds", + 3: "alarm condition (clock not synchronized)", + } + """leap indicator table""" + +class NTPPacket: + """NTP packet class. + + This represents an NTP packet. + """ + + _PACKET_FORMAT = "!B B B b 11I" + """packet format to pack/unpack""" + + def __init__(self, version=4, mode=3, tx_timestamp=0): + """Constructor. + + Parameters: + version -- NTP version + mode -- packet mode (client, server) + tx_timestamp -- packet transmit timestamp + """ + self.leap = 0 + """leap second indicator""" + self.version = version + """version""" + self.mode = mode + """mode""" + self.stratum = 0 + """stratum""" + self.poll = 0 + """poll interval""" + self.precision = 0 + """precision""" + self.root_delay = 0 + """root delay""" + self.root_dispersion = 0 + """root dispersion""" + self.ref_id = 0 + """reference clock identifier""" + self.ref_timestamp = 0 + """reference timestamp""" + self.orig_timestamp = 0 + self.orig_timestamp_high = 0 + self.orig_timestamp_low = 0 + """originate timestamp""" + self.recv_timestamp = 0 + """receive timestamp""" + self.tx_timestamp = tx_timestamp + self.tx_timestamp_high = 0 + self.tx_timestamp_low = 0 + """tansmit timestamp""" + + def to_data(self): + """Convert this NTPPacket to a buffer that can be sent over a socket. + + Returns: + buffer representing this packet + + Raises: + NTPException -- in case of invalid field + """ + try: + packed = struct.pack(NTPPacket._PACKET_FORMAT, + (self.leap << 6 | self.version << 3 | self.mode), + self.stratum, + self.poll, + self.precision, + _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), + _to_int(self.root_dispersion) << 16 | + _to_frac(self.root_dispersion, 16), + self.ref_id, + _to_int(self.ref_timestamp), + _to_frac(self.ref_timestamp), + #Change by lichen, avoid loss of precision + self.orig_timestamp_high, + self.orig_timestamp_low, + _to_int(self.recv_timestamp), + _to_frac(self.recv_timestamp), + _to_int(self.tx_timestamp), + _to_frac(self.tx_timestamp)) + except struct.error: + raise NTPException("Invalid NTP packet fields.") + return packed + + def from_data(self, data): + """Populate this instance from a NTP packet payload received from + the network. + + Parameters: + data -- buffer payload + + Raises: + NTPException -- in case of invalid packet format + """ + try: + unpacked = struct.unpack(NTPPacket._PACKET_FORMAT, + data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) + except struct.error: + raise NTPException("Invalid NTP packet.") + + self.leap = unpacked[0] >> 6 & 0x3 + self.version = unpacked[0] >> 3 & 0x7 + self.mode = unpacked[0] & 0x7 + self.stratum = unpacked[1] + self.poll = unpacked[2] + self.precision = unpacked[3] + self.root_delay = float(unpacked[4])/2**16 + self.root_dispersion = float(unpacked[5])/2**16 + self.ref_id = unpacked[6] + self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) + self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) + self.orig_timestamp_high = unpacked[9] + self.orig_timestamp_low = unpacked[10] + self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) + self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) + self.tx_timestamp_high = unpacked[13] + self.tx_timestamp_low = unpacked[14] + + def GetTxTimeStamp(self): + return (self.tx_timestamp_high,self.tx_timestamp_low) + + def SetOriginTimeStamp(self,high,low): + self.orig_timestamp_high = high + self.orig_timestamp_low = low + + +class RecvThread(threading.Thread): + def __init__(self,socket): + threading.Thread.__init__(self) + self.socket = socket + def run(self): + global t,stopFlag + while True: + if stopFlag == True: + print("RecvThread Ended") + break + rlist,wlist,elist = select.select([self.socket],[],[],1); + if len(rlist) != 0: + print("Received %d packets" % len(rlist)) + for tempSocket in rlist: + try: + data,addr = tempSocket.recvfrom(1024) + recvTimestamp = recvTimestamp = system_to_ntp_time(time.time()) + taskQueue.put((data,addr,recvTimestamp)) + except socket.error as msg: + print(msg) + +class WorkThread(threading.Thread): + def __init__(self,socket): + threading.Thread.__init__(self) + self.socket = socket + def run(self): + global taskQueue,stopFlag + while True: + if stopFlag == True: + print("WorkThread Ended") + break + try: + data,addr,recvTimestamp = taskQueue.get(timeout=1) + recvPacket = NTPPacket() + recvPacket.from_data(data) + timeStamp_high,timeStamp_low = recvPacket.GetTxTimeStamp() + sendPacket = NTPPacket(version=4,mode=4) + sendPacket.stratum = 2 + sendPacket.poll = 10 + ''' + sendPacket.precision = 0xfa + sendPacket.root_delay = 0x0bfa + sendPacket.root_dispersion = 0x0aa7 + sendPacket.ref_id = 0x808a8c2c + ''' + sendPacket.ref_timestamp = recvTimestamp-5 + sendPacket.SetOriginTimeStamp(timeStamp_high,timeStamp_low) + sendPacket.recv_timestamp = recvTimestamp + sendPacket.tx_timestamp = system_to_ntp_time(time.time()) + socket.sendto(sendPacket.to_data(),addr) + print("Sent to %s:%d" % (addr[0],addr[1])) + except queue.Empty: + continue + + +listenIp = "0.0.0.0" +listenPort = 123 +socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) +socket.bind((listenIp,listenPort)) +print("local socket: ", socket.getsockname()); +recvThread = RecvThread(socket) +recvThread.start() +workThread = WorkThread(socket) +workThread.start() + +while True: + try: + time.sleep(0.5) + except KeyboardInterrupt: + print("Exiting...") + stopFlag = True + recvThread.join() + workThread.join() + #socket.close() + print("Exited") + break + diff --git a/net_orc/network/modules/ntp/ntp.Dockerfile b/net_orc/network/modules/ntp/ntp.Dockerfile new file mode 100644 index 000000000..3474a504e --- /dev/null +++ b/net_orc/network/modules/ntp/ntp.Dockerfile @@ -0,0 +1,13 @@ +# Image name: test-run/ntp +FROM test-run/base:latest + +# Copy over all configuration files +COPY network/modules/ntp/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/ntp/bin /testrun/bin + +# Copy over all python files +COPY network/modules/ntp/python /testrun/python + +EXPOSE 123/udp diff --git a/net_orc/network/modules/ntp/python/src/ntp_server.py b/net_orc/network/modules/ntp/python/src/ntp_server.py new file mode 100644 index 000000000..a53134fe7 --- /dev/null +++ b/net_orc/network/modules/ntp/python/src/ntp_server.py @@ -0,0 +1,315 @@ +import datetime +import socket +import struct +import time +import queue + +import threading +import select + +taskQueue = queue.Queue() +stopFlag = False + +def system_to_ntp_time(timestamp): + """Convert a system time to a NTP time. + + Parameters: + timestamp -- timestamp in system time + + Returns: + corresponding NTP time + """ + return timestamp + NTP.NTP_DELTA + +def _to_int(timestamp): + """Return the integral part of a timestamp. + + Parameters: + timestamp -- NTP timestamp + + Retuns: + integral part + """ + return int(timestamp) + +def _to_frac(timestamp, n=32): + """Return the fractional part of a timestamp. + + Parameters: + timestamp -- NTP timestamp + n -- number of bits of the fractional part + + Retuns: + fractional part + """ + return int(abs(timestamp - _to_int(timestamp)) * 2**n) + +def _to_time(integ, frac, n=32): + """Return a timestamp from an integral and fractional part. + + Parameters: + integ -- integral part + frac -- fractional part + n -- number of bits of the fractional part + + Retuns: + timestamp + """ + return integ + float(frac)/2**n + + + +class NTPException(Exception): + """Exception raised by this module.""" + pass + + +class NTP: + """Helper class defining constants.""" + + _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) + """system epoch""" + _NTP_EPOCH = datetime.date(1900, 1, 1) + """NTP epoch""" + NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 + """delta between system and NTP time""" + + REF_ID_TABLE = { + 'DNC': "DNC routing protocol", + 'NIST': "NIST public modem", + 'TSP': "TSP time protocol", + 'DTS': "Digital Time Service", + 'ATOM': "Atomic clock (calibrated)", + 'VLF': "VLF radio (OMEGA, etc)", + 'callsign': "Generic radio", + 'LORC': "LORAN-C radionavidation", + 'GOES': "GOES UHF environment satellite", + 'GPS': "GPS UHF satellite positioning", + } + """reference identifier table""" + + STRATUM_TABLE = { + 0: "unspecified", + 1: "primary reference", + } + """stratum table""" + + MODE_TABLE = { + 0: "unspecified", + 1: "symmetric active", + 2: "symmetric passive", + 3: "client", + 4: "server", + 5: "broadcast", + 6: "reserved for NTP control messages", + 7: "reserved for private use", + } + """mode table""" + + LEAP_TABLE = { + 0: "no warning", + 1: "last minute has 61 seconds", + 2: "last minute has 59 seconds", + 3: "alarm condition (clock not synchronized)", + } + """leap indicator table""" + +class NTPPacket: + """NTP packet class. + + This represents an NTP packet. + """ + + _PACKET_FORMAT = "!B B B b 11I" + """packet format to pack/unpack""" + + def __init__(self, version=4, mode=3, tx_timestamp=0): + """Constructor. + + Parameters: + version -- NTP version + mode -- packet mode (client, server) + tx_timestamp -- packet transmit timestamp + """ + self.leap = 0 + """leap second indicator""" + self.version = version + """version""" + self.mode = mode + """mode""" + self.stratum = 0 + """stratum""" + self.poll = 0 + """poll interval""" + self.precision = 0 + """precision""" + self.root_delay = 0 + """root delay""" + self.root_dispersion = 0 + """root dispersion""" + self.ref_id = 0 + """reference clock identifier""" + self.ref_timestamp = 0 + """reference timestamp""" + self.orig_timestamp = 0 + self.orig_timestamp_high = 0 + self.orig_timestamp_low = 0 + """originate timestamp""" + self.recv_timestamp = 0 + """receive timestamp""" + self.tx_timestamp = tx_timestamp + self.tx_timestamp_high = 0 + self.tx_timestamp_low = 0 + """tansmit timestamp""" + + def to_data(self): + """Convert this NTPPacket to a buffer that can be sent over a socket. + + Returns: + buffer representing this packet + + Raises: + NTPException -- in case of invalid field + """ + try: + packed = struct.pack(NTPPacket._PACKET_FORMAT, + (self.leap << 6 | self.version << 3 | self.mode), + self.stratum, + self.poll, + self.precision, + _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), + _to_int(self.root_dispersion) << 16 | + _to_frac(self.root_dispersion, 16), + self.ref_id, + _to_int(self.ref_timestamp), + _to_frac(self.ref_timestamp), + #Change by lichen, avoid loss of precision + self.orig_timestamp_high, + self.orig_timestamp_low, + _to_int(self.recv_timestamp), + _to_frac(self.recv_timestamp), + _to_int(self.tx_timestamp), + _to_frac(self.tx_timestamp)) + except struct.error: + raise NTPException("Invalid NTP packet fields.") + return packed + + def from_data(self, data): + """Populate this instance from a NTP packet payload received from + the network. + + Parameters: + data -- buffer payload + + Raises: + NTPException -- in case of invalid packet format + """ + try: + unpacked = struct.unpack(NTPPacket._PACKET_FORMAT, + data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) + except struct.error: + raise NTPException("Invalid NTP packet.") + + self.leap = unpacked[0] >> 6 & 0x3 + self.version = unpacked[0] >> 3 & 0x7 + self.mode = unpacked[0] & 0x7 + self.stratum = unpacked[1] + self.poll = unpacked[2] + self.precision = unpacked[3] + self.root_delay = float(unpacked[4])/2**16 + self.root_dispersion = float(unpacked[5])/2**16 + self.ref_id = unpacked[6] + self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) + self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) + self.orig_timestamp_high = unpacked[9] + self.orig_timestamp_low = unpacked[10] + self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) + self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) + self.tx_timestamp_high = unpacked[13] + self.tx_timestamp_low = unpacked[14] + + def GetTxTimeStamp(self): + return (self.tx_timestamp_high,self.tx_timestamp_low) + + def SetOriginTimeStamp(self,high,low): + self.orig_timestamp_high = high + self.orig_timestamp_low = low + + +class RecvThread(threading.Thread): + def __init__(self,socket): + threading.Thread.__init__(self) + self.socket = socket + def run(self): + global t,stopFlag + while True: + if stopFlag == True: + print("RecvThread Ended") + break + rlist,wlist,elist = select.select([self.socket],[],[],1); + if len(rlist) != 0: + print("Received %d packets" % len(rlist)) + for tempSocket in rlist: + try: + data,addr = tempSocket.recvfrom(1024) + recvTimestamp = recvTimestamp = system_to_ntp_time(time.time()) + taskQueue.put((data,addr,recvTimestamp)) + except socket.error as msg: + print(msg) + +class WorkThread(threading.Thread): + def __init__(self,socket): + threading.Thread.__init__(self) + self.socket = socket + def run(self): + global taskQueue,stopFlag + while True: + if stopFlag == True: + print("WorkThread Ended") + break + try: + data,addr,recvTimestamp = taskQueue.get(timeout=1) + recvPacket = NTPPacket() + recvPacket.from_data(data) + timeStamp_high,timeStamp_low = recvPacket.GetTxTimeStamp() + sendPacket = NTPPacket(version=4,mode=4) + sendPacket.stratum = 2 + sendPacket.poll = 10 + ''' + sendPacket.precision = 0xfa + sendPacket.root_delay = 0x0bfa + sendPacket.root_dispersion = 0x0aa7 + sendPacket.ref_id = 0x808a8c2c + ''' + sendPacket.ref_timestamp = recvTimestamp-5 + sendPacket.SetOriginTimeStamp(timeStamp_high,timeStamp_low) + sendPacket.recv_timestamp = recvTimestamp + sendPacket.tx_timestamp = system_to_ntp_time(time.time()) + socket.sendto(sendPacket.to_data(),addr) + print("Sent to %s:%d" % (addr[0],addr[1])) + except queue.Empty: + continue + + +listenIp = "0.0.0.0" +listenPort = 123 +socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) +socket.bind((listenIp,listenPort)) +print("local socket: ", socket.getsockname()); +recvThread = RecvThread(socket) +recvThread.start() +workThread = WorkThread(socket) +workThread.start() + +while True: + try: + time.sleep(0.5) + except KeyboardInterrupt: + print("Exiting...") + stopFlag = True + recvThread.join() + workThread.join() + #socket.close() + print("Exited") + break + diff --git a/net_orc/network/modules/ovs/bin/start_network_service b/net_orc/network/modules/ovs/bin/start_network_service new file mode 100644 index 000000000..7c38f484a --- /dev/null +++ b/net_orc/network/modules/ovs/bin/start_network_service @@ -0,0 +1,22 @@ +#!/bin/bash -e + +if [[ "$EUID" -ne 0 ]]; then + echo "Must run as root." + exit 1 +fi + +asyncRun() { + "$@" & + pid="$!" + trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM + + # A signal emitted while waiting will make the wait command return code > 128 + # Let's wrap it in a loop that doesn't end before the process is indeed stopped + while kill -0 $pid > /dev/null 2>&1; do + wait + done +} + +# -u flag allows python print statements +# to be logged by docker by running unbuffered +asyncRun exec python3 -u /ovs/python/src/run.py \ No newline at end of file diff --git a/net_orc/network/modules/ovs/conf/module_config.json b/net_orc/network/modules/ovs/conf/module_config.json new file mode 100644 index 000000000..f6a1eff50 --- /dev/null +++ b/net_orc/network/modules/ovs/conf/module_config.json @@ -0,0 +1,23 @@ +{ + "config": { + "meta": { + "name": "ovs", + "display_name": "OVS", + "description": "Setup and configure Open vSwitch" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 6, + "host": true + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/ovs/ovs.Dockerfile b/net_orc/network/modules/ovs/ovs.Dockerfile new file mode 100644 index 000000000..cd4710e66 --- /dev/null +++ b/net_orc/network/modules/ovs/ovs.Dockerfile @@ -0,0 +1,20 @@ +# Image name: test-run/orchestrator +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update --fix-missing + +#Install openvswitch +RUN apt-get install -y openvswitch-switch + +# Copy over all configuration files +COPY network/modules/ovs/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/ovs/bin /testrun/bin + +# Copy over all python files +COPY network/modules/ovs/python /testrun/python + +#Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git a/net_orc/network/modules/ovs/python/requirements.txt b/net_orc/network/modules/ovs/python/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/net_orc/network/modules/ovs/python/src/logger.py b/net_orc/network/modules/ovs/python/src/logger.py new file mode 100644 index 000000000..50dfb4f50 --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/logger.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +import logging +import os +import sys + +LOGGERS = {} +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' + +# Set level to debug if set as runtime flag +logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=logging.INFO) + +def get_logger(name): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + return LOGGERS[name] \ No newline at end of file diff --git a/net_orc/network/modules/ovs/python/src/ovs_control.py b/net_orc/network/modules/ovs/python/src/ovs_control.py new file mode 100644 index 000000000..6647dc89e --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/ovs_control.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 + +#import ipaddress +import json +import logger +#import os +import util + +CONFIG_FILE = "/ovs/conf/system.json" +DEVICE_BRIDGE = "tr-d" +INTERNET_BRIDGE = "tr-c" +LOGGER = logger.get_logger('ovs_ctrl') + +class OVSControl: + + def __init__(self): + self._int_intf = None + self._dev_intf = None + self._load_config() + + def add_bridge(self,bridgeName): + LOGGER.info("Adding OVS Bridge: " + bridgeName) + # Create the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this bridge already exists by this name it won't fail + # and will not modify the existing bridge + success=util.run_command("ovs-vsctl --may-exist add-br " + bridgeName) + return success + + def add_port(self,port, bridgeName): + LOGGER.info("Adding Port " + port + " to OVS Bridge: " + bridgeName) + # Add a port to the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this port already exists on the bridge and will not + # modify the existing bridge + success=util.run_command("ovs-vsctl --may-exist add-port " + bridgeName + " " + port) + return success + + def create_net(self): + LOGGER.info("Creating baseline network") + + # Create data plane + self.add_bridge(DEVICE_BRIDGE) + + # Create control plane + self.add_bridge(INTERNET_BRIDGE) + + # Remove IP from internet adapter + self.set_interface_ip(self._int_intf,"0.0.0.0") + + # Add external interfaces to data and control plane + self.add_port(self._dev_intf,DEVICE_BRIDGE) + self.add_port(self._int_intf,INTERNET_BRIDGE) + + # # Set ports up + self.set_bridge_up(DEVICE_BRIDGE) + self.set_bridge_up(INTERNET_BRIDGE) + + def delete_bridge(self,bridgeName): + LOGGER.info("Deleting OVS Bridge: " + bridgeName) + # Delete the bridge using ovs-vsctl commands + # Uses the --if-exists option to prevent failures + # if this bridge does not exists + success=util.run_command("ovs-vsctl --if-exists del-br " + bridgeName) + return success + + def _load_config(self): + LOGGER.info("Loading Configuration: " + CONFIG_FILE) + config_json = json.load(open(CONFIG_FILE, 'r')) + self._int_intf = config_json['internet_intf'] + self._dev_intf = config_json['device_intf'] + LOGGER.info("Configuration Loaded") + LOGGER.info("Internet Interface: " + self._int_intf) + LOGGER.info("Device Interface: " + self._dev_intf) + + def restore_net(self): + LOGGER.info("Restoring Network...") + # Delete data plane + self.delete_bridge(DEVICE_BRIDGE) + + # Delete control plane + self.delete_bridge(INTERNET_BRIDGE) + + LOGGER.info("Network is restored") + + def show_config(self): + LOGGER.info("Show current config of OVS") + success=util.run_command("ovs-vsctl show") + return success + + def set_bridge_up(self,bridgeName): + LOGGER.info("Setting Bridge device to up state: " + bridgeName) + success=util.run_command("ip link set dev " + bridgeName + " up") + return success + + def set_interface_ip(self,interface, ipAddr): + LOGGER.info("Setting interface " + interface + " to " + ipAddr) + # Remove IP from internet adapter + util.run_command("ifconfig " + interface + " 0.0.0.0") + +if __name__ == '__main__': + ovs = OVSControl() + ovs.create_net() + ovs.show_config() + ovs.restore_net() + ovs.show_config() + diff --git a/net_orc/network/modules/ovs/python/src/run.py b/net_orc/network/modules/ovs/python/src/run.py new file mode 100644 index 000000000..4c1474e74 --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/run.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +import logger +import signal +import time + +from ovs_control import OVSControl + +LOGGER = logger.get_logger('ovs_control_run') + +class OVSControlRun: + + def __init__(self): + + signal.signal(signal.SIGINT, self.handler) + signal.signal(signal.SIGTERM, self.handler) + signal.signal(signal.SIGABRT, self.handler) + signal.signal(signal.SIGQUIT, self.handler) + + LOGGER.info("Starting OVS Control") + + # Get all components ready + self._ovs_control = OVSControl() + + self._ovs_control.restore_net() + + self._ovs_control.create_net() + + self._ovs_control.show_config() + + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready. Waiting for device information...") + + #Loop forever until process is stopped + while True: + LOGGER.info("OVS Running") + time.sleep(1000) + + # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) + #time.sleep(300) + + # Tear down network + #self._ovs_control.shutdown() + + def handler(self, signum, frame): + LOGGER.info("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.info("Exit signal received: " + str(signum)) + if (signum == 2 or signal == signal.SIGTERM): + LOGGER.info("Exit signal received. Restoring network...") + self._ovs_control.shutdown() + exit(1) + +ovs = OVSControlRun() diff --git a/net_orc/network/modules/ovs/python/src/util.py b/net_orc/network/modules/ovs/python/src/util.py new file mode 100644 index 000000000..8bb0439bc --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/util.py @@ -0,0 +1,19 @@ +import subprocess +import logger + + +def run_command(cmd): + success = False + LOGGER = logger.get_logger('util') + process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode !=0: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + LOGGER.error("Command Failed: " + cmd) + LOGGER.error("Error: " + err_msg) + else: + succ_msg = "%s. Code: %s" % (stdout.strip().decode('utf-8'), process.returncode) + LOGGER.info("Command Success: " + cmd) + LOGGER.info("Success: " + succ_msg) + success = True + return success \ No newline at end of file diff --git a/net_orc/network/modules/radius/bin/start_network_service b/net_orc/network/modules/radius/bin/start_network_service new file mode 100644 index 000000000..e27a828dd --- /dev/null +++ b/net_orc/network/modules/radius/bin/start_network_service @@ -0,0 +1,20 @@ +#!/bin/bash + +PYTHON_SRC_DIR=/testrun/python/src +CONF_DIR="/testrun/conf" +LOG_FILE="/runtime/network/radius.log" + +echo Starting authenticator.py + +cp $CONF_DIR/eap /etc/freeradius/3.0/mods-available/eap + +# Do we want to mount resources/network/{module} to the network module to avoid file copying during build? +cp $CONF_DIR/ca.crt /etc/ssl/certs/ca-certificates.crt + +python3 -u $PYTHON_SRC_DIR/authenticator.py & + +#Create and set permissions on the log file +touch $LOG_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE + +freeradius -f -X &> $LOG_FILE \ No newline at end of file diff --git a/net_orc/network/modules/radius/conf/ca.crt b/net_orc/network/modules/radius/conf/ca.crt new file mode 100644 index 000000000..d009cb1ab --- /dev/null +++ b/net_orc/network/modules/radius/conf/ca.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEYTCCA0mgAwIBAgIUQJ4F8hBCnCp7ASPZqG/tNQgoUR4wDQYJKoZIhvcNAQEL +BQAwgb8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBIbWzN+TGVpY2VzdGVyc2hpcmUx +FTATBgNVBAcMDExvdWdoYm9yb3VnaDEUMBIGA1UECgwLRm9yZXN0IFJvY2sxDjAM +BgNVBAsMBUN5YmVyMR8wHQYDVQQDDBZjeWJlci5mb3Jlc3Ryb2NrLmNvLnVrMTUw +MwYJKoZIhvcNAQkBFiZjeWJlcnNlY3VyaXR5LnRlc3RpbmdAZm9yZXN0cm9jay5j +by51azAeFw0yMjAzMDQxMjEzMTBaFw0yNzAzMDMxMjEzMTBaMIG/MQswCQYDVQQG +EwJHQjEbMBkGA1UECAwSG1szfkxlaWNlc3RlcnNoaXJlMRUwEwYDVQQHDAxMb3Vn +aGJvcm91Z2gxFDASBgNVBAoMC0ZvcmVzdCBSb2NrMQ4wDAYDVQQLDAVDeWJlcjEf +MB0GA1UEAwwWY3liZXIuZm9yZXN0cm9jay5jby51azE1MDMGCSqGSIb3DQEJARYm +Y3liZXJzZWN1cml0eS50ZXN0aW5nQGZvcmVzdHJvY2suY28udWswggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDNz3vJiZ5nX8lohEhqXvxEme3srip8qF7 +r5ScIeQzsTKuPNAmoefx9TcU3SyA2BnREuDX+OCYMN62xxWG2PndOl0LNezAY22C +PJwHbaBntLKY/ZhxYSTyratM7zxKSVLtClamA/bJXBhdfZZKYOP3xlZQEQTygtzK +j5hZwDrpDARtjRZIMWPLqVcoaW9ow2urJVsdD4lYAhpQU2UIgiWo7BG3hJsUfcYX +EQyyrMKJ7xaCwzIU7Sem1PETrzeiWg4KhDijc7A0RMPWlU5ljf0CnY/IZwiDsMRl +hGmGBPvR+ddiWPZPtSKj6TPWpsaMUR9UwncLmSSrhf1otX4Mw0vbAgMBAAGjUzBR +MB0GA1UdDgQWBBR0Qxx2mDTPIfpnzO5YtycGs6t8ijAfBgNVHSMEGDAWgBR0Qxx2 +mDTPIfpnzO5YtycGs6t8ijAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA +A4IBAQCpTMBMZGXF74WCxrIk23MUsu0OKzMs8B16Wy8BHz+7hInLZwbkx71Z0TP5 +rsMITetSANtM/k4jH7Vmr1xmzU7oSz5zKU1+7rIjKjGtih48WZdJay0uqfKe0K2s +vsRS0LVLY6IiTFWK9YrLC0QFSK7z5GDl1oc/D5yIZAkbsL6PRQJ5RQsYf5BhHfyB +PRV/KcF7c9iKVYW2vILJzbyYLHTDADTHbtfCe5+pAGxagswDjSMVkQu5iJNjbtUO +5iv7PRkgzUFru9Kk6q+LrXbzyPPCwlc3Xbh1q5jSkJLkcV3K26E7+uX5HI+Hxpeh +a8kOsdnw+N8wX6bc7eXIaGBDMine +-----END CERTIFICATE----- diff --git a/net_orc/network/modules/radius/conf/eap b/net_orc/network/modules/radius/conf/eap new file mode 100644 index 000000000..a868f16cd --- /dev/null +++ b/net_orc/network/modules/radius/conf/eap @@ -0,0 +1,602 @@ +eap { + + default_eap_type = tls + + # A list is maintained to correlate EAP-Response + # packets with EAP-Request packets. After a + # configurable length of time, entries in the list + # expire, and are deleted. + # + timer_expire = 60 + + # There are many EAP types, but the server has support + # for only a limited subset. If the server receives + # a request for an EAP type it does not support, then + # it normally rejects the request. By setting this + # configuration to "yes", you can tell the server to + # instead keep processing the request. Another module + # MUST then be configured to proxy the request to + # another RADIUS server which supports that EAP type. + # + # If another module is NOT configured to handle the + # request, then the request will still end up being + # rejected. + # + ignore_unknown_eap_types = no + + # Cisco AP1230B firmware 12.2(13)JA1 has a bug. When given + # a User-Name attribute in an Access-Accept, it copies one + # more byte than it should. + # + # We can work around it by configurably adding an extra + # zero byte. + # + cisco_accounting_username_bug = no + + # Help prevent DoS attacks by limiting the number of + # sessions that the server is tracking. For simplicity, + # this is taken from the "max_requests" directive in + # radiusd.conf. + # + max_sessions = ${max_requests} + + # Common TLS configuration for TLS-based EAP types + # ------------------------------------------------ + # + # See raddb/certs/README.md for additional comments + # on certificates. + # + # If OpenSSL was not found at the time the server was + # built, the "tls", "ttls", and "peap" sections will + # be ignored. + # + # If you do not currently have certificates signed by + # a trusted CA you may use the 'snakeoil' certificates. + # Included with the server in raddb/certs. + # + # If these certificates have not been auto-generated: + # cd raddb/certs + # make + # + # These test certificates SHOULD NOT be used in a normal + # deployment. They are created only to make it easier + # to install the server, and to perform some simple + # tests with EAP-TLS, TTLS, or PEAP. + # + # Note that you should NOT use a globally known CA here! + # e.g. using a Verisign cert as a "known CA" means that + # ANYONE who has a certificate signed by them can + # authenticate via EAP-TLS! This is likely not what you want. + # + tls-config tls-common { + private_key_password = whatever + private_key_file = /etc/ssl/private/ssl-cert-snakeoil.key + + # If Private key & Certificate are located in + # the same file, then private_key_file & + # certificate_file must contain the same file + # name. + # + # If ca_file (below) is not used, then the + # certificate_file below SHOULD also include all of + # the intermediate CA certificates used to sign the + # server certificate, but NOT the root CA. + # + # Including the ROOT CA certificate is not useful and + # merely inflates the exchanged data volume during + # the TLS negotiation. + # + # This file should contain the server certificate, + # followed by intermediate certificates, in order. + # i.e. If we have a server certificate signed by CA1, + # which is signed by CA2, which is signed by a root + # CA, then the "certificate_file" should contain + # server.pem, followed by CA1.pem, followed by + # CA2.pem. + # + # When using "ca_file" or "ca_dir", the + # "certificate_file" should contain only + # "server.pem". And then you may (or may not) need + # to set "auto_chain", depending on your version of + # OpenSSL. + # + # In short, SSL / TLS certificates are complex. + # There are many versions of software, each of which + # behave slightly differently. It is impossible to + # give advice which will work everywhere. Instead, + # we give general guidelines. + # + certificate_file = /etc/ssl/certs/ssl-cert-snakeoil.pem + + # Trusted Root CA list + # + # This file can contain multiple CA certificates. + # ALL of the CA's in this list will be trusted to + # issue client certificates for authentication. + # + # In general, you should use self-signed + # certificates for 802.1x (EAP) authentication. + # In that case, this CA file should contain + # *one* CA certificate. + # + ca_file = /etc/ssl/certs/ca-certificates.crt + + # Check the Certificate Revocation List + # + # 1) Copy CA certificates and CRLs to same directory. + # 2) Execute 'c_rehash '. + # 'c_rehash' is OpenSSL's command. + # 3) uncomment the lines below. + # 5) Restart radiusd + # check_crl = yes + + # Check if intermediate CAs have been revoked. + # check_all_crl = yes + + ca_path = ${cadir} + + # OpenSSL does not reload contents of ca_path dir over time. + # That means that if check_crl is enabled and CRLs are loaded + # from ca_path dir, at some point CRLs will expire and + # RADIUSd will stop authenticating users. + # If ca_path_reload_interval is non-zero, it will force OpenSSL + # to reload all data from ca_path periodically + # + # Flush ca_path each hour + # ca_path_reload_interval = 3600 + + + # Accept an expired Certificate Revocation List + # + # allow_expired_crl = no + + # If check_cert_issuer is set, the value will + # be checked against the DN of the issuer in + # the client certificate. If the values do not + # match, the certificate verification will fail, + # rejecting the user. + # + # This check can be done more generally by checking + # the value of the TLS-Client-Cert-Issuer attribute. + # This check can be done via any mechanism you + # choose. + # + # check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd" + + # If check_cert_cn is set, the value will + # be xlat'ed and checked against the CN + # in the client certificate. If the values + # do not match, the certificate verification + # will fail rejecting the user. + # + # This check is done only if the previous + # "check_cert_issuer" is not set, or if + # the check succeeds. + # + # This check can be done more generally by writing + # "unlang" statements to examine the value of the + # TLS-Client-Cert-Common-Name attribute. + # + # check_cert_cn = %{User-Name} + + # + # This configuration item only applies when there is + # an intermediate CA between the "root" CA, and the + # client certificate. If we trust the root CA, then + # by definition we also trust ANY intermediate CA + # which is signed by that root. This means ANOTHER + # intermediate CA can issue client certificates, and + # have them accepted by the EAP module. + # + # The solution is to list ONLY the trusted CAs in the + # FreeRADIUS configuration, and then set this + # configuration item to "yes". + # + # Then, when the server receives a client certificate + # from an untrusted CA, that authentication request + # can be rejected. + # + # It is possible to do these checks in "unlang", by + # checking for unknown names in the + # TLS-Cert-Common-Name attribute, but that is + # more complex. So we add a configuration option + # which can be set once, and which works for all + # possible intermediate CAs, no matter what their + # value. + # + # reject_unknown_intermediate_ca = no + + # Set this option to specify the allowed + # TLS cipher suites. The format is listed + # in "man 1 ciphers". + # + cipher_list = "DEFAULT" + + # If enabled, OpenSSL will use server cipher list + # (possibly defined by cipher_list option above) + # for choosing right cipher suite rather than + # using client-specified list which is OpenSSl default + # behavior. Setting this to "yes" means that OpenSSL + # will choose the servers ciphers, even if they do not + # best match what the client sends. + # + # TLS negotiation is usually good, but can be imperfect. + # This setting allows administrators to "fine tune" it + # if necessary. + # + cipher_server_preference = no + + # You can selectively disable TLS versions for + # compatability with old client devices. + # + # If your system has OpenSSL 1.1.0 or greater, do NOT + # use these. Instead, set tls_min_version and + # tls_max_version. + # +# disable_tlsv1_2 = yes +# disable_tlsv1_1 = yes +# disable_tlsv1 = yes + + + # Set min / max TLS version. + # + # Generally speaking you should NOT use TLS 1.0 or + # TLS 1.1. They are old, possibly insecure, and + # deprecated. However, it is sometimes necessary to + # enable it for compatibility with legact systems. + # We recommend replacing those legacy systems, and + # using at least TLS 1.2. + # + # Some Debian versions disable older versions of TLS, + # and requires the application to manually enable + # them. + # + # If you are running such a distribution, you should + # set these options, otherwise older clients will not + # be able to connect. + # + # Allowed values are "1.0", "1.1", "1.2", and "1.3". + # + # As of 2021, it is STRONGLY RECOMMENDED to set + # + # tls_min_version = "1.2" + # + # Older TLS versions are insecure and deprecated. + # + # In order to enable TLS 1.0 and TLS 1.1, you may + # also need to update cipher_list below to: + # + # * OpenSSL >= 3.x + # + # cipher_list = "DEFAULT@SECLEVEL=0" + # + # * OpenSSL < 3.x + # + # cipher_list = "DEFAULT@SECLEVEL=1" + # + # The values must be in quotes. + # + # We also STRONGLY RECOMMEND to set + # + # tls_max_version = "1.2" + # + # While the server will accept "1.3" as a value, + # most EAP supplicants WILL NOT DO TLS 1.3 PROPERLY. + # + # i.e. they WILL NOT WORK, SO DO NOT ASK QUESTIONS ON + # THE LIST ABOUT WHY IT DOES NOT WORK. + # + # The TLS 1.3 support is here for future + # compatibility, as clients get upgraded, and people + # don't upgrade their copies of FreeRADIUS. + # + # Also note that we only support TLS 1.3 for EAP-TLS. + # Other versions of EAP (PEAP, TTLS, FAST) DO NOT + # SUPPORT TLS 1.3. + # + tls_min_version = "1.2" + tls_max_version = "1.2" + + # Elliptical cryptography configuration + # + # This configuration should be one of the following: + # + # * a name of the curve to use, e.g. "prime256v1". + # + # * a colon separated list of curve NIDs or names. + # + # * an empty string, in which case OpenSSL will choose + # the "best" curve for the situation. + # + # For supported curve names, please run + # + # openssl ecparam -list_curves + # + ecdh_curve = "" + + # Session resumption / fast reauthentication + # cache. + # + # The cache contains the following information: + # + # session Id - unique identifier, managed by SSL + # User-Name - from the Access-Accept + # Stripped-User-Name - from the Access-Request + # Cached-Session-Policy - from the Access-Accept + # + # See also the "store" subsection below for + # additional attributes which can be cached. + # + # The "Cached-Session-Policy" is the name of a + # policy which should be applied to the cached + # session. This policy can be used to assign + # VLANs, IP addresses, etc. It serves as a useful + # way to re-apply the policy from the original + # Access-Accept to the subsequent Access-Accept + # for the cached session. + # + # On session resumption, these attributes are + # copied from the cache, and placed into the + # reply list. + # + # You probably also want "use_tunneled_reply = yes" + # when using fast session resumption. + # + # You can check if a session has been resumed by + # looking for the existence of the EAP-Session-Resumed + # attribute. Note that this attribute will *only* + # exist in the "post-auth" section. + # + # CAVEATS: The cache is stored and reloaded BEFORE + # the "post-auth" section is run. This limitation + # makes caching more difficult than it should be. In + # practice, it means that the first authentication + # session must set the reply attributes before the + # post-auth section is run. + # + # When the session is resumed, the attributes are + # restored and placed into the session-state list. + # + cache { + # Enable it. The default is "no". Deleting the entire "cache" + # subsection also disables caching. + # + # The session cache requires the use of the + # "name" and "persist_dir" configuration + # items, below. + # + # The internal OpenSSL session cache has been permanently + # disabled. + # + # You can disallow resumption for a particular user by adding the + # following attribute to the control item list: + # + # Allow-Session-Resumption = No + # + # If "enable = no" below, you CANNOT enable resumption for just one + # user by setting the above attribute to "yes". + # + enable = no + + # Lifetime of the cached entries, in hours. The sessions will be + # deleted/invalidated after this time. + # + lifetime = 24 # hours + + # Internal "name" of the session cache. Used to + # distinguish which TLS context sessions belong to. + # + # The server will generate a random value if unset. + # This will change across server restart so you MUST + # set the "name" if you want to persist sessions (see + # below). + # + # name = "EAP module" + + # Simple directory-based storage of sessions. + # Two files per session will be written, the SSL + # state and the cached VPs. This will persist session + # across server restarts. + # + # The default directory is ${logdir}, for historical + # reasons. You should ${db_dir} instead. And check + # the value of db_dir in the main radiusd.conf file. + # It should not point to ${raddb} + # + # The server will need write perms, and the directory + # should be secured from anyone else. You might want + # a script to remove old files from here periodically: + # + # find ${logdir}/tlscache -mtime +2 -exec rm -f {} \; + # + # This feature REQUIRES "name" option be set above. + # + # persist_dir = "${logdir}/tlscache" + + # + # As of 3.0.20, it is possible to partially + # control which attributes exist in the + # session cache. This subsection lists + # attributes which are taken from the reply, + # and saved to the on-disk cache. When the + # session is resumed, these attributes are + # added to the "session-state" list. The + # default configuration will then take care + # of copying them to the reply. + # + store { + Tunnel-Private-Group-Id + } + } + + # Client certificates can be validated via an + # external command. This allows dynamic CRLs or OCSP + # to be used. + # + # This configuration is commented out in the + # default configuration. Uncomment it, and configure + # the correct paths below to enable it. + # + # If OCSP checking is enabled, and the OCSP checks fail, + # the verify section is not run. + # + # If OCSP checking is disabled, the verify section is + # run on successful certificate validation. + # + verify { + # If the OCSP checks succeed, the verify section + # is run to allow additional checks. + # + # If you want to skip verify on OCSP success, + # uncomment this configuration item, and set it + # to "yes". + # + # skip_if_ocsp_ok = no + + # A temporary directory where the client + # certificates are stored. This directory + # MUST be owned by the UID of the server, + # and MUST not be accessible by any other + # users. When the server starts, it will do + # "chmod go-rwx" on the directory, for + # security reasons. The directory MUST + # exist when the server starts. + # + # You should also delete all of the files + # in the directory when the server starts. + # + # tmpdir = /tmp/radiusd + + # The command used to verify the client cert. + # We recommend using the OpenSSL command-line + # tool. + # + # The ${..ca_path} text is a reference to + # the ca_path variable defined above. + # + # The %{TLS-Client-Cert-Filename} is the name + # of the temporary file containing the cert + # in PEM format. This file is automatically + # deleted by the server when the command + # returns. + # + # client = "/path/to/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}" + } + + # OCSP Configuration + # + # Certificates can be verified against an OCSP + # Responder. This makes it possible to immediately + # revoke certificates without the distribution of + # new Certificate Revocation Lists (CRLs). + # + ocsp { + # Enable it. The default is "no". + # Deleting the entire "ocsp" subsection + # also disables ocsp checking + # + enable = no + + # The OCSP Responder URL can be automatically + # extracted from the certificate in question. + # To override the OCSP Responder URL set + # "override_cert_url = yes". + # + override_cert_url = yes + + # If the OCSP Responder address is not extracted from + # the certificate, the URL can be defined here. + # + url = "http://127.0.0.1/ocsp/" + + # If the OCSP Responder can not cope with nonce + # in the request, then it can be disabled here. + # + # For security reasons, disabling this option + # is not recommended as nonce protects against + # replay attacks. + # + # Note that Microsoft AD Certificate Services OCSP + # Responder does not enable nonce by default. It is + # more secure to enable nonce on the responder than + # to disable it in the query here. + # See http://technet.microsoft.com/en-us/library/cc770413%28WS.10%29.aspx + # + # use_nonce = yes + + # Number of seconds before giving up waiting + # for OCSP response. 0 uses system default. + # + # timeout = 0 + + # Normally an error in querying the OCSP + # responder (no response from server, server did + # not understand the request, etc) will result in + # a validation failure. + # + # To treat these errors as 'soft' failures and + # still accept the certificate, enable this + # option. + # + # Warning: this may enable clients with revoked + # certificates to connect if the OCSP responder + # is not available. Use with caution. + # + # softfail = no + } + + # + # The server can present different certificates based + # on the realm presented in EAP. See + # raddb/certs/realms/README.md for examples of how to + # configure this. + # + # Note that the default is to use the same set of + # realm certificates for both EAP and RadSec! If + # this is not what you want, you should use different + # subdirectories or each, e.g. ${certdir}/realms/radsec/, + # and ${certdir}/realms/eap/ + # + # realm_dir = ${certdir}/realms/ + } + + # EAP-TLS + # + # The TLS configuration for TLS-based EAP types is held in + # the "tls-config" section, above. + # + tls { + # Point to the common TLS configuration + # + tls = tls-common + + # As part of checking a client certificate, the EAP-TLS + # sets some attributes such as TLS-Client-Cert-Common-Name. This + # virtual server has access to these attributes, and can + # be used to accept or reject the request. + # + # virtual_server = check-eap-tls + + # You can control whether or not EAP-TLS requires a + # client certificate by setting + # + # configurable_client_cert = yes + # + # Once that setting has been changed, you can then set + # + # EAP-TLS-Require-Client-Cert = No + # + # in the control items for a request, and the EAP-TLS + # module will not require a client certificate from + # the supplicant. + # + # WARNING: This configuration should only be used + # when the users are placed into a "captive portal" + # or "walled garden", where they have limited network + # access. Otherwise the configuraton will allow + # anyone on the network, without authenticating them! + # +# configurable_client_cert = no + } + +} diff --git a/net_orc/network/modules/radius/conf/module_config.json b/net_orc/network/modules/radius/conf/module_config.json new file mode 100644 index 000000000..153d951df --- /dev/null +++ b/net_orc/network/modules/radius/conf/module_config.json @@ -0,0 +1,22 @@ +{ + "config": { + "meta": { + "name": "radius", + "display_name": "Radius", + "description": "Enable port based authentication" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 7 + }, + "docker": { + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/radius/python/requirements.txt b/net_orc/network/modules/radius/python/requirements.txt new file mode 100644 index 000000000..37d126cb1 --- /dev/null +++ b/net_orc/network/modules/radius/python/requirements.txt @@ -0,0 +1,3 @@ +eventlet +pbr +transitions \ No newline at end of file diff --git a/net_orc/network/modules/radius/python/src/authenticator.py b/net_orc/network/modules/radius/python/src/authenticator.py new file mode 100644 index 000000000..55fa51d87 --- /dev/null +++ b/net_orc/network/modules/radius/python/src/authenticator.py @@ -0,0 +1,31 @@ +from chewie.chewie import Chewie +import logging + +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' +INTERFACE_NAME="veth0" +RADIUS_SERVER_IP="127.0.0.1" +RADIUS_SERVER_PORT=1812 +RADIUS_SERVER_SECRET="testing123" + +class Authenticator(): + + def __init__(self): + self.chewie = Chewie(INTERFACE_NAME, self._get_logger(), self._auth_handler, self._failure_handler, self._logoff_handler, radius_server_ip=RADIUS_SERVER_IP, radius_server_port=RADIUS_SERVER_PORT, radius_server_secret=RADIUS_SERVER_SECRET) + self.chewie.run() + + def _get_logger(self): + logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=logging.INFO) + logger = logging.getLogger("chewie") + return logger + + def _auth_handler(self, address, group_address, *args, **kwargs): + print("Successful auth for " + str(address) + " on port " + str(group_address)) + + def _failure_handler(self, address, group_address): + print("Failed auth for " + str(address) + " on port " + str(group_address)) + + def _logoff_handler(self, address, group_address): + print("Log off reported for " + str(address) + " on port " + str(group_address)) + +authenticator = Authenticator() \ No newline at end of file diff --git a/net_orc/network/modules/radius/radius.Dockerfile b/net_orc/network/modules/radius/radius.Dockerfile new file mode 100644 index 000000000..a72313826 --- /dev/null +++ b/net_orc/network/modules/radius/radius.Dockerfile @@ -0,0 +1,26 @@ +# Image name: test-run/radius +FROM test-run/base:latest + +# Install radius and git +RUN apt-get update && apt-get install -y openssl freeradius git + +# Clone chewie from source. +RUN git clone --branch 0.0.25 https://github.com/faucetsdn/chewie + +# Install chewie as Python module +RUN pip3 install chewie/ + +EXPOSE 1812/udp +EXPOSE 1813/udp + +# Copy over all configuration files +COPY network/modules/radius/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/radius/bin /testrun/bin + +# Copy over all python files +COPY network/modules/radius/python /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git a/net_orc/network/modules/template/bin/start_network_service b/net_orc/network/modules/template/bin/start_network_service new file mode 100644 index 000000000..94ae0def9 --- /dev/null +++ b/net_orc/network/modules/template/bin/start_network_service @@ -0,0 +1,13 @@ +#!/bin/bash + +# Place holder function for testing and validation +# Each network module should include a start_networkig_service +# file that overwrites this one to boot all of the its specific +# requirements to run. + +echo "Starting network service..." +echo "This is not a real network service, just a test" +echo "Network service started" + +# Do Nothing, just keep the module alive +while true; do sleep 1; done \ No newline at end of file diff --git a/net_orc/network/modules/template/conf/module_config.json b/net_orc/network/modules/template/conf/module_config.json new file mode 100644 index 000000000..bcea3808e --- /dev/null +++ b/net_orc/network/modules/template/conf/module_config.json @@ -0,0 +1,26 @@ +{ + "config": { + "meta": { + "name": "template", + "display_name": "Template", + "description": "Template for building network service modules" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 9 + }, + "grpc": { + "port": 50001 + }, + "docker": { + "enable_container": false, + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/template/python/src/template_main.py b/net_orc/network/modules/template/python/src/template_main.py new file mode 100644 index 000000000..50c425c23 --- /dev/null +++ b/net_orc/network/modules/template/python/src/template_main.py @@ -0,0 +1,4 @@ +"""Python code for the template module.""" + +if __name__ == "__main__": + print ("Template main") diff --git a/net_orc/network/modules/template/template.Dockerfile b/net_orc/network/modules/template/template.Dockerfile new file mode 100644 index 000000000..54bfb9628 --- /dev/null +++ b/net_orc/network/modules/template/template.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/dhcp-primary +FROM test-run/base:latest + +# Copy over all configuration files +COPY network/modules/template/conf /testrun/conf + +# Load device binary files +COPY network/modules/template/bin /testrun/bin + +# Copy over all python files +COPY network/modules/template/python /testrun/python \ No newline at end of file diff --git a/net_orc/orchestrator.Dockerfile b/net_orc/orchestrator.Dockerfile new file mode 100644 index 000000000..f062a33d4 --- /dev/null +++ b/net_orc/orchestrator.Dockerfile @@ -0,0 +1,22 @@ +# Image name: test-run/orchestrator +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update + +RUN apt-get install -y python3-pip curl openvswitch-switch + +#Download and install docker client +ENV DOCKERVERSION=20.10.2 +RUN curl -fsSLO https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKERVERSION}.tgz \ + && tar xzvf docker-${DOCKERVERSION}.tgz --strip 1 -C /usr/local/bin docker/docker \ + && rm docker-${DOCKERVERSION}.tgz + +#Create a directory to load all the app files into +RUN mkdir /python + +#Load the requirements file +COPY python/requirements.txt /python + +#Install all python requirements for the module +RUN pip3 install -r python/requirements.txt diff --git a/net_orc/python/requirements.txt b/net_orc/python/requirements.txt new file mode 100644 index 000000000..5d8f29214 --- /dev/null +++ b/net_orc/python/requirements.txt @@ -0,0 +1,4 @@ +docker +ipaddress +netifaces +scapy \ No newline at end of file diff --git a/net_orc/python/src/listener.py b/net_orc/python/src/listener.py new file mode 100644 index 000000000..d07de4686 --- /dev/null +++ b/net_orc/python/src/listener.py @@ -0,0 +1,68 @@ +"""Intercepts network traffic between network services and the device +under test.""" +from scapy.all import AsyncSniffer, DHCP, get_if_hwaddr +import logger +from network_event import NetworkEvent + +LOGGER = logger.get_logger('listener') + +DHCP_DISCOVER = 1 +DHCP_OFFER = 2 +DHCP_REQUEST = 3 +DHCP_ACK = 5 +CONTAINER_MAC_PREFIX = '9a:02:57:1e:8f' + + +class Listener: + """Methods to start and stop the network listener.""" + + def __init__(self, device_intf): + self._device_intf = device_intf + self._device_intf_mac = get_if_hwaddr(self._device_intf) + + self._sniffer = AsyncSniffer( + iface=self._device_intf, prn=self._packet_callback) + + self._callbacks = [] + self._discovered_devices = [] + + def start_listener(self): + """Start sniffing packets on the device interface.""" + self._sniffer.start() + + def stop_listener(self): + """Stop sniffing packets on the device interface.""" + self._sniffer.stop() + + def is_running(self): + """Determine whether the sniffer is running.""" + return self._sniffer.running + + def register_callback(self, callback, events=[]): # pylint: disable=dangerous-default-value + """Register a callback for specified events.""" + self._callbacks.append( + { + 'callback': callback, + 'events': events + } + ) + + def _packet_callback(self, packet): + + # Ignore packets originating from our containers + if packet.src.startswith(CONTAINER_MAC_PREFIX) or packet.src == self._device_intf_mac: + return + + if not packet.src is None and packet.src not in self._discovered_devices: + self._device_discovered(packet.src) + + def _get_dhcp_type(self, packet): + return packet[DHCP].options[0][1] + + def _device_discovered(self, mac_addr): + LOGGER.debug(f'Discovered device with address {mac_addr}') + self._discovered_devices.append(mac_addr) + + for callback in self._callbacks: + if NetworkEvent.DEVICE_DISCOVERED in callback['events']: + callback['callback'](mac_addr) diff --git a/net_orc/python/src/logger.py b/net_orc/python/src/logger.py new file mode 100644 index 000000000..e930f1953 --- /dev/null +++ b/net_orc/python/src/logger.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 + +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR="conf" +_CONF_FILE_NAME="system.json" + +# Set log level +try: + system_conf_json = json.load(open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='UTF-8')) + log_level_str = system_conf_json['log_level'] + LOG_LEVEL = logging.getLevelName(log_level_str) +except OSError: + LOG_LEVEL = _DEFAULT_LEVEL + +logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=LOG_LEVEL) + +def get_logger(name): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + return LOGGERS[name] diff --git a/net_orc/python/src/network_event.py b/net_orc/python/src/network_event.py new file mode 100644 index 000000000..c77dfa706 --- /dev/null +++ b/net_orc/python/src/network_event.py @@ -0,0 +1,10 @@ +"""Specify the various types of network events to be reported.""" +from enum import Enum + +class NetworkEvent(Enum): + """All possible network events.""" + + ALL = 0 + DEVICE_DISCOVERED = 1 + DHCP_LEASE_NEW = 2 + DHCP_LEASE_RENEWED = 3 diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py new file mode 100644 index 000000000..828ad58a7 --- /dev/null +++ b/net_orc/python/src/network_orchestrator.py @@ -0,0 +1,573 @@ +#!/usr/bin/env python3 + +import ipaddress +import json +import os +import shutil +import sys +import time +import threading + +import docker +from docker.types import Mount + +import logger +import util +from listener import Listener +from network_validator import NetworkValidator + +LOGGER = logger.get_logger("net_orc") +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" +RUNTIME_DIR = "runtime/network" +NETWORK_MODULES_DIR = "network/modules" +NETWORK_MODULE_METADATA = "conf/module_config.json" +DEVICE_BRIDGE = "tr-d" +INTERNET_BRIDGE = "tr-c" +PRIVATE_DOCKER_NET = "tr-private-net" +CONTAINER_NAME = "network_orchestrator" +RUNTIME = 300 + + +class NetworkOrchestrator: + """Manage and controls a virtual testing network.""" + + def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False): + self._int_intf = None + self._dev_intf = None + + self.listener = None + + self._net_modules = [] + + self.validate = validate + + self.async_monitor = async_monitor + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + self.validator = NetworkValidator() + + shutil.rmtree(os.path.join(os.getcwd(), RUNTIME_DIR), ignore_errors=True) + + self.network_config = NetworkConfig() + + self.load_config(config_file) + + def start(self): + """Start the network orchestrator.""" + + LOGGER.info("Starting Network Orchestrator") + # Get all components ready + self.load_network_modules() + + # Restore the network first if required + self.stop(kill=True) + + self.start_network() + + if self.async_monitor: + # Run the monitor method asynchronously to keep this method non-blocking + self._monitor_thread = threading.Thread( + target=self.monitor_network) + self._monitor_thread.daemon = True + self._monitor_thread.start() + else: + self.monitor_network() + + def start_network(self): + """Start the virtual testing network.""" + LOGGER.info("Starting network") + + self.build_network_modules() + self.create_net() + self.start_network_services() + + if self.validate: + # Start the validator after network is ready + self.validator.start() + + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready.") + + def stop(self, kill=False): + """Stop the network orchestrator.""" + self.stop_validator(kill=kill) + self.stop_network(kill=kill) + + def stop_validator(self, kill=False): + """Stop the network validator.""" + # Shutdown the validator + self.validator.stop(kill=kill) + + def stop_network(self, kill=False): + """Stop the virtual testing network.""" + # Shutdown network + self.stop_networking_services(kill=kill) + self.restore_net() + + def monitor_network(self): + # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) + time.sleep(RUNTIME) + + self.stop() + + def load_config(self,config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file=os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file=config_file + + if not os.path.isfile(self._config_file): + LOGGER.error("Configuration file is not present at " + config_file) + LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + + def _check_network_services(self): + LOGGER.debug("Checking network modules...") + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug("Checking network module: " + + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + + " responded succesfully: " + str(success)) + else: + LOGGER.error(net_module.display_name + + " failed to respond to ping") + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = "tr-ctns-" + net_module.dir_name + cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) + success = util.run_command(cmd, output=False) + return success + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool( + subnet='100.100.0.0/16', + iprange='100.100.100.0/24' + ) + + ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool] + ) + + client.networks.create( + PRIVATE_DOCKER_NET, + ipam=ipam_config, + internal=True, + check_duplicate=True, + driver="macvlan" + ) + + def create_net(self): + LOGGER.info("Creating baseline network") + + if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): + LOGGER.error("Configured interfaces are not ready for use. " + + "Ensure both interfaces are connected.") + sys.exit(1) + + # Create data plane + util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) + + # Create control plane + util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + self._dev_intf) + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + + " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") + + # Remove IP from internet adapter + util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") + + # Set ports up + util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") + util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) + + loaded_modules = "Loaded the following network modules: " + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + net_module = NetworkModule() + + # Load basic module information + + net_module_json = json.load(open(os.path.join( + self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join( + self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + ".Dockerfile" + net_module.container_name = "tr-ct-" + net_module.dir_name + net_module.image_name = "test-run/" + net_module.dir_name + + # Attach folder mounts to network module + if "docker" in net_module_json['config']: + if "mounts" in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append(Mount( + target=mount_point['target'], + source=os.path.join( + os.getcwd(), mount_point['source']), + type='bind' + )) + + # Determine if this is a container or just an image/template + if "enable_container" in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker']['enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] + + net_module.net_config.host = False if not "host" in net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + loaded_modules += net_module.dir_name + " " + + self._net_modules.append(net_module) + + LOGGER.info(loaded_modules) + + def build_network_modules(self): + LOGGER.info("Building network modules...") + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug("Building network module " + net_module.dir_name) + client = docker.from_env() + client.images.build( + dockerfile=os.path.join(net_module.dir, net_module.build_file), + path=self._path, + forcerm=True, + tag="test-run/" + net_module.dir_name + ) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name == net_module.display_name: + return net_module + return None + + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module("OVS")) + + def _start_network_service(self, net_module): + + LOGGER.debug("Starting net service " + net_module.display_name) + network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + detach=True, + mounts=net_module.mounts, + environment={"HOST_USER": os.getlogin()} + ) + except docker.errors.ContainerError as error: + LOGGER.error("Container run error") + LOGGER.error(error) + + if network != "host": + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug("Stopping Service container " + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + net_module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + net_module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + net_module.container_name) + except Exception as error: + LOGGER.error("Container stop error") + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug("Resolving service container: " + + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + net_module.container_name + " not found") + except Exception as e: + LOGGER.error("Failed to resolve container") + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info("Stopping network services") + for net_module in self._net_modules: + # Network modules may just be Docker images, so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info("Starting network services") + + os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if "OVS" != net_module.display_name: + + # Network modules may just be Docker images, so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info("All network services are running") + self._check_network_services() + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug("Attaching net service " + + net_module.display_name + " to device bridge") + + # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Container network namespace name + container_net_ns = "tr-ctns-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) + + # Set IP address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + if net_module.net_config.enable_wan: + LOGGER.debug("Attaching net service " + + net_module.display_name + " to internet bridge") + + # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Attach bridge interface to internet bridge + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + bridge_intf) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to eth1 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name eth1") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + + container_net_ns + " ip link set dev eth1 up") + + def restore_net(self): + + LOGGER.info("Clearing baseline network") + + if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() + + client = docker.from_env() + + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get( + "tr-ct-" + net_module.dir_name) + container.kill() + except Exception: + continue + + # Delete data plane + util.run_command("ovs-vsctl --if-exists del-br tr-d") + + # Delete control plane + util.run_command("ovs-vsctl --if-exists del-br tr-c") + + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command("ip link set " + self._int_intf + " down") + util.run_command("ip link set " + self._int_intf + " up") + + LOGGER.info("Network is restored") + + +class NetworkModule: + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() + +# The networking configuration for a network module + + +class NetworkModuleNetConfig: + + def __init__(self): + + self.enable_wan = False + + self.ip_index = 0 + self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None + + self.host = False + + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) + +# Represents the current configuration of the network for the device bridge + +class NetworkConfig: + + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') diff --git a/net_orc/python/src/network_runner.py b/net_orc/python/src/network_runner.py new file mode 100644 index 000000000..3fe9e8a41 --- /dev/null +++ b/net_orc/python/src/network_runner.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + +"""Wrapper for the NetworkOrchestrator that simplifies +virtual network start process by allowing direct calling +from the command line. + +Run using the provided command scripts in the cmd folder. +E.g sudo cmd/start +""" + +import argparse +import signal +import sys +import time + +import logger + +from network_orchestrator import NetworkOrchestrator + +LOGGER = logger.get_logger('net_runner') + +class NetworkRunner: + def __init__(self, config_file=None, validate=True, async_monitor=False): + self._monitor_thread = None + self._register_exits() + self.net_orc = NetworkOrchestrator(config_file=config_file,validate=validate,async_monitor=async_monitor) + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + # Kill all container services quickly + # If we're here, we want everything to stop immediately + # and don't care about a gracefully shutdown + self.stop(True) + sys.exit(1) + + def stop(self, kill=False): + self.net_orc.stop(kill) + + def start(self): + self.net_orc.start() + +def parse_args(argv): + parser = argparse.ArgumentParser(description="Test Run Help", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("--no-validate", action="store_true", + help="Turn off the validation of the network after network boot") + parser.add_argument("-f", "--config-file", default=None, + help="Define the configuration file for the Network Orchestrator") + parser.add_argument("-d", "--daemon", action="store_true", + help="Run the network monitor process in the background as a daemon thread") + + args, unknown = parser.parse_known_args() + return args + +if __name__ == "__main__": + args=parse_args(sys.argv) + runner = NetworkRunner(config_file=args.config_file, + validate=not args.no_validate, + async_monitor=args.daemon) + runner.start() \ No newline at end of file diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py new file mode 100644 index 000000000..53fbcdbd0 --- /dev/null +++ b/net_orc/python/src/network_validator.py @@ -0,0 +1,274 @@ +"""Holds logic for validation of network services prior to runtime.""" +import json +import os +import shutil +import time +import docker +from docker.types import Mount +import logger +import util + +LOGGER = logger.get_logger("validator") +OUTPUT_DIR = "runtime/validation" +DEVICES_DIR = "network/devices" +DEVICE_METADATA = "conf/module_config.json" +DEVICE_BRIDGE = "tr-d" +CONF_DIR = "conf" +CONF_FILE = "system.json" + +class NetworkValidator: + """Perform validation of network services.""" + + def __init__(self): + self._net_devices = [] + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + self._device_dir = os.path.join(self._path, DEVICES_DIR) + + shutil.rmtree(os.path.join(self._path, OUTPUT_DIR), ignore_errors=True) + + def start(self): + """Start the network validator.""" + LOGGER.info("Starting validator") + self._load_devices() + self._build_network_devices() + self._start_network_devices() + + def stop(self, kill=False): + """Stop the network validator.""" + LOGGER.info("Stopping validator") + self._stop_network_devices(kill) + LOGGER.info("Validator stopped") + + def _build_network_devices(self): + LOGGER.debug("Building network validators...") + for net_device in self._net_devices: + self._build_device(net_device) + + def _build_device(self, net_device): + LOGGER.debug("Building network validator " + net_device.dir_name) + try: + client = docker.from_env() + client.images.build( + dockerfile=os.path.join(net_device.dir, net_device.build_file), + path=self._path, + forcerm=True, + tag="test-run/" + net_device.dir_name + ) + LOGGER.debug("Validator device built: " + net_device.dir_name) + except docker.errors.BuildError as error: + LOGGER.error("Container build error") + LOGGER.error(error) + + def _load_devices(self): + + LOGGER.info(f"Loading validators from {DEVICES_DIR}") + + loaded_devices = "Loaded the following validators: " + + for module_dir in os.listdir(self._device_dir): + + device = FauxDevice() + + # Load basic module information + with open(os.path.join(self._device_dir, module_dir, DEVICE_METADATA), + encoding='utf-8') as device_config_file: + device_json = json.load(device_config_file) + + device.name = device_json['config']['meta']['name'] + device.description = device_json['config']['meta']['description'] + + device.dir = os.path.join(self._path, self._device_dir, module_dir) + device.dir_name = module_dir + device.build_file = module_dir + ".Dockerfile" + device.container_name = "tr-ct-" + device.dir_name + device.image_name = "test-run/" + device.dir_name + + runtime_source = os.path.join(os.getcwd(), OUTPUT_DIR, device.name) + conf_source = os.path.join(os.getcwd(), CONF_DIR) + os.makedirs(runtime_source, exist_ok=True) + + device.mounts = [ + Mount( + target='/runtime/validation', + source=runtime_source, + type = 'bind' + ), + Mount( + target='/conf', + source=conf_source, + type='bind', + read_only=True + ), + Mount( + target='/runtime/network', + source=runtime_source, + type='bind' + ) + ] + + if 'timeout' in device_json['config']['docker']: + device.timeout = device_json['config']['docker']['timeout'] + + # Determine if this is a container or just an image/template + if "enable_container" in device_json['config']['docker']: + device.enable_container = device_json['config']['docker']['enable_container'] + + self._net_devices.append(device) + + loaded_devices += device.dir_name + " " + + LOGGER.info(loaded_devices) + + def _start_network_devices(self): + LOGGER.debug("Starting network devices") + for net_device in self._net_devices: + self._start_network_device(net_device) + + def _start_network_device(self, device): + LOGGER.info("Starting device " + device.name) + LOGGER.debug("Image name: " + device.image_name) + LOGGER.debug("Container name: " + device.container_name) + + try: + client = docker.from_env() + device.container = client.containers.run( + device.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=device.container_name, + hostname=device.container_name, + network="none", + privileged=True, + detach=True, + mounts=device.mounts, + environment={"HOST_USER": os.getlogin()} + ) + except docker.errors.ContainerError as error: + LOGGER.error("Container run error") + LOGGER.error(error) + + self._attach_device_to_network(device) + + # Determine the module timeout time + test_module_timeout = time.time() + device.timeout + status = self._get_device_status(device) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._get_device_status(device) + + LOGGER.info("Validation device " + device.name + " has finished") + + def _get_device_status(self,module): + container = self._get_device_container(module) + if container is not None: + return container.status + return None + + def _attach_device_to_network(self, device): + LOGGER.debug("Attaching device " + device.name + " to device bridge") + + # Device bridge interface example: tr-di-dhcp + # (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + "i-" + device.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + device.dir_name + + # Container network namespace name + container_net_ns = "tr-ctns-" + device.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + device.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + def _stop_network_device(self, net_device, kill=False): + LOGGER.debug("Stopping device container " + net_device.container_name) + try: + container = self._get_device_container(net_device) + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + net_device.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + net_device.container_name) + container.stop() + LOGGER.debug("Container stopped:" + net_device.container_name) + except Exception as e: + LOGGER.error("Container stop error") + LOGGER.error(e) + + def _get_device_container(self, net_device): + LOGGER.debug("Resolving device container: " + + net_device.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_device.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + net_device.container_name + " not found") + except Exception as e: + LOGGER.error("Failed to resolve container") + LOGGER.error(e) + return container + + def _stop_network_devices(self, kill=False): + LOGGER.debug("Stopping devices") + for net_device in self._net_devices: + # Devices may just be Docker images, so we do not want to stop them + if not net_device.enable_container: + continue + self._stop_network_device(net_device, kill) + +class FauxDevice: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represent a faux device.""" + + def __init__(self): + self.name = "Unknown device" + self.description = "Unknown description" + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + self.timeout = 60 diff --git a/net_orc/python/src/run_validator.py b/net_orc/python/src/run_validator.py new file mode 100644 index 000000000..318456083 --- /dev/null +++ b/net_orc/python/src/run_validator.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 + +import os +import logger +import signal +import time +import os + +from network_orchestrator import NetworkOrchestrator +from network_orchestrator_validator import NetworkOrchestratorValidator + +LOGGER = logger.get_logger('test_run') +RUNTIME_FOLDER = "runtime/network" + +class ValidatorRun: + + def __init__(self): + + signal.signal(signal.SIGINT, self.handler) + signal.signal(signal.SIGTERM, self.handler) + signal.signal(signal.SIGABRT, self.handler) + signal.signal(signal.SIGQUIT, self.handler) + + LOGGER.info("Starting Network Orchestrator") + #os.makedirs(RUNTIME_FOLDER) + + # Cleanup any old validator components + self._validator = NetworkOrchestratorValidator() + self._validator._stop_validator(True); + + # Start the validator after network is ready + self._validator._start_validator() + + # TODO: Kill validator once all faux devices are no longer running + time.sleep(2000) + + # Gracefully shutdown network + self._validator._stop_validator(); + + def handler(self, signum, frame): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if (signum == 2 or signum == signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping validator...") + # Kill all container services quickly + # If we're here, we want everything to stop immediately + # and don't care about a gracefully shutdown. + self._validator._stop_validator(True); + LOGGER.info("Validator stopped") + exit(1) + +test_run = ValidatorRun() diff --git a/net_orc/python/src/util.py b/net_orc/python/src/util.py new file mode 100644 index 000000000..a5cfe205f --- /dev/null +++ b/net_orc/python/src/util.py @@ -0,0 +1,30 @@ +import subprocess +import shlex +import logger +import netifaces + + +# Runs a process at the os level +# By default, returns the standard output and error output +# If the caller sets optional output parameter to False, +# will only return a boolean result indicating if it was +# succesful in running the command. Failure is indicated +# by any return code from the process other than zero. +def run_command(cmd, output=True): + success = False + LOGGER = logger.get_logger('util') + process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode !=0 and output: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + LOGGER.error("Command Failed: " + cmd) + LOGGER.error("Error: " + err_msg) + else: + success = True + if output: + return stdout.strip().decode('utf-8'), stderr + else: + return success + +def interface_exists(interface): + return interface in netifaces.interfaces() \ No newline at end of file From ceba4533cf87022f16f1d65c8c0e0bbbbc2abda6 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 4 May 2023 03:21:35 -0700 Subject: [PATCH 08/48] Add the DNS test module (#12) * Add network orchestrator repository * cleanup duplicate start and install scripts * Temporary fix for python dependencies * Remove duplicate python requirements * remove duplicate conf files * remove remote-net option * cleanp unecessary files * Add dns test module Fix test module build process * Add mac address of device under test to test container Update dns test to use mac address filter * Update dns module tests * Change result output * logging update * Update test module for better reusability * Load in module config to test module * logging cleanup * Update baseline module to new template Misc cleanup * Add ability to disable individual tests * remove duplicate readme * Update device directories * Remove local folder * Update device template Update test module to work with new device config file format * Change test module network config options Do not start network services for modules not configured for network * Refactor --------- --- .gitignore | 2 + cmd/install | 2 +- framework/device.py | 10 +- framework/requirements.txt | 1 + framework/testrun.py | 281 ++-- .../Teltonika TRB140/device_config.json | 5 - net_orc/.gitignore | 133 ++ net_orc/conf/.gitignore | 1 + net_orc/conf/network/radius/ca.crt | 26 + net_orc/conf/system.json.example | 7 + .../modules/template/template.Dockerfile | 2 +- net_orc/python/src/network_orchestrator.py | 1143 ++++++++--------- resources/devices/Template/device_config.json | 32 + test_orc/modules/base/bin/capture | 3 +- test_orc/modules/base/bin/start_module | 27 +- test_orc/modules/base/conf/module_config.json | 1 + test_orc/modules/base/python/src/logger.py | 17 +- .../modules/base/python/src/test_module.py | 84 ++ .../modules/baseline/conf/module_config.json | 28 +- .../baseline/python/src/baseline_module.py | 31 + .../modules/baseline/python/src/logger.py | 46 - test_orc/modules/baseline/python/src/run.py | 13 +- .../baseline/python/src/test_module.py | 61 - test_orc/modules/dns/bin/start_test_module | 42 + test_orc/modules/dns/conf/module_config.json | 26 + test_orc/modules/dns/dns.Dockerfile | 11 + test_orc/modules/dns/python/src/dns_module.py | 77 ++ test_orc/modules/dns/python/src/run.py | 58 + test_orc/python/src/test_orchestrator.py | 59 +- 29 files changed, 1337 insertions(+), 892 deletions(-) create mode 100644 framework/requirements.txt delete mode 100644 local/devices/Teltonika TRB140/device_config.json create mode 100644 net_orc/.gitignore create mode 100644 net_orc/conf/.gitignore create mode 100644 net_orc/conf/network/radius/ca.crt create mode 100644 net_orc/conf/system.json.example create mode 100644 resources/devices/Template/device_config.json create mode 100644 test_orc/modules/base/python/src/test_module.py create mode 100644 test_orc/modules/baseline/python/src/baseline_module.py delete mode 100644 test_orc/modules/baseline/python/src/logger.py delete mode 100644 test_orc/modules/baseline/python/src/test_module.py create mode 100644 test_orc/modules/dns/bin/start_test_module create mode 100644 test_orc/modules/dns/conf/module_config.json create mode 100644 test_orc/modules/dns/dns.Dockerfile create mode 100644 test_orc/modules/dns/python/src/dns_module.py create mode 100644 test_orc/modules/dns/python/src/run.py diff --git a/.gitignore b/.gitignore index 15aae1278..db1580ffb 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,8 @@ runtime/ venv/ .vscode/ +local/ + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/cmd/install b/cmd/install index 539234006..23e463158 100755 --- a/cmd/install +++ b/cmd/install @@ -4,7 +4,7 @@ python3 -m venv venv source venv/bin/activate -pip3 install --upgrade requests +pip3 install -r framework/requirements.txt pip3 install -r net_orc/python/requirements.txt diff --git a/framework/device.py b/framework/device.py index 08014c127..d41199612 100644 --- a/framework/device.py +++ b/framework/device.py @@ -1,10 +1,12 @@ """Track device object information.""" from dataclasses import dataclass + @dataclass class Device: - """Represents a physical device and it's configuration.""" + """Represents a physical device and it's configuration.""" - make: str - model: str - mac_addr: str + make: str + model: str + mac_addr: str + test_modules: str = None diff --git a/framework/requirements.txt b/framework/requirements.txt new file mode 100644 index 000000000..ca56948f4 --- /dev/null +++ b/framework/requirements.txt @@ -0,0 +1 @@ +requests<2.29.0 \ No newline at end of file diff --git a/framework/testrun.py b/framework/testrun.py index 0561163ac..40076108b 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -6,7 +6,6 @@ Run using the provided command scripts in the cmd folder. E.g sudo cmd/start """ - import os import sys import json @@ -19,150 +18,156 @@ current_dir = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.dirname(current_dir) +# Add net_orc to Python path +net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') +sys.path.append(net_orc_dir) + +# Add test_orc to Python path +test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') +sys.path.append(test_orc_dir) + +from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel +import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel +import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel + LOGGER = logger.get_logger('test_run') -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" +CONFIG_FILE = 'conf/system.json' +EXAMPLE_CONFIG_FILE = 'conf/system.json.example' RUNTIME = 300 -DEVICES_DIR = 'local/devices' +LOCAL_DEVICES_DIR = 'local/devices' +RESOURCE_DEVICES_DIR = 'resources/devices' DEVICE_CONFIG = 'device_config.json' DEVICE_MAKE = 'make' DEVICE_MODEL = 'model' DEVICE_MAC_ADDR = 'mac_addr' +DEVICE_TEST_MODULES = 'test_modules' class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. - - Creates an instance of the network orchestrator, test - orchestrator and user interface. - """ - - def __init__(self, config_file=CONFIG_FILE,validate=True, net_only=False): - self._devices = [] - self._net_only = net_only - - # Catch any exit signals - self._register_exits() - - # Import the correct net orchestrator - self.import_dependencies() - - # Expand the config file to absolute pathing - config_file_abs=self._get_config_abs(config_file=config_file) - - self._net_orc = net_orc.NetworkOrchestrator(config_file=config_file_abs,validate=validate,async_monitor=not self._net_only) - self._test_orc = test_orc.TestOrchestrator() - - def start(self): - - self._load_devices() - - if self._net_only: - LOGGER.info("Network only option configured, no tests will be run") - self._start_network() - else: - self._start_network() - self._net_orc.listener.register_callback( - self._device_discovered, - [NetworkEvent.DEVICE_DISCOVERED]) - - LOGGER.info("Waiting for devices on the network...") - - # Check timeout and whether testing is currently in progress before stopping - time.sleep(RUNTIME) - - self.stop() - - def stop(self,kill=False): - self._stop_tests() - self._stop_network(kill=kill) - - def import_dependencies(self): - # Add net_orc to Python path - net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') - sys.path.append(net_orc_dir) - # Import the network orchestrator - global net_orc - import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel - - # Add test_orc to Python path - test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') - sys.path.append(test_orc_dir) - global test_orc - import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel - - global NetworkEvent - from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - self.stop(kill=True) - sys.exit(1) - - def _get_config_abs(self,config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - config_file = os.path.join(parent_dir, CONFIG_FILE) - - # Expand the config file to absolute pathing - return os.path.abspath(config_file) - - def _start_network(self): - self._net_orc.start() - - def _run_tests(self): - """Iterate through and start all test modules.""" - self._test_orc.start() - - def _stop_network(self,kill=False): - self._net_orc.stop(kill=kill) - - def _stop_tests(self): - self._test_orc.stop() - - def _load_devices(self): - LOGGER.debug('Loading devices from ' + DEVICES_DIR) - - for device_folder in os.listdir(DEVICES_DIR): - with open(os.path.join(DEVICES_DIR, device_folder, DEVICE_CONFIG), - encoding='utf-8') as device_config_file: - device_config_json = json.load(device_config_file) - - device_make = device_config_json.get(DEVICE_MAKE) - device_model = device_config_json.get(DEVICE_MODEL) - mac_addr = device_config_json.get(DEVICE_MAC_ADDR) - - device = Device(device_make, device_model, - mac_addr=mac_addr) - self._devices.append(device) - - LOGGER.info('Loaded ' + str(len(self._devices)) + ' devices') - - def get_device(self, mac_addr): - """Returns a loaded device object from the device mac address.""" - for device in self._devices: - if device.mac_addr == mac_addr: - return device - return None - - def _device_discovered(self, mac_addr): - device = self.get_device(mac_addr) - if device is not None: - LOGGER.info( - f'Discovered {device.make} {device.model} on the network') - else: - device = Device(make=None, model=None, mac_addr=mac_addr) - LOGGER.info( - f'A new device has been discovered with mac address {mac_addr}') - - # TODO: Pass device information to test orchestrator/runner - self._run_tests() + """Test Run controller. + + Creates an instance of the network orchestrator, test + orchestrator and user interface. + """ + + def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False): + self._devices = [] + self._net_only = net_only + + # Catch any exit signals + self._register_exits() + + # Expand the config file to absolute pathing + config_file_abs = self._get_config_abs(config_file=config_file) + + self._net_orc = net_orc.NetworkOrchestrator( + config_file=config_file_abs, validate=validate, async_monitor=not self._net_only) + self._test_orc = test_orc.TestOrchestrator() + + def start(self): + + self._load_all_devices() + + if self._net_only: + LOGGER.info( + "Network only option configured, no tests will be run") + self._start_network() + else: + self._start_network() + self._test_orc.start() + self._net_orc.listener.register_callback( + self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED]) + + LOGGER.info("Waiting for devices on the network...") + + # Check timeout and whether testing is currently in progress before stopping + time.sleep(RUNTIME) + + self.stop() + + def stop(self, kill=False): + self._stop_tests() + self._stop_network(kill=kill) + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + self.stop(kill=True) + sys.exit(1) + + def _get_config_abs(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + config_file = os.path.join(parent_dir, CONFIG_FILE) + + # Expand the config file to absolute pathing + return os.path.abspath(config_file) + + def _start_network(self): + self._net_orc.start() + + def _run_tests(self, device): + """Iterate through and start all test modules.""" + + # TODO: Make this configurable + time.sleep(60) # Let device bootup + + self._test_orc.run_test_modules(device) + + def _stop_network(self, kill=False): + self._net_orc.stop(kill=kill) + + def _stop_tests(self): + self._test_orc.stop() + + def _load_all_devices(self): + self._load_devices(device_dir=LOCAL_DEVICES_DIR) + LOGGER.info('Loaded ' + str(len(self._devices)) + ' devices') + + def _load_devices(self, device_dir): + LOGGER.debug('Loading devices from ' + device_dir) + + os.makedirs(device_dir, exist_ok=True) + + for device_folder in os.listdir(device_dir): + with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), + encoding='utf-8') as device_config_file: + device_config_json = json.load(device_config_file) + + device_make = device_config_json.get(DEVICE_MAKE) + device_model = device_config_json.get(DEVICE_MODEL) + mac_addr = device_config_json.get(DEVICE_MAC_ADDR) + test_modules = device_config_json.get(DEVICE_TEST_MODULES) + + device = Device(make=device_make, model=device_model, + mac_addr=mac_addr, test_modules=json.dumps(test_modules)) + self._devices.append(device) + + def get_device(self, mac_addr): + """Returns a loaded device object from the device mac address.""" + for device in self._devices: + if device.mac_addr == mac_addr: + return device + return None + + def _device_discovered(self, mac_addr): + device = self.get_device(mac_addr) + if device is not None: + LOGGER.info( + f'Discovered {device.make} {device.model} on the network') + else: + device = Device(make=None, model=None, mac_addr=mac_addr) + LOGGER.info( + f'A new device has been discovered with mac address {mac_addr}') + + # TODO: Pass device information to test orchestrator/runner + self._run_tests(device) diff --git a/local/devices/Teltonika TRB140/device_config.json b/local/devices/Teltonika TRB140/device_config.json deleted file mode 100644 index 759c1e9b4..000000000 --- a/local/devices/Teltonika TRB140/device_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "make": "Teltonika", - "model": "TRB140", - "mac_addr": "00:1e:42:35:73:c4" -} \ No newline at end of file diff --git a/net_orc/.gitignore b/net_orc/.gitignore new file mode 100644 index 000000000..2d77147eb --- /dev/null +++ b/net_orc/.gitignore @@ -0,0 +1,133 @@ +# Runtime folder +runtime/ +.vscode/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/net_orc/conf/.gitignore b/net_orc/conf/.gitignore new file mode 100644 index 000000000..41b89ceb1 --- /dev/null +++ b/net_orc/conf/.gitignore @@ -0,0 +1 @@ +system.json \ No newline at end of file diff --git a/net_orc/conf/network/radius/ca.crt b/net_orc/conf/network/radius/ca.crt new file mode 100644 index 000000000..d009cb1ab --- /dev/null +++ b/net_orc/conf/network/radius/ca.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEYTCCA0mgAwIBAgIUQJ4F8hBCnCp7ASPZqG/tNQgoUR4wDQYJKoZIhvcNAQEL +BQAwgb8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBIbWzN+TGVpY2VzdGVyc2hpcmUx +FTATBgNVBAcMDExvdWdoYm9yb3VnaDEUMBIGA1UECgwLRm9yZXN0IFJvY2sxDjAM +BgNVBAsMBUN5YmVyMR8wHQYDVQQDDBZjeWJlci5mb3Jlc3Ryb2NrLmNvLnVrMTUw +MwYJKoZIhvcNAQkBFiZjeWJlcnNlY3VyaXR5LnRlc3RpbmdAZm9yZXN0cm9jay5j +by51azAeFw0yMjAzMDQxMjEzMTBaFw0yNzAzMDMxMjEzMTBaMIG/MQswCQYDVQQG +EwJHQjEbMBkGA1UECAwSG1szfkxlaWNlc3RlcnNoaXJlMRUwEwYDVQQHDAxMb3Vn +aGJvcm91Z2gxFDASBgNVBAoMC0ZvcmVzdCBSb2NrMQ4wDAYDVQQLDAVDeWJlcjEf +MB0GA1UEAwwWY3liZXIuZm9yZXN0cm9jay5jby51azE1MDMGCSqGSIb3DQEJARYm +Y3liZXJzZWN1cml0eS50ZXN0aW5nQGZvcmVzdHJvY2suY28udWswggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDNz3vJiZ5nX8lohEhqXvxEme3srip8qF7 +r5ScIeQzsTKuPNAmoefx9TcU3SyA2BnREuDX+OCYMN62xxWG2PndOl0LNezAY22C +PJwHbaBntLKY/ZhxYSTyratM7zxKSVLtClamA/bJXBhdfZZKYOP3xlZQEQTygtzK +j5hZwDrpDARtjRZIMWPLqVcoaW9ow2urJVsdD4lYAhpQU2UIgiWo7BG3hJsUfcYX +EQyyrMKJ7xaCwzIU7Sem1PETrzeiWg4KhDijc7A0RMPWlU5ljf0CnY/IZwiDsMRl +hGmGBPvR+ddiWPZPtSKj6TPWpsaMUR9UwncLmSSrhf1otX4Mw0vbAgMBAAGjUzBR +MB0GA1UdDgQWBBR0Qxx2mDTPIfpnzO5YtycGs6t8ijAfBgNVHSMEGDAWgBR0Qxx2 +mDTPIfpnzO5YtycGs6t8ijAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA +A4IBAQCpTMBMZGXF74WCxrIk23MUsu0OKzMs8B16Wy8BHz+7hInLZwbkx71Z0TP5 +rsMITetSANtM/k4jH7Vmr1xmzU7oSz5zKU1+7rIjKjGtih48WZdJay0uqfKe0K2s +vsRS0LVLY6IiTFWK9YrLC0QFSK7z5GDl1oc/D5yIZAkbsL6PRQJ5RQsYf5BhHfyB +PRV/KcF7c9iKVYW2vILJzbyYLHTDADTHbtfCe5+pAGxagswDjSMVkQu5iJNjbtUO +5iv7PRkgzUFru9Kk6q+LrXbzyPPCwlc3Xbh1q5jSkJLkcV3K26E7+uX5HI+Hxpeh +a8kOsdnw+N8wX6bc7eXIaGBDMine +-----END CERTIFICATE----- diff --git a/net_orc/conf/system.json.example b/net_orc/conf/system.json.example new file mode 100644 index 000000000..77c981394 --- /dev/null +++ b/net_orc/conf/system.json.example @@ -0,0 +1,7 @@ +{ + "network": { + "device_intf": "enx207bd2620617", + "internet_intf": "enx207bd26205e9" + }, + "log_level": "INFO" +} \ No newline at end of file diff --git a/net_orc/network/modules/template/template.Dockerfile b/net_orc/network/modules/template/template.Dockerfile index 54bfb9628..45f9da6d9 100644 --- a/net_orc/network/modules/template/template.Dockerfile +++ b/net_orc/network/modules/template/template.Dockerfile @@ -1,4 +1,4 @@ -# Image name: test-run/dhcp-primary +# Image name: test-run/template FROM test-run/base:latest # Copy over all configuration files diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 828ad58a7..63391a24f 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,573 +1,570 @@ -#!/usr/bin/env python3 - -import ipaddress -import json -import os -import shutil -import sys -import time -import threading - -import docker -from docker.types import Mount - -import logger -import util -from listener import Listener -from network_validator import NetworkValidator - -LOGGER = logger.get_logger("net_orc") -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME_DIR = "runtime/network" -NETWORK_MODULES_DIR = "network/modules" -NETWORK_MODULE_METADATA = "conf/module_config.json" -DEVICE_BRIDGE = "tr-d" -INTERNET_BRIDGE = "tr-c" -PRIVATE_DOCKER_NET = "tr-private-net" -CONTAINER_NAME = "network_orchestrator" -RUNTIME = 300 - - -class NetworkOrchestrator: - """Manage and controls a virtual testing network.""" - - def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False): - self._int_intf = None - self._dev_intf = None - - self.listener = None - - self._net_modules = [] - - self.validate = validate - - self.async_monitor = async_monitor - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - self.validator = NetworkValidator() - - shutil.rmtree(os.path.join(os.getcwd(), RUNTIME_DIR), ignore_errors=True) - - self.network_config = NetworkConfig() - - self.load_config(config_file) - - def start(self): - """Start the network orchestrator.""" - - LOGGER.info("Starting Network Orchestrator") - # Get all components ready - self.load_network_modules() - - # Restore the network first if required - self.stop(kill=True) - - self.start_network() - - if self.async_monitor: - # Run the monitor method asynchronously to keep this method non-blocking - self._monitor_thread = threading.Thread( - target=self.monitor_network) - self._monitor_thread.daemon = True - self._monitor_thread.start() - else: - self.monitor_network() - - def start_network(self): - """Start the virtual testing network.""" - LOGGER.info("Starting network") - - self.build_network_modules() - self.create_net() - self.start_network_services() - - if self.validate: - # Start the validator after network is ready - self.validator.start() - - # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready.") - - def stop(self, kill=False): - """Stop the network orchestrator.""" - self.stop_validator(kill=kill) - self.stop_network(kill=kill) - - def stop_validator(self, kill=False): - """Stop the network validator.""" - # Shutdown the validator - self.validator.stop(kill=kill) - - def stop_network(self, kill=False): - """Stop the virtual testing network.""" - # Shutdown network - self.stop_networking_services(kill=kill) - self.restore_net() - - def monitor_network(self): - # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) - time.sleep(RUNTIME) - - self.stop() - - def load_config(self,config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - self._config_file=os.path.join(self._path, CONFIG_FILE) - else: - # If defined, use as provided - self._config_file=config_file - - if not os.path.isfile(self._config_file): - LOGGER.error("Configuration file is not present at " + config_file) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) - with open(self._config_file, encoding='UTF-8') as config_json_file: - config_json = json.load(config_json_file) - self.import_config(config_json) - - def import_config(self, json_config): - self._int_intf = json_config['network']['internet_intf'] - self._dev_intf = json_config['network']['device_intf'] - - def _check_network_services(self): - LOGGER.debug("Checking network modules...") - for net_module in self._net_modules: - if net_module.enable_container: - LOGGER.debug("Checking network module: " + - net_module.display_name) - success = self._ping(net_module) - if success: - LOGGER.debug(net_module.display_name + - " responded succesfully: " + str(success)) - else: - LOGGER.error(net_module.display_name + - " failed to respond to ping") - - def _ping(self, net_module): - host = net_module.net_config.ipv4_address - namespace = "tr-ctns-" + net_module.dir_name - cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) - success = util.run_command(cmd, output=False) - return success - - def _create_private_net(self): - client = docker.from_env() - try: - network = client.networks.get(PRIVATE_DOCKER_NET) - network.remove() - except docker.errors.NotFound: - pass - - # TODO: These should be made into variables - ipam_pool = docker.types.IPAMPool( - subnet='100.100.0.0/16', - iprange='100.100.100.0/24' - ) - - ipam_config = docker.types.IPAMConfig( - pool_configs=[ipam_pool] - ) - - client.networks.create( - PRIVATE_DOCKER_NET, - ipam=ipam_config, - internal=True, - check_duplicate=True, - driver="macvlan" - ) - - def create_net(self): - LOGGER.info("Creating baseline network") - - if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): - LOGGER.error("Configured interfaces are not ready for use. " + - "Ensure both interfaces are connected.") - sys.exit(1) - - # Create data plane - util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) - - # Create control plane - util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) - - # Add external interfaces to data and control plane - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + self._dev_intf) - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + self._int_intf) - - # Enable forwarding of eapol packets - util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + - " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") - - # Remove IP from internet adapter - util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") - - # Set ports up - util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") - util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") - - self._create_private_net() - - self.listener = Listener(self._dev_intf) - self.listener.start_listener() - - def load_network_modules(self): - """Load network modules from module_config.json.""" - LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) - - loaded_modules = "Loaded the following network modules: " - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - for module_dir in os.listdir(net_modules_dir): - - net_module = NetworkModule() - - # Load basic module information - - net_module_json = json.load(open(os.path.join( - self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) - - net_module.name = net_module_json['config']['meta']['name'] - net_module.display_name = net_module_json['config']['meta']['display_name'] - net_module.description = net_module_json['config']['meta']['description'] - net_module.dir = os.path.join( - self._path, net_modules_dir, module_dir) - net_module.dir_name = module_dir - net_module.build_file = module_dir + ".Dockerfile" - net_module.container_name = "tr-ct-" + net_module.dir_name - net_module.image_name = "test-run/" + net_module.dir_name - - # Attach folder mounts to network module - if "docker" in net_module_json['config']: - if "mounts" in net_module_json['config']['docker']: - for mount_point in net_module_json['config']['docker']['mounts']: - net_module.mounts.append(Mount( - target=mount_point['target'], - source=os.path.join( - os.getcwd(), mount_point['source']), - type='bind' - )) - - # Determine if this is a container or just an image/template - if "enable_container" in net_module_json['config']['docker']: - net_module.enable_container = net_module_json['config']['docker']['enable_container'] - - # Load network service networking configuration - if net_module.enable_container: - - net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] - net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] - - net_module.net_config.host = False if not "host" in net_module_json[ - 'config']['network'] else net_module_json['config']['network']['host'] - - net_module.net_config.ipv4_address = self.network_config.ipv4_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv4_network = self.network_config.ipv4_network - - net_module.net_config.ipv6_address = self.network_config.ipv6_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv6_network = self.network_config.ipv6_network - - loaded_modules += net_module.dir_name + " " - - self._net_modules.append(net_module) - - LOGGER.info(loaded_modules) - - def build_network_modules(self): - LOGGER.info("Building network modules...") - for net_module in self._net_modules: - self._build_module(net_module) - - def _build_module(self, net_module): - LOGGER.debug("Building network module " + net_module.dir_name) - client = docker.from_env() - client.images.build( - dockerfile=os.path.join(net_module.dir, net_module.build_file), - path=self._path, - forcerm=True, - tag="test-run/" + net_module.dir_name - ) - - def _get_network_module(self, name): - for net_module in self._net_modules: - if name == net_module.display_name: - return net_module - return None - - # Start the OVS network module - # This should always be called before loading all - # other modules to allow for a properly setup base - # network - def _start_ovs_module(self): - self._start_network_service(self._get_network_module("OVS")) - - def _start_network_service(self, net_module): - - LOGGER.debug("Starting net service " + net_module.display_name) - network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET - LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, - container name: {net_module.container_name}""") - try: - client = docker.from_env() - net_module.container = client.containers.run( - net_module.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=net_module.container_name, - hostname=net_module.container_name, - network=PRIVATE_DOCKER_NET, - privileged=True, - detach=True, - mounts=net_module.mounts, - environment={"HOST_USER": os.getlogin()} - ) - except docker.errors.ContainerError as error: - LOGGER.error("Container run error") - LOGGER.error(error) - - if network != "host": - self._attach_service_to_network(net_module) - - def _stop_service_module(self, net_module, kill=False): - LOGGER.debug("Stopping Service container " + net_module.container_name) - try: - container = self._get_service_container(net_module) - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - net_module.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - net_module.container_name) - container.stop() - LOGGER.debug("Container stopped:" + net_module.container_name) - except Exception as error: - LOGGER.error("Container stop error") - LOGGER.error(error) - - def _get_service_container(self, net_module): - LOGGER.debug("Resolving service container: " + - net_module.container_name) - container = None - try: - client = docker.from_env() - container = client.containers.get(net_module.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - net_module.container_name + " not found") - except Exception as e: - LOGGER.error("Failed to resolve container") - LOGGER.error(e) - return container - - def stop_networking_services(self, kill=False): - LOGGER.info("Stopping network services") - for net_module in self._net_modules: - # Network modules may just be Docker images, so we do not want to stop them - if not net_module.enable_container: - continue - self._stop_service_module(net_module, kill) - - def start_network_services(self): - LOGGER.info("Starting network services") - - os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) - - for net_module in self._net_modules: - - # TODO: There should be a better way of doing this - # Do not try starting OVS module again, as it should already be running - if "OVS" != net_module.display_name: - - # Network modules may just be Docker images, so we do not want to start them as containers - if not net_module.enable_container: - continue - - self._start_network_service(net_module) - - LOGGER.info("All network services are running") - self._check_network_services() - - # TODO: Let's move this into a separate script? It does not look great - def _attach_service_to_network(self, net_module): - LOGGER.debug("Attaching net service " + - net_module.display_name + " to device bridge") - - # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) - bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Container network namespace name - container_net_ns = "tr-ctns-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) - - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] - - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) - - # Set IP address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") - - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") - - if net_module.net_config.enable_wan: - LOGGER.debug("Attaching net service " + - net_module.display_name + " to internet bridge") - - # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) - bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Attach bridge interface to internet bridge - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + bridge_intf) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to eth1 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name eth1") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + - container_net_ns + " ip link set dev eth1 up") - - def restore_net(self): - - LOGGER.info("Clearing baseline network") - - if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): - self.listener.stop_listener() - - client = docker.from_env() - - # Stop all network containers if still running - for net_module in self._net_modules: - try: - container = client.containers.get( - "tr-ct-" + net_module.dir_name) - container.kill() - except Exception: - continue - - # Delete data plane - util.run_command("ovs-vsctl --if-exists del-br tr-d") - - # Delete control plane - util.run_command("ovs-vsctl --if-exists del-br tr-c") - - # Restart internet interface - if util.interface_exists(self._int_intf): - util.run_command("ip link set " + self._int_intf + " down") - util.run_command("ip link set " + self._int_intf + " up") - - LOGGER.info("Network is restored") - - -class NetworkModule: - - def __init__(self): - self.name = None - self.display_name = None - self.description = None - - self.container = None - self.container_name = None - self.image_name = None - - # Absolute path - self.dir = None - self.dir_name = None - self.build_file = None - self.mounts = [] - - self.enable_container = True - - self.net_config = NetworkModuleNetConfig() - -# The networking configuration for a network module - - -class NetworkModuleNetConfig: - - def __init__(self): - - self.enable_wan = False - - self.ip_index = 0 - self.ipv4_address = None - self.ipv4_network = None - self.ipv6_address = None - self.ipv6_network = None - - self.host = False - - def get_ipv4_addr_with_prefix(self): - return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) - - def get_ipv6_addr_with_prefix(self): - return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) - -# Represents the current configuration of the network for the device bridge - -class NetworkConfig: - - # TODO: Let's get this from a configuration file - def __init__(self): - self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') - self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') +#!/usr/bin/env python3 + +import ipaddress +import json +import os +import sys +import time +import threading + +import docker +from docker.types import Mount + +import logger +import util +from listener import Listener +from network_validator import NetworkValidator + +LOGGER = logger.get_logger("net_orc") +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" +RUNTIME_DIR = "runtime/network" +NETWORK_MODULES_DIR = "network/modules" +NETWORK_MODULE_METADATA = "conf/module_config.json" +DEVICE_BRIDGE = "tr-d" +INTERNET_BRIDGE = "tr-c" +PRIVATE_DOCKER_NET = "tr-private-net" +CONTAINER_NAME = "network_orchestrator" +RUNTIME = 300 + + +class NetworkOrchestrator: + """Manage and controls a virtual testing network.""" + + def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False): + self._int_intf = None + self._dev_intf = None + + self.listener = None + + self._net_modules = [] + + self.validate = validate + + self.async_monitor = async_monitor + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + self.validator = NetworkValidator() + + self.network_config = NetworkConfig() + + self.load_config(config_file) + + def start(self): + """Start the network orchestrator.""" + + LOGGER.info("Starting Network Orchestrator") + # Get all components ready + self.load_network_modules() + + # Restore the network first if required + self.stop(kill=True) + + self.start_network() + + if self.async_monitor: + # Run the monitor method asynchronously to keep this method non-blocking + self._monitor_thread = threading.Thread( + target=self.monitor_network) + self._monitor_thread.daemon = True + self._monitor_thread.start() + else: + self.monitor_network() + + def start_network(self): + """Start the virtual testing network.""" + LOGGER.info("Starting network") + + self.build_network_modules() + self.create_net() + self.start_network_services() + + if self.validate: + # Start the validator after network is ready + self.validator.start() + + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready.") + + def stop(self, kill=False): + """Stop the network orchestrator.""" + self.stop_validator(kill=kill) + self.stop_network(kill=kill) + + def stop_validator(self, kill=False): + """Stop the network validator.""" + # Shutdown the validator + self.validator.stop(kill=kill) + + def stop_network(self, kill=False): + """Stop the virtual testing network.""" + # Shutdown network + self.stop_networking_services(kill=kill) + self.restore_net() + + def monitor_network(self): + # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) + time.sleep(RUNTIME) + + self.stop() + + def load_config(self,config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file=os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file=config_file + + if not os.path.isfile(self._config_file): + LOGGER.error("Configuration file is not present at " + config_file) + LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + + def _check_network_services(self): + LOGGER.debug("Checking network modules...") + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug("Checking network module: " + + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + + " responded succesfully: " + str(success)) + else: + LOGGER.error(net_module.display_name + + " failed to respond to ping") + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = "tr-ctns-" + net_module.dir_name + cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) + success = util.run_command(cmd, output=False) + return success + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool( + subnet='100.100.0.0/16', + iprange='100.100.100.0/24' + ) + + ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool] + ) + + client.networks.create( + PRIVATE_DOCKER_NET, + ipam=ipam_config, + internal=True, + check_duplicate=True, + driver="macvlan" + ) + + def create_net(self): + LOGGER.info("Creating baseline network") + + if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): + LOGGER.error("Configured interfaces are not ready for use. " + + "Ensure both interfaces are connected.") + sys.exit(1) + + # Create data plane + util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) + + # Create control plane + util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + self._dev_intf) + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + + " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") + + # Remove IP from internet adapter + util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") + + # Set ports up + util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") + util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) + + loaded_modules = "Loaded the following network modules: " + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + net_module = NetworkModule() + + # Load basic module information + + net_module_json = json.load(open(os.path.join( + self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join( + self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + ".Dockerfile" + net_module.container_name = "tr-ct-" + net_module.dir_name + net_module.image_name = "test-run/" + net_module.dir_name + + # Attach folder mounts to network module + if "docker" in net_module_json['config']: + if "mounts" in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append(Mount( + target=mount_point['target'], + source=os.path.join( + os.getcwd(), mount_point['source']), + type='bind' + )) + + # Determine if this is a container or just an image/template + if "enable_container" in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker']['enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] + + net_module.net_config.host = False if not "host" in net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + loaded_modules += net_module.dir_name + " " + + self._net_modules.append(net_module) + + LOGGER.info(loaded_modules) + + def build_network_modules(self): + LOGGER.info("Building network modules...") + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug("Building network module " + net_module.dir_name) + client = docker.from_env() + client.images.build( + dockerfile=os.path.join(net_module.dir, net_module.build_file), + path=self._path, + forcerm=True, + tag="test-run/" + net_module.dir_name + ) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name == net_module.display_name: + return net_module + return None + + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module("OVS")) + + def _start_network_service(self, net_module): + + LOGGER.debug("Starting net service " + net_module.display_name) + network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + detach=True, + mounts=net_module.mounts, + environment={"HOST_USER": os.getlogin()} + ) + except docker.errors.ContainerError as error: + LOGGER.error("Container run error") + LOGGER.error(error) + + if network != "host": + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug("Stopping Service container " + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + net_module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + net_module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + net_module.container_name) + except Exception as error: + LOGGER.error("Container stop error") + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug("Resolving service container: " + + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + net_module.container_name + " not found") + except Exception as e: + LOGGER.error("Failed to resolve container") + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info("Stopping network services") + for net_module in self._net_modules: + # Network modules may just be Docker images, so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info("Starting network services") + + os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if "OVS" != net_module.display_name: + + # Network modules may just be Docker images, so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info("All network services are running") + self._check_network_services() + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug("Attaching net service " + + net_module.display_name + " to device bridge") + + # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Container network namespace name + container_net_ns = "tr-ctns-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) + + # Set IP address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + if net_module.net_config.enable_wan: + LOGGER.debug("Attaching net service " + + net_module.display_name + " to internet bridge") + + # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Attach bridge interface to internet bridge + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + bridge_intf) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to eth1 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name eth1") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + + container_net_ns + " ip link set dev eth1 up") + + def restore_net(self): + + LOGGER.info("Clearing baseline network") + + if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() + + client = docker.from_env() + + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get( + "tr-ct-" + net_module.dir_name) + container.kill() + except Exception: + continue + + # Delete data plane + util.run_command("ovs-vsctl --if-exists del-br tr-d") + + # Delete control plane + util.run_command("ovs-vsctl --if-exists del-br tr-c") + + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command("ip link set " + self._int_intf + " down") + util.run_command("ip link set " + self._int_intf + " up") + + LOGGER.info("Network is restored") + + +class NetworkModule: + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() + +# The networking configuration for a network module + + +class NetworkModuleNetConfig: + + def __init__(self): + + self.enable_wan = False + + self.ip_index = 0 + self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None + + self.host = False + + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) + +# Represents the current configuration of the network for the device bridge + +class NetworkConfig: + + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') diff --git a/resources/devices/Template/device_config.json b/resources/devices/Template/device_config.json new file mode 100644 index 000000000..f8b56b7a3 --- /dev/null +++ b/resources/devices/Template/device_config.json @@ -0,0 +1,32 @@ +{ + "make": "Manufacturer X", + "model": "Device X", + "mac_addr": "aa:bb:cc:dd:ee:ff", + "test_modules": { + "dns": { + "enabled": true, + "tests": { + "dns.network.from_device": { + "enabled": true + }, + "dns.network.from_dhcp": { + "enabled": true + } + } + }, + "baseline": { + "enabled": true, + "tests": { + "baseline.passe": { + "enabled": true + }, + "baseline.pass": { + "enabled": true + }, + "baseline.skip": { + "enabled": true + } + } + } + } +} \ No newline at end of file diff --git a/test_orc/modules/base/bin/capture b/test_orc/modules/base/bin/capture index dccafb0c5..facb6acf7 100644 --- a/test_orc/modules/base/bin/capture +++ b/test_orc/modules/base/bin/capture @@ -4,7 +4,7 @@ MODULE_NAME=$1 # Define the local file location for the capture to be saved -PCAP_DIR="/runtime/output/" +PCAP_DIR="/runtime/output" PCAP_FILE=$MODULE_NAME.pcap # Allow a user to define an interface by passing it into this script @@ -13,7 +13,6 @@ INTERFACE=$2 # Create the output directory and start the capture mkdir -p $PCAP_DIR chown $HOST_USER:$HOST_USER $PCAP_DIR -echo "PCAP Dir: $PCAP_DIR/$PCAP_FILE" tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & # Small pause to let the capture to start diff --git a/test_orc/modules/base/bin/start_module b/test_orc/modules/base/bin/start_module index a9f5402f4..6adc53f58 100644 --- a/test_orc/modules/base/bin/start_module +++ b/test_orc/modules/base/bin/start_module @@ -4,7 +4,7 @@ BIN_DIR="/testrun/bin" # Default interface should be veth0 for all containers -DEFAULT_IFACE=veth0 +IFACE=veth0 # Create a local user that matches the same as the host # to be used for correct file ownership for various logs @@ -27,7 +27,7 @@ fi # Extract the necessary config parameters MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name') -DEFINED_IFACE=$(echo "$CONF" | jq -r '.config.network.interface') +NETWORK_REQUIRED=$(echo "$CONF" | jq -r '.config.network') GRPC=$(echo "$CONF" | jq -r '.config.grpc') # Validate the module name is present @@ -37,24 +37,19 @@ then exit 1 fi -# Select which interace to use -if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] -then - echo "No Interface Defined, defaulting to veth0" - INTF=$DEFAULT_IFACE -else - INTF=$DEFINED_IFACE -fi - echo "Starting module $MODULE_NAME..." $BIN_DIR/setup_binaries $BIN_DIR -# Wait for interface to become ready -$BIN_DIR/wait_for_interface $INTF +# Only start network services if the test container needs +# a network connection to run its tests +if [ $NETWORK_REQUIRED == "true" ];then + # Wait for interface to become ready + $BIN_DIR/wait_for_interface $IFACE -# Start network capture -$BIN_DIR/capture $MODULE_NAME $INTF + # Start network capture + $BIN_DIR/capture $MODULE_NAME $IFACE +fi # Start the grpc server if [[ ! -z $GRPC && ! $GRPC == "null" ]] @@ -73,4 +68,4 @@ fi sleep 3 # Start the networking service -$BIN_DIR/start_test_module $MODULE_NAME $INTF \ No newline at end of file +$BIN_DIR/start_test_module $MODULE_NAME $IFACE \ No newline at end of file diff --git a/test_orc/modules/base/conf/module_config.json b/test_orc/modules/base/conf/module_config.json index 1f3a47ba2..7288dacfd 100644 --- a/test_orc/modules/base/conf/module_config.json +++ b/test_orc/modules/base/conf/module_config.json @@ -5,6 +5,7 @@ "display_name": "Base", "description": "Base image" }, + "network": false, "docker": { "enable_container": false } diff --git a/test_orc/modules/base/python/src/logger.py b/test_orc/modules/base/python/src/logger.py index 0eb7b9ccf..641aa16b4 100644 --- a/test_orc/modules/base/python/src/logger.py +++ b/test_orc/modules/base/python/src/logger.py @@ -10,12 +10,12 @@ _DEFAULT_LEVEL = logging.INFO _CONF_DIR = "conf" _CONF_FILE_NAME = "system.json" -_LOG_DIR = "/runtime/network/" +_LOG_DIR = "/runtime/output/" # Set log level try: system_conf_json = json.load( - open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8')) + open(os.path.join(_CONF_DIR, _CONF_FILE_NAME))) log_level_str = system_conf_json['log_level'] log_level = logging.getLevelName(log_level_str) except: @@ -24,22 +24,23 @@ log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) - -def add_file_handler(log, log_file): - handler = logging.FileHandler(_LOG_DIR+log_file+".log") +def add_file_handler(log, logFile): + handler = logging.FileHandler(_LOG_DIR+logFile+".log") handler.setFormatter(log_format) log.addHandler(handler) + def add_stream_handler(log): handler = logging.StreamHandler() handler.setFormatter(log_format) log.addHandler(handler) -def get_logger(name, log_file=None): + +def get_logger(name, logFile=None): if name not in LOGGERS: LOGGERS[name] = logging.getLogger(name) LOGGERS[name].setLevel(log_level) add_stream_handler(LOGGERS[name]) - if log_file is not None: - add_file_handler(LOGGERS[name], log_file) + if logFile is not None: + add_file_handler(LOGGERS[name], logFile) return LOGGERS[name] diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py new file mode 100644 index 000000000..6f7f48c3a --- /dev/null +++ b/test_orc/modules/base/python/src/test_module.py @@ -0,0 +1,84 @@ +import json +import logger +import os + +LOGGER = None +RESULTS_DIR = "/runtime/output/" +CONF_FILE = "/testrun/conf/module_config.json" + + +class TestModule: + + def __init__(self, module_name, log_name): + self._module_name = module_name + self._device_mac = os.environ['DEVICE_MAC'] + self._add_logger(log_name=log_name, module_name=module_name) + self._config = self._read_config() + + def _add_logger(self, log_name, module_name): + global LOGGER + LOGGER = logger.get_logger(log_name, module_name) + + def _get_logger(self): + return LOGGER + + def _get_tests(self): + device_test_module = self._get_device_test_module() + return self._get_device_tests(device_test_module) + + def _get_device_tests(self, device_test_module): + module_tests = self._config["config"]["tests"] + if device_test_module is None: + return module_tests + elif not device_test_module["enabled"]: + return [] + else: + for test in module_tests: + if test["name"] in device_test_module["tests"]: + test["enabled"] = device_test_module["tests"][test["name"]]["enabled"] + return module_tests + + def _get_device_test_module(self): + test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) + if self._module_name in test_modules: + return test_modules[self._module_name] + return None + + def run_tests(self): + tests = self._get_tests() + device_modules = os.environ['DEVICE_TEST_MODULES'] + for test in tests: + test_method_name = "_" + test["name"].replace(".", "_") + result = None + if ("enabled" in test and test["enabled"]) or "enabled" not in test: + LOGGER.info("Attempting to run test: " + test["name"]) + + # Resolve the correct python method by test name and run test + if hasattr(self, test_method_name): + result = getattr(self, test_method_name)() + else: + LOGGER.info("Test " + test["name"] + + " not resolved. Skipping") + result = None + else: + LOGGER.info("Test " + test["name"] + + " disabled. Skipping") + if result is not None: + test["result"] = "compliant" if result else "non-compliant" + else: + test["result"] = "skipped" + json_results = json.dumps({"results": tests}, indent=2) + self._write_results(json_results) + + def _read_config(self): + f = open(CONF_FILE, encoding="utf-8") + config = json.load(f) + f.close() + return config + + def _write_results(self, results): + results_file = RESULTS_DIR + self._module_name + "-result.json" + LOGGER.info("Writing results to " + results_file) + f = open(results_file, "w", encoding="utf-8") + f.write(results) + f.close() diff --git a/test_orc/modules/baseline/conf/module_config.json b/test_orc/modules/baseline/conf/module_config.json index 1b8b7b9ba..ba337267a 100644 --- a/test_orc/modules/baseline/conf/module_config.json +++ b/test_orc/modules/baseline/conf/module_config.json @@ -5,17 +5,27 @@ "display_name": "Baseline", "description": "Baseline test" }, - "network": { - "interface": "eth0", - "enable_wan": false, - "ip_index": 9 - }, - "grpc": { - "port": 50001 - }, + "network": false, "docker": { "enable_container": true, "timeout": 30 - } + }, + "tests":[ + { + "name": "baseline.pass", + "description": "Simulate a compliant test", + "expected_behavior": "A compliant test result is generated" + }, + { + "name": "baseline.fail", + "description": "Simulate a non-compliant test", + "expected_behavior": "A non-compliant test result is generated" + }, + { + "name": "baseline.skip", + "description": "Simulate a skipped test", + "expected_behavior": "A skipped test result is generated" + } + ] } } \ No newline at end of file diff --git a/test_orc/modules/baseline/python/src/baseline_module.py b/test_orc/modules/baseline/python/src/baseline_module.py new file mode 100644 index 000000000..80c04ef48 --- /dev/null +++ b/test_orc/modules/baseline/python/src/baseline_module.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 + +from test_module import TestModule + +LOG_NAME = "test_baseline" +LOGGER = None + +class BaselineModule(TestModule): + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + global LOGGER + LOGGER = self._get_logger() + + def _baseline_pass(self): + LOGGER.info( + "Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return True + + def _baseline_fail(self): + LOGGER.info( + "Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return False + + def _baseline_skip(self): + LOGGER.info( + "Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return None \ No newline at end of file diff --git a/test_orc/modules/baseline/python/src/logger.py b/test_orc/modules/baseline/python/src/logger.py deleted file mode 100644 index 641aa16b4..000000000 --- a/test_orc/modules/baseline/python/src/logger.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python3 - -import json -import logging -import os - -LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" -_DATE_FORMAT = '%b %02d %H:%M:%S' -_DEFAULT_LEVEL = logging.INFO -_CONF_DIR = "conf" -_CONF_FILE_NAME = "system.json" -_LOG_DIR = "/runtime/output/" - -# Set log level -try: - system_conf_json = json.load( - open(os.path.join(_CONF_DIR, _CONF_FILE_NAME))) - log_level_str = system_conf_json['log_level'] - log_level = logging.getLevelName(log_level_str) -except: - # TODO: Print out warning that log level is incorrect or missing - log_level = _DEFAULT_LEVEL - -log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) - -def add_file_handler(log, logFile): - handler = logging.FileHandler(_LOG_DIR+logFile+".log") - handler.setFormatter(log_format) - log.addHandler(handler) - - -def add_stream_handler(log): - handler = logging.StreamHandler() - handler.setFormatter(log_format) - log.addHandler(handler) - - -def get_logger(name, logFile=None): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - LOGGERS[name].setLevel(log_level) - add_stream_handler(LOGGERS[name]) - if logFile is not None: - add_file_handler(LOGGERS[name], logFile) - return LOGGERS[name] diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py index 7ff11559f..ffa171e17 100644 --- a/test_orc/modules/baseline/python/src/run.py +++ b/test_orc/modules/baseline/python/src/run.py @@ -5,12 +5,12 @@ import sys import logger -from test_module import TestModule +from baseline_module import BaselineModule LOGGER = logger.get_logger('test_module') RUNTIME = 300 -class TestModuleRunner: +class BaselineModuleRunner: def __init__(self,module): @@ -19,11 +19,10 @@ def __init__(self,module): signal.signal(signal.SIGABRT, self._handler) signal.signal(signal.SIGQUIT, self._handler) - LOGGER.info("Starting Test Module Template") + LOGGER.info("Starting Baseline Module") - self._test_module = TestModule(module) + self._test_module = BaselineModule(module) self._test_module.run_tests() - self._test_module.generate_results() def _handler(self, signum, *other): LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) @@ -34,7 +33,7 @@ def _handler(self, signum, *other): sys.exit(1) def run(argv): - parser = argparse.ArgumentParser(description="Test Module Template", + parser = argparse.ArgumentParser(description="Baseline Module Help", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( @@ -44,7 +43,7 @@ def run(argv): # For some reason passing in the args from bash adds an extra # space before the argument so we'll just strip out extra space - TestModuleRunner(args.module.strip()) + BaselineModuleRunner(args.module.strip()) if __name__ == "__main__": run(sys.argv) diff --git a/test_orc/modules/baseline/python/src/test_module.py b/test_orc/modules/baseline/python/src/test_module.py deleted file mode 100644 index d4065cde3..000000000 --- a/test_orc/modules/baseline/python/src/test_module.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 - -import json -import time -import logger - -LOG_NAME = "test_baseline" -RESULTS_DIR = "/runtime/output/" -LOGGER = logger.get_logger(LOG_NAME) - -class TestModule: - - def __init__(self, module): - - self.module_test1 = None - self.module_test2 = None - self.module_test3 = None - self.module = module - self.add_logger(module) - - def add_logger(self, module): - global LOGGER - LOGGER = logger.get_logger(LOG_NAME, module) - - # Make up some fake test results - def run_tests(self): - LOGGER.info("Running test 1...") - self.module_test1 = True - LOGGER.info("Test 1 complete.") - - LOGGER.info("Running test 2...") - self.module_test2 = False - LOGGER.info("Test 2 complete.") - - def generate_results(self): - results = [] - results.append(self.generate_result("Test 1", self.module_test1)) - results.append(self.generate_result("Test 2", self.module_test2)) - results.append(self.generate_result("Test 3", self.module_test3)) - json_results = json.dumps({"results":results}, indent=2) - self.write_results(json_results) - - def write_results(self,results): - results_file=RESULTS_DIR+self.module+"-result.json" - LOGGER.info("Writing results to " + results_file) - f = open(results_file, "w", encoding="utf-8") - f.write(results) - f.close() - - def generate_result(self, test_name, test_result): - if test_result is not None: - result = "compliant" if test_result else "non-compliant" - else: - result = "skipped" - LOGGER.info(test_name + ": " + result) - res_dict = { - "name": test_name, - "result": result, - "description": "The device is " + result - } - return res_dict diff --git a/test_orc/modules/dns/bin/start_test_module b/test_orc/modules/dns/bin/start_test_module new file mode 100644 index 000000000..2938eb0f8 --- /dev/null +++ b/test_orc/modules/dns/bin/start_test_module @@ -0,0 +1,42 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a pyhon script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. + +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/dns/conf/module_config.json b/test_orc/modules/dns/conf/module_config.json new file mode 100644 index 000000000..d21f6bca6 --- /dev/null +++ b/test_orc/modules/dns/conf/module_config.json @@ -0,0 +1,26 @@ +{ + "config": { + "meta": { + "name": "dns", + "display_name": "DNS", + "description": "DNS test" + }, + "network": false, + "docker": { + "enable_container": true, + "timeout": 30 + }, + "tests":[ + { + "name": "dns.network.from_device", + "description": "Verify the device sends DNS requests", + "expected_behavior": "The device sends DNS requests." + }, + { + "name": "dns.network.from_dhcp", + "description": "Verify the device allows for a DNS server to be entered automatically", + "expected_behavior": "The device sends DNS requests to the DNS server provided by the DHCP server" + } + ] + } +} \ No newline at end of file diff --git a/test_orc/modules/dns/dns.Dockerfile b/test_orc/modules/dns/dns.Dockerfile new file mode 100644 index 000000000..7c3497bc3 --- /dev/null +++ b/test_orc/modules/dns/dns.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/dns/conf /testrun/conf + +# Load device binary files +COPY modules/dns/bin /testrun/bin + +# Copy over all python files +COPY modules/dns/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/dns/python/src/dns_module.py b/test_orc/modules/dns/python/src/dns_module.py new file mode 100644 index 000000000..f1333ce14 --- /dev/null +++ b/test_orc/modules/dns/python/src/dns_module.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 + +import subprocess +from test_module import TestModule + +LOG_NAME = "test_dns" +CAPTURE_FILE = "/runtime/network/dns.pcap" +LOGGER = None + +class DNSModule(TestModule): + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + self._dns_server = "10.10.10.4" + global LOGGER + LOGGER = self._get_logger() + + def _check_dns_traffic(self, tcpdump_filter): + to_dns = self._exec_tcpdump(tcpdump_filter) + num_query_dns = len(to_dns) + LOGGER.info("DNS queries found: " + str(num_query_dns)) + dns_traffic_detected = len(to_dns) > 0 + LOGGER.info("DNS traffic detected: " + str(dns_traffic_detected)) + return dns_traffic_detected + + def _dns_network_from_dhcp(self): + LOGGER.info( + "Checking DNS traffic for configured DHCP DNS server: " + self._dns_server) + + # Check if the device DNS traffic is to appropriate server + tcpdump_filter = 'dst port 53 and dst host {} and ether src {}'.format( + self._dns_server, self._device_mac) + + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + + LOGGER.info( + "DNS traffic detected to configured DHCP DNS server: " + str(result)) + return result + + def _dns_network_from_device(self): + LOGGER.info("Checking DNS traffic from device: " + self._device_mac) + + # Check if the device DNS traffic is to appropriate server + tcpdump_filter = 'dst port 53 and ether src {}'.format( + self._device_mac) + + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + + LOGGER.info("DNS traffic detected from device: " + str(result)) + return result + + def _exec_tcpdump(self, tcpdump_filter): + """ + Args + tcpdump_filter: Filter to pass onto tcpdump file + capture_file: Optional capture file to look + Returns + List of packets matching the filter + """ + command = 'tcpdump -tttt -n -r {} {}'.format( + CAPTURE_FILE, tcpdump_filter) + + LOGGER.debug("tcpdump command: " + command) + + process = subprocess.Popen(command, + universal_newlines=True, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + text = str(process.stdout.read()).rstrip() + + LOGGER.debug("tcpdump response: " + text) + + if text: + return text.split("\n") + + return [] diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py new file mode 100644 index 000000000..7ee5e7833 --- /dev/null +++ b/test_orc/modules/dns/python/src/run.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +import argparse +import signal +import sys +import logger +import time + +from dns_module import DNSModule + +LOG_NAME = "dns_module" +LOGGER = logger.get_logger(LOG_NAME) +RUNTIME = 300 + +class DNSModuleRunner: + + def __init__(self,module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + self.add_logger(module) + + LOGGER.info("Starting DNS Test Module") + + self._test_module = DNSModule(module) + self._test_module.run_tests() + + LOGGER.info("DNS Test Module Finished") + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def _handler(self, signum, *other): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) + +def run(argv): + parser = argparse.ArgumentParser(description="Test Module DNS", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", "--module", help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + DNSModuleRunner(args.module.strip()) + +if __name__ == "__main__": + run(sys.argv) diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index f68a13579..85c6fb631 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -14,6 +14,7 @@ TEST_MODULES_DIR = "modules" MODULE_CONFIG = "conf/module_config.json" + class TestOrchestrator: """Manages and controls the test modules.""" @@ -27,26 +28,27 @@ def __init__(self): # Resolve the path to the test-run folder self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) - shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), ignore_errors=True) + shutil.rmtree(os.path.join(self._root_path, + RUNTIME_DIR), ignore_errors=True) os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) def start(self): LOGGER.info("Starting Test Orchestrator") self._load_test_modules() - self._run_test_modules() + self.build_test_modules() def stop(self): """Stop any running tests""" self._stop_modules() - def _run_test_modules(self): + def run_test_modules(self, device): """Iterates through each test module and starts the container.""" LOGGER.info("Running test modules...") for module in self._test_modules: - self._run_test_module(module) + self._run_test_module(module, device) LOGGER.info("All tests complete") - def _run_test_module(self, module): + def _run_test_module(self, module, device): """Start the test container and extract the results.""" if module is None or not module.enable_container: @@ -55,7 +57,10 @@ def _run_test_module(self, module): LOGGER.info("Running test module " + module.name) try: - container_runtime_dir = os.path.join(self._root_path, "runtime/test/" + module.name) + container_runtime_dir = os.path.join( + self._root_path, "runtime/test/" + device.mac_addr.replace(":","") + "/" + module.name) + network_runtime_dir = os.path.join( + self._root_path, "runtime/network") os.makedirs(container_runtime_dir) client = docker.from_env() @@ -68,12 +73,24 @@ def _run_test_module(self, module): hostname=module.container_name, privileged=True, detach=True, - mounts=[Mount( - target="/runtime/output", - source=container_runtime_dir, - type='bind' - )], - environment={"HOST_USER": os.getlogin()} + mounts=[ + Mount( + target="/runtime/output", + source=container_runtime_dir, + type='bind' + ), + Mount( + target="/runtime/network", + source=network_runtime_dir, + type='bind', + read_only=True + ), + ], + environment={ + "HOST_USER": os.getlogin(), + "DEVICE_MAC": device.mac_addr, + "DEVICE_TEST_MODULES": device.test_modules + } ) except (docker.errors.APIError, docker.errors.ContainerError) as container_error: LOGGER.error("Test module " + module.name + " has failed to start") @@ -90,7 +107,7 @@ def _run_test_module(self, module): LOGGER.info("Test module " + module.name + " has finished") - def _get_module_status(self,module): + def _get_module_status(self, module): container = self._get_module_container(module) if container is not None: return container.status @@ -124,11 +141,11 @@ def _load_test_modules(self): # Load basic module information module = TestModule() with open(os.path.join( - self._path, - modules_dir, - module_dir, - MODULE_CONFIG), - encoding='UTF-8') as module_config_file: + self._path, + modules_dir, + module_dir, + MODULE_CONFIG), + encoding='UTF-8') as module_config_file: module_json = json.load(module_config_file) module.name = module_json['config']['meta']['name'] @@ -150,7 +167,7 @@ def _load_test_modules(self): self._test_modules.append(module) if module.enable_container: - loaded_modules += module.dir_name + " " + loaded_modules += module.dir_name + " " LOGGER.info(loaded_modules) @@ -167,7 +184,7 @@ def _build_test_module(self, module): client.images.build( dockerfile=os.path.join(module.dir, module.build_file), path=self._path, - forcerm=True, # Cleans up intermediate containers during build + forcerm=True, # Cleans up intermediate containers during build tag=module.image_name ) except docker.errors.BuildError as error: @@ -197,4 +214,4 @@ def _stop_module(self, module, kill=False): container.stop() LOGGER.debug("Container stopped:" + module.container_name) except docker.errors.NotFound: - pass \ No newline at end of file + pass From 0837a9cc8a947ff2edac37a058f3516c0bf415f2 Mon Sep 17 00:00:00 2001 From: Noureddine Date: Tue, 16 May 2023 15:49:46 +0100 Subject: [PATCH 09/48] Add baseline and pylint tests (#25) --- .github/workflows/testing.yml | 30 +++++++++ framework/test_runner.py | 11 +++- framework/testrun.py | 10 ++- net_orc/python/src/network_orchestrator.py | 47 ++++++++++++- net_orc/python/src/network_validator.py | 3 +- test_orc/modules/baseline/python/src/run.py | 2 +- test_orc/modules/dns/python/src/run.py | 2 +- test_orc/python/src/test_orchestrator.py | 3 +- testing/docker/ci_baseline/Dockerfile | 10 +++ testing/docker/ci_baseline/entrypoint.sh | 56 ++++++++++++++++ testing/test_baseline | 73 +++++++++++++++++++++ testing/test_baseline.py | 49 ++++++++++++++ testing/test_pylint | 26 ++++++++ 13 files changed, 309 insertions(+), 13 deletions(-) create mode 100644 .github/workflows/testing.yml create mode 100644 testing/docker/ci_baseline/Dockerfile create mode 100755 testing/docker/ci_baseline/entrypoint.sh create mode 100755 testing/test_baseline create mode 100644 testing/test_baseline.py create mode 100755 testing/test_pylint diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml new file mode 100644 index 000000000..fbdbe442c --- /dev/null +++ b/.github/workflows/testing.yml @@ -0,0 +1,30 @@ +name: Testrun test suite + +on: + push: + pull_request: + schedule: + - cron: '0 13 * * *' + +jobs: + testrun: + name: Baseline + runs-on: ubuntu-20.04 + timeout-minutes: 20 + steps: + - name: Checkout source + uses: actions/checkout@v2.3.4 + - name: Run tests + shell: bash {0} + run: testing/test_baseline + + pylint: + name: Pylint + runs-on: ubuntu-20.04 + timeout-minutes: 20 + steps: + - name: Checkout source + uses: actions/checkout@v2.3.4 + - name: Run tests + shell: bash {0} + run: testing/test_pylint diff --git a/framework/test_runner.py b/framework/test_runner.py index 14cadf3e1..5c4bf1472 100644 --- a/framework/test_runner.py +++ b/framework/test_runner.py @@ -19,10 +19,12 @@ class TestRunner: - def __init__(self, config_file=None, validate=True, net_only=False): + def __init__(self, config_file=None, validate=True, net_only=False, single_intf=False): self._register_exits() self.test_run = TestRun(config_file=config_file, - validate=validate, net_only=net_only) + validate=validate, + net_only=net_only, + single_intf=single_intf) def _register_exits(self): signal.signal(signal.SIGINT, self._exit_handler) @@ -57,6 +59,8 @@ def parse_args(argv): help="Turn off the validation of the network after network boot") parser.add_argument("-net", "--net-only", action="store_true", help="Run the network only, do not run tests") + parser.add_argument("--single-intf", action="store_true", + help="Single interface mode (experimental)") args, unknown = parser.parse_known_args() return args @@ -65,5 +69,6 @@ def parse_args(argv): args = parse_args(sys.argv) runner = TestRunner(config_file=args.config_file, validate=not args.no_validate, - net_only=args.net_only) + net_only=args.net_only, + single_intf=args.single_intf) runner.start() diff --git a/framework/testrun.py b/framework/testrun.py index 40076108b..55719d968 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -33,7 +33,7 @@ LOGGER = logger.get_logger('test_run') CONFIG_FILE = 'conf/system.json' EXAMPLE_CONFIG_FILE = 'conf/system.json.example' -RUNTIME = 300 +RUNTIME = 1500 LOCAL_DEVICES_DIR = 'local/devices' RESOURCE_DEVICES_DIR = 'resources/devices' @@ -51,9 +51,10 @@ class TestRun: # pylint: disable=too-few-public-methods orchestrator and user interface. """ - def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False): + def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False, single_intf=False): self._devices = [] self._net_only = net_only + self._single_intf = single_intf # Catch any exit signals self._register_exits() @@ -62,7 +63,10 @@ def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False): config_file_abs = self._get_config_abs(config_file=config_file) self._net_orc = net_orc.NetworkOrchestrator( - config_file=config_file_abs, validate=validate, async_monitor=not self._net_only) + config_file=config_file_abs, + validate=validate, + async_monitor=not self._net_only, + single_intf = self._single_intf) self._test_orc = test_orc.TestOrchestrator() def start(self): diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 63391a24f..56ae93c3f 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,8 +1,10 @@ #!/usr/bin/env python3 +import getpass import ipaddress import json import os +import subprocess import sys import time import threading @@ -25,15 +27,16 @@ INTERNET_BRIDGE = "tr-c" PRIVATE_DOCKER_NET = "tr-private-net" CONTAINER_NAME = "network_orchestrator" -RUNTIME = 300 +RUNTIME = 1500 class NetworkOrchestrator: """Manage and controls a virtual testing network.""" - def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False): + def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): self._int_intf = None self._dev_intf = None + self._single_intf = single_intf self.listener = None @@ -153,6 +156,38 @@ def _ping(self, net_module): success = util.run_command(cmd, output=False) return success + def _ci_pre_network_create(self): + """ Stores network properties to restore network after + network creation and flushes internet interface + """ + + self._ethmac = subprocess.check_output( + f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() + self._gateway = subprocess.check_output( + "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() + self._ipv4 = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._ipv6 = subprocess.check_output( + f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._brd = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() + + def _ci_post_network_create(self): + """ Restore network connection in CI environment """ + LOGGER.info("post cr") + util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") + util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") + util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") + util.run_command(f"ip addr flush dev {self._int_intf}") + util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") + util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") + util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") + util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") + util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") + util.run_command(f"dhclient {INTERNET_BRIDGE}") + util.run_command(f"ip route del default via 10.1.0.1") + util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") + def _create_private_net(self): client = docker.from_env() try: @@ -186,6 +221,9 @@ def create_net(self): LOGGER.error("Configured interfaces are not ready for use. " + "Ensure both interfaces are connected.") sys.exit(1) + + if self._single_intf: + self._ci_pre_network_create() # Create data plane util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) @@ -210,6 +248,9 @@ def create_net(self): util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") + if self._single_intf: + self._ci_post_network_create() + self._create_private_net() self.listener = Listener(self._dev_intf) @@ -325,7 +366,7 @@ def _start_network_service(self, net_module): privileged=True, detach=True, mounts=net_module.mounts, - environment={"HOST_USER": os.getlogin()} + environment={"HOST_USER": getpass.getuser()} ) except docker.errors.ContainerError as error: LOGGER.error("Container run error") diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py index 53fbcdbd0..2f01a06e9 100644 --- a/net_orc/python/src/network_validator.py +++ b/net_orc/python/src/network_validator.py @@ -5,6 +5,7 @@ import time import docker from docker.types import Mount +import getpass import logger import util @@ -144,7 +145,7 @@ def _start_network_device(self, device): privileged=True, detach=True, mounts=device.mounts, - environment={"HOST_USER": os.getlogin()} + environment={"HOST_USER": getpass.getuser()} ) except docker.errors.ContainerError as error: LOGGER.error("Container run error") diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py index ffa171e17..8b55484ae 100644 --- a/test_orc/modules/baseline/python/src/run.py +++ b/test_orc/modules/baseline/python/src/run.py @@ -8,7 +8,7 @@ from baseline_module import BaselineModule LOGGER = logger.get_logger('test_module') -RUNTIME = 300 +RUNTIME = 1500 class BaselineModuleRunner: diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py index 7ee5e7833..e5fedb67b 100644 --- a/test_orc/modules/dns/python/src/run.py +++ b/test_orc/modules/dns/python/src/run.py @@ -10,7 +10,7 @@ LOG_NAME = "dns_module" LOGGER = logger.get_logger(LOG_NAME) -RUNTIME = 300 +RUNTIME = 1500 class DNSModuleRunner: diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 85c6fb631..ee5cc5b45 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -1,4 +1,5 @@ """Provides high level management of the test orchestrator.""" +import getpass import os import json import time @@ -87,7 +88,7 @@ def _run_test_module(self, module, device): ), ], environment={ - "HOST_USER": os.getlogin(), + "HOST_USER": getpass.getuser(), "DEVICE_MAC": device.mac_addr, "DEVICE_TEST_MODULES": device.test_modules } diff --git a/testing/docker/ci_baseline/Dockerfile b/testing/docker/ci_baseline/Dockerfile new file mode 100644 index 000000000..7c3c1eebd --- /dev/null +++ b/testing/docker/ci_baseline/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:jammy + +#Update and get all additional requirements not contained in the base image +RUN apt-get update && apt-get -y upgrade + +RUN apt-get install -y isc-dhcp-client ntpdate coreutils moreutils inetutils-ping curl jq dnsutils + +COPY entrypoint.sh /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file diff --git a/testing/docker/ci_baseline/entrypoint.sh b/testing/docker/ci_baseline/entrypoint.sh new file mode 100755 index 000000000..bc2da3ec2 --- /dev/null +++ b/testing/docker/ci_baseline/entrypoint.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +OUT=/out/testrun_ci.json + +NTP_SERVER=10.10.10.5 +DNS_SERVER=10.10.10.4 + +function wout(){ + temp=${1//./\".\"} + key=${temp:1}\" + echo $key + value=$2 + jq "$key+=\"$value\"" $OUT | sponge $OUT +} + + +dig @8.8.8.8 +short www.google.com + +# DHCP +ip addr flush dev eth0 +PID_FILE=/var/run/dhclient.pid +if [ -f $PID_FILE ]; then + kill -9 $(cat $PID_FILE) || true + rm -f $PID_FILE +fi +dhclient -v eth0 + +echo "{}" > $OUT + +# Gen network +main_intf=$(ip route | grep '^default' | awk '{print $NF}') + +wout .network.main_intf $main_intf +wout .network.gateway $(ip route | head -n 1 | awk '{print $3}') +wout .network.ipv4 $(ip a show $main_intf | grep "inet " | awk '{print $2}') +wout .network.ipv6 $(ip a show $main_intf | grep inet6 | awk '{print $2}') +wout .network.ethmac $(cat /sys/class/net/$main_intf/address) + +wout .dns_response $(dig @$DNS_SERVER +short www.google.com | tail -1) +wout .ntp_offset $(ntpdate -q $NTP_SERVER | tail -1 | sed -E 's/.*offset ([-=0-9\.]*) sec/\1/') + +# INTERNET CONNECTION +google_com_response=$(curl -LI http://www.google.com -o /dev/null -w '%{http_code}\n' -s) +wout .network.internet $google_com_response + +# DHCP LEASE +while read pre name value; do + if [[ $pre != option ]]; then + continue; + fi + + wout .dhcp.$name $(echo "${value%;}" | tr -d '\"\\') + +done < <(grep -B 99 -m 1 "}" /var/lib/dhcp/dhclient.leases) + +cat $OUT \ No newline at end of file diff --git a/testing/test_baseline b/testing/test_baseline new file mode 100755 index 000000000..d7fc1e5c5 --- /dev/null +++ b/testing/test_baseline @@ -0,0 +1,73 @@ + +#!/bin/bash -e + +TESTRUN_OUT=/tmp/testrun.log + +# Setup requirements +sudo apt-get update +sudo apt-get install openvswitch-common openvswitch-switch tcpdump jq moreutils coreutils + +pip3 install pytest + +# Setup device network +sudo ip link add dev endev0a type veth peer name endev0b +sudo ip link set dev endev0a up +sudo ip link set dev endev0b up +sudo docker network create -d macvlan -o parent=endev0b endev0 + +# Start OVS +sudo /usr/share/openvswitch/scripts/ovs-ctl start + +# Fix due to ordering +sudo docker build ./net_orc/ -t test-run/base -f net_orc/network/modules/base/base.Dockerfile + +# Build Test Container +sudo docker build ./testing/docker/ci_baseline -t ci1 -f ./testing/docker/ci_baseline/Dockerfile + +cat <conf/system.json +{ + "network": { + "device_intf": "endev0a", + "internet_intf": "eth0" + }, + "log_level": "DEBUG" +} +EOF + +sudo cmd/install + +sudo cmd/start --single-intf > $TESTRUN_OUT 2>&1 & +TPID=$! + +# Time to wait for testrun to be ready +WAITING=600 +for i in `seq 1 $WAITING`; do + if [[ -n $(fgrep "Waiting for devices on the network" $TESTRUN_OUT) ]]; then + break + fi + + if [[ ! -d /proc/$TPID ]]; then + cat $TESTRUN_OUT + echo "error encountered starting test run" + exit 1 + fi + + sleep 1 +done + +if [[ $i -eq $WAITING ]]; then + cat $TESTRUN_OUT + echo "failed after waiting $WAITING seconds for test-run start" + exit 1 +fi + +# Load Test Container +sudo docker run --network=endev0 --cap-add=NET_ADMIN -v /tmp:/out --privileged ci1 + +echo "Done baseline test" + +more $TESTRUN_OUT + +pytest testing/ + +exit $? diff --git a/testing/test_baseline.py b/testing/test_baseline.py new file mode 100644 index 000000000..3ab30a7c0 --- /dev/null +++ b/testing/test_baseline.py @@ -0,0 +1,49 @@ +import json +import pytest +import re +import os + +NTP_SERVER = '10.10.10.5' +DNS_SERVER = '10.10.10.4' + +CI_BASELINE_OUT = '/tmp/testrun_ci.json' + +@pytest.fixture +def container_data(): + dir = os.path.dirname(os.path.abspath(__file__)) + with open(CI_BASELINE_OUT) as f: + return json.load(f) + +@pytest.fixture +def validator_results(): + dir = os.path.dirname(os.path.abspath(__file__)) + with open(os.path.join(dir, '../', 'runtime/validation/faux-dev/result.json')) as f: + return json.load(f) + +def test_internet_connectivity(container_data): + assert container_data['network']['internet'] == 200 + +def test_dhcp_ntp_option(container_data): + """ Check DHCP gives NTP server as option """ + assert container_data['dhcp']['ntp-servers'] == NTP_SERVER + +def test_dhcp_dns_option(container_data): + assert container_data['dhcp']['domain-name-servers'] == DNS_SERVER + +def test_assigned_ipv4_address(container_data): + assert int(container_data['network']['ipv4'].split('.')[-1][:-3]) > 10 + +def test_ntp_server_reachable(container_data): + assert not 'no servers' in container_data['ntp_offset'] + +def test_dns_server_reachable(container_data): + assert not 'no servers' in container_data['dns_response'] + +def test_dns_server_resolves(container_data): + assert re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', + container_data['dns_response']) + +def test_validator_results_compliant(validator_results): + results = [True if x['result'] == 'compliant' else False + for x in validator_results['results']] + assert all(results) diff --git a/testing/test_pylint b/testing/test_pylint new file mode 100755 index 000000000..833961d94 --- /dev/null +++ b/testing/test_pylint @@ -0,0 +1,26 @@ +#!/bin/bash + +ERROR_LIMIT=2534 + +sudo cmd/install + +source venv/bin/activate +sudo pip3 install pylint + +files=$(find . -path ./venv -prune -o -name '*.py' -print) + +OUT=pylint.out + +rm -f $OUT && touch $OUT +pylint $files -ry --extension-pkg-allow-list=docker 2>/dev/null | tee -a $OUT + +new_errors=$(cat $OUT | grep "statements analysed." | awk '{print $1}') + +echo "$new_errors > $ERROR_LIMIT?" +if (( $new_errors > $ERROR_LIMIT)); then + echo new errors $new_errors > error limit $ERROR_LIMIT + echo failing .. + exit 1 +fi + +exit 0 From 4171e5f343149b5f49433c4155d4af41647b40e9 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Tue, 16 May 2023 17:27:18 +0100 Subject: [PATCH 10/48] Discover devices on the network (#22) * Discover devices on the network * Add defaults when missing from config Implement monitor wait period from config * Add steady state monitor Remove duplicate callback registrations * Load devices into network orchestrator during testrun start --------- Co-authored-by: jhughesbiot --- conf/system.json.example | 5 +- framework/device.py | 8 +- framework/testrun.py | 22 +- net_orc/python/src/listener.py | 31 +- net_orc/python/src/network_device.py | 9 + net_orc/python/src/network_event.py | 6 +- net_orc/python/src/network_orchestrator.py | 1298 +++++++++++--------- net_orc/python/src/network_runner.py | 85 +- net_orc/python/src/util.py | 49 +- test_orc/python/src/test_orchestrator.py | 2 +- 10 files changed, 811 insertions(+), 704 deletions(-) create mode 100644 net_orc/python/src/network_device.py diff --git a/conf/system.json.example b/conf/system.json.example index 2d4b737d0..ecf480104 100644 --- a/conf/system.json.example +++ b/conf/system.json.example @@ -3,5 +3,8 @@ "device_intf": "enx123456789123", "internet_intf": "enx123456789124" }, - "log_level": "INFO" + "log_level": "INFO", + "startup_timeout": 60, + "monitor_period": 300, + "runtime": 1200 } \ No newline at end of file diff --git a/framework/device.py b/framework/device.py index d41199612..c17dd8e3a 100644 --- a/framework/device.py +++ b/framework/device.py @@ -1,12 +1,12 @@ """Track device object information.""" from dataclasses import dataclass +from network_device import NetworkDevice @dataclass -class Device: +class Device(NetworkDevice): """Represents a physical device and it's configuration.""" - make: str - model: str - mac_addr: str + make: str = None + model: str = None test_modules: str = None diff --git a/framework/testrun.py b/framework/testrun.py index 55719d968..b9cb6a0e5 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -12,7 +12,6 @@ import signal import time import logger -from device import Device # Locate parent directory current_dir = os.path.dirname(os.path.realpath(__file__)) @@ -30,6 +29,8 @@ import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel +from device import Device # pylint: disable=wrong-import-position,import-outside-toplevel + LOGGER = logger.get_logger('test_run') CONFIG_FILE = 'conf/system.json' EXAMPLE_CONFIG_FILE = 'conf/system.json.example' @@ -80,9 +81,11 @@ def start(self): else: self._start_network() self._test_orc.start() + self._net_orc.listener.register_callback( - self._device_discovered, - [NetworkEvent.DEVICE_DISCOVERED]) + self._device_stable, + [NetworkEvent.DEVICE_STABLE] + ) LOGGER.info("Waiting for devices on the network...") @@ -117,6 +120,10 @@ def _get_config_abs(self, config_file=None): return os.path.abspath(config_file) def _start_network(self): + # Load in local device configs to the network orchestrator + self._net_orc._devices = self._devices + + # Start the network orchestrator self._net_orc.start() def _run_tests(self, device): @@ -169,9 +176,12 @@ def _device_discovered(self, mac_addr): LOGGER.info( f'Discovered {device.make} {device.model} on the network') else: - device = Device(make=None, model=None, mac_addr=mac_addr) + device = Device(mac_addr=mac_addr) + self._devices.append(device) LOGGER.info( f'A new device has been discovered with mac address {mac_addr}') - # TODO: Pass device information to test orchestrator/runner - self._run_tests(device) + def _device_stable(self, mac_addr): + device = self.get_device(mac_addr) + LOGGER.info(f'Device with mac address {mac_addr} is ready for testing.') + self._test_orc.run_test_modules(device) diff --git a/net_orc/python/src/listener.py b/net_orc/python/src/listener.py index d07de4686..0323fd9f6 100644 --- a/net_orc/python/src/listener.py +++ b/net_orc/python/src/listener.py @@ -1,5 +1,6 @@ """Intercepts network traffic between network services and the device under test.""" +import threading from scapy.all import AsyncSniffer, DHCP, get_if_hwaddr import logger from network_event import NetworkEvent @@ -12,7 +13,6 @@ DHCP_ACK = 5 CONTAINER_MAC_PREFIX = '9a:02:57:1e:8f' - class Listener: """Methods to start and stop the network listener.""" @@ -47,22 +47,25 @@ def register_callback(self, callback, events=[]): # pylint: disable=dangerous-d } ) + def call_callback(self, net_event, *args): + for callback in self._callbacks: + if net_event in callback['events']: + callback_thread = threading.Thread(target=callback['callback'], name="Callback thread", args=args) + callback_thread.start() + def _packet_callback(self, packet): - # Ignore packets originating from our containers - if packet.src.startswith(CONTAINER_MAC_PREFIX) or packet.src == self._device_intf_mac: - return + # DHCP ACK callback + if DHCP in packet and self._get_dhcp_type(packet) == DHCP_ACK: + self.call_callback(NetworkEvent.DHCP_LEASE_ACK, packet) + # New device discovered callback if not packet.src is None and packet.src not in self._discovered_devices: - self._device_discovered(packet.src) + # Ignore packets originating from our containers + if packet.src.startswith(CONTAINER_MAC_PREFIX) or packet.src == self._device_intf_mac: + return + self._discovered_devices.append(packet.src) + self.call_callback(NetworkEvent.DEVICE_DISCOVERED, packet.src) def _get_dhcp_type(self, packet): - return packet[DHCP].options[0][1] - - def _device_discovered(self, mac_addr): - LOGGER.debug(f'Discovered device with address {mac_addr}') - self._discovered_devices.append(mac_addr) - - for callback in self._callbacks: - if NetworkEvent.DEVICE_DISCOVERED in callback['events']: - callback['callback'](mac_addr) + return packet[DHCP].options[0][1] \ No newline at end of file diff --git a/net_orc/python/src/network_device.py b/net_orc/python/src/network_device.py new file mode 100644 index 000000000..f54a273b6 --- /dev/null +++ b/net_orc/python/src/network_device.py @@ -0,0 +1,9 @@ +"""Track device object information.""" +from dataclasses import dataclass + +@dataclass +class NetworkDevice: + """Represents a physical device and it's configuration.""" + + mac_addr: str + ip_addr: str = None diff --git a/net_orc/python/src/network_event.py b/net_orc/python/src/network_event.py index c77dfa706..dc08cf892 100644 --- a/net_orc/python/src/network_event.py +++ b/net_orc/python/src/network_event.py @@ -3,8 +3,6 @@ class NetworkEvent(Enum): """All possible network events.""" - - ALL = 0 DEVICE_DISCOVERED = 1 - DHCP_LEASE_NEW = 2 - DHCP_LEASE_RENEWED = 3 + DEVICE_STABLE = 2 + DHCP_LEASE_ACK = 3 diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 56ae93c3f..690e974c2 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,611 +1,687 @@ -#!/usr/bin/env python3 - -import getpass -import ipaddress -import json -import os -import subprocess -import sys -import time -import threading - -import docker -from docker.types import Mount - -import logger -import util -from listener import Listener -from network_validator import NetworkValidator - -LOGGER = logger.get_logger("net_orc") -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME_DIR = "runtime/network" -NETWORK_MODULES_DIR = "network/modules" -NETWORK_MODULE_METADATA = "conf/module_config.json" -DEVICE_BRIDGE = "tr-d" -INTERNET_BRIDGE = "tr-c" -PRIVATE_DOCKER_NET = "tr-private-net" -CONTAINER_NAME = "network_orchestrator" -RUNTIME = 1500 - - -class NetworkOrchestrator: - """Manage and controls a virtual testing network.""" - - def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): - self._int_intf = None - self._dev_intf = None - self._single_intf = single_intf - - self.listener = None - - self._net_modules = [] - - self.validate = validate - - self.async_monitor = async_monitor - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - self.validator = NetworkValidator() - - self.network_config = NetworkConfig() - - self.load_config(config_file) - - def start(self): - """Start the network orchestrator.""" - - LOGGER.info("Starting Network Orchestrator") - # Get all components ready - self.load_network_modules() - - # Restore the network first if required - self.stop(kill=True) - - self.start_network() - - if self.async_monitor: - # Run the monitor method asynchronously to keep this method non-blocking - self._monitor_thread = threading.Thread( - target=self.monitor_network) - self._monitor_thread.daemon = True - self._monitor_thread.start() - else: - self.monitor_network() - - def start_network(self): - """Start the virtual testing network.""" - LOGGER.info("Starting network") - - self.build_network_modules() - self.create_net() - self.start_network_services() - - if self.validate: - # Start the validator after network is ready - self.validator.start() - - # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready.") - - def stop(self, kill=False): - """Stop the network orchestrator.""" - self.stop_validator(kill=kill) - self.stop_network(kill=kill) - - def stop_validator(self, kill=False): - """Stop the network validator.""" - # Shutdown the validator - self.validator.stop(kill=kill) - - def stop_network(self, kill=False): - """Stop the virtual testing network.""" - # Shutdown network - self.stop_networking_services(kill=kill) - self.restore_net() - - def monitor_network(self): - # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) - time.sleep(RUNTIME) - - self.stop() - - def load_config(self,config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - self._config_file=os.path.join(self._path, CONFIG_FILE) - else: - # If defined, use as provided - self._config_file=config_file - - if not os.path.isfile(self._config_file): - LOGGER.error("Configuration file is not present at " + config_file) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) - with open(self._config_file, encoding='UTF-8') as config_json_file: - config_json = json.load(config_json_file) - self.import_config(config_json) - - def import_config(self, json_config): - self._int_intf = json_config['network']['internet_intf'] - self._dev_intf = json_config['network']['device_intf'] - - def _check_network_services(self): - LOGGER.debug("Checking network modules...") - for net_module in self._net_modules: - if net_module.enable_container: - LOGGER.debug("Checking network module: " + - net_module.display_name) - success = self._ping(net_module) - if success: - LOGGER.debug(net_module.display_name + - " responded succesfully: " + str(success)) - else: - LOGGER.error(net_module.display_name + - " failed to respond to ping") - - def _ping(self, net_module): - host = net_module.net_config.ipv4_address - namespace = "tr-ctns-" + net_module.dir_name - cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) - success = util.run_command(cmd, output=False) - return success - - def _ci_pre_network_create(self): - """ Stores network properties to restore network after - network creation and flushes internet interface - """ - - self._ethmac = subprocess.check_output( - f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() - self._gateway = subprocess.check_output( - "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() - self._ipv4 = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._ipv6 = subprocess.check_output( - f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._brd = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() - - def _ci_post_network_create(self): - """ Restore network connection in CI environment """ - LOGGER.info("post cr") - util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") - util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") - util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") - util.run_command(f"ip addr flush dev {self._int_intf}") - util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") - util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") - util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") - util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") - util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") - util.run_command(f"dhclient {INTERNET_BRIDGE}") - util.run_command(f"ip route del default via 10.1.0.1") - util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") - - def _create_private_net(self): - client = docker.from_env() - try: - network = client.networks.get(PRIVATE_DOCKER_NET) - network.remove() - except docker.errors.NotFound: - pass - - # TODO: These should be made into variables - ipam_pool = docker.types.IPAMPool( - subnet='100.100.0.0/16', - iprange='100.100.100.0/24' - ) - - ipam_config = docker.types.IPAMConfig( - pool_configs=[ipam_pool] - ) - - client.networks.create( - PRIVATE_DOCKER_NET, - ipam=ipam_config, - internal=True, - check_duplicate=True, - driver="macvlan" - ) - - def create_net(self): - LOGGER.info("Creating baseline network") - - if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): - LOGGER.error("Configured interfaces are not ready for use. " + - "Ensure both interfaces are connected.") - sys.exit(1) - - if self._single_intf: - self._ci_pre_network_create() - - # Create data plane - util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) - - # Create control plane - util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) - - # Add external interfaces to data and control plane - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + self._dev_intf) - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + self._int_intf) - - # Enable forwarding of eapol packets - util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + - " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") - - # Remove IP from internet adapter - util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") - - # Set ports up - util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") - util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") - - if self._single_intf: - self._ci_post_network_create() - - self._create_private_net() - - self.listener = Listener(self._dev_intf) - self.listener.start_listener() - - def load_network_modules(self): - """Load network modules from module_config.json.""" - LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) - - loaded_modules = "Loaded the following network modules: " - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - for module_dir in os.listdir(net_modules_dir): - - net_module = NetworkModule() - - # Load basic module information - - net_module_json = json.load(open(os.path.join( - self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) - - net_module.name = net_module_json['config']['meta']['name'] - net_module.display_name = net_module_json['config']['meta']['display_name'] - net_module.description = net_module_json['config']['meta']['description'] - net_module.dir = os.path.join( - self._path, net_modules_dir, module_dir) - net_module.dir_name = module_dir - net_module.build_file = module_dir + ".Dockerfile" - net_module.container_name = "tr-ct-" + net_module.dir_name - net_module.image_name = "test-run/" + net_module.dir_name - - # Attach folder mounts to network module - if "docker" in net_module_json['config']: - if "mounts" in net_module_json['config']['docker']: - for mount_point in net_module_json['config']['docker']['mounts']: - net_module.mounts.append(Mount( - target=mount_point['target'], - source=os.path.join( - os.getcwd(), mount_point['source']), - type='bind' - )) - - # Determine if this is a container or just an image/template - if "enable_container" in net_module_json['config']['docker']: - net_module.enable_container = net_module_json['config']['docker']['enable_container'] - - # Load network service networking configuration - if net_module.enable_container: - - net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] - net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] - - net_module.net_config.host = False if not "host" in net_module_json[ - 'config']['network'] else net_module_json['config']['network']['host'] - - net_module.net_config.ipv4_address = self.network_config.ipv4_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv4_network = self.network_config.ipv4_network - - net_module.net_config.ipv6_address = self.network_config.ipv6_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv6_network = self.network_config.ipv6_network - - loaded_modules += net_module.dir_name + " " - - self._net_modules.append(net_module) - - LOGGER.info(loaded_modules) - - def build_network_modules(self): - LOGGER.info("Building network modules...") - for net_module in self._net_modules: - self._build_module(net_module) - - def _build_module(self, net_module): - LOGGER.debug("Building network module " + net_module.dir_name) - client = docker.from_env() - client.images.build( - dockerfile=os.path.join(net_module.dir, net_module.build_file), - path=self._path, - forcerm=True, - tag="test-run/" + net_module.dir_name - ) - - def _get_network_module(self, name): - for net_module in self._net_modules: - if name == net_module.display_name: - return net_module - return None - - # Start the OVS network module - # This should always be called before loading all - # other modules to allow for a properly setup base - # network - def _start_ovs_module(self): - self._start_network_service(self._get_network_module("OVS")) - - def _start_network_service(self, net_module): - - LOGGER.debug("Starting net service " + net_module.display_name) - network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET - LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, - container name: {net_module.container_name}""") - try: - client = docker.from_env() - net_module.container = client.containers.run( - net_module.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=net_module.container_name, - hostname=net_module.container_name, - network=PRIVATE_DOCKER_NET, - privileged=True, - detach=True, - mounts=net_module.mounts, - environment={"HOST_USER": getpass.getuser()} - ) - except docker.errors.ContainerError as error: - LOGGER.error("Container run error") - LOGGER.error(error) - - if network != "host": - self._attach_service_to_network(net_module) - - def _stop_service_module(self, net_module, kill=False): - LOGGER.debug("Stopping Service container " + net_module.container_name) - try: - container = self._get_service_container(net_module) - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - net_module.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - net_module.container_name) - container.stop() - LOGGER.debug("Container stopped:" + net_module.container_name) - except Exception as error: - LOGGER.error("Container stop error") - LOGGER.error(error) - - def _get_service_container(self, net_module): - LOGGER.debug("Resolving service container: " + - net_module.container_name) - container = None - try: - client = docker.from_env() - container = client.containers.get(net_module.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - net_module.container_name + " not found") - except Exception as e: - LOGGER.error("Failed to resolve container") - LOGGER.error(e) - return container - - def stop_networking_services(self, kill=False): - LOGGER.info("Stopping network services") - for net_module in self._net_modules: - # Network modules may just be Docker images, so we do not want to stop them - if not net_module.enable_container: - continue - self._stop_service_module(net_module, kill) - - def start_network_services(self): - LOGGER.info("Starting network services") - - os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) - - for net_module in self._net_modules: - - # TODO: There should be a better way of doing this - # Do not try starting OVS module again, as it should already be running - if "OVS" != net_module.display_name: - - # Network modules may just be Docker images, so we do not want to start them as containers - if not net_module.enable_container: - continue - - self._start_network_service(net_module) - - LOGGER.info("All network services are running") - self._check_network_services() - - # TODO: Let's move this into a separate script? It does not look great - def _attach_service_to_network(self, net_module): - LOGGER.debug("Attaching net service " + - net_module.display_name + " to device bridge") - - # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) - bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Container network namespace name - container_net_ns = "tr-ctns-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) - - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] - - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) - - # Set IP address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") - - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") - - if net_module.net_config.enable_wan: - LOGGER.debug("Attaching net service " + - net_module.display_name + " to internet bridge") - - # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) - bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Attach bridge interface to internet bridge - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + bridge_intf) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to eth1 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name eth1") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + - container_net_ns + " ip link set dev eth1 up") - - def restore_net(self): - - LOGGER.info("Clearing baseline network") - - if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): - self.listener.stop_listener() - - client = docker.from_env() - - # Stop all network containers if still running - for net_module in self._net_modules: - try: - container = client.containers.get( - "tr-ct-" + net_module.dir_name) - container.kill() - except Exception: - continue - - # Delete data plane - util.run_command("ovs-vsctl --if-exists del-br tr-d") - - # Delete control plane - util.run_command("ovs-vsctl --if-exists del-br tr-c") - - # Restart internet interface - if util.interface_exists(self._int_intf): - util.run_command("ip link set " + self._int_intf + " down") - util.run_command("ip link set " + self._int_intf + " up") - - LOGGER.info("Network is restored") - - -class NetworkModule: - - def __init__(self): - self.name = None - self.display_name = None - self.description = None - - self.container = None - self.container_name = None - self.image_name = None - - # Absolute path - self.dir = None - self.dir_name = None - self.build_file = None - self.mounts = [] - - self.enable_container = True - - self.net_config = NetworkModuleNetConfig() - -# The networking configuration for a network module - - -class NetworkModuleNetConfig: - - def __init__(self): - - self.enable_wan = False - - self.ip_index = 0 - self.ipv4_address = None - self.ipv4_network = None - self.ipv6_address = None - self.ipv6_network = None - - self.host = False - - def get_ipv4_addr_with_prefix(self): - return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) - - def get_ipv6_addr_with_prefix(self): - return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) - -# Represents the current configuration of the network for the device bridge - -class NetworkConfig: - - # TODO: Let's get this from a configuration file - def __init__(self): - self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') - self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') +#!/usr/bin/env python3 + +import binascii +import getpass +import ipaddress +import json +import os +from scapy.all import BOOTP +import subprocess +import sys +import time +import threading +from threading import Timer + +import docker +from docker.types import Mount + +import logger +import util +from listener import Listener +from network_device import NetworkDevice +from network_event import NetworkEvent +from network_validator import NetworkValidator + +LOGGER = logger.get_logger("net_orc") +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" +RUNTIME_DIR = "runtime/network" +NETWORK_MODULES_DIR = "network/modules" +NETWORK_MODULE_METADATA = "conf/module_config.json" +DEVICE_BRIDGE = "tr-d" +INTERNET_BRIDGE = "tr-c" +PRIVATE_DOCKER_NET = "tr-private-net" +CONTAINER_NAME = "network_orchestrator" + +RUNTIME_KEY = "runtime" +MONITOR_PERIOD_KEY = "monitor_period" +STARTUP_TIMEOUT_KEY = "startup_timeout" +DEFAULT_STARTUP_TIMEOUT = 60 +DEFAULT_RUNTIME = 1200 +DEFAULT_MONITOR_PERIOD = 300 + +RUNTIME = 1500 + + +class NetworkOrchestrator: + """Manage and controls a virtual testing network.""" + + def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): + + self._runtime = DEFAULT_RUNTIME + self._startup_timeout = DEFAULT_STARTUP_TIMEOUT + self._monitor_period = DEFAULT_MONITOR_PERIOD + + self._int_intf = None + self._dev_intf = None + self._single_intf = single_intf + + self.listener = None + + self._net_modules = [] + + self.validate = validate + + self.async_monitor = async_monitor + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + self.validator = NetworkValidator() + + self.network_config = NetworkConfig() + + self.load_config(config_file) + + def start(self): + """Start the network orchestrator.""" + + LOGGER.info("Starting Network Orchestrator") + # Get all components ready + self.load_network_modules() + + # Restore the network first if required + self.stop(kill=True) + + self.start_network() + + if self.async_monitor: + # Run the monitor method asynchronously to keep this method non-blocking + self._monitor_thread = threading.Thread( + target=self.monitor_network) + self._monitor_thread.daemon = True + self._monitor_thread.start() + else: + self.monitor_network() + + def start_network(self): + """Start the virtual testing network.""" + LOGGER.info("Starting network") + + self.build_network_modules() + self.create_net() + self.start_network_services() + + if self.validate: + # Start the validator after network is ready + self.validator.start() + + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready.") + + # Start the listener + self.listener = Listener(self._dev_intf) + self.listener.start_listener() + + def stop(self, kill=False): + """Stop the network orchestrator.""" + self.stop_validator(kill=kill) + self.stop_network(kill=kill) + + def stop_validator(self, kill=False): + """Stop the network validator.""" + # Shutdown the validator + self.validator.stop(kill=kill) + + def stop_network(self, kill=False): + """Stop the virtual testing network.""" + # Shutdown network + self.stop_networking_services(kill=kill) + self.restore_net() + + def monitor_network(self): + self.listener.register_callback(self._device_discovered, [ + NetworkEvent.DEVICE_DISCOVERED]) + self.listener.register_callback( + self._dhcp_lease_ack, [NetworkEvent.DHCP_LEASE_ACK]) + # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) + time.sleep(self._runtime) + + self.stop() + + def _device_discovered(self, mac_addr): + + LOGGER.debug(f'Discovered device {mac_addr}. Waiting for device to obtain IP') + device = self._get_device(mac_addr=mac_addr) + + timeout = time.time() + self._startup_timeout + + while time.time() < timeout: + if device.ip_addr is None: + time.sleep(3) + else: + break + + if device.ip_addr is None: + LOGGER.info(f"Timed out whilst waiting for {mac_addr} to obtain an IP address") + return + + LOGGER.info(f"Device with mac addr {device.mac_addr} has obtained IP address {device.ip_addr}") + + self._start_device_monitor(device) + + def _dhcp_lease_ack(self, packet): + mac_addr = packet[BOOTP].chaddr.hex(":")[0:17] + device = self._get_device(mac_addr=mac_addr) + device.ip_addr = packet[BOOTP].yiaddr + + def _start_device_monitor(self, device): + """Start a timer until the steady state has been reached and + callback the steady state method for this device.""" + LOGGER.info(f"Monitoring device with mac addr {device.mac_addr} for {str(self._monitor_period)} seconds") + timer = Timer(self._monitor_period, + self.listener.call_callback, + args=(NetworkEvent.DEVICE_STABLE, device.mac_addr,)) + timer.start() + + def _get_device(self, mac_addr): + for device in self._devices: + if device.mac_addr == mac_addr: + return device + device = NetworkDevice(mac_addr=mac_addr) + self._devices.append(device) + return device + + def load_config(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file = os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file = config_file + + if not os.path.isfile(self._config_file): + LOGGER.error("Configuration file is not present at " + config_file) + LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info("Loading config file: " + + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + if RUNTIME_KEY in json_config: + self._runtime = json_config[RUNTIME_KEY] + if STARTUP_TIMEOUT_KEY in json_config: + self._startup_timeout = json_config[STARTUP_TIMEOUT_KEY] + if MONITOR_PERIOD_KEY in json_config: + self._monitor_period = json_config[MONITOR_PERIOD_KEY] + + def _check_network_services(self): + LOGGER.debug("Checking network modules...") + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug("Checking network module: " + + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + + " responded succesfully: " + str(success)) + else: + LOGGER.error(net_module.display_name + + " failed to respond to ping") + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = "tr-ctns-" + net_module.dir_name + cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) + success = util.run_command(cmd, output=False) + return success + + def _ci_pre_network_create(self): + """ Stores network properties to restore network after + network creation and flushes internet interface + """ + + self._ethmac = subprocess.check_output( + f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() + self._gateway = subprocess.check_output( + "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() + self._ipv4 = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._ipv6 = subprocess.check_output( + f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._brd = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() + + def _ci_post_network_create(self): + """ Restore network connection in CI environment """ + LOGGER.info("post cr") + util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") + util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") + util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") + util.run_command(f"ip addr flush dev {self._int_intf}") + util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") + util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") + util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") + util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") + util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") + util.run_command(f"dhclient {INTERNET_BRIDGE}") + util.run_command(f"ip route del default via 10.1.0.1") + util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool( + subnet='100.100.0.0/16', + iprange='100.100.100.0/24' + ) + + ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool] + ) + + client.networks.create( + PRIVATE_DOCKER_NET, + ipam=ipam_config, + internal=True, + check_duplicate=True, + driver="macvlan" + ) + + def create_net(self): + LOGGER.info("Creating baseline network") + + if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): + LOGGER.error("Configured interfaces are not ready for use. " + + "Ensure both interfaces are connected.") + sys.exit(1) + + if self._single_intf: + self._ci_pre_network_create() + + # Create data plane + util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) + + # Create control plane + util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + self._dev_intf) + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + + " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") + + # Remove IP from internet adapter + util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") + + # Set ports up + util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") + util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") + + if self._single_intf: + self._ci_post_network_create() + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) + + loaded_modules = "Loaded the following network modules: " + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + net_module = NetworkModule() + + # Load basic module information + + net_module_json = json.load(open(os.path.join( + self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join( + self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + ".Dockerfile" + net_module.container_name = "tr-ct-" + net_module.dir_name + net_module.image_name = "test-run/" + net_module.dir_name + + # Attach folder mounts to network module + if "docker" in net_module_json['config']: + if "mounts" in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append(Mount( + target=mount_point['target'], + source=os.path.join( + os.getcwd(), mount_point['source']), + type='bind' + )) + + # Determine if this is a container or just an image/template + if "enable_container" in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker']['enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] + + net_module.net_config.host = False if not "host" in net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + loaded_modules += net_module.dir_name + " " + + self._net_modules.append(net_module) + + LOGGER.info(loaded_modules) + + def build_network_modules(self): + LOGGER.info("Building network modules...") + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug("Building network module " + net_module.dir_name) + client = docker.from_env() + client.images.build( + dockerfile=os.path.join(net_module.dir, net_module.build_file), + path=self._path, + forcerm=True, + tag="test-run/" + net_module.dir_name + ) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name == net_module.display_name: + return net_module + return None + + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module("OVS")) + + def _start_network_service(self, net_module): + + LOGGER.debug("Starting net service " + net_module.display_name) + network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + detach=True, + mounts=net_module.mounts, + environment={"HOST_USER": getpass.getuser()} + ) + except docker.errors.ContainerError as error: + LOGGER.error("Container run error") + LOGGER.error(error) + + if network != "host": + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug("Stopping Service container " + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + net_module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + net_module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + net_module.container_name) + except Exception as error: + LOGGER.error("Container stop error") + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug("Resolving service container: " + + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + net_module.container_name + " not found") + except Exception as e: + LOGGER.error("Failed to resolve container") + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info("Stopping network services") + for net_module in self._net_modules: + # Network modules may just be Docker images, so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info("Starting network services") + + os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if "OVS" != net_module.display_name: + + # Network modules may just be Docker images, so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info("All network services are running") + self._check_network_services() + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug("Attaching net service " + + net_module.display_name + " to device bridge") + + # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Container network namespace name + container_net_ns = "tr-ctns-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) + + # Set IP address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + if net_module.net_config.enable_wan: + LOGGER.debug("Attaching net service " + + net_module.display_name + " to internet bridge") + + # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Attach bridge interface to internet bridge + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + bridge_intf) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to eth1 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name eth1") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + + container_net_ns + " ip link set dev eth1 up") + + def restore_net(self): + + LOGGER.info("Clearing baseline network") + + if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() + + client = docker.from_env() + + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get( + "tr-ct-" + net_module.dir_name) + container.kill() + except Exception: + continue + + # Delete data plane + util.run_command("ovs-vsctl --if-exists del-br tr-d") + + # Delete control plane + util.run_command("ovs-vsctl --if-exists del-br tr-c") + + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command("ip link set " + self._int_intf + " down") + util.run_command("ip link set " + self._int_intf + " up") + + LOGGER.info("Network is restored") + + +class NetworkModule: + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() + +# The networking configuration for a network module + + +class NetworkModuleNetConfig: + + def __init__(self): + + self.enable_wan = False + + self.ip_index = 0 + self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None + + self.host = False + + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) + +# Represents the current configuration of the network for the device bridge + +class NetworkConfig: + + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') \ No newline at end of file diff --git a/net_orc/python/src/network_runner.py b/net_orc/python/src/network_runner.py index 3fe9e8a41..0b7573fb3 100644 --- a/net_orc/python/src/network_runner.py +++ b/net_orc/python/src/network_runner.py @@ -11,58 +11,59 @@ import argparse import signal import sys -import time - import logger - from network_orchestrator import NetworkOrchestrator -LOGGER = logger.get_logger('net_runner') +LOGGER = logger.get_logger("net_runner") class NetworkRunner: - def __init__(self, config_file=None, validate=True, async_monitor=False): - self._monitor_thread = None - self._register_exits() - self.net_orc = NetworkOrchestrator(config_file=config_file,validate=validate,async_monitor=async_monitor) + """Entry point to the Network Orchestrator.""" + + def __init__(self, config_file=None, validate=True, async_monitor=False): + self._monitor_thread = None + self._register_exits() + self.net_orc = NetworkOrchestrator(config_file=config_file, + validate=validate, + async_monitor=async_monitor) - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - # Kill all container services quickly - # If we're here, we want everything to stop immediately - # and don't care about a gracefully shutdown - self.stop(True) - sys.exit(1) + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + # Kill all container services quickly + # If we're here, we want everything to stop immediately + # and don't care about a graceful shutdown + self.stop(True) + sys.exit(1) - def stop(self, kill=False): - self.net_orc.stop(kill) + def stop(self, kill=False): + self.net_orc.stop(kill) - def start(self): - self.net_orc.start() + def start(self): + self.net_orc.start() -def parse_args(argv): - parser = argparse.ArgumentParser(description="Test Run Help", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--no-validate", action="store_true", - help="Turn off the validation of the network after network boot") - parser.add_argument("-f", "--config-file", default=None, - help="Define the configuration file for the Network Orchestrator") - parser.add_argument("-d", "--daemon", action="store_true", - help="Run the network monitor process in the background as a daemon thread") +def parse_args(): + parser = argparse.ArgumentParser(description="Test Run Help", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("--no-validate", action="store_true", + help="Turn off the validation of the network after network boot") + parser.add_argument("-f", "--config-file", default=None, + help="Define the configuration file for the Network Orchestrator") + parser.add_argument("-d", "--daemon", action="store_true", + help="Run the network monitor process in the background as a daemon thread") - args, unknown = parser.parse_known_args() - return args + args = parser.parse_known_args()[0] + return args if __name__ == "__main__": - args=parse_args(sys.argv) - runner = NetworkRunner(config_file=args.config_file, - validate=not args.no_validate, - async_monitor=args.daemon) - runner.start() \ No newline at end of file + arguments = parse_args() + runner = NetworkRunner(config_file=arguments.config_file, + validate=not arguments.no_validate, + async_monitor=arguments.daemon) + runner.start() diff --git a/net_orc/python/src/util.py b/net_orc/python/src/util.py index a5cfe205f..e4a4bd5fd 100644 --- a/net_orc/python/src/util.py +++ b/net_orc/python/src/util.py @@ -1,30 +1,37 @@ +"""Provides basic utilities for the network orchestrator.""" import subprocess import shlex import logger import netifaces +LOGGER = logger.get_logger("util") -# Runs a process at the os level -# By default, returns the standard output and error output -# If the caller sets optional output parameter to False, -# will only return a boolean result indicating if it was -# succesful in running the command. Failure is indicated -# by any return code from the process other than zero. def run_command(cmd, output=True): - success = False - LOGGER = logger.get_logger('util') - process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - if process.returncode !=0 and output: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - LOGGER.error("Command Failed: " + cmd) - LOGGER.error("Error: " + err_msg) - else: - success = True - if output: - return stdout.strip().decode('utf-8'), stderr - else: - return success + """Runs a process at the os level + By default, returns the standard output and error output + If the caller sets optional output parameter to False, + will only return a boolean result indicating if it was + succesful in running the command. Failure is indicated + by any return code from the process other than zero.""" + + success = False + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode !=0 and output: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + LOGGER.error("Command Failed: " + cmd) + LOGGER.error("Error: " + err_msg) + else: + success = True + if output: + return stdout.strip().decode("utf-8"), stderr + else: + return success def interface_exists(interface): - return interface in netifaces.interfaces() \ No newline at end of file + return interface in netifaces.interfaces() + +def prettify(mac_string): + return ':'.join('%02x' % ord(b) for b in mac_string) diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index ee5cc5b45..f9f906af5 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -44,7 +44,7 @@ def stop(self): def run_test_modules(self, device): """Iterates through each test module and starts the container.""" - LOGGER.info("Running test modules...") + LOGGER.info(f"Running test modules on device with mac addr {device.mac_addr}") for module in self._test_modules: self._run_test_module(module, device) LOGGER.info("All tests complete") From be829a3457b37484563441a064af989eb99d65fe Mon Sep 17 00:00:00 2001 From: J Boddey Date: Tue, 16 May 2023 20:13:45 +0100 Subject: [PATCH 11/48] Build dependencies first (#21) * Build dependencies first * Remove debug message * Add depend on option to test modules * Re-add single interface option * Import subprocess --------- Co-authored-by: jhughesbiot --- .../modules/dhcp-1/conf/module_config.json | 1 + .../modules/dhcp-2/conf/module_config.json | 1 + .../modules/dns/conf/module_config.json | 1 + .../modules/gateway/conf/module_config.json | 1 + .../modules/ntp/conf/module_config.json | 1 + .../modules/ovs/conf/module_config.json | 1 + .../modules/radius/conf/module_config.json | 1 + .../modules/template/conf/module_config.json | 1 + net_orc/python/src/network_orchestrator.py | 1307 ++++++++--------- .../modules/baseline/conf/module_config.json | 1 + test_orc/modules/dns/conf/module_config.json | 1 + test_orc/python/src/test_orchestrator.py | 89 +- 12 files changed, 682 insertions(+), 724 deletions(-) diff --git a/net_orc/network/modules/dhcp-1/conf/module_config.json b/net_orc/network/modules/dhcp-1/conf/module_config.json index 56d9aa271..4a41eee3f 100644 --- a/net_orc/network/modules/dhcp-1/conf/module_config.json +++ b/net_orc/network/modules/dhcp-1/conf/module_config.json @@ -14,6 +14,7 @@ "port": 5001 }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/dhcp-2/conf/module_config.json b/net_orc/network/modules/dhcp-2/conf/module_config.json index 2a978ca8c..bd719604d 100644 --- a/net_orc/network/modules/dhcp-2/conf/module_config.json +++ b/net_orc/network/modules/dhcp-2/conf/module_config.json @@ -14,6 +14,7 @@ "port": 5001 }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/dns/conf/module_config.json b/net_orc/network/modules/dns/conf/module_config.json index 73f890d28..cad1c02ef 100644 --- a/net_orc/network/modules/dns/conf/module_config.json +++ b/net_orc/network/modules/dns/conf/module_config.json @@ -11,6 +11,7 @@ "ip_index": 4 }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/gateway/conf/module_config.json b/net_orc/network/modules/gateway/conf/module_config.json index 35bd34392..5b39339ce 100644 --- a/net_orc/network/modules/gateway/conf/module_config.json +++ b/net_orc/network/modules/gateway/conf/module_config.json @@ -11,6 +11,7 @@ "ip_index": 1 }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/ntp/conf/module_config.json b/net_orc/network/modules/ntp/conf/module_config.json index 781521263..e3dbdc8f1 100644 --- a/net_orc/network/modules/ntp/conf/module_config.json +++ b/net_orc/network/modules/ntp/conf/module_config.json @@ -11,6 +11,7 @@ "ip_index": 5 }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/ovs/conf/module_config.json b/net_orc/network/modules/ovs/conf/module_config.json index f6a1eff50..8a440d0ae 100644 --- a/net_orc/network/modules/ovs/conf/module_config.json +++ b/net_orc/network/modules/ovs/conf/module_config.json @@ -12,6 +12,7 @@ "host": true }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/radius/conf/module_config.json b/net_orc/network/modules/radius/conf/module_config.json index 153d951df..ce8fbd52f 100644 --- a/net_orc/network/modules/radius/conf/module_config.json +++ b/net_orc/network/modules/radius/conf/module_config.json @@ -11,6 +11,7 @@ "ip_index": 7 }, "docker": { + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/network/modules/template/conf/module_config.json b/net_orc/network/modules/template/conf/module_config.json index bcea3808e..c767c9ad6 100644 --- a/net_orc/network/modules/template/conf/module_config.json +++ b/net_orc/network/modules/template/conf/module_config.json @@ -15,6 +15,7 @@ }, "docker": { "enable_container": false, + "depends_on": "base", "mounts": [ { "source": "runtime/network", diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 690e974c2..6930f22be 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,687 +1,620 @@ -#!/usr/bin/env python3 - -import binascii -import getpass -import ipaddress -import json -import os -from scapy.all import BOOTP -import subprocess -import sys -import time -import threading -from threading import Timer - -import docker -from docker.types import Mount - -import logger -import util -from listener import Listener -from network_device import NetworkDevice -from network_event import NetworkEvent -from network_validator import NetworkValidator - -LOGGER = logger.get_logger("net_orc") -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME_DIR = "runtime/network" -NETWORK_MODULES_DIR = "network/modules" -NETWORK_MODULE_METADATA = "conf/module_config.json" -DEVICE_BRIDGE = "tr-d" -INTERNET_BRIDGE = "tr-c" -PRIVATE_DOCKER_NET = "tr-private-net" -CONTAINER_NAME = "network_orchestrator" - -RUNTIME_KEY = "runtime" -MONITOR_PERIOD_KEY = "monitor_period" -STARTUP_TIMEOUT_KEY = "startup_timeout" -DEFAULT_STARTUP_TIMEOUT = 60 -DEFAULT_RUNTIME = 1200 -DEFAULT_MONITOR_PERIOD = 300 - -RUNTIME = 1500 - - -class NetworkOrchestrator: - """Manage and controls a virtual testing network.""" - - def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): - - self._runtime = DEFAULT_RUNTIME - self._startup_timeout = DEFAULT_STARTUP_TIMEOUT - self._monitor_period = DEFAULT_MONITOR_PERIOD - - self._int_intf = None - self._dev_intf = None - self._single_intf = single_intf - - self.listener = None - - self._net_modules = [] - - self.validate = validate - - self.async_monitor = async_monitor - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - self.validator = NetworkValidator() - - self.network_config = NetworkConfig() - - self.load_config(config_file) - - def start(self): - """Start the network orchestrator.""" - - LOGGER.info("Starting Network Orchestrator") - # Get all components ready - self.load_network_modules() - - # Restore the network first if required - self.stop(kill=True) - - self.start_network() - - if self.async_monitor: - # Run the monitor method asynchronously to keep this method non-blocking - self._monitor_thread = threading.Thread( - target=self.monitor_network) - self._monitor_thread.daemon = True - self._monitor_thread.start() - else: - self.monitor_network() - - def start_network(self): - """Start the virtual testing network.""" - LOGGER.info("Starting network") - - self.build_network_modules() - self.create_net() - self.start_network_services() - - if self.validate: - # Start the validator after network is ready - self.validator.start() - - # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready.") - - # Start the listener - self.listener = Listener(self._dev_intf) - self.listener.start_listener() - - def stop(self, kill=False): - """Stop the network orchestrator.""" - self.stop_validator(kill=kill) - self.stop_network(kill=kill) - - def stop_validator(self, kill=False): - """Stop the network validator.""" - # Shutdown the validator - self.validator.stop(kill=kill) - - def stop_network(self, kill=False): - """Stop the virtual testing network.""" - # Shutdown network - self.stop_networking_services(kill=kill) - self.restore_net() - - def monitor_network(self): - self.listener.register_callback(self._device_discovered, [ - NetworkEvent.DEVICE_DISCOVERED]) - self.listener.register_callback( - self._dhcp_lease_ack, [NetworkEvent.DHCP_LEASE_ACK]) - # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) - time.sleep(self._runtime) - - self.stop() - - def _device_discovered(self, mac_addr): - - LOGGER.debug(f'Discovered device {mac_addr}. Waiting for device to obtain IP') - device = self._get_device(mac_addr=mac_addr) - - timeout = time.time() + self._startup_timeout - - while time.time() < timeout: - if device.ip_addr is None: - time.sleep(3) - else: - break - - if device.ip_addr is None: - LOGGER.info(f"Timed out whilst waiting for {mac_addr} to obtain an IP address") - return - - LOGGER.info(f"Device with mac addr {device.mac_addr} has obtained IP address {device.ip_addr}") - - self._start_device_monitor(device) - - def _dhcp_lease_ack(self, packet): - mac_addr = packet[BOOTP].chaddr.hex(":")[0:17] - device = self._get_device(mac_addr=mac_addr) - device.ip_addr = packet[BOOTP].yiaddr - - def _start_device_monitor(self, device): - """Start a timer until the steady state has been reached and - callback the steady state method for this device.""" - LOGGER.info(f"Monitoring device with mac addr {device.mac_addr} for {str(self._monitor_period)} seconds") - timer = Timer(self._monitor_period, - self.listener.call_callback, - args=(NetworkEvent.DEVICE_STABLE, device.mac_addr,)) - timer.start() - - def _get_device(self, mac_addr): - for device in self._devices: - if device.mac_addr == mac_addr: - return device - device = NetworkDevice(mac_addr=mac_addr) - self._devices.append(device) - return device - - def load_config(self, config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - self._config_file = os.path.join(self._path, CONFIG_FILE) - else: - # If defined, use as provided - self._config_file = config_file - - if not os.path.isfile(self._config_file): - LOGGER.error("Configuration file is not present at " + config_file) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - LOGGER.info("Loading config file: " + - os.path.abspath(self._config_file)) - with open(self._config_file, encoding='UTF-8') as config_json_file: - config_json = json.load(config_json_file) - self.import_config(config_json) - - def import_config(self, json_config): - self._int_intf = json_config['network']['internet_intf'] - self._dev_intf = json_config['network']['device_intf'] - if RUNTIME_KEY in json_config: - self._runtime = json_config[RUNTIME_KEY] - if STARTUP_TIMEOUT_KEY in json_config: - self._startup_timeout = json_config[STARTUP_TIMEOUT_KEY] - if MONITOR_PERIOD_KEY in json_config: - self._monitor_period = json_config[MONITOR_PERIOD_KEY] - - def _check_network_services(self): - LOGGER.debug("Checking network modules...") - for net_module in self._net_modules: - if net_module.enable_container: - LOGGER.debug("Checking network module: " + - net_module.display_name) - success = self._ping(net_module) - if success: - LOGGER.debug(net_module.display_name + - " responded succesfully: " + str(success)) - else: - LOGGER.error(net_module.display_name + - " failed to respond to ping") - - def _ping(self, net_module): - host = net_module.net_config.ipv4_address - namespace = "tr-ctns-" + net_module.dir_name - cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) - success = util.run_command(cmd, output=False) - return success - - def _ci_pre_network_create(self): - """ Stores network properties to restore network after - network creation and flushes internet interface - """ - - self._ethmac = subprocess.check_output( - f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() - self._gateway = subprocess.check_output( - "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() - self._ipv4 = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._ipv6 = subprocess.check_output( - f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._brd = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() - - def _ci_post_network_create(self): - """ Restore network connection in CI environment """ - LOGGER.info("post cr") - util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") - util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") - util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") - util.run_command(f"ip addr flush dev {self._int_intf}") - util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") - util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") - util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") - util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") - util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") - util.run_command(f"dhclient {INTERNET_BRIDGE}") - util.run_command(f"ip route del default via 10.1.0.1") - util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") - - def _create_private_net(self): - client = docker.from_env() - try: - network = client.networks.get(PRIVATE_DOCKER_NET) - network.remove() - except docker.errors.NotFound: - pass - - # TODO: These should be made into variables - ipam_pool = docker.types.IPAMPool( - subnet='100.100.0.0/16', - iprange='100.100.100.0/24' - ) - - ipam_config = docker.types.IPAMConfig( - pool_configs=[ipam_pool] - ) - - client.networks.create( - PRIVATE_DOCKER_NET, - ipam=ipam_config, - internal=True, - check_duplicate=True, - driver="macvlan" - ) - - def create_net(self): - LOGGER.info("Creating baseline network") - - if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): - LOGGER.error("Configured interfaces are not ready for use. " + - "Ensure both interfaces are connected.") - sys.exit(1) - - if self._single_intf: - self._ci_pre_network_create() - - # Create data plane - util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) - - # Create control plane - util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) - - # Add external interfaces to data and control plane - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + self._dev_intf) - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + self._int_intf) - - # Enable forwarding of eapol packets - util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + - " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") - - # Remove IP from internet adapter - util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") - - # Set ports up - util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") - util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") - - if self._single_intf: - self._ci_post_network_create() - - self._create_private_net() - - self.listener = Listener(self._dev_intf) - self.listener.start_listener() - - def load_network_modules(self): - """Load network modules from module_config.json.""" - LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) - - loaded_modules = "Loaded the following network modules: " - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - for module_dir in os.listdir(net_modules_dir): - - net_module = NetworkModule() - - # Load basic module information - - net_module_json = json.load(open(os.path.join( - self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) - - net_module.name = net_module_json['config']['meta']['name'] - net_module.display_name = net_module_json['config']['meta']['display_name'] - net_module.description = net_module_json['config']['meta']['description'] - net_module.dir = os.path.join( - self._path, net_modules_dir, module_dir) - net_module.dir_name = module_dir - net_module.build_file = module_dir + ".Dockerfile" - net_module.container_name = "tr-ct-" + net_module.dir_name - net_module.image_name = "test-run/" + net_module.dir_name - - # Attach folder mounts to network module - if "docker" in net_module_json['config']: - if "mounts" in net_module_json['config']['docker']: - for mount_point in net_module_json['config']['docker']['mounts']: - net_module.mounts.append(Mount( - target=mount_point['target'], - source=os.path.join( - os.getcwd(), mount_point['source']), - type='bind' - )) - - # Determine if this is a container or just an image/template - if "enable_container" in net_module_json['config']['docker']: - net_module.enable_container = net_module_json['config']['docker']['enable_container'] - - # Load network service networking configuration - if net_module.enable_container: - - net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] - net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] - - net_module.net_config.host = False if not "host" in net_module_json[ - 'config']['network'] else net_module_json['config']['network']['host'] - - net_module.net_config.ipv4_address = self.network_config.ipv4_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv4_network = self.network_config.ipv4_network - - net_module.net_config.ipv6_address = self.network_config.ipv6_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv6_network = self.network_config.ipv6_network - - loaded_modules += net_module.dir_name + " " - - self._net_modules.append(net_module) - - LOGGER.info(loaded_modules) - - def build_network_modules(self): - LOGGER.info("Building network modules...") - for net_module in self._net_modules: - self._build_module(net_module) - - def _build_module(self, net_module): - LOGGER.debug("Building network module " + net_module.dir_name) - client = docker.from_env() - client.images.build( - dockerfile=os.path.join(net_module.dir, net_module.build_file), - path=self._path, - forcerm=True, - tag="test-run/" + net_module.dir_name - ) - - def _get_network_module(self, name): - for net_module in self._net_modules: - if name == net_module.display_name: - return net_module - return None - - # Start the OVS network module - # This should always be called before loading all - # other modules to allow for a properly setup base - # network - def _start_ovs_module(self): - self._start_network_service(self._get_network_module("OVS")) - - def _start_network_service(self, net_module): - - LOGGER.debug("Starting net service " + net_module.display_name) - network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET - LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, - container name: {net_module.container_name}""") - try: - client = docker.from_env() - net_module.container = client.containers.run( - net_module.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=net_module.container_name, - hostname=net_module.container_name, - network=PRIVATE_DOCKER_NET, - privileged=True, - detach=True, - mounts=net_module.mounts, - environment={"HOST_USER": getpass.getuser()} - ) - except docker.errors.ContainerError as error: - LOGGER.error("Container run error") - LOGGER.error(error) - - if network != "host": - self._attach_service_to_network(net_module) - - def _stop_service_module(self, net_module, kill=False): - LOGGER.debug("Stopping Service container " + net_module.container_name) - try: - container = self._get_service_container(net_module) - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - net_module.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - net_module.container_name) - container.stop() - LOGGER.debug("Container stopped:" + net_module.container_name) - except Exception as error: - LOGGER.error("Container stop error") - LOGGER.error(error) - - def _get_service_container(self, net_module): - LOGGER.debug("Resolving service container: " + - net_module.container_name) - container = None - try: - client = docker.from_env() - container = client.containers.get(net_module.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - net_module.container_name + " not found") - except Exception as e: - LOGGER.error("Failed to resolve container") - LOGGER.error(e) - return container - - def stop_networking_services(self, kill=False): - LOGGER.info("Stopping network services") - for net_module in self._net_modules: - # Network modules may just be Docker images, so we do not want to stop them - if not net_module.enable_container: - continue - self._stop_service_module(net_module, kill) - - def start_network_services(self): - LOGGER.info("Starting network services") - - os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) - - for net_module in self._net_modules: - - # TODO: There should be a better way of doing this - # Do not try starting OVS module again, as it should already be running - if "OVS" != net_module.display_name: - - # Network modules may just be Docker images, so we do not want to start them as containers - if not net_module.enable_container: - continue - - self._start_network_service(net_module) - - LOGGER.info("All network services are running") - self._check_network_services() - - # TODO: Let's move this into a separate script? It does not look great - def _attach_service_to_network(self, net_module): - LOGGER.debug("Attaching net service " + - net_module.display_name + " to device bridge") - - # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) - bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Container network namespace name - container_net_ns = "tr-ctns-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) - - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] - - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) - - # Set IP address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") - - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") - - if net_module.net_config.enable_wan: - LOGGER.debug("Attaching net service " + - net_module.display_name + " to internet bridge") - - # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) - bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Attach bridge interface to internet bridge - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + bridge_intf) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to eth1 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name eth1") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + - container_net_ns + " ip link set dev eth1 up") - - def restore_net(self): - - LOGGER.info("Clearing baseline network") - - if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): - self.listener.stop_listener() - - client = docker.from_env() - - # Stop all network containers if still running - for net_module in self._net_modules: - try: - container = client.containers.get( - "tr-ct-" + net_module.dir_name) - container.kill() - except Exception: - continue - - # Delete data plane - util.run_command("ovs-vsctl --if-exists del-br tr-d") - - # Delete control plane - util.run_command("ovs-vsctl --if-exists del-br tr-c") - - # Restart internet interface - if util.interface_exists(self._int_intf): - util.run_command("ip link set " + self._int_intf + " down") - util.run_command("ip link set " + self._int_intf + " up") - - LOGGER.info("Network is restored") - - -class NetworkModule: - - def __init__(self): - self.name = None - self.display_name = None - self.description = None - - self.container = None - self.container_name = None - self.image_name = None - - # Absolute path - self.dir = None - self.dir_name = None - self.build_file = None - self.mounts = [] - - self.enable_container = True - - self.net_config = NetworkModuleNetConfig() - -# The networking configuration for a network module - - -class NetworkModuleNetConfig: - - def __init__(self): - - self.enable_wan = False - - self.ip_index = 0 - self.ipv4_address = None - self.ipv4_network = None - self.ipv6_address = None - self.ipv6_network = None - - self.host = False - - def get_ipv4_addr_with_prefix(self): - return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) - - def get_ipv6_addr_with_prefix(self): - return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) - -# Represents the current configuration of the network for the device bridge - -class NetworkConfig: - - # TODO: Let's get this from a configuration file - def __init__(self): - self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') - self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') \ No newline at end of file +#!/usr/bin/env python3 + +import getpass +import ipaddress +import json +import os +import subprocess +import sys +import time +import threading + +import docker +from docker.types import Mount + +import logger +import util +from listener import Listener +from network_validator import NetworkValidator + +LOGGER = logger.get_logger("net_orc") +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" +RUNTIME_DIR = "runtime/network" +NETWORK_MODULES_DIR = "network/modules" +NETWORK_MODULE_METADATA = "conf/module_config.json" +DEVICE_BRIDGE = "tr-d" +INTERNET_BRIDGE = "tr-c" +PRIVATE_DOCKER_NET = "tr-private-net" +CONTAINER_NAME = "network_orchestrator" +RUNTIME = 300 + + +class NetworkOrchestrator: + """Manage and controls a virtual testing network.""" + + def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): + self._int_intf = None + self._dev_intf = None + self._single_intf = single_intf + self.listener = None + + self._net_modules = [] + + self.validate = validate + + self.async_monitor = async_monitor + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + self.validator = NetworkValidator() + + self.network_config = NetworkConfig() + + self.load_config(config_file) + + def start(self): + """Start the network orchestrator.""" + + LOGGER.info("Starting Network Orchestrator") + # Get all components ready + self.load_network_modules() + + # Restore the network first if required + self.stop(kill=True) + + self.start_network() + + if self.async_monitor: + # Run the monitor method asynchronously to keep this method non-blocking + self._monitor_thread = threading.Thread( + target=self.monitor_network) + self._monitor_thread.daemon = True + self._monitor_thread.start() + else: + self.monitor_network() + + def start_network(self): + """Start the virtual testing network.""" + LOGGER.info("Starting network") + + self.build_network_modules() + self.create_net() + self.start_network_services() + + if self.validate: + # Start the validator after network is ready + self.validator.start() + + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready.") + + def stop(self, kill=False): + """Stop the network orchestrator.""" + self.stop_validator(kill=kill) + self.stop_network(kill=kill) + + def stop_validator(self, kill=False): + """Stop the network validator.""" + # Shutdown the validator + self.validator.stop(kill=kill) + + def stop_network(self, kill=False): + """Stop the virtual testing network.""" + # Shutdown network + self.stop_networking_services(kill=kill) + self.restore_net() + + def monitor_network(self): + # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) + time.sleep(RUNTIME) + + self.stop() + + def load_config(self,config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file=os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file=config_file + + if not os.path.isfile(self._config_file): + LOGGER.error("Configuration file is not present at " + config_file) + LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + + def _check_network_services(self): + LOGGER.debug("Checking network modules...") + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug("Checking network module: " + + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + + " responded succesfully: " + str(success)) + else: + LOGGER.error(net_module.display_name + + " failed to respond to ping") + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = "tr-ctns-" + net_module.dir_name + cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) + success = util.run_command(cmd, output=False) + return success + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool( + subnet='100.100.0.0/16', + iprange='100.100.100.0/24' + ) + + ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool] + ) + + client.networks.create( + PRIVATE_DOCKER_NET, + ipam=ipam_config, + internal=True, + check_duplicate=True, + driver="macvlan" + ) + + def _ci_pre_network_create(self): + """ Stores network properties to restore network after + network creation and flushes internet interface + """ + + self._ethmac = subprocess.check_output( + f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() + self._gateway = subprocess.check_output( + "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() + self._ipv4 = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._ipv6 = subprocess.check_output( + f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._brd = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() + + def _ci_post_network_create(self): + """ Restore network connection in CI environment """ + LOGGER.info("post cr") + util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") + util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") + util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") + util.run_command(f"ip addr flush dev {self._int_intf}") + util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") + util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") + util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") + util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") + util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") + util.run_command(f"dhclient {INTERNET_BRIDGE}") + util.run_command(f"ip route del default via 10.1.0.1") + util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") + + def create_net(self): + LOGGER.info("Creating baseline network") + + if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): + LOGGER.error("Configured interfaces are not ready for use. " + + "Ensure both interfaces are connected.") + sys.exit(1) + + if self._single_intf: + self._ci_pre_network_create() + + # Create data plane + util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) + + # Create control plane + util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + self._dev_intf) + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + + " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") + + # Remove IP from internet adapter + util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") + + # Set ports up + util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") + util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") + + if self._single_intf: + self._ci_post_network_create() + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) + + loaded_modules = "Loaded the following network modules: " + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + if self._get_network_module(module_dir) is None: + loaded_module = self._load_network_module(module_dir) + loaded_modules += loaded_module.dir_name + " " + + LOGGER.info(loaded_modules) + + def _load_network_module(self, module_dir): + + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + net_module = NetworkModule() + + # Load basic module information + net_module_json = json.load(open(os.path.join( + self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join( + self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + ".Dockerfile" + net_module.container_name = "tr-ct-" + net_module.dir_name + net_module.image_name = "test-run/" + net_module.dir_name + + # Attach folder mounts to network module + if "docker" in net_module_json['config']: + + if "mounts" in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append(Mount( + target=mount_point['target'], + source=os.path.join( + os.getcwd(), mount_point['source']), + type='bind' + )) + + if "depends_on" in net_module_json['config']['docker']: + depends_on_module = net_module_json['config']['docker']['depends_on'] + if self._get_network_module(depends_on_module) is None: + self._load_network_module(depends_on_module) + + # Determine if this is a container or just an image/template + if "enable_container" in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker']['enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] + + net_module.net_config.host = False if not "host" in net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + self._net_modules.append(net_module) + return net_module + + def build_network_modules(self): + LOGGER.info("Building network modules...") + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug("Building network module " + net_module.dir_name) + client = docker.from_env() + client.images.build( + dockerfile=os.path.join(net_module.dir, net_module.build_file), + path=self._path, + forcerm=True, + tag="test-run/" + net_module.dir_name + ) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name == net_module.display_name or name == net_module.name or name == net_module.dir_name: + return net_module + return None + + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module("OVS")) + + def _start_network_service(self, net_module): + + LOGGER.debug("Starting net service " + net_module.display_name) + network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + detach=True, + mounts=net_module.mounts, + environment={"HOST_USER": getpass.getuser()} + ) + except docker.errors.ContainerError as error: + LOGGER.error("Container run error") + LOGGER.error(error) + + if network != "host": + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug("Stopping Service container " + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + net_module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + net_module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + net_module.container_name) + except Exception as error: + LOGGER.error("Container stop error") + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug("Resolving service container: " + + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + net_module.container_name + " not found") + except Exception as e: + LOGGER.error("Failed to resolve container") + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info("Stopping network services") + for net_module in self._net_modules: + # Network modules may just be Docker images, so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info("Starting network services") + + os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if "OVS" != net_module.display_name: + + # Network modules may just be Docker images, so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info("All network services are running") + self._check_network_services() + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug("Attaching net service " + + net_module.display_name + " to device bridge") + + # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Container network namespace name + container_net_ns = "tr-ctns-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) + + # Set IP address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + if net_module.net_config.enable_wan: + LOGGER.debug("Attaching net service " + + net_module.display_name + " to internet bridge") + + # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Attach bridge interface to internet bridge + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + bridge_intf) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to eth1 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name eth1") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + + container_net_ns + " ip link set dev eth1 up") + + def restore_net(self): + + LOGGER.info("Clearing baseline network") + + if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() + + client = docker.from_env() + + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get( + "tr-ct-" + net_module.dir_name) + container.kill() + except Exception: + continue + + # Delete data plane + util.run_command("ovs-vsctl --if-exists del-br tr-d") + + # Delete control plane + util.run_command("ovs-vsctl --if-exists del-br tr-c") + + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command("ip link set " + self._int_intf + " down") + util.run_command("ip link set " + self._int_intf + " up") + + LOGGER.info("Network is restored") + +class NetworkModule: + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() + +# The networking configuration for a network module + +class NetworkModuleNetConfig: + + def __init__(self): + + self.enable_wan = False + + self.ip_index = 0 + self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None + + self.host = False + + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) + +# Represents the current configuration of the network for the device bridge + +class NetworkConfig: + + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') diff --git a/test_orc/modules/baseline/conf/module_config.json b/test_orc/modules/baseline/conf/module_config.json index ba337267a..4c0cd08d8 100644 --- a/test_orc/modules/baseline/conf/module_config.json +++ b/test_orc/modules/baseline/conf/module_config.json @@ -7,6 +7,7 @@ }, "network": false, "docker": { + "depends_on": "base", "enable_container": true, "timeout": 30 }, diff --git a/test_orc/modules/dns/conf/module_config.json b/test_orc/modules/dns/conf/module_config.json index d21f6bca6..b8ff36c97 100644 --- a/test_orc/modules/dns/conf/module_config.json +++ b/test_orc/modules/dns/conf/module_config.json @@ -7,6 +7,7 @@ }, "network": false, "docker": { + "depends_on": "base", "enable_container": true, "timeout": 30 }, diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index f9f906af5..c257cd901 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -114,6 +114,12 @@ def _get_module_status(self, module): return container.status return None + def _get_test_module(self, name): + for test_module in self._test_modules: + if name == test_module.display_name or name == test_module.name or name == test_module.dir_name: + return test_module + return None + def _get_module_container(self, module): container = None try: @@ -128,49 +134,58 @@ def _get_module_container(self, module): return container def _load_test_modules(self): - """Import module configuration from module_config.json.""" - - modules_dir = os.path.join(self._path, TEST_MODULES_DIR) - - LOGGER.debug("Loading test modules from /" + modules_dir) + """Load network modules from module_config.json.""" + LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR) + loaded_modules = "Loaded the following test modules: " + test_modules_dir = os.path.join(self._path, TEST_MODULES_DIR) + + for module_dir in os.listdir(test_modules_dir): - for module_dir in os.listdir(modules_dir): - - LOGGER.debug("Loading module from: " + module_dir) + if self._get_test_module(module_dir) is None: + loaded_module = self._load_test_module(module_dir) + loaded_modules += loaded_module.dir_name + " " - # Load basic module information - module = TestModule() - with open(os.path.join( - self._path, - modules_dir, - module_dir, - MODULE_CONFIG), - encoding='UTF-8') as module_config_file: - module_json = json.load(module_config_file) - - module.name = module_json['config']['meta']['name'] - module.display_name = module_json['config']['meta']['display_name'] - module.description = module_json['config']['meta']['description'] - module.dir = os.path.join(self._path, modules_dir, module_dir) - module.dir_name = module_dir - module.build_file = module_dir + ".Dockerfile" - module.container_name = "tr-ct-" + module.dir_name + "-test" - module.image_name = "test-run/" + module.dir_name + "-test" - - if 'timeout' in module_json['config']['docker']: - module.timeout = module_json['config']['docker']['timeout'] - - # Determine if this is a container or just an image/template - if "enable_container" in module_json['config']['docker']: - module.enable_container = module_json['config']['docker']['enable_container'] + LOGGER.info(loaded_modules) - self._test_modules.append(module) + def _load_test_module(self,module_dir): + """Import module configuration from module_config.json.""" - if module.enable_container: - loaded_modules += module.dir_name + " " + modules_dir = os.path.join(self._path, TEST_MODULES_DIR) - LOGGER.info(loaded_modules) + # Load basic module information + module = TestModule() + with open(os.path.join( + self._path, + modules_dir, + module_dir, + MODULE_CONFIG), + encoding='UTF-8') as module_config_file: + module_json = json.load(module_config_file) + + module.name = module_json['config']['meta']['name'] + module.display_name = module_json['config']['meta']['display_name'] + module.description = module_json['config']['meta']['description'] + module.dir = os.path.join(self._path, modules_dir, module_dir) + module.dir_name = module_dir + module.build_file = module_dir + ".Dockerfile" + module.container_name = "tr-ct-" + module.dir_name + "-test" + module.image_name = "test-run/" + module.dir_name + "-test" + + if 'timeout' in module_json['config']['docker']: + module.timeout = module_json['config']['docker']['timeout'] + + # Determine if this is a container or just an image/template + if "enable_container" in module_json['config']['docker']: + module.enable_container = module_json['config']['docker']['enable_container'] + + if "depends_on" in module_json['config']['docker']: + depends_on_module = module_json['config']['docker']['depends_on'] + if self._get_test_module(depends_on_module) is None: + self._load_test_module(depends_on_module) + + self._test_modules.append(module) + return module def build_test_modules(self): """Build all test modules.""" From 84d9ff992afe59032f1b05f0c054def9d083f028 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Wed, 17 May 2023 02:06:25 -0700 Subject: [PATCH 12/48] Port scan test module (#23) * Add network orchestrator repository * cleanup duplicate start and install scripts * Temporary fix for python dependencies * Remove duplicate python requirements * remove duplicate conf files * remove remote-net option * cleanp unecessary files * Add dns test module Fix test module build process * Add mac address of device under test to test container Update dns test to use mac address filter * Update dns module tests * Change result output * logging update * Update test module for better reusability * Load in module config to test module * logging cleanup * Update baseline module to new template Misc cleanup * Add ability to disable individual tests * remove duplicate readme * Update device directories * Remove local folder * Update device template Update test module to work with new device config file format * Change test module network config options Do not start network services for modules not configured for network * Initial nmap test module add Add device ip resolving to base module Add network mounting for test modules * Update ipv4 device resolving in test modules * Map in ip subnets and remove hard coded references * Add ftp port test * Add ability to pass config for individual tests within a module Update nmap module scan to run tests based on config * Add full module check for compliance * Add all tcp port scans to config * Update nmap commands to match existing DAQ tests Add udp scanning and tests * logging cleanup * Update TCP port scanning range Update logging * Merge device config into module config Update device template * fix merge issues * Update timeouts Add multi-threading for multiple scanns to run simultaneously Add option to use scan scripts for services * Fix merge issues --- cmd/install | 2 + framework/device.py | 26 +- framework/testrun.py | 208 +-- net_orc/python/src/network_orchestrator.py | 1374 +++++++++-------- resources/devices/Template/device_config.json | 115 ++ test_orc/modules/base/base.Dockerfile | 2 +- test_orc/modules/base/bin/get_ipv4_addr | 8 + .../modules/base/python/src/test_module.py | 28 +- test_orc/modules/base/python/src/util.py | 25 + test_orc/modules/nmap/bin/start_test_module | 42 + test_orc/modules/nmap/conf/module_config.json | 176 +++ test_orc/modules/nmap/nmap.Dockerfile | 11 + .../modules/nmap/python/src/nmap_module.py | 227 +++ test_orc/modules/nmap/python/src/run.py | 48 + test_orc/python/src/module.py | 4 + test_orc/python/src/test_orchestrator.py | 12 +- 16 files changed, 1566 insertions(+), 742 deletions(-) create mode 100644 test_orc/modules/base/bin/get_ipv4_addr create mode 100644 test_orc/modules/base/python/src/util.py create mode 100644 test_orc/modules/nmap/bin/start_test_module create mode 100644 test_orc/modules/nmap/conf/module_config.json create mode 100644 test_orc/modules/nmap/nmap.Dockerfile create mode 100644 test_orc/modules/nmap/python/src/nmap_module.py create mode 100644 test_orc/modules/nmap/python/src/run.py diff --git a/cmd/install b/cmd/install index 23e463158..f5af3a5d3 100755 --- a/cmd/install +++ b/cmd/install @@ -4,6 +4,8 @@ python3 -m venv venv source venv/bin/activate +pip3 install --upgrade requests + pip3 install -r framework/requirements.txt pip3 install -r net_orc/python/requirements.txt diff --git a/framework/device.py b/framework/device.py index c17dd8e3a..74d62d495 100644 --- a/framework/device.py +++ b/framework/device.py @@ -1,12 +1,14 @@ -"""Track device object information.""" -from dataclasses import dataclass -from network_device import NetworkDevice - - -@dataclass -class Device(NetworkDevice): - """Represents a physical device and it's configuration.""" - - make: str = None - model: str = None - test_modules: str = None +"""Track device object information.""" + +from network_device import NetworkDevice +from dataclasses import dataclass + + +@dataclass +class Device(NetworkDevice): + """Represents a physical device and it's configuration.""" + + make: str = None + model: str = None + mac_addr: str + test_modules: str = None diff --git a/framework/testrun.py b/framework/testrun.py index b9cb6a0e5..44c3bca6d 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -46,142 +46,142 @@ class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. + """Test Run controller. - Creates an instance of the network orchestrator, test - orchestrator and user interface. - """ + Creates an instance of the network orchestrator, test + orchestrator and user interface. + """ - def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False, single_intf=False): - self._devices = [] - self._net_only = net_only - self._single_intf = single_intf + def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False, single_intf=False): + self._devices = [] + self._net_only = net_only + self._single_intf = single_intf - # Catch any exit signals - self._register_exits() + # Catch any exit signals + self._register_exits() - # Expand the config file to absolute pathing - config_file_abs = self._get_config_abs(config_file=config_file) + # Expand the config file to absolute pathing + config_file_abs = self._get_config_abs(config_file=config_file) - self._net_orc = net_orc.NetworkOrchestrator( - config_file=config_file_abs, - validate=validate, - async_monitor=not self._net_only, - single_intf = self._single_intf) - self._test_orc = test_orc.TestOrchestrator() + self._net_orc = net_orc.NetworkOrchestrator( + config_file=config_file_abs, + validate=validate, + async_monitor=not self._net_only, + single_intf = self._single_intf) + self._test_orc = test_orc.TestOrchestrator(self._net_orc) - def start(self): + def start(self): - self._load_all_devices() + self._load_all_devices() - if self._net_only: - LOGGER.info( - "Network only option configured, no tests will be run") - self._start_network() - else: - self._start_network() - self._test_orc.start() - self._net_orc.listener.register_callback( + if self._net_only: + LOGGER.info("Network only option configured, no tests will be run") + self._start_network() + else: + self._start_network() + self._test_orc.start() + + self._net_orc.listener.register_callback( self._device_stable, [NetworkEvent.DEVICE_STABLE] ) - LOGGER.info("Waiting for devices on the network...") + LOGGER.info("Waiting for devices on the network...") - # Check timeout and whether testing is currently in progress before stopping - time.sleep(RUNTIME) + # Check timeout and whether testing is currently in progress before stopping + time.sleep(RUNTIME) - self.stop() + self.stop() - def stop(self, kill=False): - self._stop_tests() - self._stop_network(kill=kill) + def stop(self, kill=False): + self._stop_tests() + self._stop_network(kill=kill) - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - self.stop(kill=True) - sys.exit(1) + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + self.stop(kill=True) + sys.exit(1) - def _get_config_abs(self, config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - config_file = os.path.join(parent_dir, CONFIG_FILE) + def _get_config_abs(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + config_file = os.path.join(parent_dir, CONFIG_FILE) - # Expand the config file to absolute pathing - return os.path.abspath(config_file) + # Expand the config file to absolute pathing + return os.path.abspath(config_file) - def _start_network(self): - # Load in local device configs to the network orchestrator - self._net_orc._devices = self._devices + def _start_network(self): + # Load in local device configs to the network orchestrator + self._net_orc._devices = self._devices - # Start the network orchestrator - self._net_orc.start() + # Start the network orchestrator + self._net_orc.start() - def _run_tests(self, device): - """Iterate through and start all test modules.""" + def _run_tests(self, device): + """Iterate through and start all test modules.""" - # TODO: Make this configurable - time.sleep(60) # Let device bootup + # To Do: Make this configurable + time.sleep(60) # Let device bootup - self._test_orc.run_test_modules(device) + self._test_orc._run_test_modules(device) - def _stop_network(self, kill=False): - self._net_orc.stop(kill=kill) + def _stop_network(self, kill=False): + self._net_orc.stop(kill=kill) - def _stop_tests(self): - self._test_orc.stop() + def _stop_tests(self): + self._test_orc.stop() - def _load_all_devices(self): - self._load_devices(device_dir=LOCAL_DEVICES_DIR) - LOGGER.info('Loaded ' + str(len(self._devices)) + ' devices') + def _load_all_devices(self): + self._load_devices(device_dir=LOCAL_DEVICES_DIR) + self._load_devices(device_dir=RESOURCE_DEVICES_DIR) - def _load_devices(self, device_dir): - LOGGER.debug('Loading devices from ' + device_dir) + def _load_devices(self, device_dir): + LOGGER.debug('Loading devices from ' + device_dir) - os.makedirs(device_dir, exist_ok=True) + os.makedirs(device_dir, exist_ok=True) - for device_folder in os.listdir(device_dir): - with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), - encoding='utf-8') as device_config_file: - device_config_json = json.load(device_config_file) + for device_folder in os.listdir(device_dir): + with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), + encoding='utf-8') as device_config_file: + device_config_json = json.load(device_config_file) - device_make = device_config_json.get(DEVICE_MAKE) - device_model = device_config_json.get(DEVICE_MODEL) - mac_addr = device_config_json.get(DEVICE_MAC_ADDR) - test_modules = device_config_json.get(DEVICE_TEST_MODULES) + device_make = device_config_json.get(DEVICE_MAKE) + device_model = device_config_json.get(DEVICE_MODEL) + mac_addr = device_config_json.get(DEVICE_MAC_ADDR) + test_modules = device_config_json.get(DEVICE_TEST_MODULES) - device = Device(make=device_make, model=device_model, - mac_addr=mac_addr, test_modules=json.dumps(test_modules)) - self._devices.append(device) - - def get_device(self, mac_addr): - """Returns a loaded device object from the device mac address.""" - for device in self._devices: - if device.mac_addr == mac_addr: - return device - return None - - def _device_discovered(self, mac_addr): - device = self.get_device(mac_addr) - if device is not None: - LOGGER.info( - f'Discovered {device.make} {device.model} on the network') - else: - device = Device(mac_addr=mac_addr) + device = Device(make=device_make, model=device_model, + mac_addr=mac_addr, test_modules=json.dumps(test_modules)) self._devices.append(device) - LOGGER.info( - f'A new device has been discovered with mac address {mac_addr}') - def _device_stable(self, mac_addr): - device = self.get_device(mac_addr) - LOGGER.info(f'Device with mac address {mac_addr} is ready for testing.') - self._test_orc.run_test_modules(device) + def get_device(self, mac_addr): + """Returns a loaded device object from the device mac address.""" + for device in self._devices: + if device.mac_addr == mac_addr: + return device + return None + + def _device_discovered(self, mac_addr): + device = self.get_device(mac_addr) + if device is not None: + LOGGER.info( + f'Discovered {device.make} {device.model} on the network') + else: + device = Device(mac_addr=mac_addr) + self._devices.append(device) + LOGGER.info( + f'A new device has been discovered with mac address {mac_addr}') + + def _device_stable(self, mac_addr): + device = self.get_device(mac_addr) + LOGGER.info(f'Device with mac address {mac_addr} is ready for testing.') + self._test_orc.run_test_modules(device) diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 6930f22be..2950f97fb 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,620 +1,754 @@ -#!/usr/bin/env python3 - -import getpass -import ipaddress -import json -import os -import subprocess -import sys -import time -import threading - -import docker -from docker.types import Mount - -import logger -import util -from listener import Listener -from network_validator import NetworkValidator - -LOGGER = logger.get_logger("net_orc") -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME_DIR = "runtime/network" -NETWORK_MODULES_DIR = "network/modules" -NETWORK_MODULE_METADATA = "conf/module_config.json" -DEVICE_BRIDGE = "tr-d" -INTERNET_BRIDGE = "tr-c" -PRIVATE_DOCKER_NET = "tr-private-net" -CONTAINER_NAME = "network_orchestrator" -RUNTIME = 300 - - -class NetworkOrchestrator: - """Manage and controls a virtual testing network.""" - - def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): - self._int_intf = None - self._dev_intf = None - self._single_intf = single_intf - self.listener = None - - self._net_modules = [] - - self.validate = validate - - self.async_monitor = async_monitor - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - self.validator = NetworkValidator() - - self.network_config = NetworkConfig() - - self.load_config(config_file) - - def start(self): - """Start the network orchestrator.""" - - LOGGER.info("Starting Network Orchestrator") - # Get all components ready - self.load_network_modules() - - # Restore the network first if required - self.stop(kill=True) - - self.start_network() - - if self.async_monitor: - # Run the monitor method asynchronously to keep this method non-blocking - self._monitor_thread = threading.Thread( - target=self.monitor_network) - self._monitor_thread.daemon = True - self._monitor_thread.start() - else: - self.monitor_network() - - def start_network(self): - """Start the virtual testing network.""" - LOGGER.info("Starting network") - - self.build_network_modules() - self.create_net() - self.start_network_services() - - if self.validate: - # Start the validator after network is ready - self.validator.start() - - # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready.") - - def stop(self, kill=False): - """Stop the network orchestrator.""" - self.stop_validator(kill=kill) - self.stop_network(kill=kill) - - def stop_validator(self, kill=False): - """Stop the network validator.""" - # Shutdown the validator - self.validator.stop(kill=kill) - - def stop_network(self, kill=False): - """Stop the virtual testing network.""" - # Shutdown network - self.stop_networking_services(kill=kill) - self.restore_net() - - def monitor_network(self): - # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) - time.sleep(RUNTIME) - - self.stop() - - def load_config(self,config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - self._config_file=os.path.join(self._path, CONFIG_FILE) - else: - # If defined, use as provided - self._config_file=config_file - - if not os.path.isfile(self._config_file): - LOGGER.error("Configuration file is not present at " + config_file) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) - with open(self._config_file, encoding='UTF-8') as config_json_file: - config_json = json.load(config_json_file) - self.import_config(config_json) - - def import_config(self, json_config): - self._int_intf = json_config['network']['internet_intf'] - self._dev_intf = json_config['network']['device_intf'] - - def _check_network_services(self): - LOGGER.debug("Checking network modules...") - for net_module in self._net_modules: - if net_module.enable_container: - LOGGER.debug("Checking network module: " + - net_module.display_name) - success = self._ping(net_module) - if success: - LOGGER.debug(net_module.display_name + - " responded succesfully: " + str(success)) - else: - LOGGER.error(net_module.display_name + - " failed to respond to ping") - - def _ping(self, net_module): - host = net_module.net_config.ipv4_address - namespace = "tr-ctns-" + net_module.dir_name - cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) - success = util.run_command(cmd, output=False) - return success - - def _create_private_net(self): - client = docker.from_env() - try: - network = client.networks.get(PRIVATE_DOCKER_NET) - network.remove() - except docker.errors.NotFound: - pass - - # TODO: These should be made into variables - ipam_pool = docker.types.IPAMPool( - subnet='100.100.0.0/16', - iprange='100.100.100.0/24' - ) - - ipam_config = docker.types.IPAMConfig( - pool_configs=[ipam_pool] - ) - - client.networks.create( - PRIVATE_DOCKER_NET, - ipam=ipam_config, - internal=True, - check_duplicate=True, - driver="macvlan" - ) - - def _ci_pre_network_create(self): - """ Stores network properties to restore network after - network creation and flushes internet interface - """ - - self._ethmac = subprocess.check_output( - f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() - self._gateway = subprocess.check_output( - "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() - self._ipv4 = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._ipv6 = subprocess.check_output( - f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._brd = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() - - def _ci_post_network_create(self): - """ Restore network connection in CI environment """ - LOGGER.info("post cr") - util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") - util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") - util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") - util.run_command(f"ip addr flush dev {self._int_intf}") - util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") - util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") - util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") - util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") - util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") - util.run_command(f"dhclient {INTERNET_BRIDGE}") - util.run_command(f"ip route del default via 10.1.0.1") - util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") - - def create_net(self): - LOGGER.info("Creating baseline network") - - if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): - LOGGER.error("Configured interfaces are not ready for use. " + - "Ensure both interfaces are connected.") - sys.exit(1) - - if self._single_intf: - self._ci_pre_network_create() - - # Create data plane - util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) - - # Create control plane - util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) - - # Add external interfaces to data and control plane - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + self._dev_intf) - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + self._int_intf) - - # Enable forwarding of eapol packets - util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + - " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") - - # Remove IP from internet adapter - util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") - - # Set ports up - util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") - util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") - - if self._single_intf: - self._ci_post_network_create() - - self._create_private_net() - - self.listener = Listener(self._dev_intf) - self.listener.start_listener() - - def load_network_modules(self): - """Load network modules from module_config.json.""" - LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) - - loaded_modules = "Loaded the following network modules: " - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - for module_dir in os.listdir(net_modules_dir): - - if self._get_network_module(module_dir) is None: - loaded_module = self._load_network_module(module_dir) - loaded_modules += loaded_module.dir_name + " " - - LOGGER.info(loaded_modules) - - def _load_network_module(self, module_dir): - - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - net_module = NetworkModule() - - # Load basic module information - net_module_json = json.load(open(os.path.join( - self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) - - net_module.name = net_module_json['config']['meta']['name'] - net_module.display_name = net_module_json['config']['meta']['display_name'] - net_module.description = net_module_json['config']['meta']['description'] - net_module.dir = os.path.join( - self._path, net_modules_dir, module_dir) - net_module.dir_name = module_dir - net_module.build_file = module_dir + ".Dockerfile" - net_module.container_name = "tr-ct-" + net_module.dir_name - net_module.image_name = "test-run/" + net_module.dir_name - - # Attach folder mounts to network module - if "docker" in net_module_json['config']: - - if "mounts" in net_module_json['config']['docker']: - for mount_point in net_module_json['config']['docker']['mounts']: - net_module.mounts.append(Mount( - target=mount_point['target'], - source=os.path.join( - os.getcwd(), mount_point['source']), - type='bind' - )) - - if "depends_on" in net_module_json['config']['docker']: - depends_on_module = net_module_json['config']['docker']['depends_on'] - if self._get_network_module(depends_on_module) is None: - self._load_network_module(depends_on_module) - - # Determine if this is a container or just an image/template - if "enable_container" in net_module_json['config']['docker']: - net_module.enable_container = net_module_json['config']['docker']['enable_container'] - - # Load network service networking configuration - if net_module.enable_container: - - net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] - net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] - - net_module.net_config.host = False if not "host" in net_module_json[ - 'config']['network'] else net_module_json['config']['network']['host'] - - net_module.net_config.ipv4_address = self.network_config.ipv4_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv4_network = self.network_config.ipv4_network - - net_module.net_config.ipv6_address = self.network_config.ipv6_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv6_network = self.network_config.ipv6_network - - self._net_modules.append(net_module) - return net_module - - def build_network_modules(self): - LOGGER.info("Building network modules...") - for net_module in self._net_modules: - self._build_module(net_module) - - def _build_module(self, net_module): - LOGGER.debug("Building network module " + net_module.dir_name) - client = docker.from_env() - client.images.build( - dockerfile=os.path.join(net_module.dir, net_module.build_file), - path=self._path, - forcerm=True, - tag="test-run/" + net_module.dir_name - ) - - def _get_network_module(self, name): - for net_module in self._net_modules: - if name == net_module.display_name or name == net_module.name or name == net_module.dir_name: - return net_module - return None - - # Start the OVS network module - # This should always be called before loading all - # other modules to allow for a properly setup base - # network - def _start_ovs_module(self): - self._start_network_service(self._get_network_module("OVS")) - - def _start_network_service(self, net_module): - - LOGGER.debug("Starting net service " + net_module.display_name) - network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET - LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, - container name: {net_module.container_name}""") - try: - client = docker.from_env() - net_module.container = client.containers.run( - net_module.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=net_module.container_name, - hostname=net_module.container_name, - network=PRIVATE_DOCKER_NET, - privileged=True, - detach=True, - mounts=net_module.mounts, - environment={"HOST_USER": getpass.getuser()} - ) - except docker.errors.ContainerError as error: - LOGGER.error("Container run error") - LOGGER.error(error) - - if network != "host": - self._attach_service_to_network(net_module) - - def _stop_service_module(self, net_module, kill=False): - LOGGER.debug("Stopping Service container " + net_module.container_name) - try: - container = self._get_service_container(net_module) - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - net_module.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - net_module.container_name) - container.stop() - LOGGER.debug("Container stopped:" + net_module.container_name) - except Exception as error: - LOGGER.error("Container stop error") - LOGGER.error(error) - - def _get_service_container(self, net_module): - LOGGER.debug("Resolving service container: " + - net_module.container_name) - container = None - try: - client = docker.from_env() - container = client.containers.get(net_module.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - net_module.container_name + " not found") - except Exception as e: - LOGGER.error("Failed to resolve container") - LOGGER.error(e) - return container - - def stop_networking_services(self, kill=False): - LOGGER.info("Stopping network services") - for net_module in self._net_modules: - # Network modules may just be Docker images, so we do not want to stop them - if not net_module.enable_container: - continue - self._stop_service_module(net_module, kill) - - def start_network_services(self): - LOGGER.info("Starting network services") - - os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) - - for net_module in self._net_modules: - - # TODO: There should be a better way of doing this - # Do not try starting OVS module again, as it should already be running - if "OVS" != net_module.display_name: - - # Network modules may just be Docker images, so we do not want to start them as containers - if not net_module.enable_container: - continue - - self._start_network_service(net_module) - - LOGGER.info("All network services are running") - self._check_network_services() - - # TODO: Let's move this into a separate script? It does not look great - def _attach_service_to_network(self, net_module): - LOGGER.debug("Attaching net service " + - net_module.display_name + " to device bridge") - - # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) - bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Container network namespace name - container_net_ns = "tr-ctns-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) - - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] - - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) - - # Set IP address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") - - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") - - if net_module.net_config.enable_wan: - LOGGER.debug("Attaching net service " + - net_module.display_name + " to internet bridge") - - # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) - bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Attach bridge interface to internet bridge - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + bridge_intf) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to eth1 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name eth1") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + - container_net_ns + " ip link set dev eth1 up") - - def restore_net(self): - - LOGGER.info("Clearing baseline network") - - if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): - self.listener.stop_listener() - - client = docker.from_env() - - # Stop all network containers if still running - for net_module in self._net_modules: - try: - container = client.containers.get( - "tr-ct-" + net_module.dir_name) - container.kill() - except Exception: - continue - - # Delete data plane - util.run_command("ovs-vsctl --if-exists del-br tr-d") - - # Delete control plane - util.run_command("ovs-vsctl --if-exists del-br tr-c") - - # Restart internet interface - if util.interface_exists(self._int_intf): - util.run_command("ip link set " + self._int_intf + " down") - util.run_command("ip link set " + self._int_intf + " up") - - LOGGER.info("Network is restored") - -class NetworkModule: - - def __init__(self): - self.name = None - self.display_name = None - self.description = None - - self.container = None - self.container_name = None - self.image_name = None - - # Absolute path - self.dir = None - self.dir_name = None - self.build_file = None - self.mounts = [] - - self.enable_container = True - - self.net_config = NetworkModuleNetConfig() - -# The networking configuration for a network module - -class NetworkModuleNetConfig: - - def __init__(self): - - self.enable_wan = False - - self.ip_index = 0 - self.ipv4_address = None - self.ipv4_network = None - self.ipv6_address = None - self.ipv6_network = None - - self.host = False - - def get_ipv4_addr_with_prefix(self): - return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) - - def get_ipv6_addr_with_prefix(self): - return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) - -# Represents the current configuration of the network for the device bridge - -class NetworkConfig: - - # TODO: Let's get this from a configuration file - def __init__(self): - self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') - self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') +#!/usr/bin/env python3 + +import binascii +import getpass +import ipaddress +import json +import os +from scapy.all import BOOTP +import shutil +import subprocess +import sys +import time +import threading +from threading import Timer +import docker +from docker.types import Mount +import logger +import util +from listener import Listener +from network_device import NetworkDevice +from network_event import NetworkEvent +from network_validator import NetworkValidator + +LOGGER = logger.get_logger("net_orc") +CONFIG_FILE = "conf/system.json" +EXAMPLE_CONFIG_FILE = "conf/system.json.example" +RUNTIME_DIR = "runtime/network" +NETWORK_MODULES_DIR = "network/modules" +NETWORK_MODULE_METADATA = "conf/module_config.json" +DEVICE_BRIDGE = "tr-d" +INTERNET_BRIDGE = "tr-c" +PRIVATE_DOCKER_NET = "tr-private-net" +CONTAINER_NAME = "network_orchestrator" + +RUNTIME_KEY = "runtime" +MONITOR_PERIOD_KEY = "monitor_period" +STARTUP_TIMEOUT_KEY = "startup_timeout" +DEFAULT_STARTUP_TIMEOUT = 60 +DEFAULT_RUNTIME = 1200 +DEFAULT_MONITOR_PERIOD = 300 + +RUNTIME = 1500 + + +class NetworkOrchestrator: + """Manage and controls a virtual testing network.""" + + def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): + + self._runtime = DEFAULT_RUNTIME + self._startup_timeout = DEFAULT_STARTUP_TIMEOUT + self._monitor_period = DEFAULT_MONITOR_PERIOD + + self._int_intf = None + self._dev_intf = None + self._single_intf = single_intf + + self.listener = None + + self._net_modules = [] + + self.validate = validate + + self.async_monitor = async_monitor + + self._path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + + self.validator = NetworkValidator() + + shutil.rmtree(os.path.join(os.getcwd(), RUNTIME_DIR), ignore_errors=True) + + self.network_config = NetworkConfig() + + self.load_config(config_file) + + def start(self): + """Start the network orchestrator.""" + + LOGGER.info("Starting Network Orchestrator") + # Get all components ready + self.load_network_modules() + + # Restore the network first if required + self.stop(kill=True) + + self.start_network() + + if self.async_monitor: + # Run the monitor method asynchronously to keep this method non-blocking + self._monitor_thread = threading.Thread( + target=self.monitor_network) + self._monitor_thread.daemon = True + self._monitor_thread.start() + else: + self.monitor_network() + + def start_network(self): + """Start the virtual testing network.""" + LOGGER.info("Starting network") + + self.build_network_modules() + self.create_net() + self.start_network_services() + + if self.validate: + # Start the validator after network is ready + self.validator.start() + + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready.") + + def stop(self, kill=False): + """Stop the network orchestrator.""" + self.stop_validator(kill=kill) + self.stop_network(kill=kill) + + def stop_validator(self, kill=False): + """Stop the network validator.""" + # Shutdown the validator + self.validator.stop(kill=kill) + + def stop_network(self, kill=False): + """Stop the virtual testing network.""" + # Shutdown network + self.stop_networking_services(kill=kill) + self.restore_net() + + def monitor_network(self): + # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) + time.sleep(RUNTIME) + + self.stop() + + def load_config(self,config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file=os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file=config_file + + if not os.path.isfile(self._config_file): + LOGGER.error("Configuration file is not present at " + config_file) + LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def _device_discovered(self, mac_addr): + + LOGGER.debug(f'Discovered device {mac_addr}. Waiting for device to obtain IP') + device = self._get_device(mac_addr=mac_addr) + + timeout = time.time() + self._startup_timeout + + while time.time() < timeout: + if device.ip_addr is None: + time.sleep(3) + else: + break + + if device.ip_addr is None: + LOGGER.info(f"Timed out whilst waiting for {mac_addr} to obtain an IP address") + return + + LOGGER.info(f"Device with mac addr {device.mac_addr} has obtained IP address {device.ip_addr}") + + self._start_device_monitor(device) + + def _dhcp_lease_ack(self, packet): + mac_addr = packet[BOOTP].chaddr.hex(":")[0:17] + device = self._get_device(mac_addr=mac_addr) + device.ip_addr = packet[BOOTP].yiaddr + + def _start_device_monitor(self, device): + """Start a timer until the steady state has been reached and + callback the steady state method for this device.""" + LOGGER.info(f"Monitoring device with mac addr {device.mac_addr} for {str(self._monitor_period)} seconds") + timer = Timer(self._monitor_period, + self.listener.call_callback, + args=(NetworkEvent.DEVICE_STABLE, device.mac_addr,)) + timer.start() + + def _get_device(self, mac_addr): + for device in self._devices: + if device.mac_addr == mac_addr: + return device + device = NetworkDevice(mac_addr=mac_addr) + self._devices.append(device) + return device + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + + if RUNTIME_KEY in json_config: + self._runtime = json_config[RUNTIME_KEY] + if STARTUP_TIMEOUT_KEY in json_config: + self._startup_timeout = json_config[STARTUP_TIMEOUT_KEY] + if MONITOR_PERIOD_KEY in json_config: + self._monitor_period = json_config[MONITOR_PERIOD_KEY] + + def _check_network_services(self): + LOGGER.debug("Checking network modules...") + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug("Checking network module: " + + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + + " responded succesfully: " + str(success)) + else: + LOGGER.error(net_module.display_name + + " failed to respond to ping") + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = "tr-ctns-" + net_module.dir_name + cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) + success = util.run_command(cmd, output=False) + return success + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool( + subnet='100.100.0.0/16', + iprange='100.100.100.0/24' + ) + + ipam_config = docker.types.IPAMConfig( + pool_configs=[ipam_pool] + ) + + client.networks.create( + PRIVATE_DOCKER_NET, + ipam=ipam_config, + internal=True, + check_duplicate=True, + driver="macvlan" + ) + + def _ci_pre_network_create(self): + """ Stores network properties to restore network after + network creation and flushes internet interface + """ + + self._ethmac = subprocess.check_output( + f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() + self._gateway = subprocess.check_output( + "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() + self._ipv4 = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._ipv6 = subprocess.check_output( + f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() + self._brd = subprocess.check_output( + f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() + + def _ci_post_network_create(self): + """ Restore network connection in CI environment """ + LOGGER.info("post cr") + util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") + util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") + util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") + util.run_command(f"ip addr flush dev {self._int_intf}") + util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") + util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") + util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") + util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") + util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") + util.run_command(f"dhclient {INTERNET_BRIDGE}") + util.run_command(f"ip route del default via 10.1.0.1") + util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") + + def create_net(self): + LOGGER.info("Creating baseline network") + + if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): + LOGGER.error("Configured interfaces are not ready for use. " + + "Ensure both interfaces are connected.") + sys.exit(1) + + if self._single_intf: + self._ci_pre_network_create() + + # Create data plane + util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) + + # Create control plane + util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + self._dev_intf) + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + + " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") + + # Remove IP from internet adapter + util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") + + # Set ports up + util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") + util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") + + if self._single_intf: + self._ci_post_network_create() + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.register_callback(self._device_discovered, [ + NetworkEvent.DEVICE_DISCOVERED]) + self.listener.register_callback( + self._dhcp_lease_ack, [NetworkEvent.DHCP_LEASE_ACK]) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) + + loaded_modules = "Loaded the following network modules: " + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + if self._get_network_module(module_dir) is None: + loaded_module = self._load_network_module(module_dir) + loaded_modules += loaded_module.dir_name + " " + + LOGGER.info(loaded_modules) + + def _load_network_module(self, module_dir): + + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + net_module = NetworkModule() + + # Load basic module information + net_module_json = json.load(open(os.path.join( + self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join( + self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + ".Dockerfile" + net_module.container_name = "tr-ct-" + net_module.dir_name + net_module.image_name = "test-run/" + net_module.dir_name + + # Attach folder mounts to network module + if "docker" in net_module_json['config']: + + if "mounts" in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append(Mount( + target=mount_point['target'], + source=os.path.join( + os.getcwd(), mount_point['source']), + type='bind' + )) + + if "depends_on" in net_module_json['config']['docker']: + depends_on_module = net_module_json['config']['docker']['depends_on'] + if self._get_network_module(depends_on_module) is None: + self._load_network_module(depends_on_module) + + # Determine if this is a container or just an image/template + if "enable_container" in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker']['enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] + + net_module.net_config.host = False if not "host" in net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + self._net_modules.append(net_module) + return net_module + + def build_network_modules(self): + LOGGER.info("Building network modules...") + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug("Building network module " + net_module.dir_name) + client = docker.from_env() + client.images.build( + dockerfile=os.path.join(net_module.dir, net_module.build_file), + path=self._path, + forcerm=True, + tag="test-run/" + net_module.dir_name + ) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name == net_module.display_name or name == net_module.name or name == net_module.dir_name: + return net_module + return None + + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module("OVS")) + + def _start_network_service(self, net_module): + + LOGGER.debug("Starting net service " + net_module.display_name) + network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + detach=True, + mounts=net_module.mounts, + environment={"HOST_USER": getpass.getuser()} + ) + except docker.errors.ContainerError as error: + LOGGER.error("Container run error") + LOGGER.error(error) + + if network != "host": + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug("Stopping Service container " + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug("Killing container:" + + net_module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + + net_module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + net_module.container_name) + except Exception as error: + LOGGER.error("Container stop error") + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug("Resolving service container: " + + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + + net_module.container_name + " not found") + except Exception as e: + LOGGER.error("Failed to resolve container") + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info("Stopping network services") + for net_module in self._net_modules: + # Network modules may just be Docker images, so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info("Starting network services") + + os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if "OVS" != net_module.display_name: + + # Network modules may just be Docker images, so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info("All network services are running") + self._check_network_services() + + def _attach_test_module_to_network(self, test_module): + LOGGER.debug("Attaching test module " + + test_module.display_name + " to device bridge") + + # Device bridge interface example: tr-di-baseline-test (Test Run Device Interface for baseline test container) + bridge_intf = DEVICE_BRIDGE + "i-" + test_module.dir_name + "-test" + + # Container interface example: tr-cti-baseline-test (Test Run Container Interface for baseline test container) + container_intf = "tr-test-" + test_module.dir_name + + # Container network namespace name + container_net_ns = "tr-test-" + test_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + test_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(test_module.ip_index)) + + # Set IP address of container interface + ipv4_address = self.network_config.ipv4_network[test_module.ip_index] + ipv6_address = self.network_config.ipv6_network[test_module.ip_index] + + ipv4_address_with_prefix=str(ipv4_address) + "/" + str(self.network_config.ipv4_network.prefixlen) + ipv6_address_with_prefix=str(ipv6_address) + "/" + str(self.network_config.ipv6_network.prefixlen) + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + ipv4_address_with_prefix + " dev veth0") + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + ipv6_address_with_prefix + " dev veth0") + + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug("Attaching net service " + + net_module.display_name + " to device bridge") + + # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Container network namespace name + container_net_ns = "tr-ctns-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Add bridge interface to device bridge + util.run_command("ovs-vsctl add-port " + + DEVICE_BRIDGE + " " + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command( + "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command("ln -sf /proc/" + container_pid + + "/ns/net /var/run/netns/" + container_net_ns) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to veth0 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name veth0") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) + + # Set IP address of container interface + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") + + util.run_command("ip netns exec " + container_net_ns + " ip addr add " + + net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev veth0 up") + + if net_module.net_config.enable_wan: + LOGGER.debug("Attaching net service " + + net_module.display_name + " to internet bridge") + + # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name + + # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = "tr-cti-" + net_module.dir_name + + # Create interface pair + util.run_command("ip link add " + bridge_intf + + " type veth peer name " + container_intf) + + # Attach bridge interface to internet bridge + util.run_command("ovs-vsctl add-port " + + INTERNET_BRIDGE + " " + bridge_intf) + + # Attach container interface to container network namespace + util.run_command("ip link set " + container_intf + + " netns " + container_net_ns) + + # Rename container interface name to eth1 + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev " + container_intf + " name eth1") + + # Set MAC address of container interface + util.run_command("ip netns exec " + container_net_ns + + " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) + + # Set interfaces up + util.run_command("ip link set dev " + bridge_intf + " up") + util.run_command("ip netns exec " + + container_net_ns + " ip link set dev eth1 up") + + def restore_net(self): + + LOGGER.info("Clearing baseline network") + + if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() + + client = docker.from_env() + + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get( + "tr-ct-" + net_module.dir_name) + container.kill() + except Exception: + continue + + # Delete data plane + util.run_command("ovs-vsctl --if-exists del-br tr-d") + + # Delete control plane + util.run_command("ovs-vsctl --if-exists del-br tr-c") + + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command("ip link set " + self._int_intf + " down") + util.run_command("ip link set " + self._int_intf + " up") + + LOGGER.info("Network is restored") + +class NetworkModule: + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() + +# The networking configuration for a network module + +class NetworkModuleNetConfig: + + def __init__(self): + + self.enable_wan = False + + self.ip_index = 0 + self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None + + self.host = False + + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) + +# Represents the current configuration of the network for the device bridge + +class NetworkConfig: + + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') diff --git a/resources/devices/Template/device_config.json b/resources/devices/Template/device_config.json index f8b56b7a3..7a3d4441c 100644 --- a/resources/devices/Template/device_config.json +++ b/resources/devices/Template/device_config.json @@ -27,6 +27,121 @@ "enabled": true } } + }, + "nmap": { + "enabled": true, + "tests": { + "security.nmap.ports": { + "enabled": true, + "security.services.ftp": { + "tcp_ports": { + "20": { + "allowed": false + }, + "21": { + "allowed": false + } + } + }, + "security.services.ssh": { + "tcp_ports": { + "22": { + "allowed": true + } + } + }, + "security.services.telnet": { + "tcp_ports": { + "23": { + "allowed": false + } + } + }, + "security.services.smtp": { + "tcp_ports": { + "25": { + "allowed": false + }, + "465": { + "allowed": false + }, + "587": { + "allowed": false + } + } + }, + "security.services.http": { + "tcp_ports": { + "80": { + "allowed": false + } + } + }, + "security.services.pop": { + "tcp_ports": { + "110": { + "allowed": false + } + } + }, + "security.services.imap": { + "tcp_ports": { + "143": { + "allowed": false + } + } + }, + "security.services.snmpv3": { + "tcp_ports": { + "161": { + "allowed": false + }, + "162": { + "allowed": false + } + }, + "udp_ports": { + "161": { + "allowed": false + }, + "162": { + "allowed": false + } + } + }, + "security.services.https": { + "tcp_ports": { + "80": { + "allowed": false + } + } + }, + "security.services.vnc": { + "tcp_ports": { + "5500": { + "allowed": false + }, + "5800": { + "allowed": false + } + } + }, + "security.services.tftp": { + "udp_ports": { + "69": { + "allowed": false + } + } + }, + "security.services.ntp": { + "udp_ports": { + "123": { + "allowed": false + } + } + } + } + } } } } \ No newline at end of file diff --git a/test_orc/modules/base/base.Dockerfile b/test_orc/modules/base/base.Dockerfile index b5f35326a..a508caef7 100644 --- a/test_orc/modules/base/base.Dockerfile +++ b/test_orc/modules/base/base.Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:jammy # Install common software -RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix +RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix nmap --fix-missing # Setup the base python requirements COPY modules/base/python /testrun/python diff --git a/test_orc/modules/base/bin/get_ipv4_addr b/test_orc/modules/base/bin/get_ipv4_addr new file mode 100644 index 000000000..09a19bc13 --- /dev/null +++ b/test_orc/modules/base/bin/get_ipv4_addr @@ -0,0 +1,8 @@ +#!/bin/bash + +NET=$1 +MAC=$2 + +IP_ADDR=$(nmap -sP $NET | grep -B 2 $MAC | head -n 1 | cut -d " " -f 5) + +echo $IP_ADDR \ No newline at end of file diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 6f7f48c3a..9a348faa7 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -1,6 +1,7 @@ import json import logger import os +import util LOGGER = None RESULTS_DIR = "/runtime/output/" @@ -12,8 +13,12 @@ class TestModule: def __init__(self, module_name, log_name): self._module_name = module_name self._device_mac = os.environ['DEVICE_MAC'] + self._ipv4_subnet = os.environ['IPV4_SUBNET'] + self._ipv6_subnet = os.environ['IPV6_SUBNET'] self._add_logger(log_name=log_name, module_name=module_name) self._config = self._read_config() + self._device_ipv4_addr = None + self._device_ipv6_addr = None def _add_logger(self, log_name, module_name): global LOGGER @@ -34,8 +39,11 @@ def _get_device_tests(self, device_test_module): return [] else: for test in module_tests: + # Resolve device specific configurations for the test if it exists + # and update module test config with device config options if test["name"] in device_test_module["tests"]: - test["enabled"] = device_test_module["tests"][test["name"]]["enabled"] + dev_test_config = device_test_module["tests"][test["name"]] + test["config"].update(dev_test_config) return module_tests def _get_device_test_module(self): @@ -45,8 +53,10 @@ def _get_device_test_module(self): return None def run_tests(self): + if self._config["config"]["network"]: + self._device_ipv4_addr = self._get_device_ipv4() + LOGGER.info("Device IP Resolved: " + str(self._device_ipv4_addr)) tests = self._get_tests() - device_modules = os.environ['DEVICE_TEST_MODULES'] for test in tests: test_method_name = "_" + test["name"].replace(".", "_") result = None @@ -55,7 +65,11 @@ def run_tests(self): # Resolve the correct python method by test name and run test if hasattr(self, test_method_name): - result = getattr(self, test_method_name)() + if "config" in test: + result = getattr(self, test_method_name)( + config=test["config"]) + else: + result = getattr(self, test_method_name)() else: LOGGER.info("Test " + test["name"] + " not resolved. Skipping") @@ -82,3 +96,11 @@ def _write_results(self, results): f = open(results_file, "w", encoding="utf-8") f.write(results) f.close() + + def _get_device_ipv4(self): + command = '/testrun/bin/get_ipv4_addr {} {}'.format( + self._ipv4_subnet, self._device_mac.upper()) + text, err = util.run_command(command) + if text: + return text.split("\n")[0] + return None diff --git a/test_orc/modules/base/python/src/util.py b/test_orc/modules/base/python/src/util.py new file mode 100644 index 000000000..a2dcfbdb1 --- /dev/null +++ b/test_orc/modules/base/python/src/util.py @@ -0,0 +1,25 @@ +import subprocess +import shlex +import logger + +# Runs a process at the os level +# By default, returns the standard output and error output +# If the caller sets optional output parameter to False, +# will only return a boolean result indicating if it was +# succesful in running the command. Failure is indicated +# by any return code from the process other than zero. +def run_command(cmd, output=True): + success = False + LOGGER = logger.get_logger('util') + process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode !=0 and output: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + LOGGER.error("Command Failed: " + cmd) + LOGGER.error("Error: " + err_msg) + else: + success = True + if output: + return stdout.strip().decode('utf-8'), stderr + else: + return success diff --git a/test_orc/modules/nmap/bin/start_test_module b/test_orc/modules/nmap/bin/start_test_module new file mode 100644 index 000000000..4bb7e9f96 --- /dev/null +++ b/test_orc/modules/nmap/bin/start_test_module @@ -0,0 +1,42 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a pyhon script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. + +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/nmap/conf/module_config.json b/test_orc/modules/nmap/conf/module_config.json new file mode 100644 index 000000000..5449327a1 --- /dev/null +++ b/test_orc/modules/nmap/conf/module_config.json @@ -0,0 +1,176 @@ +{ + "config": { + "meta": { + "name": "nmap", + "display_name": "nmap", + "description": "Scan for open ports using nmap" + }, + "network": true, + "docker": { + "enable_container": true, + "timeout": 600 + }, + "tests": [ + { + "name": "security.nmap.ports", + "description": "Run an nmap scan of open ports", + "expected_behavior": "Report all open ports", + "config": { + "security.services.ftp": { + "tcp_ports": { + "20": { + "allowed": false, + "description": "File Transfer Protocol (FTP) Server Data Transfer" + }, + "21": { + "allowed": false, + "description": "File Transfer Protocol (FTP) Server Data Transfer" + } + }, + "description": "Check FTP port 20/21 is disabled and FTP is not running on any port", + "expected_behavior": "There is no FTP service running on any port" + }, + "security.services.ssh": { + "tcp_ports": { + "22": { + "allowed": true, + "description": "Secure Shell (SSH) server" + } + }, + "description": "Check TELNET port 23 is disabled and TELNET is not running on any port", + "expected_behavior": "There is no FTP service running on any port" + }, + "security.services.telnet": { + "tcp_ports": { + "23": { + "allowed": false, + "description": "Telnet Server" + } + }, + "description": "Check TELNET port 23 is disabled and TELNET is not running on any port", + "expected_behavior": "There is no FTP service running on any port" + }, + "security.services.smtp": { + "tcp_ports": { + "25": { + "allowed": false, + "description": "Simple Mail Transfer Protocol (SMTP) Server" + }, + "465": { + "allowed": false, + "description": "Simple Mail Transfer Protocol over SSL (SMTPS) Server" + }, + "587": { + "allowed": false, + "description": "Simple Mail Transfer Protocol via TLS (SMTPS) Server" + } + }, + "description": "Check SMTP port 25 is disabled and ports 465 or 587 with SSL encryption are (not?) enabled and SMTP is not running on any port.", + "expected_behavior": "There is no smtp service running on any port" + }, + "security.services.http": { + "tcp_ports": { + "80": { + "service_scan": { + "script": "http-methods" + }, + "allowed": false, + "description": "Administrative Insecure Web-Server" + } + }, + "description": "Check that there is no HTTP server running on any port", + "expected_behavior": "Device is unreachable on port 80 (or any other port) and only responds to HTTPS requests on port 443 (or any other port if HTTP is used at all)" + }, + "security.services.pop": { + "tcp_ports": { + "110": { + "allowed": false, + "description": "Post Office Protocol v3 (POP3) Server" + } + }, + "description": "Check POP port 110 is disalbed and POP is not running on any port", + "expected_behavior": "There is no pop service running on any port" + }, + "security.services.imap": { + "tcp_ports": { + "143": { + "allowed": false, + "description": "Internet Message Access Protocol (IMAP) Server" + } + }, + "description": "Check IMAP port 143 is disabled and IMAP is not running on any port", + "expected_behavior": "There is no imap service running on any port" + }, + "security.services.snmpv3": { + "tcp_ports": { + "161": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP)" + }, + "162": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP) Trap" + } + }, + "udp_ports": { + "161": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP)" + }, + "162": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP) Trap" + } + }, + "description": "Check SNMP port 161/162 is disabled. If SNMP is an essential service, check it supports version 3", + "expected_behavior": "Device is unreachable on port 161 (or any other port) and device is unreachable on port 162 (or any other port) unless SNMP is essential in which case it is SNMPv3 is used." + }, + "security.services.https": { + "tcp_ports": { + "80": { + "allowed": false, + "description": "Administrative Secure Web-Server" + } + }, + "description": "Check that if there is a web server running it is running on a secure port.", + "expected_behavior": "Device only responds to HTTPS requests on port 443 (or any other port if HTTP is used at all)" + }, + "security.services.vnc": { + "tcp_ports": { + "5800": { + "allowed": false, + "description": "Virtual Network Computing (VNC) Remote Frame Buffer Protocol Over HTTP" + }, + "5500": { + "allowed": false, + "description": "Virtual Network Computing (VNC) Remote Frame Buffer Protocol" + } + }, + "description": "Check VNC is disabled on any port", + "expected_behavior": "Device cannot be accessed /connected to via VNc on any port" + }, + "security.services.tftp": { + "udp_ports": { + "69": { + "allowed": false, + "description": "Trivial File Transfer Protocol (TFTP) Server" + } + }, + "description": "Check TFTP port 69 is disabled (UDP)", + "expected_behavior": "There is no tftp service running on any port" + }, + "security.services.ntp": { + "udp_ports": { + "123": { + "allowed": false, + "description": "Network Time Protocol (NTP) Server" + } + }, + "description": "Check NTP port 123 is disabled and the device is not operating as an NTP server", + "expected_behavior": "The device dos not respond to NTP requests when it's IP is set as the NTP server on another device" + } + } + } + ] + } +} \ No newline at end of file diff --git a/test_orc/modules/nmap/nmap.Dockerfile b/test_orc/modules/nmap/nmap.Dockerfile new file mode 100644 index 000000000..12f23dde7 --- /dev/null +++ b/test_orc/modules/nmap/nmap.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/nmap/conf /testrun/conf + +# Load device binary files +COPY modules/nmap/bin /testrun/bin + +# Copy over all python files +COPY modules/nmap/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py new file mode 100644 index 000000000..7d5bd3604 --- /dev/null +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 + +import time +import util +import json +import threading +from test_module import TestModule + +LOG_NAME = "test_nmap" +LOGGER = None + + +class NmapModule(TestModule): + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + self._unallowed_ports = [] + self._scan_tcp_results = None + self._udp_tcp_results = None + self._script_scan_results = None + global LOGGER + LOGGER = self._get_logger() + + def _security_nmap_ports(self, config): + LOGGER.info( + "Running security.nmap.ports test") + + # Delete the enabled key from the config if it exists + # to prevent it being treated as a test key + if "enabled" in config: + del config["enabled"] + + if self._device_ipv4_addr is not None: + # Run the monitor method asynchronously to keep this method non-blocking + self._tcp_scan_thread = threading.Thread( + target=self._scan_tcp_ports, args=(config,)) + self._udp_scan_thread = threading.Thread( + target=self._scan_udp_ports, args=(config,)) + self._script_scan_thread = threading.Thread( + target=self._scan_scripts, args=(config,)) + + self._tcp_scan_thread.daemon = True + self._udp_scan_thread.daemon = True + self._script_scan_thread.daemon = True + + self._tcp_scan_thread.start() + self._udp_scan_thread.start() + self._script_scan_thread.start() + + while self._tcp_scan_thread.is_alive() or self._udp_scan_thread.is_alive() or self._script_scan_thread.is_alive(): + time.sleep(1) + + LOGGER.debug("TCP scan results: " + str(self._scan_tcp_results)) + LOGGER.debug("UDP scan results: " + str(self._scan_udp_results)) + LOGGER.debug("Service scan results: " + + str(self._script_scan_results)) + self._process_port_results( + tests=config) + LOGGER.info("Unallowed Ports: " + str(self._unallowed_ports)) + LOGGER.info("Script scan results:\n" + + json.dumps(self._script_scan_results)) + return len(self._unallowed_ports) == 0 + else: + LOGGER.info("Device ip address not resolved, skipping") + return None + + def _process_port_results(self, tests): + for test in tests: + LOGGER.info("Checking results for test: " + str(test)) + self._check_scan_results(test_config=tests[test]) + + def _check_scan_results(self, test_config): + port_config = {} + if "tcp_ports" in test_config: + port_config.update(test_config["tcp_ports"]) + elif "udp_ports" in test_config: + port_config.update(test_config["udp_ports"]) + + scan_results = {} + if self._scan_tcp_results is not None: + scan_results.update(self._scan_tcp_results) + if self._scan_udp_results is not None: + scan_results.update(self._scan_udp_results) + if self._script_scan_results is not None: + scan_results.update(self._script_scan_results) + if port_config is not None: + for port in port_config: + result = None + LOGGER.info("Checking port: " + str(port)) + LOGGER.debug("Port config: " + str(port_config[port])) + if port in scan_results: + if scan_results[port]["state"] == "open": + if not port_config[port]["allowed"]: + LOGGER.info("Unallowed port open") + self._unallowed_ports.append(str(port)) + result = False + else: + LOGGER.info("Allowed port open") + result = True + else: + LOGGER.info("Port is closed") + result = True + else: + LOGGER.info("Port not detected, closed") + result = True + + if result is not None: + port_config[port]["result"] = "compliant" if result else "non-compliant" + else: + port_config[port]["result"] = "skipped" + + def _scan_scripts(self, tests): + scan_results = {} + LOGGER.info("Checing for scan scripts") + for test in tests: + test_config = tests[test] + if "tcp_ports" in test_config: + for port in test_config["tcp_ports"]: + port_config = test_config["tcp_ports"][port] + if "service_scan" in port_config: + LOGGER.info("Service Scan Detected for: " + str(port)) + svc = port_config["service_scan"] + scan_results.update( + self._scan_tcp_with_script(svc["script"])) + if "udp_ports" in test_config: + for port in test_config["udp_ports"]: + if "service_scan" in port: + LOGGER.info("Service Scan Detected for: " + str(port)) + svc = port["service_scan"] + self._scan_udp_with_script(svc["script"], port) + scan_results.update( + self._scan_tcp_with_script(svc["script"])) + self._script_scan_results = scan_results + + def _scan_tcp_with_script(self, script_name, ports=None): + LOGGER.info("Running TCP nmap scan with script " + script_name) + scan_options = " -v -n T3 --host-timeout=6m -A --script " + script_name + port_options = " --open " + if ports is None: + port_options += " -p- " + else: + port_options += " -p" + ports + " " + results_file = "/runtime/output/" + self._module_name + "-"+script_name+".log" + nmap_options = scan_options + port_options + " -oG " + results_file + nmap_results, err = util.run_command( + "nmap " + nmap_options + " " + self._device_ipv4_addr) + LOGGER.info("Nmap TCP script scan complete") + LOGGER.info("nmap script results\n" + str(nmap_results)) + return self._process_nmap_results(nmap_results=nmap_results) + + def _scan_udp_with_script(self, script_name, ports=None): + LOGGER.info("Running UDP nmap scan with script " + script_name) + scan_options = " --sU -Pn -n --script " + script_name + port_options = " --open " + if ports is None: + port_options += " -p- " + else: + port_options += " -p" + ports + " " + nmap_options = scan_options + port_options + nmap_results, err = util.run_command( + "nmap " + nmap_options + self._device_ipv4_addr) + LOGGER.info("Nmap UDP script scan complete") + LOGGER.info("nmap script results\n" + str(nmap_results)) + return self._process_nmap_results(nmap_results=nmap_results) + + def _scan_tcp_ports(self, tests): + max_port = 1000 + ports = [] + for test in tests: + test_config = tests[test] + if "tcp_ports" in test_config: + for port in test_config["tcp_ports"]: + if int(port) > max_port: + ports.append(port) + ports_to_scan = "1-" + str(max_port) + if len(ports) > 0: + ports_to_scan += "," + ','.join(ports) + LOGGER.info("Running nmap TCP port scan") + LOGGER.info("TCP ports: " + str(ports_to_scan)) + nmap_results, err = util.run_command( + "nmap -sT -sV -Pn -v -p " + ports_to_scan + " --version-intensity 7 -T4 " + self._device_ipv4_addr) + LOGGER.info("TCP port scan complete") + self._scan_tcp_results = self._process_nmap_results( + nmap_results=nmap_results) + + def _scan_udp_ports(self, tests): + ports = [] + for test in tests: + test_config = tests[test] + if "udp_ports" in test_config: + for port in test_config["udp_ports"]: + ports.append(port) + if len(ports) > 0: + port_list = ','.join(ports) + LOGGER.info("Running nmap UDP port scan") + LOGGER.info("UDP ports: " + str(port_list)) + nmap_results, err = util.run_command( + "nmap -sU -sV -p " + port_list + " " + self._device_ipv4_addr) + LOGGER.info("UDP port scan complete") + self._scan_udp_results = self._process_nmap_results( + nmap_results=nmap_results) + + def _process_nmap_results(self, nmap_results): + results = {} + LOGGER.info("nmap results\n" + str(nmap_results)) + if nmap_results: + if "Service Info" in nmap_results: + rows = nmap_results.split("PORT")[1].split( + "Service Info")[0].split("\n") + elif "PORT" in nmap_results: + rows = nmap_results.split("PORT")[1].split( + "MAC Address")[0].split("\n") + if rows: + for result in rows[1:-1]: # Iterate skipping the header and tail rows + cols = result.split() + port = cols[0].split("/")[0] + # If results don't start with a a port number, it's likely a bleed over + # from previous result so we need to ignore it + if port.isdigit(): + version = "" + if len(cols) > 3: + # recombine full version information that may contain spaces + version = ' '.join(cols[3:]) + port_result = {cols[0].split( + "/")[0]: {"state": cols[1], "service": cols[2], "version": version}} + results.update(port_result) + return results diff --git a/test_orc/modules/nmap/python/src/run.py b/test_orc/modules/nmap/python/src/run.py new file mode 100644 index 000000000..4c8294769 --- /dev/null +++ b/test_orc/modules/nmap/python/src/run.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import argparse +import signal +import sys +import logger + +from nmap_module import NmapModule + +LOGGER = logger.get_logger('test_module') + +class NmapModuleRunner: + + def __init__(self,module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + LOGGER.info("Starting nmap Module") + + self._test_module = NmapModule(module) + self._test_module.run_tests() + + def _handler(self, signum, *other): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) + +def run(argv): + parser = argparse.ArgumentParser(description="Nmap Module Help", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", "--module", help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + NmapModuleRunner(args.module.strip()) + +if __name__ == "__main__": + run(sys.argv) diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py index 8121c34db..6b2f14f9d 100644 --- a/test_orc/python/src/module.py +++ b/test_orc/python/src/module.py @@ -15,9 +15,13 @@ class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-att container_name: str = None image_name :str = None enable_container: bool = True + network: bool = True timeout: int = 60 # Absolute path dir: str = None dir_name: str = None + + #Set IP Index for all test modules + ip_index: str = 9 diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index c257cd901..08c855d9a 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -19,9 +19,10 @@ class TestOrchestrator: """Manages and controls the test modules.""" - def __init__(self): + def __init__(self,net_orc): self._test_modules = [] self._module_config = None + self._net_orc = net_orc self._path = os.path.dirname(os.path.dirname( os.path.dirname(os.path.realpath(__file__)))) @@ -90,7 +91,9 @@ def _run_test_module(self, module, device): environment={ "HOST_USER": getpass.getuser(), "DEVICE_MAC": device.mac_addr, - "DEVICE_TEST_MODULES": device.test_modules + "DEVICE_TEST_MODULES": device.test_modules, + "IPV4_SUBNET": self._net_orc.network_config.ipv4_network, + "IPV6_SUBNET": self._net_orc.network_config.ipv6_network } ) except (docker.errors.APIError, docker.errors.ContainerError) as container_error: @@ -98,6 +101,11 @@ def _run_test_module(self, module, device): LOGGER.debug(container_error) return + # Mount the test container to the virtual network if requried + if module.network: + LOGGER.info("Mounting test module to the network") + self._net_orc._attach_test_module_to_network(module) + # Determine the module timeout time test_module_timeout = time.time() + module.timeout status = self._get_module_status(module) From 07432ee1de1d2759b70d4771b2121913dc82714d Mon Sep 17 00:00:00 2001 From: Jacob Boddey Date: Wed, 17 May 2023 15:49:08 +0100 Subject: [PATCH 13/48] Fix device configs --- framework/device.py | 10 +- framework/testrun.py | 271 +++++++++--------- net_orc/python/src/network_orchestrator.py | 37 ++- .../modules/base/python/src/test_module.py | 8 +- test_orc/python/src/test_orchestrator.py | 4 +- 5 files changed, 168 insertions(+), 162 deletions(-) diff --git a/framework/device.py b/framework/device.py index 74d62d495..80cfb9c9c 100644 --- a/framework/device.py +++ b/framework/device.py @@ -6,9 +6,9 @@ @dataclass class Device(NetworkDevice): - """Represents a physical device and it's configuration.""" + """Represents a physical device and it's configuration.""" - make: str = None - model: str = None - mac_addr: str - test_modules: str = None + make: str = None + model: str = None + mac_addr: str + test_modules: str = None diff --git a/framework/testrun.py b/framework/testrun.py index 44c3bca6d..d5c70a9ca 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -46,142 +46,149 @@ class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. + """Test Run controller. - Creates an instance of the network orchestrator, test - orchestrator and user interface. - """ + Creates an instance of the network orchestrator, test + orchestrator and user interface. + """ - def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False, single_intf=False): - self._devices = [] - self._net_only = net_only - self._single_intf = single_intf + def __init__(self, + config_file=CONFIG_FILE, + validate=True, + net_only=False, + single_intf=False): + self._devices = [] + self._net_only = net_only + self._single_intf = single_intf - # Catch any exit signals - self._register_exits() + # Catch any exit signals + self._register_exits() - # Expand the config file to absolute pathing - config_file_abs = self._get_config_abs(config_file=config_file) + # Expand the config file to absolute pathing + config_file_abs = self._get_config_abs(config_file=config_file) - self._net_orc = net_orc.NetworkOrchestrator( - config_file=config_file_abs, - validate=validate, - async_monitor=not self._net_only, - single_intf = self._single_intf) - self._test_orc = test_orc.TestOrchestrator(self._net_orc) + self._net_orc = net_orc.NetworkOrchestrator( + config_file=config_file_abs, + validate=validate, + async_monitor=not self._net_only, + single_intf = self._single_intf) - def start(self): + self._test_orc = test_orc.TestOrchestrator(self._net_orc) + + def start(self): - self._load_all_devices() - - - if self._net_only: - LOGGER.info("Network only option configured, no tests will be run") - self._start_network() - else: - self._start_network() - self._test_orc.start() - - self._net_orc.listener.register_callback( - self._device_stable, - [NetworkEvent.DEVICE_STABLE] - ) - - LOGGER.info("Waiting for devices on the network...") - - # Check timeout and whether testing is currently in progress before stopping - time.sleep(RUNTIME) - - self.stop() - - def stop(self, kill=False): - self._stop_tests() - self._stop_network(kill=kill) - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - self.stop(kill=True) - sys.exit(1) - - def _get_config_abs(self, config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - config_file = os.path.join(parent_dir, CONFIG_FILE) - - # Expand the config file to absolute pathing - return os.path.abspath(config_file) - - def _start_network(self): - # Load in local device configs to the network orchestrator - self._net_orc._devices = self._devices - - # Start the network orchestrator - self._net_orc.start() - - def _run_tests(self, device): - """Iterate through and start all test modules.""" - - # To Do: Make this configurable - time.sleep(60) # Let device bootup - - self._test_orc._run_test_modules(device) - - def _stop_network(self, kill=False): - self._net_orc.stop(kill=kill) - - def _stop_tests(self): - self._test_orc.stop() - - def _load_all_devices(self): - self._load_devices(device_dir=LOCAL_DEVICES_DIR) - self._load_devices(device_dir=RESOURCE_DEVICES_DIR) - - def _load_devices(self, device_dir): - LOGGER.debug('Loading devices from ' + device_dir) - - os.makedirs(device_dir, exist_ok=True) - - for device_folder in os.listdir(device_dir): - with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), - encoding='utf-8') as device_config_file: - device_config_json = json.load(device_config_file) - - device_make = device_config_json.get(DEVICE_MAKE) - device_model = device_config_json.get(DEVICE_MODEL) - mac_addr = device_config_json.get(DEVICE_MAC_ADDR) - test_modules = device_config_json.get(DEVICE_TEST_MODULES) - - device = Device(make=device_make, model=device_model, - mac_addr=mac_addr, test_modules=json.dumps(test_modules)) - self._devices.append(device) - - def get_device(self, mac_addr): - """Returns a loaded device object from the device mac address.""" - for device in self._devices: - if device.mac_addr == mac_addr: - return device - return None - - def _device_discovered(self, mac_addr): - device = self.get_device(mac_addr) - if device is not None: - LOGGER.info( - f'Discovered {device.make} {device.model} on the network') - else: - device = Device(mac_addr=mac_addr) - self._devices.append(device) - LOGGER.info( - f'A new device has been discovered with mac address {mac_addr}') - - def _device_stable(self, mac_addr): - device = self.get_device(mac_addr) - LOGGER.info(f'Device with mac address {mac_addr} is ready for testing.') - self._test_orc.run_test_modules(device) + self._load_all_devices() + + if self._net_only: + LOGGER.info('Network only option configured, no tests will be run') + self._start_network() + else: + self._start_network() + self._test_orc.start() + + self._net_orc.listener.register_callback( + self._device_stable, + [NetworkEvent.DEVICE_STABLE] + ) + self._net_orc.listener.register_callback( + self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED] + ) + + LOGGER.info('Waiting for devices on the network...') + + # Check timeout and whether testing is currently + # in progress before stopping + time.sleep(RUNTIME) + + self.stop() + + def stop(self, kill=False): + self._stop_tests() + self._stop_network(kill=kill) + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug('Exit signal received: ' + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info('Exit signal received.') + self.stop(kill=True) + sys.exit(1) + + def _get_config_abs(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + config_file = os.path.join(parent_dir, CONFIG_FILE) + + # Expand the config file to absolute pathing + return os.path.abspath(config_file) + + def _start_network(self): + # Start the network orchestrator + self._net_orc.start() + + def _run_tests(self, device): + """Iterate through and start all test modules.""" + + # To Do: Make this configurable + time.sleep(60) # Let device bootup + + self._test_orc._run_test_modules(device) + + def _stop_network(self, kill=False): + self._net_orc.stop(kill=kill) + + def _stop_tests(self): + self._test_orc.stop() + + def _load_all_devices(self): + self._load_devices(device_dir=LOCAL_DEVICES_DIR) + self._load_devices(device_dir=RESOURCE_DEVICES_DIR) + + def _load_devices(self, device_dir): + LOGGER.debug('Loading devices from ' + device_dir) + + os.makedirs(device_dir, exist_ok=True) + + for device_folder in os.listdir(device_dir): + with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), + encoding='utf-8') as device_config_file: + device_config_json = json.load(device_config_file) + + device_make = device_config_json.get(DEVICE_MAKE) + device_model = device_config_json.get(DEVICE_MODEL) + mac_addr = device_config_json.get(DEVICE_MAC_ADDR) + test_modules = device_config_json.get(DEVICE_TEST_MODULES) + + device = Device(make=device_make, + model=device_model, + mac_addr=mac_addr, + test_modules=json.dumps(test_modules)) + self._devices.append(device) + + def get_device(self, mac_addr): + """Returns a loaded device object from the device mac address.""" + for device in self._devices: + if device.mac_addr == mac_addr: + return device + + def _device_discovered(self, mac_addr): + device = self.get_device(mac_addr) + if device is not None: + LOGGER.info( + f'Discovered {device.make} {device.model} on the network') + else: + device = Device(mac_addr=mac_addr) + self._devices.append(device) + LOGGER.info( + f'A new device has been discovered with mac address {mac_addr}') + + def _device_stable(self, mac_addr): + device = self.get_device(mac_addr) + LOGGER.info(f'Device with mac address {mac_addr} is ready for testing.') + self._test_orc.run_test_modules(device) diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 2950f97fb..3b3f92e64 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 -import binascii import getpass import ipaddress import json import os -from scapy.all import BOOTP +from scapy.all import sniff, wrpcap, BOOTP import shutil import subprocess import sys @@ -24,7 +23,10 @@ LOGGER = logger.get_logger("net_orc") CONFIG_FILE = "conf/system.json" EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME_DIR = "runtime/network" +RUNTIME_DIR = "runtime" +DEVICES_DIR = "devices" +MONITOR_PCAP = "monitor.pcap" +NET_DIR = "runtime/network" NETWORK_MODULES_DIR = "network/modules" NETWORK_MODULE_METADATA = "conf/module_config.json" DEVICE_BRIDGE = "tr-d" @@ -41,7 +43,6 @@ RUNTIME = 1500 - class NetworkOrchestrator: """Manage and controls a virtual testing network.""" @@ -56,22 +57,17 @@ def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, self._single_intf = single_intf self.listener = None - self._net_modules = [] - + self._devices = [] self.validate = validate - self.async_monitor = async_monitor self._path = os.path.dirname(os.path.dirname( os.path.dirname(os.path.realpath(__file__)))) self.validator = NetworkValidator() - - shutil.rmtree(os.path.join(os.getcwd(), RUNTIME_DIR), ignore_errors=True) - + shutil.rmtree(os.path.join(os.getcwd(), NET_DIR), ignore_errors=True) self.network_config = NetworkConfig() - self.load_config(config_file) def start(self): @@ -154,6 +150,7 @@ def _device_discovered(self, mac_addr): LOGGER.debug(f'Discovered device {mac_addr}. Waiting for device to obtain IP') device = self._get_device(mac_addr=mac_addr) + os.makedirs(os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', ''))) timeout = time.time() + self._startup_timeout @@ -180,15 +177,15 @@ def _start_device_monitor(self, device): """Start a timer until the steady state has been reached and callback the steady state method for this device.""" LOGGER.info(f"Monitoring device with mac addr {device.mac_addr} for {str(self._monitor_period)} seconds") - timer = Timer(self._monitor_period, - self.listener.call_callback, - args=(NetworkEvent.DEVICE_STABLE, device.mac_addr,)) - timer.start() + packet_capture = sniff(iface=self._dev_intf, timeout=self._monitor_period) + wrpcap(os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(":",""), 'monitor.pcap'), packet_capture) + self.listener.call_callback(NetworkEvent.DEVICE_STABLE, device.mac_addr) def _get_device(self, mac_addr): for device in self._devices: if device.mac_addr == mac_addr: return device + device = NetworkDevice(mac_addr=mac_addr) self._devices.append(device) return device @@ -504,7 +501,7 @@ def stop_networking_services(self, kill=False): def start_network_services(self): LOGGER.info("Starting network services") - os.makedirs(os.path.join(os.getcwd(), RUNTIME_DIR), exist_ok=True) + os.makedirs(os.path.join(os.getcwd(), NET_DIR), exist_ok=True) for net_module in self._net_modules: @@ -525,11 +522,11 @@ def _attach_test_module_to_network(self, test_module): LOGGER.debug("Attaching test module " + test_module.display_name + " to device bridge") - # Device bridge interface example: tr-di-baseline-test (Test Run Device Interface for baseline test container) - bridge_intf = DEVICE_BRIDGE + "i-" + test_module.dir_name + "-test" + # Device bridge interface example: tr-d-t-baseline (Test Run Device Interface for Test container) + bridge_intf = DEVICE_BRIDGE + "-t-" + test_module.dir_name - # Container interface example: tr-cti-baseline-test (Test Run Container Interface for baseline test container) - container_intf = "tr-test-" + test_module.dir_name + # Container interface example: tr-cti-baseline-test (Test Run Test Container Interface for test container) + container_intf = "tr-tci-" + test_module.dir_name # Container network namespace name container_net_ns = "tr-test-" + test_module.dir_name diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 9a348faa7..522a048f4 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -47,9 +47,11 @@ def _get_device_tests(self, device_test_module): return module_tests def _get_device_test_module(self): - test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) - if self._module_name in test_modules: - return test_modules[self._module_name] + # TODO: Make DEVICE_TEST_MODULES a static string + if 'DEVICE_TEST_MODULES' in os.environ: + test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) + if self._module_name in test_modules: + return test_modules[self._module_name] return None def run_tests(self): diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 08c855d9a..48a0cb32d 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -57,8 +57,8 @@ def _run_test_module(self, module, device): return LOGGER.info("Running test module " + module.name) - try: + try: container_runtime_dir = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":","") + "/" + module.name) network_runtime_dir = os.path.join( @@ -103,7 +103,7 @@ def _run_test_module(self, module, device): # Mount the test container to the virtual network if requried if module.network: - LOGGER.info("Mounting test module to the network") + LOGGER.debug("Attaching test module to the network") self._net_orc._attach_test_module_to_network(module) # Determine the module timeout time From 7b27e23debbe9c159fe3be3011a93628f1a361b7 Mon Sep 17 00:00:00 2001 From: jhughesbiot Date: Wed, 17 May 2023 12:32:07 -0600 Subject: [PATCH 14/48] Remove unecessary files --- net_orc/LICENSE | 201 --------------------------- net_orc/README.md | 66 --------- net_orc/conf/.gitignore | 1 - net_orc/conf/network/radius/ca.crt | 26 ---- net_orc/conf/system.json.example | 7 - net_orc/python/src/network_runner.py | 69 --------- 6 files changed, 370 deletions(-) delete mode 100644 net_orc/LICENSE delete mode 100644 net_orc/README.md delete mode 100644 net_orc/conf/.gitignore delete mode 100644 net_orc/conf/network/radius/ca.crt delete mode 100644 net_orc/conf/system.json.example delete mode 100644 net_orc/python/src/network_runner.py diff --git a/net_orc/LICENSE b/net_orc/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/net_orc/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/net_orc/README.md b/net_orc/README.md deleted file mode 100644 index 9cb1eec1a..000000000 --- a/net_orc/README.md +++ /dev/null @@ -1,66 +0,0 @@ -Testrun logo - -## Introduction :wave: -The network orchestrator is a tool to automate the management of a test lab network and provide essential services to begin device testing in just a few minutes. - -## Motivation :bulb: -Test labs may be maintaining a large and complex network using equipment such as: A managed layer 3 switch, an enterprise-grade network router, virtualized or physical servers to provide DNS, NTP, 802.1x etc. With this amount of moving parts, all with dynamic configuration files and constant software updates, more time is likely to be spent on preparation and clean up of functinality or penetration testing - not forgetting the number of software tools required to perform the testing. - -## How it works :triangular_ruler: -The network orchestrator creates an isolated and controlled network environment to fully simulate enterprise network deployments in your device testing lab. -This removes the necessity for complex hardware, advanced knowledge and networking experience whilst enabling semi-technical engineers to validate device -behaviour against industry cyber standards. - -The network orchestrator will provide the network and some tools to assist an engineer performing the additional testing. At the same time, packet captures of the device behaviour will be recorded, alongside logs for each network service, for further debugging. - -## Minimum Requirements :computer: -### Hardware - - PC running Ubuntu LTS (laptop or desktop) - - 2x USB ethernet adapter (One may be built in ethernet) - - Connect one adapter to your router (for internet access) - - Connect one adapter to your device under test - - Internet connection -### Software - - Python 3 with pip3 (Already available on Ubuntu LTS) - - Docker - [Install guide](https://docs.docker.com/engine/install/ubuntu/) - - Open vSwitch ``sudo apt-get install openvswitch-common openvswitch-switch`` - -An additional network interface (even wifi) with internet access can be used to maintain internet connection during use of the network orchestrator. - -## How to use :arrow_forward: -1) Ensure you have a device with the minimum hardware and software requirements setup -2) Clone the project using ```git clone https://github.com/auto-iot/network-orchestrator``` -3) Navigate into the project using ```cd network-orchestrator``` -4) Copy conf/system.json.example to conf/system.json (after setting the correct interfaces in the file) -5) Start the tool using ```sudo cmd/start``` - -## Issue reporting :triangular_flag_on_post: -If the application has come across a problem at any point during setup or use, please raise an issue under the [issues tab](https://github.com/auto-iot/network-orchestrator/issues). Issue templates exist for both bug reports and feature requests. If neither of these are appropriate for your issue, raise a blank issue instead. - -## Roadmap :chart_with_upwards_trend: - - Ability to modify configuration files of each network service during use (via GRPC) - - IPv6 internet routing - -## Contributing :keyboard: -The contributing requirements can be found in [CONTRIBUTING.md](CONTRIBUTING.md). In short, checkout the [Google CLA](https://cla.developers.google.com/) site to get started. - -## FAQ :raising_hand: -1) What services are provided on the virtual network? - - The following are network services that are containerized and accessible to the device under test though are likely to change over time: - - DHCP in failover configuration with internet connectivity - - IPv6 router advertisements - - DNS (and DNS over HTTPS) - - NTPv4 - - 802.1x Port Based Authentication - -2) Can I run the network orchestrator on a virtual machine? - - Probably. Provided that the required 2x USB ethernet adapters are passed to the virtual machine as USB devices rather than network adapters, the tool should - still work. We will look to test and approve the use of virtualisation in the future. - -3) Can I connect multiple devices to the Network Orchestrator? - - In short, Yes you can. The way in which multiple devices could be tested simultaneously is yet to be decided. However, if you simply want to add field/peer devices during runtime (even another laptop performing manual testing) then you may connect the USB ethernet adapter to an unmanaged switch. - -4) Raise an issue with the label 'question' if your question has not been answered in this readme. \ No newline at end of file diff --git a/net_orc/conf/.gitignore b/net_orc/conf/.gitignore deleted file mode 100644 index 41b89ceb1..000000000 --- a/net_orc/conf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -system.json \ No newline at end of file diff --git a/net_orc/conf/network/radius/ca.crt b/net_orc/conf/network/radius/ca.crt deleted file mode 100644 index d009cb1ab..000000000 --- a/net_orc/conf/network/radius/ca.crt +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEYTCCA0mgAwIBAgIUQJ4F8hBCnCp7ASPZqG/tNQgoUR4wDQYJKoZIhvcNAQEL -BQAwgb8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBIbWzN+TGVpY2VzdGVyc2hpcmUx -FTATBgNVBAcMDExvdWdoYm9yb3VnaDEUMBIGA1UECgwLRm9yZXN0IFJvY2sxDjAM -BgNVBAsMBUN5YmVyMR8wHQYDVQQDDBZjeWJlci5mb3Jlc3Ryb2NrLmNvLnVrMTUw -MwYJKoZIhvcNAQkBFiZjeWJlcnNlY3VyaXR5LnRlc3RpbmdAZm9yZXN0cm9jay5j -by51azAeFw0yMjAzMDQxMjEzMTBaFw0yNzAzMDMxMjEzMTBaMIG/MQswCQYDVQQG -EwJHQjEbMBkGA1UECAwSG1szfkxlaWNlc3RlcnNoaXJlMRUwEwYDVQQHDAxMb3Vn -aGJvcm91Z2gxFDASBgNVBAoMC0ZvcmVzdCBSb2NrMQ4wDAYDVQQLDAVDeWJlcjEf -MB0GA1UEAwwWY3liZXIuZm9yZXN0cm9jay5jby51azE1MDMGCSqGSIb3DQEJARYm -Y3liZXJzZWN1cml0eS50ZXN0aW5nQGZvcmVzdHJvY2suY28udWswggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDNz3vJiZ5nX8lohEhqXvxEme3srip8qF7 -r5ScIeQzsTKuPNAmoefx9TcU3SyA2BnREuDX+OCYMN62xxWG2PndOl0LNezAY22C -PJwHbaBntLKY/ZhxYSTyratM7zxKSVLtClamA/bJXBhdfZZKYOP3xlZQEQTygtzK -j5hZwDrpDARtjRZIMWPLqVcoaW9ow2urJVsdD4lYAhpQU2UIgiWo7BG3hJsUfcYX -EQyyrMKJ7xaCwzIU7Sem1PETrzeiWg4KhDijc7A0RMPWlU5ljf0CnY/IZwiDsMRl -hGmGBPvR+ddiWPZPtSKj6TPWpsaMUR9UwncLmSSrhf1otX4Mw0vbAgMBAAGjUzBR -MB0GA1UdDgQWBBR0Qxx2mDTPIfpnzO5YtycGs6t8ijAfBgNVHSMEGDAWgBR0Qxx2 -mDTPIfpnzO5YtycGs6t8ijAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQCpTMBMZGXF74WCxrIk23MUsu0OKzMs8B16Wy8BHz+7hInLZwbkx71Z0TP5 -rsMITetSANtM/k4jH7Vmr1xmzU7oSz5zKU1+7rIjKjGtih48WZdJay0uqfKe0K2s -vsRS0LVLY6IiTFWK9YrLC0QFSK7z5GDl1oc/D5yIZAkbsL6PRQJ5RQsYf5BhHfyB -PRV/KcF7c9iKVYW2vILJzbyYLHTDADTHbtfCe5+pAGxagswDjSMVkQu5iJNjbtUO -5iv7PRkgzUFru9Kk6q+LrXbzyPPCwlc3Xbh1q5jSkJLkcV3K26E7+uX5HI+Hxpeh -a8kOsdnw+N8wX6bc7eXIaGBDMine ------END CERTIFICATE----- diff --git a/net_orc/conf/system.json.example b/net_orc/conf/system.json.example deleted file mode 100644 index 77c981394..000000000 --- a/net_orc/conf/system.json.example +++ /dev/null @@ -1,7 +0,0 @@ -{ - "network": { - "device_intf": "enx207bd2620617", - "internet_intf": "enx207bd26205e9" - }, - "log_level": "INFO" -} \ No newline at end of file diff --git a/net_orc/python/src/network_runner.py b/net_orc/python/src/network_runner.py deleted file mode 100644 index 0b7573fb3..000000000 --- a/net_orc/python/src/network_runner.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 - -"""Wrapper for the NetworkOrchestrator that simplifies -virtual network start process by allowing direct calling -from the command line. - -Run using the provided command scripts in the cmd folder. -E.g sudo cmd/start -""" - -import argparse -import signal -import sys -import logger -from network_orchestrator import NetworkOrchestrator - -LOGGER = logger.get_logger("net_runner") - -class NetworkRunner: - """Entry point to the Network Orchestrator.""" - - def __init__(self, config_file=None, validate=True, async_monitor=False): - self._monitor_thread = None - self._register_exits() - self.net_orc = NetworkOrchestrator(config_file=config_file, - validate=validate, - async_monitor=async_monitor) - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - # Kill all container services quickly - # If we're here, we want everything to stop immediately - # and don't care about a graceful shutdown - self.stop(True) - sys.exit(1) - - def stop(self, kill=False): - self.net_orc.stop(kill) - - def start(self): - self.net_orc.start() - -def parse_args(): - parser = argparse.ArgumentParser(description="Test Run Help", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--no-validate", action="store_true", - help="Turn off the validation of the network after network boot") - parser.add_argument("-f", "--config-file", default=None, - help="Define the configuration file for the Network Orchestrator") - parser.add_argument("-d", "--daemon", action="store_true", - help="Run the network monitor process in the background as a daemon thread") - - args = parser.parse_known_args()[0] - return args - -if __name__ == "__main__": - arguments = parse_args() - runner = NetworkRunner(config_file=arguments.config_file, - validate=not arguments.no_validate, - async_monitor=arguments.daemon) - runner.start() From 5ac87269dd5b9f3afd7b46af80e0e98a0e405d5f Mon Sep 17 00:00:00 2001 From: jhughesbiot Date: Wed, 17 May 2023 13:07:53 -0600 Subject: [PATCH 15/48] Cleanup duplicate properties --- framework/device.py | 1 - 1 file changed, 1 deletion(-) diff --git a/framework/device.py b/framework/device.py index 80cfb9c9c..eef275d54 100644 --- a/framework/device.py +++ b/framework/device.py @@ -10,5 +10,4 @@ class Device(NetworkDevice): make: str = None model: str = None - mac_addr: str test_modules: str = None From 2c4efe86b384ebd40cdd896b4dd6f556e55968c1 Mon Sep 17 00:00:00 2001 From: jhughesbiot Date: Wed, 17 May 2023 13:13:19 -0600 Subject: [PATCH 16/48] Cleanup install script --- cmd/install | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/install b/cmd/install index f5af3a5d3..23e463158 100755 --- a/cmd/install +++ b/cmd/install @@ -4,8 +4,6 @@ python3 -m venv venv source venv/bin/activate -pip3 install --upgrade requests - pip3 install -r framework/requirements.txt pip3 install -r net_orc/python/requirements.txt From 25fd8a5bffc5deb19d0a174a76aaa251f2a5a4ef Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Mon, 22 May 2023 07:51:31 -0700 Subject: [PATCH 17/48] Formatting (#26) * Fix pylint issues in net orc * more pylint fixes * fix listener lint issues * fix logger lint issues * fix validator lint issues * fix util lint issues * Update base network module linting issues * Cleanup linter issues for dhcp modules Remove old code testing code * change to single quote delimeter * Cleanup linter issues for ntp module * Cleanup linter issues for radius module * Cleanup linter issues for template module * fix linter issues with faux-dev --- .../devices/faux-dev/python/src/dhcp_check.py | 136 +- .../devices/faux-dev/python/src/dns_check.py | 170 +-- .../faux-dev/python/src/gateway_check.py | 66 +- .../devices/faux-dev/python/src/logger.py | 50 +- .../devices/faux-dev/python/src/ntp_check.py | 118 +- .../devices/faux-dev/python/src/run.py | 205 +-- .../devices/faux-dev/python/src/util.py | 30 +- .../base/python/src/grpc/start_server.py | 43 +- .../network/modules/base/python/src/logger.py | 61 +- .../dhcp-1/python/src/grpc/dhcp_config.py | 480 +++--- .../dhcp-1/python/src/grpc/network_service.py | 60 +- .../network/modules/dhcp-1/python/src/run.py | 40 - .../dhcp-2/python/src/grpc/dhcp_config.py | 480 +++--- .../dhcp-2/python/src/grpc/network_service.py | 60 +- .../network/modules/dhcp-2/python/src/run.py | 40 - .../modules/ntp/python/src/ntp_server.py | 461 +++--- .../radius/python/src/authenticator.py | 64 +- .../template/python/src/template_main.py | 2 +- net_orc/python/src/listener.py | 21 +- net_orc/python/src/logger.py | 28 +- net_orc/python/src/network_device.py | 1 + net_orc/python/src/network_event.py | 1 + net_orc/python/src/network_orchestrator.py | 1360 +++++++++-------- net_orc/python/src/network_validator.py | 511 +++---- net_orc/python/src/run_validator.py | 52 - net_orc/python/src/util.py | 18 +- 26 files changed, 2272 insertions(+), 2286 deletions(-) delete mode 100644 net_orc/network/modules/dhcp-1/python/src/run.py delete mode 100644 net_orc/network/modules/dhcp-2/python/src/run.py delete mode 100644 net_orc/python/src/run_validator.py diff --git a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py index ab7defc39..82dd6e31f 100644 --- a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py +++ b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py @@ -1,85 +1,87 @@ -#!/usr/bin/env python3 +"""Used to check if the DHCP server is functioning as expected""" import time import logger LOGGER = None -LOG_NAME = "dhcp_validator" -DHCP_LEASE_FILE = "/var/lib/dhcp/dhclient.leases" -IP_ADDRESS_KEY = "fixed-address" -DNS_OPTION_KEY = "option domain-name-servers" -GATEWAY_OPTION_KEY = "option routers" -NTP_OPTION_KEY = "option ntp-servers" +LOG_NAME = 'dhcp_validator' +DHCP_LEASE_FILE = '/var/lib/dhcp/dhclient.leases' +IP_ADDRESS_KEY = 'fixed-address' +DNS_OPTION_KEY = 'option domain-name-servers' +GATEWAY_OPTION_KEY = 'option routers' +NTP_OPTION_KEY = 'option ntp-servers' class DHCPValidator: - def __init__(self, module): - self._dhcp_lease = None - self.dhcp_lease_test = False - self.add_logger(module) + """Validates all expected test behaviors around the DHCP server""" - def add_logger(self, module): - global LOGGER - LOGGER = logger.get_logger(LOG_NAME, module) + def __init__(self, module): + self._dhcp_lease = None + self.dhcp_lease_test = False + self.add_logger(module) - def print_test_results(self): - self.print_test_result("DHCP lease test", self.dhcp_lease_test) + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) - def print_test_result(self, test_name, result): - LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + def print_test_results(self): + self.print_test_result('DHCP lease test', self.dhcp_lease_test) - def get_dhcp_lease(self): - """Returns the current DHCP lease.""" - return self._dhcp_lease + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ': Pass' if result else test_name + ': Fail') - def validate(self): - self._resolve_dhcp_lease() - LOGGER.info("IP Addr: " + self._dhcp_lease.ip_addr) - LOGGER.info("Gateway: " + self._dhcp_lease.gateway) - LOGGER.info("DNS Server: " + self._dhcp_lease.dns_server) - LOGGER.info("NTP Server: " + self._dhcp_lease.ntp_server) + def get_dhcp_lease(self): + """Returns the current DHCP lease.""" + return self._dhcp_lease - def _resolve_dhcp_lease(self): - LOGGER.info("Resolving DHCP lease...") - while self._dhcp_lease is None: - time.sleep(5) - try: - lease_file = open(DHCP_LEASE_FILE) - lines = lease_file.read() - LOGGER.debug("Lease file:\n" + lines) - leases = lines.split("lease ") - # Last lease is the current lease - cur_lease = leases[-1] - if cur_lease is not None: - LOGGER.debug("Current lease: " + cur_lease) - self._dhcp_lease = DHCPLease() - self.dhcp_lease_test = True - # Iterate over entire lease and pick the parts we care about - lease_parts = cur_lease.split("\n") - for part in lease_parts: - part_clean = part.strip() - if part_clean.startswith(IP_ADDRESS_KEY): - self._dhcp_lease.ip_addr = part_clean[len( - IP_ADDRESS_KEY):-1].strip() - elif part_clean.startswith(DNS_OPTION_KEY): - self._dhcp_lease.dns_server = part_clean[len( - DNS_OPTION_KEY):-1].strip() - elif part_clean.startswith(GATEWAY_OPTION_KEY): - self._dhcp_lease.gateway = part_clean[len( - GATEWAY_OPTION_KEY):-1].strip() - elif part_clean.startswith(NTP_OPTION_KEY): - self._dhcp_lease.ntp_server = part_clean[len( - NTP_OPTION_KEY):-1].strip() - except Exception: - LOGGER.error("DHCP Resolved Error") - LOGGER.info("DHCP lease resolved") + def validate(self): + self._resolve_dhcp_lease() + LOGGER.info('IP Addr: ' + self._dhcp_lease.ip_addr) + LOGGER.info('Gateway: ' + self._dhcp_lease.gateway) + LOGGER.info('DNS Server: ' + self._dhcp_lease.dns_server) + LOGGER.info('NTP Server: ' + self._dhcp_lease.ntp_server) + + def _resolve_dhcp_lease(self): + LOGGER.info('Resolving DHCP lease...') + while self._dhcp_lease is None: + time.sleep(5) + try: + with open(DHCP_LEASE_FILE, 'r', encoding='UTF-8') as lease_file: + lines = lease_file.read() + LOGGER.debug('Lease file:\n' + lines) + leases = lines.split('lease ') + # Last lease is the current lease + cur_lease = leases[-1] + if cur_lease is not None: + LOGGER.debug('Current lease: ' + cur_lease) + self._dhcp_lease = DHCPLease() + self.dhcp_lease_test = True + # Iterate over entire lease and pick the parts we care about + lease_parts = cur_lease.split('\n') + for part in lease_parts: + part_clean = part.strip() + if part_clean.startswith(IP_ADDRESS_KEY): + self._dhcp_lease.ip_addr = part_clean[len(IP_ADDRESS_KEY + ):-1].strip() + elif part_clean.startswith(DNS_OPTION_KEY): + self._dhcp_lease.dns_server = part_clean[len(DNS_OPTION_KEY + ):-1].strip() + elif part_clean.startswith(GATEWAY_OPTION_KEY): + self._dhcp_lease.gateway = part_clean[len(GATEWAY_OPTION_KEY + ):-1].strip() + elif part_clean.startswith(NTP_OPTION_KEY): + self._dhcp_lease.ntp_server = part_clean[len(NTP_OPTION_KEY + ):-1].strip() + except Exception: # pylint: disable=broad-exception-caught + LOGGER.error('DHCP Resolved Error') + LOGGER.info('DHCP lease resolved') class DHCPLease: - """Stores information about a device's DHCP lease.""" + """Stores information about a device's DHCP lease.""" - def __init__(self): - self.ip_addr = None - self.gateway = None - self.dns_server = None - self.ntp_server = None + def __init__(self): + self.ip_addr = None + self.gateway = None + self.dns_server = None + self.ntp_server = None diff --git a/net_orc/network/devices/faux-dev/python/src/dns_check.py b/net_orc/network/devices/faux-dev/python/src/dns_check.py index d3d709d6e..73a72e8c8 100644 --- a/net_orc/network/devices/faux-dev/python/src/dns_check.py +++ b/net_orc/network/devices/faux-dev/python/src/dns_check.py @@ -1,109 +1,103 @@ -#!/usr/bin/env python3 +"""Used to check if the DNS server is functioning as expected""" import logger import time import util import subprocess -from dhcp_check import DHCPLease - LOGGER = None -LOG_NAME = "dns_validator" -HOST_PING = "google.com" -CAPTURE_FILE = "/runtime/network/faux-dev.pcap" -DNS_CONFIG_FILE = "/etc/resolv.conf" +LOG_NAME = 'dns_validator' +HOST_PING = 'google.com' +CAPTURE_FILE = '/runtime/network/faux-dev.pcap' +DNS_CONFIG_FILE = '/etc/resolv.conf' class DNSValidator: - - def __init__(self, module): - self._dns_server = None - self._dns_resolution_test = False - self._dns_dhcp_server_test = False - self.add_logger(module) - - def add_logger(self, module): - global LOGGER - LOGGER = logger.get_logger(LOG_NAME, module) - - def print_test_results(self): - self.print_test_result( - "DNS resolution test", self._dns_resolution_test) - self.print_test_result( - "DNS DHCP server test", self._dns_dhcp_server_test) - - def print_test_result(self, test_name, result): - LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") - - def validate(self, dhcp_lease): - self._dns_server = dhcp_lease.dns_server - self._set_dns_server() - self._check_dns_traffic() - - def _check_dns_traffic(self): - LOGGER.info("Checking DNS traffic for DNS server: " + self._dns_server) - - # Ping a host to generate DNS traffic - if self._ping(HOST_PING)[0]: - LOGGER.info("Ping success") - self._dns_resolution_test = True - else: - LOGGER.info("Ping failed") - - # Some delay between pings and DNS traffic in the capture file - # so give some delay before we try to query again - time.sleep(5) - - # Check if the device has sent any DNS requests - filter_to_dns = 'dst port 53 and dst host {}'.format( - self._dns_server) - to_dns = self._exec_tcpdump(filter_to_dns) - num_query_dns = len(to_dns) - LOGGER.info("DNS queries found: " + str(num_query_dns)) - dns_traffic_detected = len(to_dns) > 0 - if dns_traffic_detected: - LOGGER.info("DNS traffic detected to configured DHCP DNS server") - self._dns_dhcp_server_test = True - else: - LOGGER.error("No DNS traffic detected") - - # Docker containeres resolve DNS servers from the host - # and do not play nice with normal networking methods - # so we need to set our DNS servers manually - def _set_dns_server(self): - f = open(DNS_CONFIG_FILE, "w", encoding="utf-8") - f.write("nameserver " + self._dns_server) - f.close() - - # Generate DNS traffic by doing a simple ping by hostname - def _ping(self, host): - cmd = "ping -c 5 " + host - success = util.run_command(cmd, LOGGER) - return success - - def _exec_tcpdump(self, tcpdump_filter): - """ + """Validates all expected test behaviors around the DNS server""" + + def __init__(self, module): + self._dns_server = None + self.dns_resolution_test = False + self.dns_dhcp_server_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result('DNS resolution test', self.dns_resolution_test) + self.print_test_result('DNS DHCP server test', self.dns_dhcp_server_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ': Pass' if result else test_name + ': Fail') + + def validate(self, dhcp_lease): + self._dns_server = dhcp_lease.dns_server + self._set_dns_server() + self._check_dns_traffic() + + def _check_dns_traffic(self): + LOGGER.info('Checking DNS traffic for DNS server: ' + self._dns_server) + + # Ping a host to generate DNS traffic + if self._ping(HOST_PING)[0]: + LOGGER.info('Ping success') + self.dns_resolution_test = True + else: + LOGGER.info('Ping failed') + + # Some delay between pings and DNS traffic in the capture file + # so give some delay before we try to query again + time.sleep(5) + + # Check if the device has sent any DNS requests + filter_to_dns = f'dst port 53 and dst host {self._dns_server}' + to_dns = self._exec_tcpdump(filter_to_dns) + num_query_dns = len(to_dns) + LOGGER.info('DNS queries found: ' + str(num_query_dns)) + dns_traffic_detected = len(to_dns) > 0 + if dns_traffic_detected: + LOGGER.info('DNS traffic detected to configured DHCP DNS server') + self.dns_dhcp_server_test = True + else: + LOGGER.error('No DNS traffic detected') + + # Docker containeres resolve DNS servers from the host + # and do not play nice with normal networking methods + # so we need to set our DNS servers manually + def _set_dns_server(self): + with open(DNS_CONFIG_FILE, 'w', encoding='utf-8') as f: + f.write('nameserver ' + self._dns_server) + + # Generate DNS traffic by doing a simple ping by hostname + def _ping(self, host): + cmd = 'ping -c 5 ' + host + success = util.run_command(cmd, LOGGER) + return success + + def _exec_tcpdump(self, tcpdump_filter): + """ Args tcpdump_filter: Filter to pass onto tcpdump file capture_file: Optional capture file to look Returns List of packets matching the filter """ - command = 'tcpdump -tttt -n -r {} {}'.format( - CAPTURE_FILE, tcpdump_filter) + command = f'tcpdump -tttt -n -r {CAPTURE_FILE} {tcpdump_filter}' - LOGGER.debug("tcpdump command: " + command) + LOGGER.debug('tcpdump command: ' + command) - process = subprocess.Popen(command, - universal_newlines=True, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - text = str(process.stdout.read()).rstrip() + process = subprocess.Popen(command, + universal_newlines=True, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + text = str(process.stdout.read()).rstrip() - LOGGER.debug("tcpdump response: " + text) + LOGGER.debug('tcpdump response: ' + text) - if text: - return text.split("\n") + if text: + return text.split('\n') - return [] \ No newline at end of file + return [] diff --git a/net_orc/network/devices/faux-dev/python/src/gateway_check.py b/net_orc/network/devices/faux-dev/python/src/gateway_check.py index 17457874a..85fe35db0 100644 --- a/net_orc/network/devices/faux-dev/python/src/gateway_check.py +++ b/net_orc/network/devices/faux-dev/python/src/gateway_check.py @@ -1,40 +1,40 @@ +"""Used to check if the Gateway server is functioning as expected""" + import logger import util -from dhcp_check import DHCPLease - LOGGER = None -LOG_NAME = "gateway_validator" +LOG_NAME = 'gateway_validator' class GatewayValidator: - - def __init__(self, module): - self._gateway = None - self._default_gateway_test = False - self.add_logger(module) - - def add_logger(self, module): - global LOGGER - LOGGER = logger.get_logger(LOG_NAME, module) - - def print_test_results(self): - self.print_test_result("Default gateway test", - self._default_gateway_test) - - def print_test_result(self, test_name, result): - LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") - - - def validate(self, dhcp_lease): - self._gateway = dhcp_lease.gateway - self.check_default_gateway() - - def check_default_gateway(self): - LOGGER.info( - "Checking default gateway matches DHCP gateway: " + self._gateway) - cmd = "/testrun/bin/get_default_gateway" - success, default_gateway, stderr = util.run_command(cmd, LOGGER) - LOGGER.info("Default gateway resolved: " + default_gateway) - if default_gateway == self._gateway: - self._default_gateway_test = True \ No newline at end of file + """Validates all expected test behaviors around the Gateway server""" + + def __init__(self, module): + self._gateway = None + self.default_gateway_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result('Default gateway test', self.default_gateway_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ': Pass' if result else test_name + ': Fail') + + def validate(self, dhcp_lease): + self._gateway = dhcp_lease.gateway + self.check_default_gateway() + + def check_default_gateway(self): + LOGGER.info('Checking default gateway matches DHCP gateway: ' + + self._gateway) + cmd = '/testrun/bin/get_default_gateway' + success, default_gateway = util.run_command(cmd, LOGGER) + if success: + LOGGER.info('Default gateway resolved: ' + default_gateway) + if default_gateway == self._gateway: + self.default_gateway_test = True diff --git a/net_orc/network/devices/faux-dev/python/src/logger.py b/net_orc/network/devices/faux-dev/python/src/logger.py index bf692c85e..97d7f935a 100644 --- a/net_orc/network/devices/faux-dev/python/src/logger.py +++ b/net_orc/network/devices/faux-dev/python/src/logger.py @@ -1,43 +1,47 @@ -#!/usr/bin/env python3 +"""Sets up the logger to be used for the faux-device.""" import json import logging import os LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' -_CONF_DIR = "conf" -_CONF_FILE_NAME = "system.json" -_LOG_DIR = "/runtime/validation" +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = '/runtime/validation' # Set log level -with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8') as conf_file: - system_conf_json = json.load(conf_file) +with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='utf-8') as conf_file: + system_conf_json = json.load(conf_file) log_level_str = system_conf_json['log_level'] log_level = logging.getLevelName(log_level_str) log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + def add_file_handler(log, log_file): - """Add file handler to existing log.""" - handler = logging.FileHandler(os.path.join(_LOG_DIR, log_file + ".log")) - handler.setFormatter(log_format) - log.addHandler(handler) + """Add file handler to existing log.""" + handler = logging.FileHandler(os.path.join(_LOG_DIR, log_file + '.log')) + handler.setFormatter(log_format) + log.addHandler(handler) + def add_stream_handler(log): - """Add stream handler to existing log.""" - handler = logging.StreamHandler() - handler.setFormatter(log_format) - log.addHandler(handler) + """Add stream handler to existing log.""" + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + def get_logger(name, log_file=None): - """Return logger for requesting class.""" - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - LOGGERS[name].setLevel(log_level) - add_stream_handler(LOGGERS[name]) - if log_file is not None: - add_file_handler(LOGGERS[name], log_file) - return LOGGERS[name] + """Return logger for requesting class.""" + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/net_orc/network/devices/faux-dev/python/src/ntp_check.py b/net_orc/network/devices/faux-dev/python/src/ntp_check.py index a50bf337e..ceef164c6 100644 --- a/net_orc/network/devices/faux-dev/python/src/ntp_check.py +++ b/net_orc/network/devices/faux-dev/python/src/ntp_check.py @@ -1,3 +1,4 @@ +"""Used to check if the NTP server is functioning as expected""" import time import logger import util @@ -8,72 +9,71 @@ class NTPValidator: - """Perform testing of the NTP server.""" + """Perform testing of the NTP server.""" - def __init__(self, module): - self._ntp_server = None - self._ntp_sync_test = False - self.add_logger(module) + def __init__(self, module): + self._ntp_server = None + self.ntp_sync_test = False + self.add_logger(module) - def add_logger(self, module): - global LOGGER - LOGGER = logger.get_logger(LOG_NAME, module) + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) - def print_test_results(self): - """Print all test results to log.""" - self.print_test_result("NTP sync test", - self._ntp_sync_test) + def print_test_results(self): + """Print all test results to log.""" + self.print_test_result("NTP sync test", self.ntp_sync_test) - def print_test_result(self, test_name, result): - """Output test result to log.""" - LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + def print_test_result(self, test_name, result): + """Output test result to log.""" + LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") - def validate(self, dhcp_lease): - """Call NTP sync test.""" - self._ntp_server = dhcp_lease.ntp_server - self.check_ntp() + def validate(self, dhcp_lease): + """Call NTP sync test.""" + self._ntp_server = dhcp_lease.ntp_server + self.check_ntp() - def check_ntp(self): - """Perform NTP sync test.""" - if self._ntp_server is not None: - attempt = 0 - LOGGER.info(f"Attempting to sync to NTP server: {self._ntp_server}") - LOGGER.info("Attempts allowed: " + str(ATTEMPTS)) + def check_ntp(self): + """Perform NTP sync test.""" + if self._ntp_server is not None: + attempt = 0 + LOGGER.info(f"Attempting to sync to NTP server: {self._ntp_server}") + LOGGER.info("Attempts allowed: " + str(ATTEMPTS)) - # If we don't ping before syncing, this will fail. - while attempt < ATTEMPTS and not self._ntp_sync_test: - attempt += 1 - if self.ping_ntp_server(): - self.sync_ntp() - if not self._ntp_sync_test: - LOGGER.info("Waiting 5 seconds before next attempt") - time.sleep(5) - else: - LOGGER.info("No NTP server available from DHCP lease") + # If we don't ping before syncing, this will fail. + while attempt < ATTEMPTS and not self.ntp_sync_test: + attempt += 1 + if self.ping_ntp_server(): + self.sync_ntp() + if not self.ntp_sync_test: + LOGGER.info("Waiting 5 seconds before next attempt") + time.sleep(5) + else: + LOGGER.info("No NTP server available from DHCP lease") - def sync_ntp(self): - """Send NTP request to server.""" - LOGGER.info("Sending NTP Sync Request to: " + self._ntp_server) - cmd = "ntpdate " + self._ntp_server - ntp_response = util.run_command(cmd, LOGGER)[1] - LOGGER.info("NTP sync response: " + ntp_response) - if "adjust time server " + self._ntp_server in ntp_response: - LOGGER.info("NTP sync succesful") - self._ntp_sync_test = True - else: - LOGGER.info("NTP client failed to sync to server") + def sync_ntp(self): + """Send NTP request to server.""" + LOGGER.info("Sending NTP Sync Request to: " + self._ntp_server) + cmd = "ntpdate " + self._ntp_server + ntp_response = util.run_command(cmd, LOGGER)[1] + LOGGER.info("NTP sync response: " + ntp_response) + if "adjust time server " + self._ntp_server in ntp_response: + LOGGER.info("NTP sync succesful") + self.ntp_sync_test = True + else: + LOGGER.info("NTP client failed to sync to server") - def ping_ntp_server(self): - """Ping NTP server before sending a time request.""" - LOGGER.info("Pinging NTP server before syncing...") - if self.ping(self._ntp_server): - LOGGER.info("NTP server successfully pinged") - return True - LOGGER.info("NTP server did not respond to ping") - return False + def ping_ntp_server(self): + """Ping NTP server before sending a time request.""" + LOGGER.info("Pinging NTP server before syncing...") + if self.ping(self._ntp_server): + LOGGER.info("NTP server successfully pinged") + return True + LOGGER.info("NTP server did not respond to ping") + return False - def ping(self, host): - """Send ping request to host.""" - cmd = "ping -c 1 " + host - success = util.run_command(cmd, LOGGER) - return success + def ping(self, host): + """Send ping request to host.""" + cmd = "ping -c 1 " + host + success = util.run_command(cmd, LOGGER) + return success diff --git a/net_orc/network/devices/faux-dev/python/src/run.py b/net_orc/network/devices/faux-dev/python/src/run.py index 5891b8c4b..062a1a643 100644 --- a/net_orc/network/devices/faux-dev/python/src/run.py +++ b/net_orc/network/devices/faux-dev/python/src/run.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +"""Used to run all the various validator modules for the faux-device""" import argparse import json @@ -15,100 +15,111 @@ RESULTS_DIR = '/runtime/validation/' LOGGER = logger.get_logger('validator') + class FauxDevice: - """Represents a virtual testing device.""" - - def __init__(self, module): - - signal.signal(signal.SIGINT, self._handler) - signal.signal(signal.SIGTERM, self._handler) - signal.signal(signal.SIGABRT, self._handler) - signal.signal(signal.SIGQUIT, self._handler) - - self.dhcp_validator = DHCPValidator(module) - self.dns_validator = DNSValidator(module) - self.gateway_validator = GatewayValidator(module) - self.ntp_validator = NTPValidator(module) - - self._module = module - self.run_tests() - results = self.generate_results() - self.write_results(results) - - def run_tests(self): - """Execute configured network tests.""" - - # Run DHCP tests first since everything hinges on basic DHCP compliance first - self.dhcp_validator.validate() - - dhcp_lease = self.dhcp_validator.get_dhcp_lease() - - # Use current lease from dhcp tests to validate DNS behaviors - self.dns_validator.validate(dhcp_lease) - - # Use current lease from dhcp tests to validate default gateway - self.gateway_validator.validate(dhcp_lease) - - # Use current lease from dhcp tests to validate ntp server - self.ntp_validator.validate(dhcp_lease) - - def print_test_results(self): - """Print test results to log.""" - self.dhcp_validator.print_test_results() - self.dns_validator.print_test_results() - self.gateway_validator.print_test_results() - self.ntp_validator.print_test_results() - - def generate_results(self): - """Transform test results into JSON format.""" - - results = [] - results.append(self.generate_result("dhcp_lease", self.dhcp_validator.dhcp_lease_test)) - results.append(self.generate_result("dns_from_dhcp", self.dns_validator._dns_dhcp_server_test)) - results.append(self.generate_result("dns_resolution", self.dns_validator._dns_resolution_test)) - results.append(self.generate_result("gateway_default", self.gateway_validator._default_gateway_test)) - results.append(self.generate_result("ntp_sync", self.ntp_validator._ntp_sync_test)) - json_results = json.dumps({"results":results}, indent=2) - - return json_results - - def write_results(self, results): - """Write test results to file.""" - results_file = os.path.join(RESULTS_DIR, "result.json") - LOGGER.info("Writing results to " + results_file) - f = open(results_file, "w", encoding="utf-8") - f.write(results) - f.close() - - def generate_result(self, test_name, test_result): - """Return JSON object for test result.""" - if test_result is not None: - result = "compliant" if test_result else "non-compliant" - else: - result = "skipped" - LOGGER.info(test_name + ": " + result) - res_dict = { - "name": test_name, - "result": result - } - return res_dict - - def _handler(self, signum, frame): # pylint: disable=unused-argument - if signum in (2, signal.SIGTERM): - sys.exit(1) - -def run(argv): # pylint: disable=unused-argument - """Run the network validator.""" - parser = argparse.ArgumentParser(description="Faux Device _validator", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-m","--module", - help="Define the module name to be used to create the log file") - - args = parser.parse_args() - - # For some reason passing in the args from bash adds an extra - # space before the argument so we'll just strip out extra space - FauxDevice(args.module.strip()) - -if __name__ == "__main__": - run(sys.argv) + """Represents a virtual testing device.""" + + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + self.dhcp_validator = DHCPValidator(module) + self.dns_validator = DNSValidator(module) + self.gateway_validator = GatewayValidator(module) + self.ntp_validator = NTPValidator(module) + + self._module = module + self.run_tests() + results = self.generate_results() + self.write_results(results) + + def run_tests(self): + """Execute configured network tests.""" + + # Run DHCP tests first since everything hinges + # on basic DHCP compliance first + self.dhcp_validator.validate() + + dhcp_lease = self.dhcp_validator.get_dhcp_lease() + + # Use current lease from dhcp tests to validate DNS behaviors + self.dns_validator.validate(dhcp_lease) + + # Use current lease from dhcp tests to validate default gateway + self.gateway_validator.validate(dhcp_lease) + + # Use current lease from dhcp tests to validate ntp server + self.ntp_validator.validate(dhcp_lease) + + def print_test_results(self): + """Print test results to log.""" + self.dhcp_validator.print_test_results() + self.dns_validator.print_test_results() + self.gateway_validator.print_test_results() + self.ntp_validator.print_test_results() + + def generate_results(self): + """Transform test results into JSON format.""" + + results = [] + results.append( + self.generate_result('dhcp_lease', self.dhcp_validator.dhcp_lease_test)) + results.append( + self.generate_result('dns_from_dhcp', + self.dns_validator.dns_dhcp_server_test)) + results.append( + self.generate_result('dns_resolution', + self.dns_validator.dns_resolution_test)) + results.append( + self.generate_result('gateway_default', + self.gateway_validator.default_gateway_test)) + results.append( + self.generate_result('ntp_sync', self.ntp_validator.ntp_sync_test)) + json_results = json.dumps({'results': results}, indent=2) + + return json_results + + def write_results(self, results): + """Write test results to file.""" + results_file = os.path.join(RESULTS_DIR, 'result.json') + LOGGER.info('Writing results to ' + results_file) + with open(results_file, 'w', encoding='utf-8') as f: + f.write(results) + + def generate_result(self, test_name, test_result): + """Return JSON object for test result.""" + if test_result is not None: + result = 'compliant' if test_result else 'non-compliant' + else: + result = 'skipped' + LOGGER.info(test_name + ': ' + result) + res_dict = {'name': test_name, 'result': result} + return res_dict + + def _handler(self, signum, frame): # pylint: disable=unused-argument + if signum in (2, signal.SIGTERM): + sys.exit(1) + + +def run(argv): # pylint: disable=unused-argument + """Run the network validator.""" + parser = argparse.ArgumentParser( + description='Faux Device _validator', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '-m', + '--module', + help='Define the module name to be used to create the log file') + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + FauxDevice(args.module.strip()) + + +if __name__ == '__main__': + run(sys.argv) diff --git a/net_orc/network/devices/faux-dev/python/src/util.py b/net_orc/network/devices/faux-dev/python/src/util.py index 605af1132..6848206b4 100644 --- a/net_orc/network/devices/faux-dev/python/src/util.py +++ b/net_orc/network/devices/faux-dev/python/src/util.py @@ -1,3 +1,4 @@ +"""Provides basic utilities for the faux-device.""" import subprocess import shlex @@ -10,19 +11,20 @@ def run_command(cmd, logger, output=True): - success = False - process = subprocess.Popen(shlex.split( - cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() + success = False + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() - if process.returncode != 0: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - logger.error("Command Failed: " + cmd) - logger.error("Error: " + err_msg) - else: - success = True + if process.returncode != 0: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + logger.error('Command Failed: ' + cmd) + logger.error('Error: ' + err_msg) + else: + success = True - if output: - return success, stdout.strip().decode('utf-8'), stderr - else: - return success, None, stderr + if output: + return success, stdout.strip().decode('utf-8') + else: + return success, None diff --git a/net_orc/network/modules/base/python/src/grpc/start_server.py b/net_orc/network/modules/base/python/src/grpc/start_server.py index 9ed31ffcf..b4016c831 100644 --- a/net_orc/network/modules/base/python/src/grpc/start_server.py +++ b/net_orc/network/modules/base/python/src/grpc/start_server.py @@ -1,34 +1,37 @@ +"""Base class for starting the gRPC server for a network module.""" from concurrent import futures import grpc import proto.grpc_pb2_grpc as pb2_grpc -import proto.grpc_pb2 as pb2 from network_service import NetworkService -import logging -import sys import argparse DEFAULT_PORT = '5001' -def serve(PORT): - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) - server.add_insecure_port('[::]:' + PORT) - server.start() - server.wait_for_termination() -def run(argv): - parser = argparse.ArgumentParser(description="GRPC Server for Network Module", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-p", "--port", default=DEFAULT_PORT, - help="Define the default port to run the server on.") +def serve(port): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) + server.add_insecure_port('[::]:' + port) + server.start() + server.wait_for_termination() - args = parser.parse_args() - PORT = args.port +def run(): + parser = argparse.ArgumentParser( + description='GRPC Server for Network Module', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-p', + '--port', + default=DEFAULT_PORT, + help='Define the default port to run the server on.') - print("gRPC server starting on port " + PORT) - serve(PORT) + args = parser.parse_args() + port = args.port -if __name__ == "__main__": - run(sys.argv) \ No newline at end of file + print('gRPC server starting on port ' + port) + serve(port) + + +if __name__ == '__main__': + run() diff --git a/net_orc/network/modules/base/python/src/logger.py b/net_orc/network/modules/base/python/src/logger.py index 4924512c6..abec00f69 100644 --- a/net_orc/network/modules/base/python/src/logger.py +++ b/net_orc/network/modules/base/python/src/logger.py @@ -1,47 +1,48 @@ -#!/usr/bin/env python3 - +"""Sets up the logger to be used for the network modules.""" import json import logging import os LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' _DEFAULT_LEVEL = logging.INFO -_CONF_DIR = "conf" -_CONF_FILE_NAME = "system.json" -_LOG_DIR = "/runtime/network/" +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = '/runtime/network/' # Set log level try: - system_conf_json = json.load( - open(os.path.join(_CONF_DIR, _CONF_FILE_NAME))) - log_level_str = system_conf_json['log_level'] - log_level = logging.getLevelName(log_level_str) -except: - # TODO: Print out warning that log level is incorrect or missing - log_level = _DEFAULT_LEVEL + with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='UTF-8') as config_json_file: + system_conf_json = json.load(config_json_file) + + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except OSError: + # TODO: Print out warning that log level is incorrect or missing + LOG_LEVEL = _DEFAULT_LEVEL log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) -def add_file_handler(log, logFile): - handler = logging.FileHandler(_LOG_DIR+logFile+".log") - handler.setFormatter(log_format) - log.addHandler(handler) +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) def add_stream_handler(log): - handler = logging.StreamHandler() - handler.setFormatter(log_format) - log.addHandler(handler) - - -def get_logger(name, logFile=None): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - LOGGERS[name].setLevel(log_level) - add_stream_handler(LOGGERS[name]) - if logFile is not None: - add_file_handler(LOGGERS[name], logFile) - return LOGGERS[name] + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, log_file=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py index f5445ca44..23e1b4047 100644 --- a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py @@ -1,101 +1,106 @@ +"""Contains all the necessary classes to maintain the +DHCP server's configuration""" import re -CONFIG_FILE = "/etc/dhcp/dhcpd.conf" -CONFIG_FILE_TEST = "network/modules/dhcp-1/conf/dhcpd.conf" +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +CONFIG_FILE_TEST = 'network/modules/dhcp-1/conf/dhcpd.conf' -DEFAULT_LEASE_TIME_KEY = "default-lease-time" +DEFAULT_LEASE_TIME_KEY = 'default-lease-time' class DHCPConfig: - - def __init__(self): - self._default_lease_time = 300 - self._subnets = [] - self._peer = None - - def write_config(self): - conf = str(self) - print("Writing config: \n" + conf) - f = open(CONFIG_FILE, "w") - f.write(conf) - - def resolve_config(self): - with open(CONFIG_FILE) as f: - conf = f.read() - self.resolve_subnets(conf) - self.peer = DHCPFailoverPeer(conf) - - def resolve_subnets(self, conf): - self._subnets = [] - regex = r"(subnet.*)" - subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) - for subnet in subnets: - dhcp_subnet = DHCPSubnet(subnet) - self._subnets.append(dhcp_subnet) - - def set_range(self, start, end, subnet=0, pool=0): - print("Setting Range for pool ") - print(self._subnets[subnet]._pools[pool]) - self._subnets[subnet]._pools[pool]._range_start = start - self._subnets[subnet]._pools[pool]._range_end = end - - def resolve_settings(self, conf): - lines = conf.split("\n") - for line in lines: - if DEFAULT_LEASE_TIME_KEY in line: - self._default_lease_time = line.strip().split(DEFAULT_LEASE_TIME_KEY)[ - 1].strip().split(";")[0] - - self.peer = peer - - def __str__(self): - - config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" - - config = config.format(length='multi-line', - DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, DEFAULT_LEASE_TIME=self._default_lease_time - ) - - config += "\n\n"+str(self.peer) - for subnet in self._subnets: - config += "\n\n"+str(subnet) - return str(config) - - -FAILOVER_PEER_KEY = "failover peer" -PRIMARY_KEY = "primary" -ADDRESS_KEY = "address" -PORT_KEY = "port" -PEER_ADDRESS_KEY = "peer address" -PEER_PORT_KEY = "peer port" -MAX_RESPONSE_DELAY_KEY = "max-response-delay" -MAX_UNACKED_UPDATES_KEY = "max-unacked-updates" -MCLT_KEY = "mclt" -SPLIT_KEY = "split" -LOAD_BALANCE_MAX_SECONDS_KEY = "load balance max seconds" + """Represents the DHCP Servers configuration and gives access to modify it""" + + def __init__(self): + self._default_lease_time = 300 + self.subnets = [] + self._peer = None + + def write_config(self): + conf = str(self) + print('Writing config: \n' + conf) + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(conf) + + def resolve_config(self): + with open(CONFIG_FILE, 'r', encoding='UTF-8') as f: + conf = f.read() + self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + + def resolve_subnets(self, conf): + self.subnets = [] + regex = r'(subnet.*)' + subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets: + dhcp_subnet = DHCPSubnet(subnet) + self.subnets.append(dhcp_subnet) + + def set_range(self, start, end, subnet=0, pool=0): + print('Setting Range for pool ') + print(self.subnets[subnet].pools[pool]) + self.subnets[subnet].pools[pool].range_start = start + self.subnets[subnet].pools[pool].range_end = end + + # def resolve_settings(self, conf): + # lines = conf.split('\n') + # for line in lines: + # if DEFAULT_LEASE_TIME_KEY in line: + # self._default_lease_time = line.strip().split( + # DEFAULT_LEASE_TIME_KEY)[1].strip().split(';')[0] + + # self.peer = peer + + def __str__(self): + + config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, + DEFAULT_LEASE_TIME=self._default_lease_time) + + config += '\n\n' + str(self.peer) + for subnet in self._subnets: + config += '\n\n' + str(subnet) + return str(config) + + +FAILOVER_PEER_KEY = 'failover peer' +PRIMARY_KEY = 'primary' +ADDRESS_KEY = 'address' +PORT_KEY = 'port' +PEER_ADDRESS_KEY = 'peer address' +PEER_PORT_KEY = 'peer port' +MAX_RESPONSE_DELAY_KEY = 'max-response-delay' +MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' +MCLT_KEY = 'mclt' +SPLIT_KEY = 'split' +LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' class DHCPFailoverPeer: - def __init__(self, config): - self.name = None - self.primary = False - self.address = None - self.port = None - self.peer_address = None - self.peer_port = None - self.max_response_delay = None - self.max_unacked_updates = None - self.mclt = None - self.split = None - self.load_balance_max_seconds = None - self.peer = None - - self.resolve_peer(config) - - def __str__(self): - config = "{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n" - config += "\tprimary;" if self.primary else "secondary;" - config += """\n\t{ADDRESS_KEY} {ADDRESS}; + """Contains all information to define the DHCP failover peer""" + + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + + self.resolve_peer(config) + + def __str__(self): + config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' + config += '\tprimary;' if self.primary else 'secondary;' + config += """\n\t{ADDRESS_KEY} {ADDRESS}; {PORT_KEY} {PORT}; {PEER_ADDRESS_KEY} {PEER_ADDRESS}; {PEER_PORT_KEY} {PEER_PORT}; @@ -106,162 +111,179 @@ def __str__(self): {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; \r}}""" - return config.format(length='multi-line', - FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, FAILOVER_PEER=self.name, - ADDRESS_KEY=ADDRESS_KEY, ADDRESS=self.address, - PORT_KEY=PORT_KEY, PORT=self.port, - PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, PEER_ADDRESS=self.peer_address, - PEER_PORT_KEY=PEER_PORT_KEY, PEER_PORT=self.peer_port, - MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, MAX_RESPONSE_DELAY=self.max_response_delay, - MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, MAX_UNACKED_UPDATES=self.max_unacked_updates, - MCLT_KEY=MCLT_KEY, MCLT=self.mclt, - SPLIT_KEY=SPLIT_KEY, SPLIT=self.split, - LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds - ) - - def resolve_peer(self, conf): - peer = "" - lines = conf.split("\n") - for line in lines: - if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: - if(len(peer) <= 0): - self.name = line.strip().split(FAILOVER_PEER_KEY)[ - 1].strip().split("{")[0].split("\"")[1] - peer += line+"\n" - if PRIMARY_KEY in line: - self.primary = True - elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: - self.address = line.strip().split(ADDRESS_KEY)[ - 1].strip().split(";")[0] - elif PORT_KEY in line and PEER_PORT_KEY not in line: - self.port = line.strip().split(PORT_KEY)[ - 1].strip().split(";")[0] - elif PEER_ADDRESS_KEY in line: - self.peer_address = line.strip().split(PEER_ADDRESS_KEY)[ - 1].strip().split(";")[0] - elif PEER_PORT_KEY in line: - self.peer_port = line.strip().split(PEER_PORT_KEY)[ - 1].strip().split(";")[0] - elif MAX_RESPONSE_DELAY_KEY in line: - self.max_response_delay = line.strip().split(MAX_RESPONSE_DELAY_KEY)[ - 1].strip().split(";")[0] - elif MAX_UNACKED_UPDATES_KEY in line: - self.max_unacked_updates = line.strip().split(MAX_UNACKED_UPDATES_KEY)[ - 1].strip().split(";")[0] - elif MCLT_KEY in line: - self.mclt = line.strip().split(MCLT_KEY)[ - 1].strip().split(";")[0] - elif SPLIT_KEY in line: - self.split = line.strip().split(SPLIT_KEY)[ - 1].strip().split(";")[0] - elif LOAD_BALANCE_MAX_SECONDS_KEY in line: - self.load_balance_max_seconds = line.strip().split(LOAD_BALANCE_MAX_SECONDS_KEY)[ - 1].strip().split(";")[0] - if line.endswith("}") and len(peer) > 0: - break - self.peer = peer - - -NTP_OPTION_KEY = "option ntp-servers" -SUBNET_MASK_OPTION_KEY = "option subnet-mask" -BROADCAST_OPTION_KEY = "option broadcast-address" -ROUTER_OPTION_KEY = "option routers" -DNS_OPTION_KEY = "option domain-name-servers" + return config.format( + length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, + FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, + ADDRESS=self.address, + PORT_KEY=PORT_KEY, + PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, + PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, + PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, + MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, + MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, + MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, + SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, + LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) + + def resolve_peer(self, conf): + peer = '' + lines = conf.split('\n') + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if len(peer) <= 0: + self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( + '{')[0].split('\"')[1] + peer += line + '\n' + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( + ';')[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split( + PEER_ADDRESS_KEY)[1].strip().split(';')[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( + ';')[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = line.strip().split( + MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split( + MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split( + LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] + if line.endswith('}') and len(peer) > 0: + break + self.peer = peer + + +NTP_OPTION_KEY = 'option ntp-servers' +SUBNET_MASK_OPTION_KEY = 'option subnet-mask' +BROADCAST_OPTION_KEY = 'option broadcast-address' +ROUTER_OPTION_KEY = 'option routers' +DNS_OPTION_KEY = 'option domain-name-servers' class DHCPSubnet: - def __init__(self, subnet): - self._ntp_servers = None - self._subnet_mask = None - self._broadcast = None - self._routers = None - self._dns_servers = None - self._pools = [] - - self.resolve_subnet(subnet) - self.resolve_pools(subnet) - - def __str__(self): - config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ + """Represents the DHCP Servers subnet configuration""" + + def __init__(self, subnet): + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self.pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ \r\t{NTP_OPTION_KEY} {NTP_OPTION}; \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" - config = config.format(length='multi-line', - NTP_OPTION_KEY=NTP_OPTION_KEY, NTP_OPTION=self._ntp_servers, - SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, SUBNET_MASK_OPTION=self._subnet_mask, - BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, BROADCAST_OPTION=self._broadcast, - ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, ROUTER_OPTION=self._routers, - DNS_OPTION_KEY=DNS_OPTION_KEY, DNS_OPTION=self._dns_servers - ) - for pool in self._pools: - config += "\n\t"+str(pool) - - config += "\n\r}" - return config - - def resolve_subnet(self, subnet): - subnet_parts = subnet.split("\n") - for part in subnet_parts: - if NTP_OPTION_KEY in part: - self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[ - 1].strip().split(";")[0] - elif SUBNET_MASK_OPTION_KEY in part: - self._subnet_mask = part.strip().split(SUBNET_MASK_OPTION_KEY)[ - 1].strip().split(";")[0] - elif BROADCAST_OPTION_KEY in part: - self._broadcast = part.strip().split(BROADCAST_OPTION_KEY)[ - 1].strip().split(";")[0] - elif ROUTER_OPTION_KEY in part: - self._routers = part.strip().split(ROUTER_OPTION_KEY)[ - 1].strip().split(";")[0] - elif DNS_OPTION_KEY in part: - self._dns_servers = part.strip().split(DNS_OPTION_KEY)[ - 1].strip().split(";")[0] - - def resolve_pools(self, subnet): - regex = r"(pool.*)\}" - pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) - for pool in pools: - dhcp_pool = DHCPPool(pool) - self._pools.append(dhcp_pool) - - -FAILOVER_KEY = "failover peer" -RANGE_KEY = "range" + config = config.format(length='multi-line', + NTP_OPTION_KEY=NTP_OPTION_KEY, + NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, + SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, + BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, + ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, + DNS_OPTION=self._dns_servers) + for pool in self.pools: + config += '\n\t' + str(pool) + + config += '\n\r}' + return config + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split('\n') + for part in subnet_parts: + if NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( + ';')[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split( + SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split( + BROADCAST_OPTION_KEY)[1].strip().split(';')[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( + ';')[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( + ';')[0] + + def resolve_pools(self, subnet): + regex = r'(pool.*)\}' + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self.pools.append(dhcp_pool) + + +FAILOVER_KEY = 'failover peer' +RANGE_KEY = 'range' class DHCPPool: + """Represents a DHCP Servers subnet pool configuration""" - def __init__(self, pool): - self._failover_peer = None - self._range_start = None - self._range_end = None - self.resolve_pool(pool) + def __init__(self, pool): + self.failover_peer = None + self.range_start = None + self.range_end = None + self.resolve_pool(pool) - def __str__(self): + def __str__(self): - config = """pool {{ + config = """pool {{ \r\t\t{FAILOVER_KEY} "{FAILOVER}"; \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; \r\t}}""" - return config.format(length='multi-line', - FAILOVER_KEY=FAILOVER_KEY, FAILOVER=self._failover_peer, - RANGE_KEY=RANGE_KEY, RANGE_START=self._range_start, RANGE_END=self._range_end, - ) - - def resolve_pool(self, pool): - pool_parts = pool.split("\n") - # pool_parts = pool.split("\n") - for part in pool_parts: - if FAILOVER_KEY in part: - self._failover_peer = part.strip().split( - FAILOVER_KEY)[1].strip().split(";")[0].replace("\"", "") - if RANGE_KEY in part: - range = part.strip().split(RANGE_KEY)[ - 1].strip().split(";")[0] - self._range_start = range.split(" ")[0].strip() - self._range_end = range.split(" ")[1].strip() + return config.format( + length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, + FAILOVER=self.failover_peer, + RANGE_KEY=RANGE_KEY, + RANGE_START=self.range_start, + RANGE_END=self.range_end, + ) + + def resolve_pool(self, pool): + pool_parts = pool.split('\n') + # pool_parts = pool.split("\n") + for part in pool_parts: + if FAILOVER_KEY in part: + self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( + ';')[0].replace('\"', '') + if RANGE_KEY in part: + pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] + self.range_start = pool_range.split(' ')[0].strip() + self.range_end = pool_range.split(' ')[1].strip() diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py index f90cb6b51..49732b362 100644 --- a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py @@ -1,3 +1,4 @@ +"""gRPC Network Service for the DHCP Server network module""" import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 @@ -5,40 +6,39 @@ class NetworkService(pb2_grpc.NetworkModule): + """gRPC endpoints for the DHCP Server""" - def __init__(self): - self._dhcp_config = DHCPConfig() + def __init__(self): + self._dhcp_config = DHCPConfig() + def GetDHCPRange(self, request, context): # pylint: disable=W0613 """ - Resolve the current DHCP configuration and return - the first range from the first subnet in the file - """ - - def GetDHCPRange(self, request, context): - self._dhcp_config.resolve_config() - pool = self._dhcp_config._subnets[0]._pools[0] - return pb2.DHCPRange(code=200, start=pool._range_start, end=pool._range_end) + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + self._dhcp_config.resolve_config() + pool = self._dhcp_config.subnets[0].pools[0] + return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) + def SetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration """ - Change DHCP configuration and set the - the first range from the first subnet in the configuration - """ - - def SetDHCPRange(self, request, context): - print("Setting DHCPRange") - print("Start: " + request.start) - print("End: " + request.end) - self._dhcp_config.resolve_config() - self._dhcp_config.set_range(request.start, request.end, 0, 0) - self._dhcp_config.write_config() - return pb2.Response(code=200, message="DHCP Range Set") + print('Setting DHCPRange') + print('Start: ' + request.start) + print('End: ' + request.end) + self._dhcp_config.resolve_config() + self._dhcp_config.set_range(request.start, request.end, 0, 0) + self._dhcp_config.write_config() + return pb2.Response(code=200, message='DHCP Range Set') + + def GetStatus(self, request, context): # pylint: disable=W0613 + """ + Return the current status of the network module """ - Return the current status of the network module - """ - - def GetStatus(self, request, context): - # ToDo: Figure out how to resolve the current DHCP status - dhcpStatus = True - message = str({"dhcpStatus":dhcpStatus}) - return pb2.Response(code=200, message=message) + # ToDo: Figure out how to resolve the current DHCP status + dhcp_status = True + message = str({'dhcpStatus': dhcp_status}) + return pb2.Response(code=200, message=message) diff --git a/net_orc/network/modules/dhcp-1/python/src/run.py b/net_orc/network/modules/dhcp-1/python/src/run.py deleted file mode 100644 index 830f048cf..000000000 --- a/net_orc/network/modules/dhcp-1/python/src/run.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 - -import signal -import sys -import argparse - -from grpc.dhcp_config import DHCPConfig - - -class DHCPServer: - - def __init__(self, module): - - signal.signal(signal.SIGINT, self.handler) - signal.signal(signal.SIGTERM, self.handler) - signal.signal(signal.SIGABRT, self.handler) - signal.signal(signal.SIGQUIT, self.handler) - - config = DHCPConfig() - config.resolve_config() - config.write_config() - - def handler(self, signum, frame): - if (signum == 2 or signal == signal.SIGTERM): - exit(1) - - -def run(argv): - parser = argparse.ArgumentParser(description="Faux Device Validator", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( - "-m", "--module", help="Define the module name to be used to create the log file") - - args = parser.parse_args() - - server = DHCPServer(args.module) - - -if __name__ == "__main__": - run(sys.argv) diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py index f5445ca44..1d93c2d34 100644 --- a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py @@ -1,101 +1,106 @@ +"""Contains all the necessary classes to maintain the +DHCP server's configuration""" import re -CONFIG_FILE = "/etc/dhcp/dhcpd.conf" -CONFIG_FILE_TEST = "network/modules/dhcp-1/conf/dhcpd.conf" +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +CONFIG_FILE_TEST = 'network/modules/dhcp-2/conf/dhcpd.conf' -DEFAULT_LEASE_TIME_KEY = "default-lease-time" +DEFAULT_LEASE_TIME_KEY = 'default-lease-time' class DHCPConfig: - - def __init__(self): - self._default_lease_time = 300 - self._subnets = [] - self._peer = None - - def write_config(self): - conf = str(self) - print("Writing config: \n" + conf) - f = open(CONFIG_FILE, "w") - f.write(conf) - - def resolve_config(self): - with open(CONFIG_FILE) as f: - conf = f.read() - self.resolve_subnets(conf) - self.peer = DHCPFailoverPeer(conf) - - def resolve_subnets(self, conf): - self._subnets = [] - regex = r"(subnet.*)" - subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) - for subnet in subnets: - dhcp_subnet = DHCPSubnet(subnet) - self._subnets.append(dhcp_subnet) - - def set_range(self, start, end, subnet=0, pool=0): - print("Setting Range for pool ") - print(self._subnets[subnet]._pools[pool]) - self._subnets[subnet]._pools[pool]._range_start = start - self._subnets[subnet]._pools[pool]._range_end = end - - def resolve_settings(self, conf): - lines = conf.split("\n") - for line in lines: - if DEFAULT_LEASE_TIME_KEY in line: - self._default_lease_time = line.strip().split(DEFAULT_LEASE_TIME_KEY)[ - 1].strip().split(";")[0] - - self.peer = peer - - def __str__(self): - - config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" - - config = config.format(length='multi-line', - DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, DEFAULT_LEASE_TIME=self._default_lease_time - ) - - config += "\n\n"+str(self.peer) - for subnet in self._subnets: - config += "\n\n"+str(subnet) - return str(config) - - -FAILOVER_PEER_KEY = "failover peer" -PRIMARY_KEY = "primary" -ADDRESS_KEY = "address" -PORT_KEY = "port" -PEER_ADDRESS_KEY = "peer address" -PEER_PORT_KEY = "peer port" -MAX_RESPONSE_DELAY_KEY = "max-response-delay" -MAX_UNACKED_UPDATES_KEY = "max-unacked-updates" -MCLT_KEY = "mclt" -SPLIT_KEY = "split" -LOAD_BALANCE_MAX_SECONDS_KEY = "load balance max seconds" + """Represents the DHCP Servers configuration and gives access to modify it""" + + def __init__(self): + self._default_lease_time = 300 + self.subnets = [] + self._peer = None + + def write_config(self): + conf = str(self) + print('Writing config: \n' + conf) + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(conf) + + def resolve_config(self): + with open(CONFIG_FILE, 'r', encoding='UTF-8') as f: + conf = f.read() + self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + + def resolve_subnets(self, conf): + self.subnets = [] + regex = r'(subnet.*)' + subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets: + dhcp_subnet = DHCPSubnet(subnet) + self.subnets.append(dhcp_subnet) + + def set_range(self, start, end, subnet=0, pool=0): + print('Setting Range for pool ') + print(self.subnets[subnet].pools[pool]) + self.subnets[subnet].pools[pool].range_start = start + self.subnets[subnet].pools[pool].range_end = end + + # def resolve_settings(self, conf): + # lines = conf.split('\n') + # for line in lines: + # if DEFAULT_LEASE_TIME_KEY in line: + # self._default_lease_time = line.strip().split( + # DEFAULT_LEASE_TIME_KEY)[1].strip().split(';')[0] + + # self.peer = peer + + def __str__(self): + + config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, + DEFAULT_LEASE_TIME=self._default_lease_time) + + config += '\n\n' + str(self.peer) + for subnet in self._subnets: + config += '\n\n' + str(subnet) + return str(config) + + +FAILOVER_PEER_KEY = 'failover peer' +PRIMARY_KEY = 'primary' +ADDRESS_KEY = 'address' +PORT_KEY = 'port' +PEER_ADDRESS_KEY = 'peer address' +PEER_PORT_KEY = 'peer port' +MAX_RESPONSE_DELAY_KEY = 'max-response-delay' +MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' +MCLT_KEY = 'mclt' +SPLIT_KEY = 'split' +LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' class DHCPFailoverPeer: - def __init__(self, config): - self.name = None - self.primary = False - self.address = None - self.port = None - self.peer_address = None - self.peer_port = None - self.max_response_delay = None - self.max_unacked_updates = None - self.mclt = None - self.split = None - self.load_balance_max_seconds = None - self.peer = None - - self.resolve_peer(config) - - def __str__(self): - config = "{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n" - config += "\tprimary;" if self.primary else "secondary;" - config += """\n\t{ADDRESS_KEY} {ADDRESS}; + """Contains all information to define the DHCP failover peer""" + + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + + self.resolve_peer(config) + + def __str__(self): + config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' + config += '\tprimary;' if self.primary else 'secondary;' + config += """\n\t{ADDRESS_KEY} {ADDRESS}; {PORT_KEY} {PORT}; {PEER_ADDRESS_KEY} {PEER_ADDRESS}; {PEER_PORT_KEY} {PEER_PORT}; @@ -106,162 +111,179 @@ def __str__(self): {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; \r}}""" - return config.format(length='multi-line', - FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, FAILOVER_PEER=self.name, - ADDRESS_KEY=ADDRESS_KEY, ADDRESS=self.address, - PORT_KEY=PORT_KEY, PORT=self.port, - PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, PEER_ADDRESS=self.peer_address, - PEER_PORT_KEY=PEER_PORT_KEY, PEER_PORT=self.peer_port, - MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, MAX_RESPONSE_DELAY=self.max_response_delay, - MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, MAX_UNACKED_UPDATES=self.max_unacked_updates, - MCLT_KEY=MCLT_KEY, MCLT=self.mclt, - SPLIT_KEY=SPLIT_KEY, SPLIT=self.split, - LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds - ) - - def resolve_peer(self, conf): - peer = "" - lines = conf.split("\n") - for line in lines: - if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: - if(len(peer) <= 0): - self.name = line.strip().split(FAILOVER_PEER_KEY)[ - 1].strip().split("{")[0].split("\"")[1] - peer += line+"\n" - if PRIMARY_KEY in line: - self.primary = True - elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: - self.address = line.strip().split(ADDRESS_KEY)[ - 1].strip().split(";")[0] - elif PORT_KEY in line and PEER_PORT_KEY not in line: - self.port = line.strip().split(PORT_KEY)[ - 1].strip().split(";")[0] - elif PEER_ADDRESS_KEY in line: - self.peer_address = line.strip().split(PEER_ADDRESS_KEY)[ - 1].strip().split(";")[0] - elif PEER_PORT_KEY in line: - self.peer_port = line.strip().split(PEER_PORT_KEY)[ - 1].strip().split(";")[0] - elif MAX_RESPONSE_DELAY_KEY in line: - self.max_response_delay = line.strip().split(MAX_RESPONSE_DELAY_KEY)[ - 1].strip().split(";")[0] - elif MAX_UNACKED_UPDATES_KEY in line: - self.max_unacked_updates = line.strip().split(MAX_UNACKED_UPDATES_KEY)[ - 1].strip().split(";")[0] - elif MCLT_KEY in line: - self.mclt = line.strip().split(MCLT_KEY)[ - 1].strip().split(";")[0] - elif SPLIT_KEY in line: - self.split = line.strip().split(SPLIT_KEY)[ - 1].strip().split(";")[0] - elif LOAD_BALANCE_MAX_SECONDS_KEY in line: - self.load_balance_max_seconds = line.strip().split(LOAD_BALANCE_MAX_SECONDS_KEY)[ - 1].strip().split(";")[0] - if line.endswith("}") and len(peer) > 0: - break - self.peer = peer - - -NTP_OPTION_KEY = "option ntp-servers" -SUBNET_MASK_OPTION_KEY = "option subnet-mask" -BROADCAST_OPTION_KEY = "option broadcast-address" -ROUTER_OPTION_KEY = "option routers" -DNS_OPTION_KEY = "option domain-name-servers" + return config.format( + length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, + FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, + ADDRESS=self.address, + PORT_KEY=PORT_KEY, + PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, + PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, + PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, + MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, + MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, + MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, + SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, + LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) + + def resolve_peer(self, conf): + peer = '' + lines = conf.split('\n') + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if len(peer) <= 0: + self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( + '{')[0].split('\"')[1] + peer += line + '\n' + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( + ';')[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split( + PEER_ADDRESS_KEY)[1].strip().split(';')[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( + ';')[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = line.strip().split( + MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split( + MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split( + LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] + if line.endswith('}') and len(peer) > 0: + break + self.peer = peer + + +NTP_OPTION_KEY = 'option ntp-servers' +SUBNET_MASK_OPTION_KEY = 'option subnet-mask' +BROADCAST_OPTION_KEY = 'option broadcast-address' +ROUTER_OPTION_KEY = 'option routers' +DNS_OPTION_KEY = 'option domain-name-servers' class DHCPSubnet: - def __init__(self, subnet): - self._ntp_servers = None - self._subnet_mask = None - self._broadcast = None - self._routers = None - self._dns_servers = None - self._pools = [] - - self.resolve_subnet(subnet) - self.resolve_pools(subnet) - - def __str__(self): - config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ + """Represents the DHCP Servers subnet configuration""" + + def __init__(self, subnet): + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self.pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ \r\t{NTP_OPTION_KEY} {NTP_OPTION}; \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" - config = config.format(length='multi-line', - NTP_OPTION_KEY=NTP_OPTION_KEY, NTP_OPTION=self._ntp_servers, - SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, SUBNET_MASK_OPTION=self._subnet_mask, - BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, BROADCAST_OPTION=self._broadcast, - ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, ROUTER_OPTION=self._routers, - DNS_OPTION_KEY=DNS_OPTION_KEY, DNS_OPTION=self._dns_servers - ) - for pool in self._pools: - config += "\n\t"+str(pool) - - config += "\n\r}" - return config - - def resolve_subnet(self, subnet): - subnet_parts = subnet.split("\n") - for part in subnet_parts: - if NTP_OPTION_KEY in part: - self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[ - 1].strip().split(";")[0] - elif SUBNET_MASK_OPTION_KEY in part: - self._subnet_mask = part.strip().split(SUBNET_MASK_OPTION_KEY)[ - 1].strip().split(";")[0] - elif BROADCAST_OPTION_KEY in part: - self._broadcast = part.strip().split(BROADCAST_OPTION_KEY)[ - 1].strip().split(";")[0] - elif ROUTER_OPTION_KEY in part: - self._routers = part.strip().split(ROUTER_OPTION_KEY)[ - 1].strip().split(";")[0] - elif DNS_OPTION_KEY in part: - self._dns_servers = part.strip().split(DNS_OPTION_KEY)[ - 1].strip().split(";")[0] - - def resolve_pools(self, subnet): - regex = r"(pool.*)\}" - pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) - for pool in pools: - dhcp_pool = DHCPPool(pool) - self._pools.append(dhcp_pool) - - -FAILOVER_KEY = "failover peer" -RANGE_KEY = "range" + config = config.format(length='multi-line', + NTP_OPTION_KEY=NTP_OPTION_KEY, + NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, + SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, + BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, + ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, + DNS_OPTION=self._dns_servers) + for pool in self.pools: + config += '\n\t' + str(pool) + + config += '\n\r}' + return config + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split('\n') + for part in subnet_parts: + if NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( + ';')[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split( + SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split( + BROADCAST_OPTION_KEY)[1].strip().split(';')[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( + ';')[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( + ';')[0] + + def resolve_pools(self, subnet): + regex = r'(pool.*)\}' + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self.pools.append(dhcp_pool) + + +FAILOVER_KEY = 'failover peer' +RANGE_KEY = 'range' class DHCPPool: + """Represents a DHCP Servers subnet pool configuration""" - def __init__(self, pool): - self._failover_peer = None - self._range_start = None - self._range_end = None - self.resolve_pool(pool) + def __init__(self, pool): + self.failover_peer = None + self.range_start = None + self.range_end = None + self.resolve_pool(pool) - def __str__(self): + def __str__(self): - config = """pool {{ + config = """pool {{ \r\t\t{FAILOVER_KEY} "{FAILOVER}"; \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; \r\t}}""" - return config.format(length='multi-line', - FAILOVER_KEY=FAILOVER_KEY, FAILOVER=self._failover_peer, - RANGE_KEY=RANGE_KEY, RANGE_START=self._range_start, RANGE_END=self._range_end, - ) - - def resolve_pool(self, pool): - pool_parts = pool.split("\n") - # pool_parts = pool.split("\n") - for part in pool_parts: - if FAILOVER_KEY in part: - self._failover_peer = part.strip().split( - FAILOVER_KEY)[1].strip().split(";")[0].replace("\"", "") - if RANGE_KEY in part: - range = part.strip().split(RANGE_KEY)[ - 1].strip().split(";")[0] - self._range_start = range.split(" ")[0].strip() - self._range_end = range.split(" ")[1].strip() + return config.format( + length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, + FAILOVER=self.failover_peer, + RANGE_KEY=RANGE_KEY, + RANGE_START=self.range_start, + RANGE_END=self.range_end, + ) + + def resolve_pool(self, pool): + pool_parts = pool.split('\n') + # pool_parts = pool.split("\n") + for part in pool_parts: + if FAILOVER_KEY in part: + self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( + ';')[0].replace('\"', '') + if RANGE_KEY in part: + pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] + self.range_start = pool_range.split(' ')[0].strip() + self.range_end = pool_range.split(' ')[1].strip() diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py index f90cb6b51..49732b362 100644 --- a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py @@ -1,3 +1,4 @@ +"""gRPC Network Service for the DHCP Server network module""" import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 @@ -5,40 +6,39 @@ class NetworkService(pb2_grpc.NetworkModule): + """gRPC endpoints for the DHCP Server""" - def __init__(self): - self._dhcp_config = DHCPConfig() + def __init__(self): + self._dhcp_config = DHCPConfig() + def GetDHCPRange(self, request, context): # pylint: disable=W0613 """ - Resolve the current DHCP configuration and return - the first range from the first subnet in the file - """ - - def GetDHCPRange(self, request, context): - self._dhcp_config.resolve_config() - pool = self._dhcp_config._subnets[0]._pools[0] - return pb2.DHCPRange(code=200, start=pool._range_start, end=pool._range_end) + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + self._dhcp_config.resolve_config() + pool = self._dhcp_config.subnets[0].pools[0] + return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) + def SetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration """ - Change DHCP configuration and set the - the first range from the first subnet in the configuration - """ - - def SetDHCPRange(self, request, context): - print("Setting DHCPRange") - print("Start: " + request.start) - print("End: " + request.end) - self._dhcp_config.resolve_config() - self._dhcp_config.set_range(request.start, request.end, 0, 0) - self._dhcp_config.write_config() - return pb2.Response(code=200, message="DHCP Range Set") + print('Setting DHCPRange') + print('Start: ' + request.start) + print('End: ' + request.end) + self._dhcp_config.resolve_config() + self._dhcp_config.set_range(request.start, request.end, 0, 0) + self._dhcp_config.write_config() + return pb2.Response(code=200, message='DHCP Range Set') + + def GetStatus(self, request, context): # pylint: disable=W0613 + """ + Return the current status of the network module """ - Return the current status of the network module - """ - - def GetStatus(self, request, context): - # ToDo: Figure out how to resolve the current DHCP status - dhcpStatus = True - message = str({"dhcpStatus":dhcpStatus}) - return pb2.Response(code=200, message=message) + # ToDo: Figure out how to resolve the current DHCP status + dhcp_status = True + message = str({'dhcpStatus': dhcp_status}) + return pb2.Response(code=200, message=message) diff --git a/net_orc/network/modules/dhcp-2/python/src/run.py b/net_orc/network/modules/dhcp-2/python/src/run.py deleted file mode 100644 index 830f048cf..000000000 --- a/net_orc/network/modules/dhcp-2/python/src/run.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python3 - -import signal -import sys -import argparse - -from grpc.dhcp_config import DHCPConfig - - -class DHCPServer: - - def __init__(self, module): - - signal.signal(signal.SIGINT, self.handler) - signal.signal(signal.SIGTERM, self.handler) - signal.signal(signal.SIGABRT, self.handler) - signal.signal(signal.SIGQUIT, self.handler) - - config = DHCPConfig() - config.resolve_config() - config.write_config() - - def handler(self, signum, frame): - if (signum == 2 or signal == signal.SIGTERM): - exit(1) - - -def run(argv): - parser = argparse.ArgumentParser(description="Faux Device Validator", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( - "-m", "--module", help="Define the module name to be used to create the log file") - - args = parser.parse_args() - - server = DHCPServer(args.module) - - -if __name__ == "__main__": - run(sys.argv) diff --git a/net_orc/network/modules/ntp/python/src/ntp_server.py b/net_orc/network/modules/ntp/python/src/ntp_server.py index a53134fe7..602585196 100644 --- a/net_orc/network/modules/ntp/python/src/ntp_server.py +++ b/net_orc/network/modules/ntp/python/src/ntp_server.py @@ -1,3 +1,4 @@ +"""NTP Server""" import datetime import socket import struct @@ -7,11 +8,12 @@ import threading import select -taskQueue = queue.Queue() -stopFlag = False +task_queue = queue.Queue() +stop_flag = False + def system_to_ntp_time(timestamp): - """Convert a system time to a NTP time. + """Convert a system time to a NTP time. Parameters: timestamp -- timestamp in system time @@ -19,10 +21,11 @@ def system_to_ntp_time(timestamp): Returns: corresponding NTP time """ - return timestamp + NTP.NTP_DELTA + return timestamp + NTP.NTP_DELTA + def _to_int(timestamp): - """Return the integral part of a timestamp. + """Return the integral part of a timestamp. Parameters: timestamp -- NTP timestamp @@ -30,10 +33,11 @@ def _to_int(timestamp): Retuns: integral part """ - return int(timestamp) + return int(timestamp) + def _to_frac(timestamp, n=32): - """Return the fractional part of a timestamp. + """Return the fractional part of a timestamp. Parameters: timestamp -- NTP timestamp @@ -42,10 +46,11 @@ def _to_frac(timestamp, n=32): Retuns: fractional part """ - return int(abs(timestamp - _to_int(timestamp)) * 2**n) + return int(abs(timestamp - _to_int(timestamp)) * 2**n) + def _to_time(integ, frac, n=32): - """Return a timestamp from an integral and fractional part. + """Return a timestamp from an integral and fractional part. Parameters: integ -- integral part @@ -55,115 +60,115 @@ def _to_time(integ, frac, n=32): Retuns: timestamp """ - return integ + float(frac)/2**n - + return integ + float(frac) / 2**n class NTPException(Exception): - """Exception raised by this module.""" - pass + """Exception raised by this module.""" + pass class NTP: - """Helper class defining constants.""" - - _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) - """system epoch""" - _NTP_EPOCH = datetime.date(1900, 1, 1) - """NTP epoch""" - NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 - """delta between system and NTP time""" - - REF_ID_TABLE = { - 'DNC': "DNC routing protocol", - 'NIST': "NIST public modem", - 'TSP': "TSP time protocol", - 'DTS': "Digital Time Service", - 'ATOM': "Atomic clock (calibrated)", - 'VLF': "VLF radio (OMEGA, etc)", - 'callsign': "Generic radio", - 'LORC': "LORAN-C radionavidation", - 'GOES': "GOES UHF environment satellite", - 'GPS': "GPS UHF satellite positioning", - } - """reference identifier table""" - - STRATUM_TABLE = { - 0: "unspecified", - 1: "primary reference", - } - """stratum table""" - - MODE_TABLE = { - 0: "unspecified", - 1: "symmetric active", - 2: "symmetric passive", - 3: "client", - 4: "server", - 5: "broadcast", - 6: "reserved for NTP control messages", - 7: "reserved for private use", - } - """mode table""" - - LEAP_TABLE = { - 0: "no warning", - 1: "last minute has 61 seconds", - 2: "last minute has 59 seconds", - 3: "alarm condition (clock not synchronized)", - } - """leap indicator table""" + """Helper class defining constants.""" + + _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) + """system epoch""" + _NTP_EPOCH = datetime.date(1900, 1, 1) + """NTP epoch""" + NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 + """delta between system and NTP time""" + + REF_ID_TABLE = { + 'DNC': 'DNC routing protocol', + 'NIST': 'NIST public modem', + 'TSP': 'TSP time protocol', + 'DTS': 'Digital Time Service', + 'ATOM': 'Atomic clock (calibrated)', + 'VLF': 'VLF radio (OMEGA, etc)', + 'callsign': 'Generic radio', + 'LORC': 'LORAN-C radionavidation', + 'GOES': 'GOES UHF environment satellite', + 'GPS': 'GPS UHF satellite positioning', + } + """reference identifier table""" + + STRATUM_TABLE = { + 0: 'unspecified', + 1: 'primary reference', + } + """stratum table""" + + MODE_TABLE = { + 0: 'unspecified', + 1: 'symmetric active', + 2: 'symmetric passive', + 3: 'client', + 4: 'server', + 5: 'broadcast', + 6: 'reserved for NTP control messages', + 7: 'reserved for private use', + } + """mode table""" + + LEAP_TABLE = { + 0: 'no warning', + 1: 'last minute has 61 seconds', + 2: 'last minute has 59 seconds', + 3: 'alarm condition (clock not synchronized)', + } + """leap indicator table""" + class NTPPacket: - """NTP packet class. + """NTP packet class. This represents an NTP packet. """ - - _PACKET_FORMAT = "!B B B b 11I" - """packet format to pack/unpack""" - def __init__(self, version=4, mode=3, tx_timestamp=0): - """Constructor. + _PACKET_FORMAT = '!B B B b 11I' + """packet format to pack/unpack""" + + def __init__(self, version=4, mode=3, tx_timestamp=0): + """Constructor. Parameters: version -- NTP version mode -- packet mode (client, server) tx_timestamp -- packet transmit timestamp """ - self.leap = 0 - """leap second indicator""" - self.version = version - """version""" - self.mode = mode - """mode""" - self.stratum = 0 - """stratum""" - self.poll = 0 - """poll interval""" - self.precision = 0 - """precision""" - self.root_delay = 0 - """root delay""" - self.root_dispersion = 0 - """root dispersion""" - self.ref_id = 0 - """reference clock identifier""" - self.ref_timestamp = 0 - """reference timestamp""" - self.orig_timestamp = 0 - self.orig_timestamp_high = 0 - self.orig_timestamp_low = 0 - """originate timestamp""" - self.recv_timestamp = 0 - """receive timestamp""" - self.tx_timestamp = tx_timestamp - self.tx_timestamp_high = 0 - self.tx_timestamp_low = 0 - """tansmit timestamp""" - - def to_data(self): - """Convert this NTPPacket to a buffer that can be sent over a socket. + self.leap = 0 + """leap second indicator""" + self.version = version + """version""" + self.mode = mode + """mode""" + self.stratum = 0 + """stratum""" + self.poll = 0 + """poll interval""" + self.precision = 0 + """precision""" + self.root_delay = 0 + """root delay""" + self.root_dispersion = 0 + """root dispersion""" + self.ref_id = 0 + """reference clock identifier""" + self.ref_timestamp = 0 + """reference timestamp""" + self.orig_timestamp = 0 + self.orig_timestamp_high = 0 + self.orig_timestamp_low = 0 + """originate timestamp""" + self.recv_timestamp = 0 + """receive timestamp""" + self.tx_timestamp = tx_timestamp + self.tx_timestamp_high = 0 + self.tx_timestamp_low = 0 + """tansmit timestamp""" + + def to_data(self): + """Convert this NTPPacket to a buffer that can be sent over a socket. Returns: buffer representing this packet @@ -171,31 +176,32 @@ def to_data(self): Raises: NTPException -- in case of invalid field """ - try: - packed = struct.pack(NTPPacket._PACKET_FORMAT, - (self.leap << 6 | self.version << 3 | self.mode), - self.stratum, - self.poll, - self.precision, - _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), - _to_int(self.root_dispersion) << 16 | - _to_frac(self.root_dispersion, 16), - self.ref_id, - _to_int(self.ref_timestamp), - _to_frac(self.ref_timestamp), - #Change by lichen, avoid loss of precision - self.orig_timestamp_high, - self.orig_timestamp_low, - _to_int(self.recv_timestamp), - _to_frac(self.recv_timestamp), - _to_int(self.tx_timestamp), - _to_frac(self.tx_timestamp)) - except struct.error: - raise NTPException("Invalid NTP packet fields.") - return packed - - def from_data(self, data): - """Populate this instance from a NTP packet payload received from + try: + packed = struct.pack( + NTPPacket._PACKET_FORMAT, + (self.leap << 6 | self.version << 3 | self.mode), + self.stratum, + self.poll, + self.precision, + _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), + _to_int(self.root_dispersion) << 16 + | _to_frac(self.root_dispersion, 16), + self.ref_id, + _to_int(self.ref_timestamp), + _to_frac(self.ref_timestamp), + #Change by lichen, avoid loss of precision + self.orig_timestamp_high, + self.orig_timestamp_low, + _to_int(self.recv_timestamp), + _to_frac(self.recv_timestamp), + _to_int(self.tx_timestamp), + _to_frac(self.tx_timestamp)) + except struct.error as exc: + raise NTPException('Invalid NTP packet fields.') from exc + return packed + + def from_data(self, data): + """Populate this instance from a NTP packet payload received from the network. Parameters: @@ -204,112 +210,115 @@ def from_data(self, data): Raises: NTPException -- in case of invalid packet format """ - try: - unpacked = struct.unpack(NTPPacket._PACKET_FORMAT, - data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) - except struct.error: - raise NTPException("Invalid NTP packet.") - - self.leap = unpacked[0] >> 6 & 0x3 - self.version = unpacked[0] >> 3 & 0x7 - self.mode = unpacked[0] & 0x7 - self.stratum = unpacked[1] - self.poll = unpacked[2] - self.precision = unpacked[3] - self.root_delay = float(unpacked[4])/2**16 - self.root_dispersion = float(unpacked[5])/2**16 - self.ref_id = unpacked[6] - self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) - self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) - self.orig_timestamp_high = unpacked[9] - self.orig_timestamp_low = unpacked[10] - self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) - self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) - self.tx_timestamp_high = unpacked[13] - self.tx_timestamp_low = unpacked[14] - - def GetTxTimeStamp(self): - return (self.tx_timestamp_high,self.tx_timestamp_low) - - def SetOriginTimeStamp(self,high,low): - self.orig_timestamp_high = high - self.orig_timestamp_low = low - + try: + unpacked = struct.unpack( + NTPPacket._PACKET_FORMAT, + data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) + except struct.error as exc: + raise NTPException('Invalid NTP packet.') from exc + + self.leap = unpacked[0] >> 6 & 0x3 + self.version = unpacked[0] >> 3 & 0x7 + self.mode = unpacked[0] & 0x7 + self.stratum = unpacked[1] + self.poll = unpacked[2] + self.precision = unpacked[3] + self.root_delay = float(unpacked[4]) / 2**16 + self.root_dispersion = float(unpacked[5]) / 2**16 + self.ref_id = unpacked[6] + self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) + self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) + self.orig_timestamp_high = unpacked[9] + self.orig_timestamp_low = unpacked[10] + self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) + self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) + self.tx_timestamp_high = unpacked[13] + self.tx_timestamp_low = unpacked[14] + + def get_tx_timestamp(self): + return (self.tx_timestamp_high, self.tx_timestamp_low) + + def set_origin_timestamp(self, high, low): + self.orig_timestamp_high = high + self.orig_timestamp_low = low + class RecvThread(threading.Thread): - def __init__(self,socket): - threading.Thread.__init__(self) - self.socket = socket - def run(self): - global t,stopFlag - while True: - if stopFlag == True: - print("RecvThread Ended") - break - rlist,wlist,elist = select.select([self.socket],[],[],1); - if len(rlist) != 0: - print("Received %d packets" % len(rlist)) - for tempSocket in rlist: - try: - data,addr = tempSocket.recvfrom(1024) - recvTimestamp = recvTimestamp = system_to_ntp_time(time.time()) - taskQueue.put((data,addr,recvTimestamp)) - except socket.error as msg: - print(msg) + """Thread class to recieve all requests""" + def __init__(self): + threading.Thread.__init__(self) + #self.local_socket = local_socket + + def run(self): + while True: + if stop_flag: + print('RecvThread Ended') + break + rlist, wlist, elist = select.select([local_socket], [], [], 1) # pylint: disable=unused-variable + if len(rlist) != 0: + print(f'Received {len(rlist)} packets') + for temp_socket in rlist: + try: + data, addr = temp_socket.recvfrom(1024) + recv_timestamp = system_to_ntp_time(time.time()) + task_queue.put((data, addr, recv_timestamp)) + except socket.error as msg: + print(msg) + class WorkThread(threading.Thread): - def __init__(self,socket): - threading.Thread.__init__(self) - self.socket = socket - def run(self): - global taskQueue,stopFlag - while True: - if stopFlag == True: - print("WorkThread Ended") - break - try: - data,addr,recvTimestamp = taskQueue.get(timeout=1) - recvPacket = NTPPacket() - recvPacket.from_data(data) - timeStamp_high,timeStamp_low = recvPacket.GetTxTimeStamp() - sendPacket = NTPPacket(version=4,mode=4) - sendPacket.stratum = 2 - sendPacket.poll = 10 - ''' - sendPacket.precision = 0xfa - sendPacket.root_delay = 0x0bfa - sendPacket.root_dispersion = 0x0aa7 - sendPacket.ref_id = 0x808a8c2c - ''' - sendPacket.ref_timestamp = recvTimestamp-5 - sendPacket.SetOriginTimeStamp(timeStamp_high,timeStamp_low) - sendPacket.recv_timestamp = recvTimestamp - sendPacket.tx_timestamp = system_to_ntp_time(time.time()) - socket.sendto(sendPacket.to_data(),addr) - print("Sent to %s:%d" % (addr[0],addr[1])) - except queue.Empty: - continue - - -listenIp = "0.0.0.0" -listenPort = 123 -socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) -socket.bind((listenIp,listenPort)) -print("local socket: ", socket.getsockname()); -recvThread = RecvThread(socket) + """Thread class to process all requests and respond""" + def __init__(self): + threading.Thread.__init__(self) + #self.local_socket = local_socket + + def run(self): + while True: + if stop_flag: + print('WorkThread Ended') + break + try: + data, addr, recv_timestamp = task_queue.get(timeout=1) + recv_packet = NTPPacket() + recv_packet.from_data(data) + timestamp_high, timestamp_low = recv_packet.get_tx_timestamp() + send_packet = NTPPacket(version=4, mode=4) + send_packet.stratum = 2 + send_packet.poll = 10 + + # send_packet.precision = 0xfa + # send_packet.root_delay = 0x0bfa + # send_packet.root_dispersion = 0x0aa7 + # send_packet.ref_id = 0x808a8c2c + + send_packet.ref_timestamp = recv_timestamp - 5 + send_packet.set_origin_timestamp(timestamp_high, timestamp_low) + send_packet.recv_timestamp = recv_timestamp + send_packet.tx_timestamp = system_to_ntp_time(time.time()) + local_socket.sendto(send_packet.to_data(), addr) + print(f'Sent to {addr[0]}:{addr[1]}') + except queue.Empty: + continue + + +listen_ip = '0.0.0.0' +listen_port = 123 +local_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) +local_socket.bind((listen_ip, listen_port)) +print('local socket: ', local_socket.getsockname()) +recvThread = RecvThread() recvThread.start() -workThread = WorkThread(socket) +workThread = WorkThread() workThread.start() while True: - try: - time.sleep(0.5) - except KeyboardInterrupt: - print("Exiting...") - stopFlag = True - recvThread.join() - workThread.join() - #socket.close() - print("Exited") - break - + try: + time.sleep(0.5) + except KeyboardInterrupt: + print('Exiting...') + stop_flag = True + recvThread.join() + workThread.join() + #local_socket.close() + print('Exited') + break diff --git a/net_orc/network/modules/radius/python/src/authenticator.py b/net_orc/network/modules/radius/python/src/authenticator.py index 55fa51d87..32f4ac221 100644 --- a/net_orc/network/modules/radius/python/src/authenticator.py +++ b/net_orc/network/modules/radius/python/src/authenticator.py @@ -1,31 +1,45 @@ +"""Authenticator for the RADIUS Server""" from chewie.chewie import Chewie import logging -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' -INTERFACE_NAME="veth0" -RADIUS_SERVER_IP="127.0.0.1" -RADIUS_SERVER_PORT=1812 -RADIUS_SERVER_SECRET="testing123" +INTERFACE_NAME = 'veth0' +RADIUS_SERVER_IP = '127.0.0.1' +RADIUS_SERVER_PORT = 1812 +RADIUS_SERVER_SECRET = 'testing123' -class Authenticator(): - - def __init__(self): - self.chewie = Chewie(INTERFACE_NAME, self._get_logger(), self._auth_handler, self._failure_handler, self._logoff_handler, radius_server_ip=RADIUS_SERVER_IP, radius_server_port=RADIUS_SERVER_PORT, radius_server_secret=RADIUS_SERVER_SECRET) - self.chewie.run() - - def _get_logger(self): - logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=logging.INFO) - logger = logging.getLogger("chewie") - return logger - - def _auth_handler(self, address, group_address, *args, **kwargs): - print("Successful auth for " + str(address) + " on port " + str(group_address)) - def _failure_handler(self, address, group_address): - print("Failed auth for " + str(address) + " on port " + str(group_address)) - - def _logoff_handler(self, address, group_address): - print("Log off reported for " + str(address) + " on port " + str(group_address)) - -authenticator = Authenticator() \ No newline at end of file +class Authenticator(): + """Authenticator for the RADIUS Server""" + def __init__(self): + self.chewie = Chewie(INTERFACE_NAME, + self._get_logger(), + self._auth_handler, + self._failure_handler, + self._logoff_handler, + radius_server_ip=RADIUS_SERVER_IP, + radius_server_port=RADIUS_SERVER_PORT, + radius_server_secret=RADIUS_SERVER_SECRET) + self.chewie.run() + + def _get_logger(self): + logging.basicConfig(format=_LOG_FORMAT, + datefmt=_DATE_FORMAT, + level=logging.INFO) + logger = logging.getLogger('chewie') + return logger + + def _auth_handler(self, address, group_address, *args, **kwargs): # pylint: disable=unused-argument + print('Successful auth for ' + str(address) + ' on port '+ + str(group_address)) + + def _failure_handler(self, address, group_address): + print('Failed auth for ' + str(address) + ' on port ' + str(group_address)) + + def _logoff_handler(self, address, group_address): + print('Log off reported for ' + str(address) + ' on port ' + + str(group_address)) + + +authenticator = Authenticator() diff --git a/net_orc/network/modules/template/python/src/template_main.py b/net_orc/network/modules/template/python/src/template_main.py index 50c425c23..df2452550 100644 --- a/net_orc/network/modules/template/python/src/template_main.py +++ b/net_orc/network/modules/template/python/src/template_main.py @@ -1,4 +1,4 @@ """Python code for the template module.""" if __name__ == "__main__": - print ("Template main") + print("Template main") diff --git a/net_orc/python/src/listener.py b/net_orc/python/src/listener.py index 0323fd9f6..de7a07616 100644 --- a/net_orc/python/src/listener.py +++ b/net_orc/python/src/listener.py @@ -13,6 +13,7 @@ DHCP_ACK = 5 CONTAINER_MAC_PREFIX = '9a:02:57:1e:8f' + class Listener: """Methods to start and stop the network listener.""" @@ -20,8 +21,8 @@ def __init__(self, device_intf): self._device_intf = device_intf self._device_intf_mac = get_if_hwaddr(self._device_intf) - self._sniffer = AsyncSniffer( - iface=self._device_intf, prn=self._packet_callback) + self._sniffer = AsyncSniffer(iface=self._device_intf, + prn=self._packet_callback) self._callbacks = [] self._discovered_devices = [] @@ -40,17 +41,14 @@ def is_running(self): def register_callback(self, callback, events=[]): # pylint: disable=dangerous-default-value """Register a callback for specified events.""" - self._callbacks.append( - { - 'callback': callback, - 'events': events - } - ) + self._callbacks.append({'callback': callback, 'events': events}) def call_callback(self, net_event, *args): for callback in self._callbacks: if net_event in callback['events']: - callback_thread = threading.Thread(target=callback['callback'], name="Callback thread", args=args) + callback_thread = threading.Thread(target=callback['callback'], + name='Callback thread', + args=args) callback_thread.start() def _packet_callback(self, packet): @@ -62,10 +60,11 @@ def _packet_callback(self, packet): # New device discovered callback if not packet.src is None and packet.src not in self._discovered_devices: # Ignore packets originating from our containers - if packet.src.startswith(CONTAINER_MAC_PREFIX) or packet.src == self._device_intf_mac: + if packet.src.startswith( + CONTAINER_MAC_PREFIX) or packet.src == self._device_intf_mac: return self._discovered_devices.append(packet.src) self.call_callback(NetworkEvent.DEVICE_DISCOVERED, packet.src) def _get_dhcp_type(self, packet): - return packet[DHCP].options[0][1] \ No newline at end of file + return packet[DHCP].options[0][1] diff --git a/net_orc/python/src/logger.py b/net_orc/python/src/logger.py index e930f1953..aaf690c8a 100644 --- a/net_orc/python/src/logger.py +++ b/net_orc/python/src/logger.py @@ -1,27 +1,31 @@ -#!/usr/bin/env python3 - +"""Sets up the logger to be used for the network orchestrator.""" import json import logging import os LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' _DEFAULT_LEVEL = logging.INFO -_CONF_DIR="conf" -_CONF_FILE_NAME="system.json" +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' # Set log level try: - system_conf_json = json.load(open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='UTF-8')) - log_level_str = system_conf_json['log_level'] - LOG_LEVEL = logging.getLevelName(log_level_str) + + with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='UTF-8') as config_json_file: + system_conf_json = json.load(config_json_file) + + log_level_str = system_conf_json['log_level'] + LOG_LEVEL = logging.getLevelName(log_level_str) except OSError: - LOG_LEVEL = _DEFAULT_LEVEL + LOG_LEVEL = _DEFAULT_LEVEL logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=LOG_LEVEL) + def get_logger(name): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - return LOGGERS[name] + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + return LOGGERS[name] diff --git a/net_orc/python/src/network_device.py b/net_orc/python/src/network_device.py index f54a273b6..1b856da16 100644 --- a/net_orc/python/src/network_device.py +++ b/net_orc/python/src/network_device.py @@ -1,6 +1,7 @@ """Track device object information.""" from dataclasses import dataclass + @dataclass class NetworkDevice: """Represents a physical device and it's configuration.""" diff --git a/net_orc/python/src/network_event.py b/net_orc/python/src/network_event.py index dc08cf892..f56adf494 100644 --- a/net_orc/python/src/network_event.py +++ b/net_orc/python/src/network_event.py @@ -1,6 +1,7 @@ """Specify the various types of network events to be reported.""" from enum import Enum + class NetworkEvent(Enum): """All possible network events.""" DEVICE_DISCOVERED = 1 diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 3b3f92e64..39fd3339c 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,5 +1,5 @@ -#!/usr/bin/env python3 - +"""Network orchestrator is responsible for managing +all of the virtual network services""" import getpass import ipaddress import json @@ -10,7 +10,6 @@ import sys import time import threading -from threading import Timer import docker from docker.types import Mount import logger @@ -20,732 +19,771 @@ from network_event import NetworkEvent from network_validator import NetworkValidator -LOGGER = logger.get_logger("net_orc") -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME_DIR = "runtime" -DEVICES_DIR = "devices" -MONITOR_PCAP = "monitor.pcap" -NET_DIR = "runtime/network" -NETWORK_MODULES_DIR = "network/modules" -NETWORK_MODULE_METADATA = "conf/module_config.json" -DEVICE_BRIDGE = "tr-d" -INTERNET_BRIDGE = "tr-c" -PRIVATE_DOCKER_NET = "tr-private-net" -CONTAINER_NAME = "network_orchestrator" - -RUNTIME_KEY = "runtime" -MONITOR_PERIOD_KEY = "monitor_period" -STARTUP_TIMEOUT_KEY = "startup_timeout" +LOGGER = logger.get_logger('net_orc') +CONFIG_FILE = 'conf/system.json' +EXAMPLE_CONFIG_FILE = 'conf/system.json.example' +RUNTIME_DIR = 'runtime' +DEVICES_DIR = 'devices' +MONITOR_PCAP = 'monitor.pcap' +NET_DIR = 'runtime/network' +NETWORK_MODULES_DIR = 'network/modules' +NETWORK_MODULE_METADATA = 'conf/module_config.json' +DEVICE_BRIDGE = 'tr-d' +INTERNET_BRIDGE = 'tr-c' +PRIVATE_DOCKER_NET = 'tr-private-net' +CONTAINER_NAME = 'network_orchestrator' + +RUNTIME_KEY = 'runtime' +MONITOR_PERIOD_KEY = 'monitor_period' +STARTUP_TIMEOUT_KEY = 'startup_timeout' DEFAULT_STARTUP_TIMEOUT = 60 DEFAULT_RUNTIME = 1200 DEFAULT_MONITOR_PERIOD = 300 RUNTIME = 1500 -class NetworkOrchestrator: - """Manage and controls a virtual testing network.""" - - def __init__(self, config_file=CONFIG_FILE, validate=True, async_monitor=False, single_intf = False): - - self._runtime = DEFAULT_RUNTIME - self._startup_timeout = DEFAULT_STARTUP_TIMEOUT - self._monitor_period = DEFAULT_MONITOR_PERIOD - - self._int_intf = None - self._dev_intf = None - self._single_intf = single_intf - - self.listener = None - self._net_modules = [] - self._devices = [] - self.validate = validate - self.async_monitor = async_monitor - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - self.validator = NetworkValidator() - shutil.rmtree(os.path.join(os.getcwd(), NET_DIR), ignore_errors=True) - self.network_config = NetworkConfig() - self.load_config(config_file) - - def start(self): - """Start the network orchestrator.""" - - LOGGER.info("Starting Network Orchestrator") - # Get all components ready - self.load_network_modules() - - # Restore the network first if required - self.stop(kill=True) - - self.start_network() - - if self.async_monitor: - # Run the monitor method asynchronously to keep this method non-blocking - self._monitor_thread = threading.Thread( - target=self.monitor_network) - self._monitor_thread.daemon = True - self._monitor_thread.start() - else: - self.monitor_network() - - def start_network(self): - """Start the virtual testing network.""" - LOGGER.info("Starting network") - - self.build_network_modules() - self.create_net() - self.start_network_services() - - if self.validate: - # Start the validator after network is ready - self.validator.start() - - # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready.") - - def stop(self, kill=False): - """Stop the network orchestrator.""" - self.stop_validator(kill=kill) - self.stop_network(kill=kill) - - def stop_validator(self, kill=False): - """Stop the network validator.""" - # Shutdown the validator - self.validator.stop(kill=kill) - - def stop_network(self, kill=False): - """Stop the virtual testing network.""" - # Shutdown network - self.stop_networking_services(kill=kill) - self.restore_net() - - def monitor_network(self): - # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) - time.sleep(RUNTIME) - - self.stop() - - def load_config(self,config_file=None): - if config_file is None: - # If not defined, use relative pathing to local file - self._config_file=os.path.join(self._path, CONFIG_FILE) - else: - # If defined, use as provided - self._config_file=config_file - - if not os.path.isfile(self._config_file): - LOGGER.error("Configuration file is not present at " + config_file) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - LOGGER.info("Loading config file: " + os.path.abspath(self._config_file)) - with open(self._config_file, encoding='UTF-8') as config_json_file: - config_json = json.load(config_json_file) - self.import_config(config_json) - - def _device_discovered(self, mac_addr): - LOGGER.debug(f'Discovered device {mac_addr}. Waiting for device to obtain IP') - device = self._get_device(mac_addr=mac_addr) - os.makedirs(os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', ''))) - - timeout = time.time() + self._startup_timeout - - while time.time() < timeout: - if device.ip_addr is None: - time.sleep(3) - else: - break - - if device.ip_addr is None: - LOGGER.info(f"Timed out whilst waiting for {mac_addr} to obtain an IP address") - return - - LOGGER.info(f"Device with mac addr {device.mac_addr} has obtained IP address {device.ip_addr}") - - self._start_device_monitor(device) - - def _dhcp_lease_ack(self, packet): - mac_addr = packet[BOOTP].chaddr.hex(":")[0:17] - device = self._get_device(mac_addr=mac_addr) - device.ip_addr = packet[BOOTP].yiaddr - - def _start_device_monitor(self, device): - """Start a timer until the steady state has been reached and +class NetworkOrchestrator: + """Manage and controls a virtual testing network.""" + + def __init__(self, + config_file=CONFIG_FILE, + validate=True, + async_monitor=False, + single_intf=False): + + self._runtime = DEFAULT_RUNTIME + self._startup_timeout = DEFAULT_STARTUP_TIMEOUT + self._monitor_period = DEFAULT_MONITOR_PERIOD + + self._int_intf = None + self._dev_intf = None + self._single_intf = single_intf + + self.listener = None + self._net_modules = [] + self._devices = [] + self.validate = validate + self.async_monitor = async_monitor + + self._path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + self.validator = NetworkValidator() + shutil.rmtree(os.path.join(os.getcwd(), NET_DIR), ignore_errors=True) + self.network_config = NetworkConfig() + self.load_config(config_file) + + def start(self): + """Start the network orchestrator.""" + + LOGGER.info('Starting Network Orchestrator') + # Get all components ready + self.load_network_modules() + + # Restore the network first if required + self.stop(kill=True) + + self.start_network() + + if self.async_monitor: + # Run the monitor method asynchronously to keep this method non-blocking + self._monitor_thread = threading.Thread(target=self.monitor_network) + self._monitor_thread.daemon = True + self._monitor_thread.start() + else: + self.monitor_network() + + def start_network(self): + """Start the virtual testing network.""" + LOGGER.info('Starting network') + + self.build_network_modules() + self.create_net() + self.start_network_services() + + if self.validate: + # Start the validator after network is ready + self.validator.start() + + # Get network ready (via Network orchestrator) + LOGGER.info('Network is ready.') + + def stop(self, kill=False): + """Stop the network orchestrator.""" + self.stop_validator(kill=kill) + self.stop_network(kill=kill) + + def stop_validator(self, kill=False): + """Stop the network validator.""" + # Shutdown the validator + self.validator.stop(kill=kill) + + def stop_network(self, kill=False): + """Stop the virtual testing network.""" + # Shutdown network + self.stop_networking_services(kill=kill) + self.restore_net() + + def monitor_network(self): + # TODO: This time should be configurable (How long to hold before exiting, + # this could be infinite too) + time.sleep(RUNTIME) + + self.stop() + + def load_config(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file = os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file = config_file + + if not os.path.isfile(self._config_file): + LOGGER.error('Configuration file is not present at ' + config_file) + LOGGER.info('An example is present in '+ EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info('Loading config file: ' + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def _device_discovered(self, mac_addr): + + LOGGER.debug( + f'Discovered device {mac_addr}. Waiting for device to obtain IP') + device = self._get_device(mac_addr=mac_addr) + os.makedirs( + os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', + ''))) + + timeout = time.time() + self._startup_timeout + + while time.time() < timeout: + if device.ip_addr is None: + time.sleep(3) + else: + break + + if device.ip_addr is None: + LOGGER.info( + f'Timed out whilst waiting for {mac_addr} to obtain an IP address') + return + + LOGGER.info( + f'Device with mac addr {device.mac_addr} has obtained IP address ' + f'{device.ip_addr}') + + self._start_device_monitor(device) + + def _dhcp_lease_ack(self, packet): + mac_addr = packet[BOOTP].chaddr.hex(':')[0:17] + device = self._get_device(mac_addr=mac_addr) + device.ip_addr = packet[BOOTP].yiaddr + + def _start_device_monitor(self, device): + """Start a timer until the steady state has been reached and callback the steady state method for this device.""" - LOGGER.info(f"Monitoring device with mac addr {device.mac_addr} for {str(self._monitor_period)} seconds") - packet_capture = sniff(iface=self._dev_intf, timeout=self._monitor_period) - wrpcap(os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(":",""), 'monitor.pcap'), packet_capture) - self.listener.call_callback(NetworkEvent.DEVICE_STABLE, device.mac_addr) - - def _get_device(self, mac_addr): - for device in self._devices: - if device.mac_addr == mac_addr: - return device - - device = NetworkDevice(mac_addr=mac_addr) - self._devices.append(device) + LOGGER.info( + f'Monitoring device with mac addr {device.mac_addr} ' + f'for {str(self._monitor_period)} seconds') + + packet_capture = sniff(iface=self._dev_intf, timeout=self._monitor_period) + wrpcap( + os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', ''), + 'monitor.pcap'), packet_capture) + self.listener.call_callback(NetworkEvent.DEVICE_STABLE, device.mac_addr) + + def _get_device(self, mac_addr): + for device in self._devices: + if device.mac_addr == mac_addr: return device - def import_config(self, json_config): - self._int_intf = json_config['network']['internet_intf'] - self._dev_intf = json_config['network']['device_intf'] - - if RUNTIME_KEY in json_config: - self._runtime = json_config[RUNTIME_KEY] - if STARTUP_TIMEOUT_KEY in json_config: - self._startup_timeout = json_config[STARTUP_TIMEOUT_KEY] - if MONITOR_PERIOD_KEY in json_config: - self._monitor_period = json_config[MONITOR_PERIOD_KEY] - - def _check_network_services(self): - LOGGER.debug("Checking network modules...") - for net_module in self._net_modules: - if net_module.enable_container: - LOGGER.debug("Checking network module: " + - net_module.display_name) - success = self._ping(net_module) - if success: - LOGGER.debug(net_module.display_name + - " responded succesfully: " + str(success)) - else: - LOGGER.error(net_module.display_name + - " failed to respond to ping") - - def _ping(self, net_module): - host = net_module.net_config.ipv4_address - namespace = "tr-ctns-" + net_module.dir_name - cmd = "ip netns exec " + namespace + " ping -c 1 " + str(host) - success = util.run_command(cmd, output=False) - return success - - def _create_private_net(self): - client = docker.from_env() - try: - network = client.networks.get(PRIVATE_DOCKER_NET) - network.remove() - except docker.errors.NotFound: - pass - - # TODO: These should be made into variables - ipam_pool = docker.types.IPAMPool( - subnet='100.100.0.0/16', - iprange='100.100.100.0/24' - ) - - ipam_config = docker.types.IPAMConfig( - pool_configs=[ipam_pool] - ) - - client.networks.create( - PRIVATE_DOCKER_NET, - ipam=ipam_config, - internal=True, - check_duplicate=True, - driver="macvlan" - ) - - def _ci_pre_network_create(self): - """ Stores network properties to restore network after + device = NetworkDevice(mac_addr=mac_addr) + self._devices.append(device) + return device + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + + if RUNTIME_KEY in json_config: + self._runtime = json_config[RUNTIME_KEY] + if STARTUP_TIMEOUT_KEY in json_config: + self._startup_timeout = json_config[STARTUP_TIMEOUT_KEY] + if MONITOR_PERIOD_KEY in json_config: + self._monitor_period = json_config[MONITOR_PERIOD_KEY] + + def _check_network_services(self): + LOGGER.debug('Checking network modules...') + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug('Checking network module: ' + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + ' responded succesfully: ' + + str(success)) + else: + LOGGER.error(net_module.display_name + ' failed to respond to ping') + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = 'tr-ctns-' + net_module.dir_name + cmd = 'ip netns exec ' + namespace + ' ping -c 1 ' + str(host) + success = util.run_command(cmd, output=False) + return success + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool(subnet='100.100.0.0/16', + iprange='100.100.100.0/24') + + ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool]) + + client.networks.create(PRIVATE_DOCKER_NET, + ipam=ipam_config, + internal=True, + check_duplicate=True, + driver='macvlan') + + def _ci_pre_network_create(self): + """ Stores network properties to restore network after network creation and flushes internet interface """ - self._ethmac = subprocess.check_output( - f"cat /sys/class/net/{self._int_intf}/address", shell=True).decode("utf-8").strip() - self._gateway = subprocess.check_output( - "ip route | head -n 1 | awk '{print $3}'", shell=True).decode("utf-8").strip() - self._ipv4 = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._ipv6 = subprocess.check_output( - f"ip a show {self._int_intf} | grep inet6 | awk '{{print $2}}'", shell=True).decode("utf-8").strip() - self._brd = subprocess.check_output( - f"ip a show {self._int_intf} | grep \"inet \" | awk '{{print $4}}'", shell=True).decode("utf-8").strip() - - def _ci_post_network_create(self): - """ Restore network connection in CI environment """ - LOGGER.info("post cr") - util.run_command(f"ip address del {self._ipv4} dev {self._int_intf}") - util.run_command(f"ip -6 address del {self._ipv6} dev {self._int_intf}") - util.run_command(f"ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26") - util.run_command(f"ip addr flush dev {self._int_intf}") - util.run_command(f"ip addr add dev {self._int_intf} 0.0.0.0") - util.run_command(f"ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}") - util.run_command(f"ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ") - util.run_command(f"systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8") - util.run_command(f"ip link set dev {INTERNET_BRIDGE} up") - util.run_command(f"dhclient {INTERNET_BRIDGE}") - util.run_command(f"ip route del default via 10.1.0.1") - util.run_command(f"ip route add default via {self._gateway} src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}") - - def create_net(self): - LOGGER.info("Creating baseline network") - - if not util.interface_exists(self._int_intf) or not util.interface_exists(self._dev_intf): - LOGGER.error("Configured interfaces are not ready for use. " + - "Ensure both interfaces are connected.") - sys.exit(1) - - if self._single_intf: - self._ci_pre_network_create() - - # Create data plane - util.run_command("ovs-vsctl add-br " + DEVICE_BRIDGE) - - # Create control plane - util.run_command("ovs-vsctl add-br " + INTERNET_BRIDGE) - - # Add external interfaces to data and control plane - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + self._dev_intf) - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + self._int_intf) - - # Enable forwarding of eapol packets - util.run_command("ovs-ofctl add-flow " + DEVICE_BRIDGE + - " 'table=0, dl_dst=01:80:c2:00:00:03, actions=flood'") - - # Remove IP from internet adapter - util.run_command("ifconfig " + self._int_intf + " 0.0.0.0") - - # Set ports up - util.run_command("ip link set dev " + DEVICE_BRIDGE + " up") - util.run_command("ip link set dev " + INTERNET_BRIDGE + " up") - - if self._single_intf: - self._ci_post_network_create() - - self._create_private_net() - - self.listener = Listener(self._dev_intf) - self.listener.register_callback(self._device_discovered, [ - NetworkEvent.DEVICE_DISCOVERED]) - self.listener.register_callback( - self._dhcp_lease_ack, [NetworkEvent.DHCP_LEASE_ACK]) - self.listener.start_listener() - - def load_network_modules(self): - """Load network modules from module_config.json.""" - LOGGER.debug("Loading network modules from /" + NETWORK_MODULES_DIR) - - loaded_modules = "Loaded the following network modules: " - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - for module_dir in os.listdir(net_modules_dir): - - if self._get_network_module(module_dir) is None: - loaded_module = self._load_network_module(module_dir) - loaded_modules += loaded_module.dir_name + " " - - LOGGER.info(loaded_modules) - - def _load_network_module(self, module_dir): - - net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) - - net_module = NetworkModule() - - # Load basic module information - net_module_json = json.load(open(os.path.join( - self._path, net_modules_dir, module_dir, NETWORK_MODULE_METADATA), encoding='UTF-8')) - - net_module.name = net_module_json['config']['meta']['name'] - net_module.display_name = net_module_json['config']['meta']['display_name'] - net_module.description = net_module_json['config']['meta']['description'] - net_module.dir = os.path.join( - self._path, net_modules_dir, module_dir) - net_module.dir_name = module_dir - net_module.build_file = module_dir + ".Dockerfile" - net_module.container_name = "tr-ct-" + net_module.dir_name - net_module.image_name = "test-run/" + net_module.dir_name - - # Attach folder mounts to network module - if "docker" in net_module_json['config']: - - if "mounts" in net_module_json['config']['docker']: - for mount_point in net_module_json['config']['docker']['mounts']: - net_module.mounts.append(Mount( - target=mount_point['target'], - source=os.path.join( - os.getcwd(), mount_point['source']), - type='bind' - )) - - if "depends_on" in net_module_json['config']['docker']: - depends_on_module = net_module_json['config']['docker']['depends_on'] - if self._get_network_module(depends_on_module) is None: - self._load_network_module(depends_on_module) - - # Determine if this is a container or just an image/template - if "enable_container" in net_module_json['config']['docker']: - net_module.enable_container = net_module_json['config']['docker']['enable_container'] - - # Load network service networking configuration - if net_module.enable_container: - - net_module.net_config.enable_wan = net_module_json['config']['network']['enable_wan'] - net_module.net_config.ip_index = net_module_json['config']['network']['ip_index'] - - net_module.net_config.host = False if not "host" in net_module_json[ - 'config']['network'] else net_module_json['config']['network']['host'] - - net_module.net_config.ipv4_address = self.network_config.ipv4_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv4_network = self.network_config.ipv4_network - - net_module.net_config.ipv6_address = self.network_config.ipv6_network[ - net_module.net_config.ip_index] - net_module.net_config.ipv6_network = self.network_config.ipv6_network - - self._net_modules.append(net_module) + self._ethmac = subprocess.check_output( + f'cat /sys/class/net/{self._int_intf}/address', + shell=True).decode('utf-8').strip() + self._gateway = subprocess.check_output( + 'ip route | head -n 1 | awk \'{print $3}\'', + shell=True).decode('utf-8').strip() + self._ipv4 = subprocess.check_output( + f'ip a show {self._int_intf} | grep \"inet \" | awk \'{{print $2}}\'', + shell=True).decode('utf-8').strip() + self._ipv6 = subprocess.check_output( + f'ip a show {self._int_intf} | grep inet6 | awk \'{{print $2}}\'', + shell=True).decode('utf-8').strip() + self._brd = subprocess.check_output( + f'ip a show {self._int_intf} | grep \"inet \" | awk \'{{print $4}}\'', + shell=True).decode('utf-8').strip() + + def _ci_post_network_create(self): + """ Restore network connection in CI environment """ + LOGGER.info('post cr') + util.run_command(f'ip address del {self._ipv4} dev {self._int_intf}') + util.run_command(f'ip -6 address del {self._ipv6} dev {self._int_intf}') + util.run_command( + f'ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26') + util.run_command(f'ip addr flush dev {self._int_intf}') + util.run_command(f'ip addr add dev {self._int_intf} 0.0.0.0') + util.run_command( + f'ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}') + util.run_command(f'ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ') + util.run_command( + f'systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8') + util.run_command(f'ip link set dev {INTERNET_BRIDGE} up') + util.run_command(f'dhclient {INTERNET_BRIDGE}') + util.run_command('ip route del default via 10.1.0.1') + util.run_command( + f'ip route add default via {self._gateway} ' + f'src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}') + + def create_net(self): + LOGGER.info('Creating baseline network') + + if not util.interface_exists(self._int_intf) or not util.interface_exists( + self._dev_intf): + LOGGER.error('Configured interfaces are not ready for use. ' + + 'Ensure both interfaces are connected.') + sys.exit(1) + + if self._single_intf: + self._ci_pre_network_create() + + # Create data plane + util.run_command('ovs-vsctl add-br ' + DEVICE_BRIDGE) + + # Create control plane + util.run_command('ovs-vsctl add-br ' + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + + self._dev_intf) + util.run_command('ovs-vsctl add-port ' + INTERNET_BRIDGE + ' ' + + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command('ovs-ofctl add-flow ' + DEVICE_BRIDGE + + ' \'table=0, dl_dst=01:80:c2:00:00:03, actions=flood\'') + + # Remove IP from internet adapter + util.run_command('ifconfig ' + self._int_intf + ' 0.0.0.0') + + # Set ports up + util.run_command('ip link set dev ' + DEVICE_BRIDGE + ' up') + util.run_command('ip link set dev ' + INTERNET_BRIDGE + ' up') + + if self._single_intf: + self._ci_post_network_create() + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.register_callback(self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED]) + self.listener.register_callback(self._dhcp_lease_ack, + [NetworkEvent.DHCP_LEASE_ACK]) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug('Loading network modules from /' + NETWORK_MODULES_DIR) + + loaded_modules = 'Loaded the following network modules: ' + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + if self._get_network_module(module_dir) is None: + loaded_module = self._load_network_module(module_dir) + loaded_modules += loaded_module.dir_name + ' ' + + LOGGER.info(loaded_modules) + + def _load_network_module(self, module_dir): + + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + net_module = NetworkModule() + + # Load module information + with open(os.path.join(self._path, net_modules_dir, module_dir, + NETWORK_MODULE_METADATA), 'r', + encoding='UTF-8') as module_file_open: + net_module_json = json.load(module_file_open) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join(self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + '.Dockerfile' + net_module.container_name = 'tr-ct-' + net_module.dir_name + net_module.image_name = 'test-run/' + net_module.dir_name + + # Attach folder mounts to network module + if 'docker' in net_module_json['config']: + + if 'mounts' in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append( + Mount(target=mount_point['target'], + source=os.path.join(os.getcwd(), mount_point['source']), + type='bind')) + + if 'depends_on' in net_module_json['config']['docker']: + depends_on_module = net_module_json['config']['docker']['depends_on'] + if self._get_network_module(depends_on_module) is None: + self._load_network_module(depends_on_module) + + # Determine if this is a container or just an image/template + if 'enable_container' in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker'][ + 'enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network'][ + 'enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network'][ + 'ip_index'] + + net_module.net_config.host = False if not 'host' in net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + self._net_modules.append(net_module) + return net_module + + def build_network_modules(self): + LOGGER.info('Building network modules...') + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug('Building network module ' + net_module.dir_name) + client = docker.from_env() + client.images.build(dockerfile=os.path.join(net_module.dir, + net_module.build_file), + path=self._path, + forcerm=True, + tag='test-run/' + net_module.dir_name) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name in (net_module.display_name, net_module.name, + net_module.dir_name): return net_module + return None - def build_network_modules(self): - LOGGER.info("Building network modules...") - for net_module in self._net_modules: - self._build_module(net_module) - - def _build_module(self, net_module): - LOGGER.debug("Building network module " + net_module.dir_name) - client = docker.from_env() - client.images.build( - dockerfile=os.path.join(net_module.dir, net_module.build_file), - path=self._path, - forcerm=True, - tag="test-run/" + net_module.dir_name - ) - - def _get_network_module(self, name): - for net_module in self._net_modules: - if name == net_module.display_name or name == net_module.name or name == net_module.dir_name: - return net_module - return None - - # Start the OVS network module - # This should always be called before loading all - # other modules to allow for a properly setup base - # network - def _start_ovs_module(self): - self._start_network_service(self._get_network_module("OVS")) - - def _start_network_service(self, net_module): - - LOGGER.debug("Starting net service " + net_module.display_name) - network = "host" if net_module.net_config.host else PRIVATE_DOCKER_NET - LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, - container name: {net_module.container_name}""") - try: - client = docker.from_env() - net_module.container = client.containers.run( - net_module.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=net_module.container_name, - hostname=net_module.container_name, - network=PRIVATE_DOCKER_NET, - privileged=True, - detach=True, - mounts=net_module.mounts, - environment={"HOST_USER": getpass.getuser()} - ) - except docker.errors.ContainerError as error: - LOGGER.error("Container run error") - LOGGER.error(error) - - if network != "host": - self._attach_service_to_network(net_module) - - def _stop_service_module(self, net_module, kill=False): - LOGGER.debug("Stopping Service container " + net_module.container_name) - try: - container = self._get_service_container(net_module) - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - net_module.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - net_module.container_name) - container.stop() - LOGGER.debug("Container stopped:" + net_module.container_name) - except Exception as error: - LOGGER.error("Container stop error") - LOGGER.error(error) - - def _get_service_container(self, net_module): - LOGGER.debug("Resolving service container: " + - net_module.container_name) - container = None - try: - client = docker.from_env() - container = client.containers.get(net_module.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - net_module.container_name + " not found") - except Exception as e: - LOGGER.error("Failed to resolve container") - LOGGER.error(e) - return container - - def stop_networking_services(self, kill=False): - LOGGER.info("Stopping network services") - for net_module in self._net_modules: - # Network modules may just be Docker images, so we do not want to stop them - if not net_module.enable_container: - continue - self._stop_service_module(net_module, kill) - - def start_network_services(self): - LOGGER.info("Starting network services") - - os.makedirs(os.path.join(os.getcwd(), NET_DIR), exist_ok=True) - - for net_module in self._net_modules: - - # TODO: There should be a better way of doing this - # Do not try starting OVS module again, as it should already be running - if "OVS" != net_module.display_name: - - # Network modules may just be Docker images, so we do not want to start them as containers - if not net_module.enable_container: - continue - - self._start_network_service(net_module) - - LOGGER.info("All network services are running") - self._check_network_services() - - def _attach_test_module_to_network(self, test_module): - LOGGER.debug("Attaching test module " + - test_module.display_name + " to device bridge") - - # Device bridge interface example: tr-d-t-baseline (Test Run Device Interface for Test container) - bridge_intf = DEVICE_BRIDGE + "-t-" + test_module.dir_name - - # Container interface example: tr-cti-baseline-test (Test Run Test Container Interface for test container) - container_intf = "tr-tci-" + test_module.dir_name - - # Container network namespace name - container_net_ns = "tr-test-" + test_module.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) - - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + test_module.container_name)[0] - - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") - - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(test_module.ip_index)) - - # Set IP address of container interface - ipv4_address = self.network_config.ipv4_network[test_module.ip_index] - ipv6_address = self.network_config.ipv6_network[test_module.ip_index] - - ipv4_address_with_prefix=str(ipv4_address) + "/" + str(self.network_config.ipv4_network.prefixlen) - ipv6_address_with_prefix=str(ipv6_address) + "/" + str(self.network_config.ipv6_network.prefixlen) - - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - ipv4_address_with_prefix + " dev veth0") - - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - ipv6_address_with_prefix + " dev veth0") + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module('OVS')) + def _start_network_service(self, net_module): - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") + LOGGER.debug('Starting net service ' + net_module.display_name) + network = 'host' if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=['NET_ADMIN'], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + detach=True, + mounts=net_module.mounts, + environment={'HOST_USER': getpass.getuser()}) + except docker.errors.ContainerError as error: + LOGGER.error('Container run error') + LOGGER.error(error) + + if network != 'host': + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug('Stopping Service container ' + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug('Killing container:' + net_module.container_name) + container.kill() + else: + LOGGER.debug('Stopping container:' + net_module.container_name) + container.stop() + LOGGER.debug('Container stopped:' + net_module.container_name) + except Exception as error: # pylint: disable=W0703 + LOGGER.error('Container stop error') + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug('Resolving service container: ' + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug('Container ' + net_module.container_name + ' not found') + except Exception as e: # pylint: disable=W0703 + LOGGER.error('Failed to resolve container') + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info('Stopping network services') + for net_module in self._net_modules: + # Network modules may just be Docker images, + # so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info('Starting network services') + + os.makedirs(os.path.join(os.getcwd(), NET_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if 'OVS' != net_module.display_name: + + # Network modules may just be Docker images, + # so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info('All network services are running') + self._check_network_services() + + def _attach_test_module_to_network(self, test_module): + LOGGER.debug('Attaching test module ' + test_module.display_name + + ' to device bridge') + + # Device bridge interface example: + # tr-d-t-baseline (Test Run Device Interface for Test container) + bridge_intf = DEVICE_BRIDGE + '-t-' + test_module.dir_name + + # Container interface example: + # tr-cti-baseline-test (Test Run Container Interface for test container) + container_intf = 'tr-tci-' + test_module.dir_name + + # Container network namespace name + container_net_ns = 'tr-test-' + test_module.dir_name + + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) + + # Add bridge interface to device bridge + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + test_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command('ln -sf /proc/' + container_pid + + '/ns/net /var/run/netns/' + container_net_ns) + + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) + + # Rename container interface name to veth0 + util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + + container_intf + ' name veth0') + + # Set MAC address of container interface + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 address 9a:02:57:1e:8f:' + + str(test_module.ip_index)) + + # Set IP address of container interface + ipv4_address = self.network_config.ipv4_network[test_module.ip_index] + ipv6_address = self.network_config.ipv6_network[test_module.ip_index] + + ipv4_address_with_prefix = str(ipv4_address) + '/' + str( + self.network_config.ipv4_network.prefixlen) + ipv6_address_with_prefix = str(ipv6_address) + '/' + str( + self.network_config.ipv6_network.prefixlen) + + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + ipv4_address_with_prefix + ' dev veth0') + + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + ipv6_address_with_prefix + ' dev veth0') + + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 up') + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug('Attaching net service ' + net_module.display_name + + ' to device bridge') - # TODO: Let's move this into a separate script? It does not look great - def _attach_service_to_network(self, net_module): - LOGGER.debug("Attaching net service " + - net_module.display_name + " to device bridge") + # Device bridge interface example: + # tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + 'i-' + net_module.dir_name - # Device bridge interface example: tr-di-dhcp (Test Run Device Interface for DHCP container) - bridge_intf = DEVICE_BRIDGE + "i-" + net_module.dir_name + # Container interface example: + # tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = 'tr-cti-' + net_module.dir_name - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name + # Container network namespace name + container_net_ns = 'tr-ctns-' + net_module.dir_name - # Container network namespace name - container_net_ns = "tr-ctns-" + net_module.dir_name + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) + # Add bridge interface to device bridge + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + net_module.container_name)[0] - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + net_module.container_name)[0] + # Create symlink for container network namespace + util.run_command('ln -sf /proc/' + container_pid + + '/ns/net /var/run/netns/' + container_net_ns) - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) + # Rename container interface name to veth0 + util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + + container_intf + ' name veth0') - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") + # Set MAC address of container interface + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 address 9a:02:57:1e:8f:' + + str(net_module.net_config.ip_index)) - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip link set dev veth0 address 9a:02:57:1e:8f:" + str(net_module.net_config.ip_index)) + # Set IP address of container interface + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + net_module.net_config.get_ipv4_addr_with_prefix() + + ' dev veth0') - # Set IP address of container interface - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv4_addr_with_prefix() + " dev veth0") + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + net_module.net_config.get_ipv6_addr_with_prefix() + + ' dev veth0') - util.run_command("ip netns exec " + container_net_ns + " ip addr add " + - net_module.net_config.get_ipv6_addr_with_prefix() + " dev veth0") + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 up') - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") + if net_module.net_config.enable_wan: + LOGGER.debug('Attaching net service ' + net_module.display_name + + ' to internet bridge') - if net_module.net_config.enable_wan: - LOGGER.debug("Attaching net service " + - net_module.display_name + " to internet bridge") + # Internet bridge interface example: + # tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + 'i-' + net_module.dir_name - # Internet bridge interface example: tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) - bridge_intf = INTERNET_BRIDGE + "i-" + net_module.dir_name + # Container interface example: + # tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = 'tr-cti-' + net_module.dir_name - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + net_module.dir_name + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) + # Attach bridge interface to internet bridge + util.run_command('ovs-vsctl add-port ' + INTERNET_BRIDGE + ' ' + + bridge_intf) - # Attach bridge interface to internet bridge - util.run_command("ovs-vsctl add-port " + - INTERNET_BRIDGE + " " + bridge_intf) + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) + # Rename container interface name to eth1 + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev ' + container_intf + ' name eth1') - # Rename container interface name to eth1 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name eth1") + # Set MAC address of container interface + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev eth1 address 9a:02:57:1e:8f:0' + + str(net_module.net_config.ip_index)) - # Set MAC address of container interface - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev eth1 address 9a:02:57:1e:8f:0" + str(net_module.net_config.ip_index)) + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev eth1 up') - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + - container_net_ns + " ip link set dev eth1 up") + def restore_net(self): - def restore_net(self): + LOGGER.info('Clearing baseline network') - LOGGER.info("Clearing baseline network") + if hasattr(self, 'listener' + ) and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() - if hasattr(self, 'listener') and self.listener is not None and self.listener.is_running(): - self.listener.stop_listener() + client = docker.from_env() - client = docker.from_env() + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get('tr-ct-' + net_module.dir_name) + container.kill() + except Exception: # pylint: disable=W0703 + continue - # Stop all network containers if still running - for net_module in self._net_modules: - try: - container = client.containers.get( - "tr-ct-" + net_module.dir_name) - container.kill() - except Exception: - continue + # Delete data plane + util.run_command('ovs-vsctl --if-exists del-br tr-d') - # Delete data plane - util.run_command("ovs-vsctl --if-exists del-br tr-d") + # Delete control plane + util.run_command('ovs-vsctl --if-exists del-br tr-c') - # Delete control plane - util.run_command("ovs-vsctl --if-exists del-br tr-c") + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command('ip link set ' + self._int_intf + ' down') + util.run_command('ip link set ' + self._int_intf + ' up') - # Restart internet interface - if util.interface_exists(self._int_intf): - util.run_command("ip link set " + self._int_intf + " down") - util.run_command("ip link set " + self._int_intf + " up") + LOGGER.info('Network is restored') - LOGGER.info("Network is restored") class NetworkModule: + """Define all the properties of a Network Module""" - def __init__(self): - self.name = None - self.display_name = None - self.description = None + def __init__(self): + self.name = None + self.display_name = None + self.description = None - self.container = None - self.container_name = None - self.image_name = None + self.container = None + self.container_name = None + self.image_name = None - # Absolute path - self.dir = None - self.dir_name = None - self.build_file = None - self.mounts = [] + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] - self.enable_container = True + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() - self.net_config = NetworkModuleNetConfig() # The networking configuration for a network module + class NetworkModuleNetConfig: + """Define all the properties of the network config + for a network module""" - def __init__(self): + def __init__(self): - self.enable_wan = False + self.enable_wan = False - self.ip_index = 0 - self.ipv4_address = None - self.ipv4_network = None - self.ipv6_address = None - self.ipv6_network = None + self.ip_index = 0 + self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None - self.host = False + self.host = False - def get_ipv4_addr_with_prefix(self): - return format(self.ipv4_address) + "/" + str(self.ipv4_network.prefixlen) + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + '/' + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + '/' + str(self.ipv6_network.prefixlen) - def get_ipv6_addr_with_prefix(self): - return format(self.ipv6_address) + "/" + str(self.ipv6_network.prefixlen) # Represents the current configuration of the network for the device bridge + class NetworkConfig: + """Define all the properties of the network configuration""" - # TODO: Let's get this from a configuration file - def __init__(self): - self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') - self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py index 2f01a06e9..83ca6f671 100644 --- a/net_orc/python/src/network_validator.py +++ b/net_orc/python/src/network_validator.py @@ -9,267 +9,254 @@ import logger import util -LOGGER = logger.get_logger("validator") -OUTPUT_DIR = "runtime/validation" -DEVICES_DIR = "network/devices" -DEVICE_METADATA = "conf/module_config.json" -DEVICE_BRIDGE = "tr-d" -CONF_DIR = "conf" -CONF_FILE = "system.json" +LOGGER = logger.get_logger('validator') +OUTPUT_DIR = 'runtime/validation' +DEVICES_DIR = 'network/devices' +DEVICE_METADATA = 'conf/module_config.json' +DEVICE_BRIDGE = 'tr-d' +CONF_DIR = 'conf' +CONF_FILE = 'system.json' + class NetworkValidator: - """Perform validation of network services.""" - - def __init__(self): - self._net_devices = [] - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - self._device_dir = os.path.join(self._path, DEVICES_DIR) - - shutil.rmtree(os.path.join(self._path, OUTPUT_DIR), ignore_errors=True) - - def start(self): - """Start the network validator.""" - LOGGER.info("Starting validator") - self._load_devices() - self._build_network_devices() - self._start_network_devices() - - def stop(self, kill=False): - """Stop the network validator.""" - LOGGER.info("Stopping validator") - self._stop_network_devices(kill) - LOGGER.info("Validator stopped") - - def _build_network_devices(self): - LOGGER.debug("Building network validators...") - for net_device in self._net_devices: - self._build_device(net_device) - - def _build_device(self, net_device): - LOGGER.debug("Building network validator " + net_device.dir_name) - try: - client = docker.from_env() - client.images.build( - dockerfile=os.path.join(net_device.dir, net_device.build_file), - path=self._path, - forcerm=True, - tag="test-run/" + net_device.dir_name - ) - LOGGER.debug("Validator device built: " + net_device.dir_name) - except docker.errors.BuildError as error: - LOGGER.error("Container build error") - LOGGER.error(error) - - def _load_devices(self): - - LOGGER.info(f"Loading validators from {DEVICES_DIR}") - - loaded_devices = "Loaded the following validators: " - - for module_dir in os.listdir(self._device_dir): - - device = FauxDevice() - - # Load basic module information - with open(os.path.join(self._device_dir, module_dir, DEVICE_METADATA), - encoding='utf-8') as device_config_file: - device_json = json.load(device_config_file) - - device.name = device_json['config']['meta']['name'] - device.description = device_json['config']['meta']['description'] - - device.dir = os.path.join(self._path, self._device_dir, module_dir) - device.dir_name = module_dir - device.build_file = module_dir + ".Dockerfile" - device.container_name = "tr-ct-" + device.dir_name - device.image_name = "test-run/" + device.dir_name - - runtime_source = os.path.join(os.getcwd(), OUTPUT_DIR, device.name) - conf_source = os.path.join(os.getcwd(), CONF_DIR) - os.makedirs(runtime_source, exist_ok=True) - - device.mounts = [ - Mount( - target='/runtime/validation', - source=runtime_source, - type = 'bind' - ), - Mount( - target='/conf', - source=conf_source, - type='bind', - read_only=True - ), - Mount( - target='/runtime/network', - source=runtime_source, - type='bind' - ) - ] - - if 'timeout' in device_json['config']['docker']: - device.timeout = device_json['config']['docker']['timeout'] - - # Determine if this is a container or just an image/template - if "enable_container" in device_json['config']['docker']: - device.enable_container = device_json['config']['docker']['enable_container'] - - self._net_devices.append(device) - - loaded_devices += device.dir_name + " " - - LOGGER.info(loaded_devices) - - def _start_network_devices(self): - LOGGER.debug("Starting network devices") - for net_device in self._net_devices: - self._start_network_device(net_device) - - def _start_network_device(self, device): - LOGGER.info("Starting device " + device.name) - LOGGER.debug("Image name: " + device.image_name) - LOGGER.debug("Container name: " + device.container_name) - - try: - client = docker.from_env() - device.container = client.containers.run( - device.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=device.container_name, - hostname=device.container_name, - network="none", - privileged=True, - detach=True, - mounts=device.mounts, - environment={"HOST_USER": getpass.getuser()} - ) - except docker.errors.ContainerError as error: - LOGGER.error("Container run error") - LOGGER.error(error) - - self._attach_device_to_network(device) - - # Determine the module timeout time - test_module_timeout = time.time() + device.timeout - status = self._get_device_status(device) - - while time.time() < test_module_timeout and status == 'running': - time.sleep(1) - status = self._get_device_status(device) - - LOGGER.info("Validation device " + device.name + " has finished") - - def _get_device_status(self,module): - container = self._get_device_container(module) - if container is not None: - return container.status - return None - - def _attach_device_to_network(self, device): - LOGGER.debug("Attaching device " + device.name + " to device bridge") - - # Device bridge interface example: tr-di-dhcp - # (Test Run Device Interface for DHCP container) - bridge_intf = DEVICE_BRIDGE + "i-" + device.dir_name - - # Container interface example: tr-cti-dhcp (Test Run Container Interface for DHCP container) - container_intf = "tr-cti-" + device.dir_name - - # Container network namespace name - container_net_ns = "tr-ctns-" + device.dir_name - - # Create interface pair - util.run_command("ip link add " + bridge_intf + - " type veth peer name " + container_intf) - - # Add bridge interface to device bridge - util.run_command("ovs-vsctl add-port " + - DEVICE_BRIDGE + " " + bridge_intf) - - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command( - "docker inspect -f {{.State.Pid}} " + device.container_name)[0] - - # Create symlink for container network namespace - util.run_command("ln -sf /proc/" + container_pid + - "/ns/net /var/run/netns/" + container_net_ns) - - # Attach container interface to container network namespace - util.run_command("ip link set " + container_intf + - " netns " + container_net_ns) - - # Rename container interface name to veth0 - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev " + container_intf + " name veth0") - - # Set interfaces up - util.run_command("ip link set dev " + bridge_intf + " up") - util.run_command("ip netns exec " + container_net_ns + - " ip link set dev veth0 up") - - def _stop_network_device(self, net_device, kill=False): - LOGGER.debug("Stopping device container " + net_device.container_name) - try: - container = self._get_device_container(net_device) - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - net_device.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - net_device.container_name) - container.stop() - LOGGER.debug("Container stopped:" + net_device.container_name) - except Exception as e: - LOGGER.error("Container stop error") - LOGGER.error(e) - - def _get_device_container(self, net_device): - LOGGER.debug("Resolving device container: " + - net_device.container_name) - container = None - try: - client = docker.from_env() - container = client.containers.get(net_device.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - net_device.container_name + " not found") - except Exception as e: - LOGGER.error("Failed to resolve container") - LOGGER.error(e) - return container - - def _stop_network_devices(self, kill=False): - LOGGER.debug("Stopping devices") - for net_device in self._net_devices: - # Devices may just be Docker images, so we do not want to stop them - if not net_device.enable_container: - continue - self._stop_network_device(net_device, kill) - -class FauxDevice: # pylint: disable=too-few-public-methods,too-many-instance-attributes - """Represent a faux device.""" - - def __init__(self): - self.name = "Unknown device" - self.description = "Unknown description" - - self.container = None - self.container_name = None - self.image_name = None - - # Absolute path - self.dir = None - - self.dir_name = None - self.build_file = None - self.mounts = [] - - self.enable_container = True - self.timeout = 60 + """Perform validation of network services.""" + + def __init__(self): + self._net_devices = [] + + self._path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + self._device_dir = os.path.join(self._path, DEVICES_DIR) + + shutil.rmtree(os.path.join(self._path, OUTPUT_DIR), ignore_errors=True) + + def start(self): + """Start the network validator.""" + LOGGER.info('Starting validator') + self._load_devices() + self._build_network_devices() + self._start_network_devices() + + def stop(self, kill=False): + """Stop the network validator.""" + LOGGER.info('Stopping validator') + self._stop_network_devices(kill) + LOGGER.info('Validator stopped') + + def _build_network_devices(self): + LOGGER.debug('Building network validators...') + for net_device in self._net_devices: + self._build_device(net_device) + + def _build_device(self, net_device): + LOGGER.debug('Building network validator ' + net_device.dir_name) + try: + client = docker.from_env() + client.images.build(dockerfile=os.path.join(net_device.dir, + net_device.build_file), + path=self._path, + forcerm=True, + tag='test-run/' + net_device.dir_name) + LOGGER.debug('Validator device built: ' + net_device.dir_name) + except docker.errors.BuildError as error: + LOGGER.error('Container build error') + LOGGER.error(error) + + def _load_devices(self): + + LOGGER.info(f'Loading validators from {DEVICES_DIR}') + + loaded_devices = 'Loaded the following validators: ' + + for module_dir in os.listdir(self._device_dir): + + device = FauxDevice() + + # Load basic module information + with open(os.path.join(self._device_dir, module_dir, DEVICE_METADATA), + encoding='utf-8') as device_config_file: + device_json = json.load(device_config_file) + + device.name = device_json['config']['meta']['name'] + device.description = device_json['config']['meta']['description'] + + device.dir = os.path.join(self._path, self._device_dir, module_dir) + device.dir_name = module_dir + device.build_file = module_dir + '.Dockerfile' + device.container_name = 'tr-ct-' + device.dir_name + device.image_name = 'test-run/' + device.dir_name + + runtime_source = os.path.join(os.getcwd(), OUTPUT_DIR, device.name) + conf_source = os.path.join(os.getcwd(), CONF_DIR) + os.makedirs(runtime_source, exist_ok=True) + + device.mounts = [ + Mount(target='/runtime/validation', + source=runtime_source, + type='bind'), + Mount(target='/conf', source=conf_source, type='bind', + read_only=True), + Mount(target='/runtime/network', source=runtime_source, type='bind') + ] + + if 'timeout' in device_json['config']['docker']: + device.timeout = device_json['config']['docker']['timeout'] + + # Determine if this is a container or just an image/template + if 'enable_container' in device_json['config']['docker']: + device.enable_container = device_json['config']['docker'][ + 'enable_container'] + + self._net_devices.append(device) + + loaded_devices += device.dir_name + ' ' + + LOGGER.info(loaded_devices) + + def _start_network_devices(self): + LOGGER.debug('Starting network devices') + for net_device in self._net_devices: + self._start_network_device(net_device) + + def _start_network_device(self, device): + LOGGER.info('Starting device ' + device.name) + LOGGER.debug('Image name: ' + device.image_name) + LOGGER.debug('Container name: ' + device.container_name) + + try: + client = docker.from_env() + device.container = client.containers.run( + device.image_name, + auto_remove=True, + cap_add=['NET_ADMIN'], + name=device.container_name, + hostname=device.container_name, + network='none', + privileged=True, + detach=True, + mounts=device.mounts, + environment={'HOST_USER': getpass.getuser()}) + except docker.errors.ContainerError as error: + LOGGER.error('Container run error') + LOGGER.error(error) + + self._attach_device_to_network(device) + + # Determine the module timeout time + test_module_timeout = time.time() + device.timeout + status = self._get_device_status(device) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._get_device_status(device) + + LOGGER.info('Validation device ' + device.name + ' has finished') + + def _get_device_status(self, module): + container = self._get_device_container(module) + if container is not None: + return container.status + return None + + def _attach_device_to_network(self, device): + LOGGER.debug('Attaching device ' + device.name + ' to device bridge') + + # Device bridge interface example: tr-di-dhcp + # (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + 'i-' + device.dir_name + + # Container interface example: + # tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = 'tr-cti-' + device.dir_name + + # Container network namespace name + container_net_ns = 'tr-ctns-' + device.dir_name + + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) + + # Add bridge interface to device bridge + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + device.container_name)[0] + + # Create symlink for container network namespace + util.run_command('ln -sf /proc/' + container_pid + + '/ns/net /var/run/netns/' + container_net_ns) + + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) + + # Rename container interface name to veth0 + util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + + container_intf + ' name veth0') + + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 up') + + def _stop_network_device(self, net_device, kill=False): + LOGGER.debug('Stopping device container ' + net_device.container_name) + try: + container = self._get_device_container(net_device) + if container is not None: + if kill: + LOGGER.debug('Killing container:' + net_device.container_name) + container.kill() + else: + LOGGER.debug('Stopping container:' + net_device.container_name) + container.stop() + LOGGER.debug('Container stopped:' + net_device.container_name) + except Exception as e: # pylint: disable=W0703 + LOGGER.error('Container stop error') + LOGGER.error(e) + + def _get_device_container(self, net_device): + LOGGER.debug('Resolving device container: ' + net_device.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_device.container_name) + except docker.errors.NotFound: + LOGGER.debug('Container ' + net_device.container_name + ' not found') + except Exception as e: # pylint: disable=W0703 + LOGGER.error('Failed to resolve container') + LOGGER.error(e) + return container + + def _stop_network_devices(self, kill=False): + LOGGER.debug('Stopping devices') + for net_device in self._net_devices: + # Devices may just be Docker images, so we do not want to stop them + if not net_device.enable_container: + continue + self._stop_network_device(net_device, kill) + + +class FauxDevice: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represent a faux device.""" + + def __init__(self): + self.name = 'Unknown device' + self.description = 'Unknown description' + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + self.timeout = 60 diff --git a/net_orc/python/src/run_validator.py b/net_orc/python/src/run_validator.py deleted file mode 100644 index 318456083..000000000 --- a/net_orc/python/src/run_validator.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python3 - -import os -import logger -import signal -import time -import os - -from network_orchestrator import NetworkOrchestrator -from network_orchestrator_validator import NetworkOrchestratorValidator - -LOGGER = logger.get_logger('test_run') -RUNTIME_FOLDER = "runtime/network" - -class ValidatorRun: - - def __init__(self): - - signal.signal(signal.SIGINT, self.handler) - signal.signal(signal.SIGTERM, self.handler) - signal.signal(signal.SIGABRT, self.handler) - signal.signal(signal.SIGQUIT, self.handler) - - LOGGER.info("Starting Network Orchestrator") - #os.makedirs(RUNTIME_FOLDER) - - # Cleanup any old validator components - self._validator = NetworkOrchestratorValidator() - self._validator._stop_validator(True); - - # Start the validator after network is ready - self._validator._start_validator() - - # TODO: Kill validator once all faux devices are no longer running - time.sleep(2000) - - # Gracefully shutdown network - self._validator._stop_validator(); - - def handler(self, signum, frame): - LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.debug("Exit signal received: " + str(signum)) - if (signum == 2 or signum == signal.SIGTERM): - LOGGER.info("Exit signal received. Stopping validator...") - # Kill all container services quickly - # If we're here, we want everything to stop immediately - # and don't care about a gracefully shutdown. - self._validator._stop_validator(True); - LOGGER.info("Validator stopped") - exit(1) - -test_run = ValidatorRun() diff --git a/net_orc/python/src/util.py b/net_orc/python/src/util.py index e4a4bd5fd..a7b07ddf9 100644 --- a/net_orc/python/src/util.py +++ b/net_orc/python/src/util.py @@ -4,7 +4,8 @@ import logger import netifaces -LOGGER = logger.get_logger("util") +LOGGER = logger.get_logger('util') + def run_command(cmd, output=True): """Runs a process at the os level @@ -19,19 +20,22 @@ def run_command(cmd, output=True): stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() - if process.returncode !=0 and output: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - LOGGER.error("Command Failed: " + cmd) - LOGGER.error("Error: " + err_msg) + + if process.returncode != 0 and output: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) else: success = True if output: - return stdout.strip().decode("utf-8"), stderr + return stdout.strip().decode('utf-8'), stderr else: return success + def interface_exists(interface): return interface in netifaces.interfaces() + def prettify(mac_string): - return ':'.join('%02x' % ord(b) for b in mac_string) + return ':'.join([f'{ord(b):02x}' for b in mac_string]) From 41aaaf7a819bfddcfaab0aab2e8c7b51e48a3d3e Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Tue, 23 May 2023 12:22:43 -0700 Subject: [PATCH 18/48] Test results (#27) * Collect all module test results * Fix test modules without config options * Add timestamp to test results --- .../modules/base/python/src/test_module.py | 5 +- test_orc/python/src/test_orchestrator.py | 458 +++++++++--------- 2 files changed, 240 insertions(+), 223 deletions(-) diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 522a048f4..2ca686fa9 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -2,6 +2,7 @@ import logger import os import util +from datetime import datetime LOGGER = None RESULTS_DIR = "/runtime/output/" @@ -43,7 +44,8 @@ def _get_device_tests(self, device_test_module): # and update module test config with device config options if test["name"] in device_test_module["tests"]: dev_test_config = device_test_module["tests"][test["name"]] - test["config"].update(dev_test_config) + if "config" in test: + test["config"].update(dev_test_config) return module_tests def _get_device_test_module(self): @@ -83,6 +85,7 @@ def run_tests(self): test["result"] = "compliant" if result else "non-compliant" else: test["result"] = "skipped" + test["timestamp"] = datetime.now().isoformat() json_results = json.dumps({"results": tests}, indent=2) self._write_results(json_results) diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 48a0cb32d..acd24b59a 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -17,225 +17,239 @@ class TestOrchestrator: - """Manages and controls the test modules.""" - - def __init__(self,net_orc): - self._test_modules = [] - self._module_config = None - self._net_orc = net_orc - - self._path = os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__)))) - - # Resolve the path to the test-run folder - self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) - - shutil.rmtree(os.path.join(self._root_path, - RUNTIME_DIR), ignore_errors=True) - os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) - - def start(self): - LOGGER.info("Starting Test Orchestrator") - self._load_test_modules() - self.build_test_modules() - - def stop(self): - """Stop any running tests""" - self._stop_modules() - - def run_test_modules(self, device): - """Iterates through each test module and starts the container.""" - LOGGER.info(f"Running test modules on device with mac addr {device.mac_addr}") - for module in self._test_modules: - self._run_test_module(module, device) - LOGGER.info("All tests complete") - - def _run_test_module(self, module, device): - """Start the test container and extract the results.""" - - if module is None or not module.enable_container: - return - - LOGGER.info("Running test module " + module.name) - - try: - container_runtime_dir = os.path.join( - self._root_path, "runtime/test/" + device.mac_addr.replace(":","") + "/" + module.name) - network_runtime_dir = os.path.join( - self._root_path, "runtime/network") - os.makedirs(container_runtime_dir) - - client = docker.from_env() - - module.container = client.containers.run( - module.image_name, - auto_remove=True, - cap_add=["NET_ADMIN"], - name=module.container_name, - hostname=module.container_name, - privileged=True, - detach=True, - mounts=[ - Mount( - target="/runtime/output", - source=container_runtime_dir, - type='bind' - ), - Mount( - target="/runtime/network", - source=network_runtime_dir, - type='bind', - read_only=True - ), - ], - environment={ - "HOST_USER": getpass.getuser(), - "DEVICE_MAC": device.mac_addr, - "DEVICE_TEST_MODULES": device.test_modules, - "IPV4_SUBNET": self._net_orc.network_config.ipv4_network, - "IPV6_SUBNET": self._net_orc.network_config.ipv6_network - } - ) - except (docker.errors.APIError, docker.errors.ContainerError) as container_error: - LOGGER.error("Test module " + module.name + " has failed to start") - LOGGER.debug(container_error) - return - - # Mount the test container to the virtual network if requried - if module.network: - LOGGER.debug("Attaching test module to the network") - self._net_orc._attach_test_module_to_network(module) - - # Determine the module timeout time - test_module_timeout = time.time() + module.timeout - status = self._get_module_status(module) - - while time.time() < test_module_timeout and status == 'running': - time.sleep(1) - status = self._get_module_status(module) - - LOGGER.info("Test module " + module.name + " has finished") - - def _get_module_status(self, module): - container = self._get_module_container(module) - if container is not None: - return container.status - return None - - def _get_test_module(self, name): - for test_module in self._test_modules: - if name == test_module.display_name or name == test_module.name or name == test_module.dir_name: - return test_module - return None - - def _get_module_container(self, module): - container = None - try: - client = docker.from_env() - container = client.containers.get(module.container_name) - except docker.errors.NotFound: - LOGGER.debug("Container " + - module.container_name + " not found") - except docker.errors.APIError as error: - LOGGER.error("Failed to resolve container") - LOGGER.error(error) - return container - - def _load_test_modules(self): - """Load network modules from module_config.json.""" - LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR) - - loaded_modules = "Loaded the following test modules: " - test_modules_dir = os.path.join(self._path, TEST_MODULES_DIR) - - for module_dir in os.listdir(test_modules_dir): - - if self._get_test_module(module_dir) is None: - loaded_module = self._load_test_module(module_dir) - loaded_modules += loaded_module.dir_name + " " - - LOGGER.info(loaded_modules) - - def _load_test_module(self,module_dir): - """Import module configuration from module_config.json.""" - - modules_dir = os.path.join(self._path, TEST_MODULES_DIR) - - # Load basic module information - module = TestModule() - with open(os.path.join( - self._path, - modules_dir, - module_dir, - MODULE_CONFIG), - encoding='UTF-8') as module_config_file: - module_json = json.load(module_config_file) - - module.name = module_json['config']['meta']['name'] - module.display_name = module_json['config']['meta']['display_name'] - module.description = module_json['config']['meta']['description'] - module.dir = os.path.join(self._path, modules_dir, module_dir) - module.dir_name = module_dir - module.build_file = module_dir + ".Dockerfile" - module.container_name = "tr-ct-" + module.dir_name + "-test" - module.image_name = "test-run/" + module.dir_name + "-test" - - if 'timeout' in module_json['config']['docker']: - module.timeout = module_json['config']['docker']['timeout'] - - # Determine if this is a container or just an image/template - if "enable_container" in module_json['config']['docker']: - module.enable_container = module_json['config']['docker']['enable_container'] - - if "depends_on" in module_json['config']['docker']: - depends_on_module = module_json['config']['docker']['depends_on'] - if self._get_test_module(depends_on_module) is None: - self._load_test_module(depends_on_module) - - self._test_modules.append(module) - return module - - def build_test_modules(self): - """Build all test modules.""" - LOGGER.info("Building test modules...") - for module in self._test_modules: - self._build_test_module(module) - - def _build_test_module(self, module): - LOGGER.debug("Building docker image for module " + module.dir_name) - client = docker.from_env() - try: - client.images.build( - dockerfile=os.path.join(module.dir, module.build_file), - path=self._path, - forcerm=True, # Cleans up intermediate containers during build - tag=module.image_name - ) - except docker.errors.BuildError as error: - LOGGER.error(error) - - def _stop_modules(self, kill=False): - LOGGER.info("Stopping test modules") - for module in self._test_modules: - # Test modules may just be Docker images, so we do not want to stop them - if not module.enable_container: - continue - self._stop_module(module, kill) - LOGGER.info("All test modules have been stopped") - - def _stop_module(self, module, kill=False): - LOGGER.debug("Stopping test module " + module.container_name) - try: - container = module.container - if container is not None: - if kill: - LOGGER.debug("Killing container:" + - module.container_name) - container.kill() - else: - LOGGER.debug("Stopping container:" + - module.container_name) - container.stop() - LOGGER.debug("Container stopped:" + module.container_name) - except docker.errors.NotFound: - pass + """Manages and controls the test modules.""" + + def __init__(self, net_orc): + self._test_modules = [] + self._module_config = None + self._net_orc = net_orc + + self._path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + # Resolve the path to the test-run folder + self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) + + shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), + ignore_errors=True) + os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) + + def start(self): + LOGGER.info("Starting Test Orchestrator") + self._load_test_modules() + self.build_test_modules() + + def stop(self): + """Stop any running tests""" + self._stop_modules() + + def run_test_modules(self, device): + """Iterates through each test module and starts the container.""" + LOGGER.info( + f"Running test modules on device with mac addr {device.mac_addr}") + for module in self._test_modules: + self._run_test_module(module, device) + LOGGER.info("All tests complete") + LOGGER.info( + f"Completed running test modules on device with mac addr {device.mac_addr}") + results = self._generate_results(device) + + def _generate_results(self, device): + results = {} + for module in self._test_modules: + container_runtime_dir = os.path.join( + self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + + '/' + module.name) + results_file = container_runtime_dir + '/' + module.name + '-result.json' + try: + with open(results_file, 'r', encoding='UTF-8') as f: + module_results = json.load(f) + results[module.name] = module_results + except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: + LOGGER.error("Module Results Errror " + module.name) + LOGGER.debug(results_error) + + out_file = os.path.join( + self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + '/results.json') + with open(out_file, 'w') as f: + json.dump(results,f,indent=2) + return results + + def _run_test_module(self, module, device): + """Start the test container and extract the results.""" + + if module is None or not module.enable_container: + return + + LOGGER.info("Running test module " + module.name) + + try: + container_runtime_dir = os.path.join( + self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + + "/" + module.name) + network_runtime_dir = os.path.join(self._root_path, "runtime/network") + os.makedirs(container_runtime_dir) + + client = docker.from_env() + + module.container = client.containers.run( + module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=module.container_name, + hostname=module.container_name, + privileged=True, + detach=True, + mounts=[ + Mount(target="/runtime/output", + source=container_runtime_dir, + type='bind'), + Mount(target="/runtime/network", + source=network_runtime_dir, + type='bind', + read_only=True), + ], + environment={ + "HOST_USER": getpass.getuser(), + "DEVICE_MAC": device.mac_addr, + "DEVICE_TEST_MODULES": device.test_modules, + "IPV4_SUBNET": self._net_orc.network_config.ipv4_network, + "IPV6_SUBNET": self._net_orc.network_config.ipv6_network + }) + except (docker.errors.APIError, + docker.errors.ContainerError) as container_error: + LOGGER.error("Test module " + module.name + " has failed to start") + LOGGER.debug(container_error) + return + + # Mount the test container to the virtual network if requried + if module.network: + LOGGER.debug("Attaching test module to the network") + self._net_orc._attach_test_module_to_network(module) + + # Determine the module timeout time + test_module_timeout = time.time() + module.timeout + status = self._get_module_status(module) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._get_module_status(module) + + LOGGER.info("Test module " + module.name + " has finished") + + def _get_module_status(self, module): + container = self._get_module_container(module) + if container is not None: + return container.status + return None + + def _get_test_module(self, name): + for test_module in self._test_modules: + if name == test_module.display_name or name == test_module.name or name == test_module.dir_name: + return test_module + return None + + def _get_module_container(self, module): + container = None + try: + client = docker.from_env() + container = client.containers.get(module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + module.container_name + " not found") + except docker.errors.APIError as error: + LOGGER.error("Failed to resolve container") + LOGGER.error(error) + return container + + def _load_test_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR) + + loaded_modules = "Loaded the following test modules: " + test_modules_dir = os.path.join(self._path, TEST_MODULES_DIR) + + for module_dir in os.listdir(test_modules_dir): + + if self._get_test_module(module_dir) is None: + loaded_module = self._load_test_module(module_dir) + loaded_modules += loaded_module.dir_name + " " + + LOGGER.info(loaded_modules) + + def _load_test_module(self, module_dir): + """Import module configuration from module_config.json.""" + + modules_dir = os.path.join(self._path, TEST_MODULES_DIR) + + # Load basic module information + module = TestModule() + with open(os.path.join(self._path, modules_dir, module_dir, MODULE_CONFIG), + encoding='UTF-8') as module_config_file: + module_json = json.load(module_config_file) + + module.name = module_json['config']['meta']['name'] + module.display_name = module_json['config']['meta']['display_name'] + module.description = module_json['config']['meta']['description'] + module.dir = os.path.join(self._path, modules_dir, module_dir) + module.dir_name = module_dir + module.build_file = module_dir + ".Dockerfile" + module.container_name = "tr-ct-" + module.dir_name + "-test" + module.image_name = "test-run/" + module.dir_name + "-test" + + if 'timeout' in module_json['config']['docker']: + module.timeout = module_json['config']['docker']['timeout'] + + # Determine if this is a container or just an image/template + if "enable_container" in module_json['config']['docker']: + module.enable_container = module_json['config']['docker'][ + 'enable_container'] + + if "depends_on" in module_json['config']['docker']: + depends_on_module = module_json['config']['docker']['depends_on'] + if self._get_test_module(depends_on_module) is None: + self._load_test_module(depends_on_module) + + self._test_modules.append(module) + return module + + def build_test_modules(self): + """Build all test modules.""" + LOGGER.info("Building test modules...") + for module in self._test_modules: + self._build_test_module(module) + + def _build_test_module(self, module): + LOGGER.debug("Building docker image for module " + module.dir_name) + client = docker.from_env() + try: + client.images.build( + dockerfile=os.path.join(module.dir, module.build_file), + path=self._path, + forcerm=True, # Cleans up intermediate containers during build + tag=module.image_name) + except docker.errors.BuildError as error: + LOGGER.error(error) + + def _stop_modules(self, kill=False): + LOGGER.info("Stopping test modules") + for module in self._test_modules: + # Test modules may just be Docker images, so we do not want to stop them + if not module.enable_container: + continue + self._stop_module(module, kill) + LOGGER.info("All test modules have been stopped") + + def _stop_module(self, module, kill=False): + LOGGER.debug("Stopping test module " + module.container_name) + try: + container = module.container + if container is not None: + if kill: + LOGGER.debug("Killing container:" + module.container_name) + container.kill() + else: + LOGGER.debug("Stopping container:" + module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + module.container_name) + except docker.errors.NotFound: + pass From ea60b410c7b036b0c715049815a126d8660e1c13 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 25 May 2023 02:43:51 -0700 Subject: [PATCH 19/48] Test results (#28) * Collect all module test results * Fix test modules without config options * Add timestamp to test results * Add attempt timing and device info to test results * Ignore disabled test containers when generating results * Fully skip modules that are disabled --- .../modules/base/python/src/test_module.py | 6 ++- test_orc/python/src/module.py | 54 +++++++++---------- test_orc/python/src/test_orchestrator.py | 41 ++++++++++---- 3 files changed, 61 insertions(+), 40 deletions(-) diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 2ca686fa9..22b9e0773 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -66,7 +66,7 @@ def run_tests(self): result = None if ("enabled" in test and test["enabled"]) or "enabled" not in test: LOGGER.info("Attempting to run test: " + test["name"]) - + test['start'] = datetime.now().isoformat() # Resolve the correct python method by test name and run test if hasattr(self, test_method_name): if "config" in test: @@ -85,7 +85,9 @@ def run_tests(self): test["result"] = "compliant" if result else "non-compliant" else: test["result"] = "skipped" - test["timestamp"] = datetime.now().isoformat() + test['end'] = datetime.now().isoformat() + duration = datetime.fromisoformat(test['end']) - datetime.fromisoformat(test['start']) + test['duration'] = str(duration) json_results = json.dumps({"results": tests}, indent=2) self._write_results(json_results) diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py index 6b2f14f9d..54f920fa1 100644 --- a/test_orc/python/src/module.py +++ b/test_orc/python/src/module.py @@ -1,27 +1,27 @@ -"""Represemts a test module.""" -from dataclasses import dataclass -from docker.models.containers import Container - -@dataclass -class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes - """Represents a test module.""" - - name: str = None - display_name: str = None - description: str = None - - build_file: str = None - container: Container = None - container_name: str = None - image_name :str = None - enable_container: bool = True - network: bool = True - - timeout: int = 60 - - # Absolute path - dir: str = None - dir_name: str = None - - #Set IP Index for all test modules - ip_index: str = 9 +"""Represemts a test module.""" +from dataclasses import dataclass +from docker.models.containers import Container + +@dataclass +class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represents a test module.""" + + name: str = None + display_name: str = None + description: str = None + + build_file: str = None + container: Container = None + container_name: str = None + image_name :str = None + enable_container: bool = True + network: bool = True + + timeout: int = 60 + + # Absolute path + dir: str = None + dir_name: str = None + + #Set IP Index for all test modules + ip_index: str = 9 diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index acd24b59a..f1e45e2f6 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -56,18 +56,25 @@ def run_test_modules(self, device): def _generate_results(self, device): results = {} + results["device"] = {} + if device.make is not None: + results["device"]["make"] = device.make + if device.make is not None: + results["device"]["model"] = device.model + results["device"]["mac_addr"] = device.mac_addr for module in self._test_modules: - container_runtime_dir = os.path.join( - self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + - '/' + module.name) - results_file = container_runtime_dir + '/' + module.name + '-result.json' - try: - with open(results_file, 'r', encoding='UTF-8') as f: - module_results = json.load(f) - results[module.name] = module_results - except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: - LOGGER.error("Module Results Errror " + module.name) - LOGGER.debug(results_error) + if module.enable_container and self._is_module_enabled(module,device): + container_runtime_dir = os.path.join( + self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + + '/' + module.name) + results_file = container_runtime_dir + '/' + module.name + '-result.json' + try: + with open(results_file, 'r', encoding='UTF-8') as f: + module_results = json.load(f) + results[module.name] = module_results + except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: + LOGGER.error("Module Results Errror " + module.name) + LOGGER.debug(results_error) out_file = os.path.join( self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + '/results.json') @@ -75,12 +82,24 @@ def _generate_results(self, device): json.dump(results,f,indent=2) return results + def _is_module_enabled(self,module,device): + enabled = True + if device.test_modules is not None: + test_modules = json.loads(device.test_modules) + if module.name in test_modules: + if 'enabled' in test_modules[module.name]: + enabled = test_modules[module.name]["enabled"] + return enabled + def _run_test_module(self, module, device): """Start the test container and extract the results.""" if module is None or not module.enable_container: return + if not self._is_module_enabled(module,device): + return + LOGGER.info("Running test module " + module.name) try: From b6a6cdcc8b22756fabaee45bc46ec399ee3c549c Mon Sep 17 00:00:00 2001 From: Noureddine Date: Thu, 25 May 2023 14:35:16 +0000 Subject: [PATCH 20/48] Fix pylint test and skip internet tests so CI passes (#29) * disable internet checks for pass * fix pylint test --- testing/test_baseline.py | 2 ++ testing/test_pylint | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/testing/test_baseline.py b/testing/test_baseline.py index 3ab30a7c0..e8a257672 100644 --- a/testing/test_baseline.py +++ b/testing/test_baseline.py @@ -20,6 +20,7 @@ def validator_results(): with open(os.path.join(dir, '../', 'runtime/validation/faux-dev/result.json')) as f: return json.load(f) +@pytest.mark.skip(reason="requires internet") def test_internet_connectivity(container_data): assert container_data['network']['internet'] == 200 @@ -43,6 +44,7 @@ def test_dns_server_resolves(container_data): assert re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', container_data['dns_response']) +@pytest.mark.skip(reason="requires internet") def test_validator_results_compliant(validator_results): results = [True if x['result'] == 'compliant' else False for x in validator_results['results']] diff --git a/testing/test_pylint b/testing/test_pylint index 833961d94..e3ade62b5 100755 --- a/testing/test_pylint +++ b/testing/test_pylint @@ -1,6 +1,6 @@ #!/bin/bash -ERROR_LIMIT=2534 +ERROR_LIMIT=1100 sudo cmd/install @@ -12,9 +12,10 @@ files=$(find . -path ./venv -prune -o -name '*.py' -print) OUT=pylint.out rm -f $OUT && touch $OUT -pylint $files -ry --extension-pkg-allow-list=docker 2>/dev/null | tee -a $OUT -new_errors=$(cat $OUT | grep "statements analysed." | awk '{print $1}') +pylint $files -ry --extension-pkg-allow-list=docker --evaluation="error + warning + refactor + convention" 2>/dev/null | tee -a $OUT + +new_errors=$(cat $OUT | grep -oP "(?!=^Your code has been rated at)([0-9]+)(?=\.00/10[ \(]?)" ) echo "$new_errors > $ERROR_LIMIT?" if (( $new_errors > $ERROR_LIMIT)); then From 3d53ecbdef973d78641d2482569ebb575fa2f601 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Thu, 25 May 2023 19:42:57 +0100 Subject: [PATCH 21/48] Increase pylint score (#31) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger --------- Co-authored-by: jhughesbiot --- .gitignore | 135 +---- framework/logger.py | 49 +- framework/test_runner.py | 93 ++- framework/testrun.py | 10 +- net_orc/network/modules/ntp/ntp-server.py | 532 +++++++++--------- .../network/modules/ovs/python/src/logger.py | 12 +- .../modules/ovs/python/src/ovs_control.py | 186 +++--- net_orc/network/modules/ovs/python/src/run.py | 60 +- .../network/modules/ovs/python/src/util.py | 30 +- net_orc/network/modules/radius/conf/ca.crt | 54 +- net_orc/python/src/network_orchestrator.py | 2 +- .../base/python/src/grpc/start_server.py | 36 +- test_orc/modules/base/python/src/logger.py | 62 +- .../modules/base/python/src/test_module.py | 185 +++--- test_orc/modules/base/python/src/util.py | 31 +- .../baseline/python/src/baseline_module.py | 43 +- test_orc/modules/baseline/python/src/run.py | 55 +- test_orc/modules/dns/python/src/dns_module.py | 101 ++-- test_orc/modules/dns/python/src/run.py | 65 ++- .../modules/nmap/python/src/nmap_module.py | 424 +++++++------- test_orc/modules/nmap/python/src/run.py | 55 +- test_orc/python/src/module.py | 5 +- test_orc/python/src/runner.py | 1 + test_orc/python/src/test_orchestrator.py | 66 ++- testing/test_baseline.py | 41 +- 25 files changed, 1119 insertions(+), 1214 deletions(-) diff --git a/.gitignore b/.gitignore index db1580ffb..5dfc1f6f9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,136 +1,7 @@ -# Runtime folder runtime/ venv/ .vscode/ - +error +pylint.out local/ - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ +__pycache__/ \ No newline at end of file diff --git a/framework/logger.py b/framework/logger.py index 64d8fdb97..d4702cb38 100644 --- a/framework/logger.py +++ b/framework/logger.py @@ -4,45 +4,46 @@ import os LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' _DEFAULT_LOG_LEVEL = logging.INFO _LOG_LEVEL = logging.INFO -_CONF_DIR = "conf" -_CONF_FILE_NAME = "system.json" -_LOG_DIR = "runtime/testing/" +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = 'runtime/testing/' # Set log level -with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8') as system_conf_file: - system_conf_json = json.load(system_conf_file) +with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='utf-8') as system_conf_file: + system_conf_json = json.load(system_conf_file) log_level_str = system_conf_json['log_level'] temp_log = logging.getLogger('temp') try: - temp_log.setLevel(logging.getLevelName(log_level_str)) - _LOG_LEVEL = logging.getLevelName(log_level_str) + temp_log.setLevel(logging.getLevelName(log_level_str)) + _LOG_LEVEL = logging.getLevelName(log_level_str) except ValueError: - print('Invalid log level set in ' + _CONF_DIR + '/' + _CONF_FILE_NAME + - '. Using INFO as log level') - _LOG_LEVEL = _DEFAULT_LOG_LEVEL + print('Invalid log level set in ' + _CONF_DIR + '/' + _CONF_FILE_NAME + + '. Using INFO as log level') + _LOG_LEVEL = _DEFAULT_LOG_LEVEL log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) def add_file_handler(log, log_file): - handler = logging.FileHandler(_LOG_DIR + log_file + ".log") - handler.setFormatter(log_format) - log.addHandler(handler) + handler = logging.FileHandler(_LOG_DIR + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) def add_stream_handler(log): - handler = logging.StreamHandler() - handler.setFormatter(log_format) - log.addHandler(handler) + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) def get_logger(name, log_file=None): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - LOGGERS[name].setLevel(_LOG_LEVEL) - add_stream_handler(LOGGERS[name]) - if log_file is not None: - add_file_handler(LOGGERS[name], log_file) - return LOGGERS[name] + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(_LOG_LEVEL) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/framework/test_runner.py b/framework/test_runner.py index 5c4bf1472..95f3e4208 100644 --- a/framework/test_runner.py +++ b/framework/test_runner.py @@ -14,61 +14,60 @@ import logger import signal -LOGGER = logger.get_logger('runner') - +LOGGER = logger.get_logger("runner") class TestRunner: + """Controls and starts the Test Run application.""" - def __init__(self, config_file=None, validate=True, net_only=False, single_intf=False): - self._register_exits() - self.test_run = TestRun(config_file=config_file, - validate=validate, - net_only=net_only, - single_intf=single_intf) - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) + def __init__(self, config_file=None, validate=True, + net_only=False, single_intf=False): + self._register_exits() + self.test_run = TestRun(config_file=config_file, + validate=validate, + net_only=net_only, + single_intf=single_intf) - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - # Kill all container services quickly - # If we're here, we want everything to stop immediately - # and don't care about a gracefully shutdown - self._stop(True) - sys.exit(1) + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) - def stop(self, kill=False): - self.test_run.stop(kill) + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + # Kill all container services quickly + # If we're here, we want everything to stop immediately + # and don't care about a gracefully shutdown + self._stop(True) + sys.exit(1) - def start(self): - self.test_run.start() - LOGGER.info("Test Run has finished") + def stop(self, kill=False): + self.test_run.stop(kill) + def start(self): + self.test_run.start() + LOGGER.info("Test Run has finished") def parse_args(argv): - parser = argparse.ArgumentParser(description="Test Run", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-f", "--config-file", default=None, - help="Define the configuration file for Test Run and Network Orchestrator") - parser.add_argument("--no-validate", action="store_true", - help="Turn off the validation of the network after network boot") - parser.add_argument("-net", "--net-only", action="store_true", - help="Run the network only, do not run tests") - parser.add_argument("--single-intf", action="store_true", - help="Single interface mode (experimental)") - args, unknown = parser.parse_known_args() - return args - + parser = argparse.ArgumentParser(description="Test Run", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-f", "--config-file", default=None, + help="Define the configuration file for Test Run and Network Orchestrator") + parser.add_argument("--no-validate", action="store_true", + help="Turn off the validation of the network after network boot") + parser.add_argument("-net", "--net-only", action="store_true", + help="Run the network only, do not run tests") + parser.add_argument("--single-intf", action="store_true", + help="Single interface mode (experimental)") + parsed_args = parser.parse_known_args()[0] + return parsed_args if __name__ == "__main__": - args = parse_args(sys.argv) - runner = TestRunner(config_file=args.config_file, - validate=not args.no_validate, - net_only=args.net_only, - single_intf=args.single_intf) - runner.start() + args = parse_args(sys.argv) + runner = TestRunner(config_file=args.config_file, + validate=not args.no_validate, + net_only=args.net_only, + single_intf=args.single_intf) + runner.start() diff --git a/framework/testrun.py b/framework/testrun.py index d5c70a9ca..94ad2ef9f 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -52,7 +52,7 @@ class TestRun: # pylint: disable=too-few-public-methods orchestrator and user interface. """ - def __init__(self, + def __init__(self, config_file=CONFIG_FILE, validate=True, net_only=False, @@ -97,7 +97,7 @@ def start(self): LOGGER.info('Waiting for devices on the network...') - # Check timeout and whether testing is currently + # Check timeout and whether testing is currently # in progress before stopping time.sleep(RUNTIME) @@ -138,7 +138,7 @@ def _run_tests(self, device): # To Do: Make this configurable time.sleep(60) # Let device bootup - self._test_orc._run_test_modules(device) + self._test_orc.run_test_modules(device) def _stop_network(self, kill=False): self._net_orc.stop(kill=kill) @@ -165,9 +165,9 @@ def _load_devices(self, device_dir): mac_addr = device_config_json.get(DEVICE_MAC_ADDR) test_modules = device_config_json.get(DEVICE_TEST_MODULES) - device = Device(make=device_make, + device = Device(make=device_make, model=device_model, - mac_addr=mac_addr, + mac_addr=mac_addr, test_modules=json.dumps(test_modules)) self._devices.append(device) diff --git a/net_orc/network/modules/ntp/ntp-server.py b/net_orc/network/modules/ntp/ntp-server.py index ace3099b0..9d6a6da8e 100644 --- a/net_orc/network/modules/ntp/ntp-server.py +++ b/net_orc/network/modules/ntp/ntp-server.py @@ -8,308 +8,300 @@ import select taskQueue = queue.Queue() -stopFlag = False +stop_flag = False def system_to_ntp_time(timestamp): - """Convert a system time to a NTP time. + """Convert a system time to a NTP time. - Parameters: - timestamp -- timestamp in system time + Parameters: + timestamp -- timestamp in system time - Returns: - corresponding NTP time - """ - return timestamp + NTP.NTP_DELTA + Returns: + corresponding NTP time + """ + return timestamp + NTP.NTP_DELTA def _to_int(timestamp): - """Return the integral part of a timestamp. + """Return the integral part of a timestamp. - Parameters: - timestamp -- NTP timestamp + Parameters: + timestamp -- NTP timestamp - Retuns: - integral part - """ - return int(timestamp) + Retuns: + integral part + """ + return int(timestamp) def _to_frac(timestamp, n=32): - """Return the fractional part of a timestamp. + """Return the fractional part of a timestamp. - Parameters: - timestamp -- NTP timestamp - n -- number of bits of the fractional part + Parameters: + timestamp -- NTP timestamp + n -- number of bits of the fractional part - Retuns: - fractional part - """ - return int(abs(timestamp - _to_int(timestamp)) * 2**n) + Retuns: + fractional part + """ + return int(abs(timestamp - _to_int(timestamp)) * 2**n) def _to_time(integ, frac, n=32): - """Return a timestamp from an integral and fractional part. + """Return a timestamp from an integral and fractional part. - Parameters: - integ -- integral part - frac -- fractional part - n -- number of bits of the fractional part - - Retuns: - timestamp - """ - return integ + float(frac)/2**n - + Parameters: + integ -- integral part + frac -- fractional part + n -- number of bits of the fractional part + Retuns: + timestamp + """ + return integ + float(frac)/2**n class NTPException(Exception): - """Exception raised by this module.""" - pass - + """Exception raised by this module.""" + pass class NTP: - """Helper class defining constants.""" - - _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) - """system epoch""" - _NTP_EPOCH = datetime.date(1900, 1, 1) - """NTP epoch""" - NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 - """delta between system and NTP time""" - - REF_ID_TABLE = { - 'DNC': "DNC routing protocol", - 'NIST': "NIST public modem", - 'TSP': "TSP time protocol", - 'DTS': "Digital Time Service", - 'ATOM': "Atomic clock (calibrated)", - 'VLF': "VLF radio (OMEGA, etc)", - 'callsign': "Generic radio", - 'LORC': "LORAN-C radionavidation", - 'GOES': "GOES UHF environment satellite", - 'GPS': "GPS UHF satellite positioning", - } - """reference identifier table""" - - STRATUM_TABLE = { - 0: "unspecified", - 1: "primary reference", - } - """stratum table""" - - MODE_TABLE = { - 0: "unspecified", - 1: "symmetric active", - 2: "symmetric passive", - 3: "client", - 4: "server", - 5: "broadcast", - 6: "reserved for NTP control messages", - 7: "reserved for private use", - } - """mode table""" - - LEAP_TABLE = { - 0: "no warning", - 1: "last minute has 61 seconds", - 2: "last minute has 59 seconds", - 3: "alarm condition (clock not synchronized)", - } - """leap indicator table""" + """Helper class defining constants.""" + + _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) + """system epoch""" + _NTP_EPOCH = datetime.date(1900, 1, 1) + """NTP epoch""" + NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 + """delta between system and NTP time""" + + REF_ID_TABLE = { + 'DNC': "DNC routing protocol", + 'NIST': "NIST public modem", + 'TSP': "TSP time protocol", + 'DTS': "Digital Time Service", + 'ATOM': "Atomic clock (calibrated)", + 'VLF': "VLF radio (OMEGA, etc)", + 'callsign': "Generic radio", + 'LORC': "LORAN-C radionavidation", + 'GOES': "GOES UHF environment satellite", + 'GPS': "GPS UHF satellite positioning", + } + """reference identifier table""" + + STRATUM_TABLE = { + 0: "unspecified", + 1: "primary reference", + } + """stratum table""" + + MODE_TABLE = { + 0: "unspecified", + 1: "symmetric active", + 2: "symmetric passive", + 3: "client", + 4: "server", + 5: "broadcast", + 6: "reserved for NTP control messages", + 7: "reserved for private use", + } + """mode table""" + + LEAP_TABLE = { + 0: "no warning", + 1: "last minute has 61 seconds", + 2: "last minute has 59 seconds", + 3: "alarm condition (clock not synchronized)", + } + """leap indicator table""" class NTPPacket: - """NTP packet class. + """NTP packet class. + + This represents an NTP packet. + """ + + _PACKET_FORMAT = "!B B B b 11I" + """packet format to pack/unpack""" - This represents an NTP packet. + def __init__(self, version=4, mode=3, tx_timestamp=0): + """Constructor. + + Parameters: + version -- NTP version + mode -- packet mode (client, server) + tx_timestamp -- packet transmit timestamp """ - - _PACKET_FORMAT = "!B B B b 11I" - """packet format to pack/unpack""" - - def __init__(self, version=4, mode=3, tx_timestamp=0): - """Constructor. - - Parameters: - version -- NTP version - mode -- packet mode (client, server) - tx_timestamp -- packet transmit timestamp - """ - self.leap = 0 - """leap second indicator""" - self.version = version - """version""" - self.mode = mode - """mode""" - self.stratum = 0 - """stratum""" - self.poll = 0 - """poll interval""" - self.precision = 0 - """precision""" - self.root_delay = 0 - """root delay""" - self.root_dispersion = 0 - """root dispersion""" - self.ref_id = 0 - """reference clock identifier""" - self.ref_timestamp = 0 - """reference timestamp""" - self.orig_timestamp = 0 - self.orig_timestamp_high = 0 - self.orig_timestamp_low = 0 - """originate timestamp""" - self.recv_timestamp = 0 - """receive timestamp""" - self.tx_timestamp = tx_timestamp - self.tx_timestamp_high = 0 - self.tx_timestamp_low = 0 - """tansmit timestamp""" - - def to_data(self): - """Convert this NTPPacket to a buffer that can be sent over a socket. - - Returns: - buffer representing this packet - - Raises: - NTPException -- in case of invalid field - """ - try: - packed = struct.pack(NTPPacket._PACKET_FORMAT, - (self.leap << 6 | self.version << 3 | self.mode), - self.stratum, - self.poll, - self.precision, - _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), - _to_int(self.root_dispersion) << 16 | - _to_frac(self.root_dispersion, 16), - self.ref_id, - _to_int(self.ref_timestamp), - _to_frac(self.ref_timestamp), - #Change by lichen, avoid loss of precision - self.orig_timestamp_high, - self.orig_timestamp_low, - _to_int(self.recv_timestamp), - _to_frac(self.recv_timestamp), - _to_int(self.tx_timestamp), - _to_frac(self.tx_timestamp)) - except struct.error: - raise NTPException("Invalid NTP packet fields.") - return packed - - def from_data(self, data): - """Populate this instance from a NTP packet payload received from - the network. - - Parameters: - data -- buffer payload - - Raises: - NTPException -- in case of invalid packet format - """ - try: - unpacked = struct.unpack(NTPPacket._PACKET_FORMAT, - data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) - except struct.error: - raise NTPException("Invalid NTP packet.") - - self.leap = unpacked[0] >> 6 & 0x3 - self.version = unpacked[0] >> 3 & 0x7 - self.mode = unpacked[0] & 0x7 - self.stratum = unpacked[1] - self.poll = unpacked[2] - self.precision = unpacked[3] - self.root_delay = float(unpacked[4])/2**16 - self.root_dispersion = float(unpacked[5])/2**16 - self.ref_id = unpacked[6] - self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) - self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) - self.orig_timestamp_high = unpacked[9] - self.orig_timestamp_low = unpacked[10] - self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) - self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) - self.tx_timestamp_high = unpacked[13] - self.tx_timestamp_low = unpacked[14] - - def GetTxTimeStamp(self): - return (self.tx_timestamp_high,self.tx_timestamp_low) - - def SetOriginTimeStamp(self,high,low): - self.orig_timestamp_high = high - self.orig_timestamp_low = low - + self.leap = 0 + """leap second indicator""" + self.version = version + """version""" + self.mode = mode + """mode""" + self.stratum = 0 + """stratum""" + self.poll = 0 + """poll interval""" + self.precision = 0 + """precision""" + self.root_delay = 0 + """root delay""" + self.root_dispersion = 0 + """root dispersion""" + self.ref_id = 0 + """reference clock identifier""" + self.ref_timestamp = 0 + """reference timestamp""" + self.orig_timestamp = 0 + self.orig_timestamp_high = 0 + self.orig_timestamp_low = 0 + """originate timestamp""" + self.recv_timestamp = 0 + """receive timestamp""" + self.tx_timestamp = tx_timestamp + self.tx_timestamp_high = 0 + self.tx_timestamp_low = 0 + """tansmit timestamp""" + + def to_data(self): + """Convert this NTPPacket to a buffer that can be sent over a socket. + + Returns: + buffer representing this packet + + Raises: + NTPException -- in case of invalid field + """ + try: + packed = struct.pack(NTPPacket._PACKET_FORMAT, + (self.leap << 6 | self.version << 3 | self.mode), + self.stratum, + self.poll, + self.precision, + _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), + _to_int(self.root_dispersion) << 16 | + _to_frac(self.root_dispersion, 16), + self.ref_id, + _to_int(self.ref_timestamp), + _to_frac(self.ref_timestamp), + #Change by lichen, avoid loss of precision + self.orig_timestamp_high, + self.orig_timestamp_low, + _to_int(self.recv_timestamp), + _to_frac(self.recv_timestamp), + _to_int(self.tx_timestamp), + _to_frac(self.tx_timestamp)) + except struct.error: + raise NTPException("Invalid NTP packet fields.") + return packed + + def from_data(self, data): + """Populate this instance from a NTP packet payload received from + the network. + + Parameters: + data -- buffer payload + + Raises: + NTPException -- in case of invalid packet format + """ + try: + unpacked = struct.unpack(NTPPacket._PACKET_FORMAT, + data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) + except struct.error: + raise NTPException("Invalid NTP packet.") + + self.leap = unpacked[0] >> 6 & 0x3 + self.version = unpacked[0] >> 3 & 0x7 + self.mode = unpacked[0] & 0x7 + self.stratum = unpacked[1] + self.poll = unpacked[2] + self.precision = unpacked[3] + self.root_delay = float(unpacked[4])/2**16 + self.root_dispersion = float(unpacked[5])/2**16 + self.ref_id = unpacked[6] + self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) + self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) + self.orig_timestamp_high = unpacked[9] + self.orig_timestamp_low = unpacked[10] + self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) + self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) + self.tx_timestamp_high = unpacked[13] + self.tx_timestamp_low = unpacked[14] + + def GetTxTimeStamp(self): + return (self.tx_timestamp_high,self.tx_timestamp_low) + + def SetOriginTimeStamp(self,high,low): + self.orig_timestamp_high = high + self.orig_timestamp_low = low class RecvThread(threading.Thread): - def __init__(self,socket): - threading.Thread.__init__(self) - self.socket = socket - def run(self): - global t,stopFlag - while True: - if stopFlag == True: - print("RecvThread Ended") - break - rlist,wlist,elist = select.select([self.socket],[],[],1); - if len(rlist) != 0: - print("Received %d packets" % len(rlist)) - for tempSocket in rlist: - try: - data,addr = tempSocket.recvfrom(1024) - recvTimestamp = recvTimestamp = system_to_ntp_time(time.time()) - taskQueue.put((data,addr,recvTimestamp)) - except socket.error as msg: - print(msg) + + def __init__(self,socket): + threading.Thread.__init__(self) + self.socket = socket + + def run(self): + global t,stop_flag + while True: + if stop_flag == True: + print("RecvThread Ended") + break + rlist,wlist,elist = select.select([self.socket],[],[],1) + if len(rlist) != 0: + print("Received %d packets" % len(rlist)) + for tempSocket in rlist: + try: + data,addr = tempSocket.recvfrom(1024) + recvTimestamp = recvTimestamp = system_to_ntp_time(time.time()) + taskQueue.put((data,addr,recvTimestamp)) + except socket.error as msg: + print(msg) class WorkThread(threading.Thread): - def __init__(self,socket): - threading.Thread.__init__(self) - self.socket = socket - def run(self): - global taskQueue,stopFlag - while True: - if stopFlag == True: - print("WorkThread Ended") - break - try: - data,addr,recvTimestamp = taskQueue.get(timeout=1) - recvPacket = NTPPacket() - recvPacket.from_data(data) - timeStamp_high,timeStamp_low = recvPacket.GetTxTimeStamp() - sendPacket = NTPPacket(version=4,mode=4) - sendPacket.stratum = 2 - sendPacket.poll = 10 - ''' - sendPacket.precision = 0xfa - sendPacket.root_delay = 0x0bfa - sendPacket.root_dispersion = 0x0aa7 - sendPacket.ref_id = 0x808a8c2c - ''' - sendPacket.ref_timestamp = recvTimestamp-5 - sendPacket.SetOriginTimeStamp(timeStamp_high,timeStamp_low) - sendPacket.recv_timestamp = recvTimestamp - sendPacket.tx_timestamp = system_to_ntp_time(time.time()) - socket.sendto(sendPacket.to_data(),addr) - print("Sent to %s:%d" % (addr[0],addr[1])) - except queue.Empty: - continue - - -listenIp = "0.0.0.0" -listenPort = 123 + + def __init__(self,socket): + threading.Thread.__init__(self) + self.socket = socket + + def run(self): + global taskQueue,stop_flag + while True: + if stop_flag is True: + print("WorkThread Ended") + break + try: + data,addr,recvTimestamp = taskQueue.get(timeout=1) + recvPacket = NTPPacket() + recvPacket.from_data(data) + timeStamp_high,timeStamp_low = recvPacket.GetTxTimeStamp() + sendPacket = NTPPacket(version=4,mode=4) + sendPacket.stratum = 2 + sendPacket.poll = 10 + sendPacket.ref_timestamp = recvTimestamp-5 + sendPacket.SetOriginTimeStamp(timeStamp_high,timeStamp_low) + sendPacket.recv_timestamp = recvTimestamp + sendPacket.tx_timestamp = system_to_ntp_time(time.time()) + socket.sendto(sendPacket.to_data(),addr) + print("Sent to %s:%d" % (addr[0],addr[1])) + except queue.Empty: + continue + +listen_ip = "0.0.0.0" +listen_port = 123 socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) -socket.bind((listenIp,listenPort)) -print("local socket: ", socket.getsockname()); +socket.bind((listen_ip,listen_port)) +print(f"local socket: {socket.getsockname()}") recvThread = RecvThread(socket) recvThread.start() workThread = WorkThread(socket) workThread.start() while True: - try: - time.sleep(0.5) - except KeyboardInterrupt: - print("Exiting...") - stopFlag = True - recvThread.join() - workThread.join() - #socket.close() - print("Exited") - break - + try: + time.sleep(0.5) + except KeyboardInterrupt: + print("Exiting...") + stop_flag = True + recvThread.join() + workThread.join() + #socket.close() + print("Exited") + break diff --git a/net_orc/network/modules/ovs/python/src/logger.py b/net_orc/network/modules/ovs/python/src/logger.py index 50dfb4f50..566a5c75e 100644 --- a/net_orc/network/modules/ovs/python/src/logger.py +++ b/net_orc/network/modules/ovs/python/src/logger.py @@ -1,17 +1,17 @@ #!/usr/bin/env python3 import logging -import os -import sys LOGGERS = {} _LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" _DATE_FORMAT = '%b %02d %H:%M:%S' # Set level to debug if set as runtime flag -logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=logging.INFO) +logging.basicConfig(format=_LOG_FORMAT, + datefmt=_DATE_FORMAT, + level=logging.INFO) def get_logger(name): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - return LOGGERS[name] \ No newline at end of file + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + return LOGGERS[name] diff --git a/net_orc/network/modules/ovs/python/src/ovs_control.py b/net_orc/network/modules/ovs/python/src/ovs_control.py index 6647dc89e..53406cef2 100644 --- a/net_orc/network/modules/ovs/python/src/ovs_control.py +++ b/net_orc/network/modules/ovs/python/src/ovs_control.py @@ -1,9 +1,7 @@ #!/usr/bin/env python3 -#import ipaddress import json import logger -#import os import util CONFIG_FILE = "/ovs/conf/system.json" @@ -13,95 +11,95 @@ class OVSControl: - def __init__(self): - self._int_intf = None - self._dev_intf = None - self._load_config() - - def add_bridge(self,bridgeName): - LOGGER.info("Adding OVS Bridge: " + bridgeName) - # Create the bridge using ovs-vsctl commands - # Uses the --may-exist option to prevent failures - # if this bridge already exists by this name it won't fail - # and will not modify the existing bridge - success=util.run_command("ovs-vsctl --may-exist add-br " + bridgeName) - return success - - def add_port(self,port, bridgeName): - LOGGER.info("Adding Port " + port + " to OVS Bridge: " + bridgeName) - # Add a port to the bridge using ovs-vsctl commands - # Uses the --may-exist option to prevent failures - # if this port already exists on the bridge and will not - # modify the existing bridge - success=util.run_command("ovs-vsctl --may-exist add-port " + bridgeName + " " + port) - return success - - def create_net(self): - LOGGER.info("Creating baseline network") - - # Create data plane - self.add_bridge(DEVICE_BRIDGE) - - # Create control plane - self.add_bridge(INTERNET_BRIDGE) - - # Remove IP from internet adapter - self.set_interface_ip(self._int_intf,"0.0.0.0") - - # Add external interfaces to data and control plane - self.add_port(self._dev_intf,DEVICE_BRIDGE) - self.add_port(self._int_intf,INTERNET_BRIDGE) - - # # Set ports up - self.set_bridge_up(DEVICE_BRIDGE) - self.set_bridge_up(INTERNET_BRIDGE) - - def delete_bridge(self,bridgeName): - LOGGER.info("Deleting OVS Bridge: " + bridgeName) - # Delete the bridge using ovs-vsctl commands - # Uses the --if-exists option to prevent failures - # if this bridge does not exists - success=util.run_command("ovs-vsctl --if-exists del-br " + bridgeName) - return success - - def _load_config(self): - LOGGER.info("Loading Configuration: " + CONFIG_FILE) - config_json = json.load(open(CONFIG_FILE, 'r')) - self._int_intf = config_json['internet_intf'] - self._dev_intf = config_json['device_intf'] - LOGGER.info("Configuration Loaded") - LOGGER.info("Internet Interface: " + self._int_intf) - LOGGER.info("Device Interface: " + self._dev_intf) - - def restore_net(self): - LOGGER.info("Restoring Network...") - # Delete data plane - self.delete_bridge(DEVICE_BRIDGE) - - # Delete control plane - self.delete_bridge(INTERNET_BRIDGE) - - LOGGER.info("Network is restored") - - def show_config(self): - LOGGER.info("Show current config of OVS") - success=util.run_command("ovs-vsctl show") - return success - - def set_bridge_up(self,bridgeName): - LOGGER.info("Setting Bridge device to up state: " + bridgeName) - success=util.run_command("ip link set dev " + bridgeName + " up") - return success - - def set_interface_ip(self,interface, ipAddr): - LOGGER.info("Setting interface " + interface + " to " + ipAddr) - # Remove IP from internet adapter - util.run_command("ifconfig " + interface + " 0.0.0.0") - -if __name__ == '__main__': - ovs = OVSControl() - ovs.create_net() - ovs.show_config() - ovs.restore_net() - ovs.show_config() - + def __init__(self): + self._int_intf = None + self._dev_intf = None + self._load_config() + + def add_bridge(self, bridge_name): + LOGGER.info("Adding OVS Bridge: " + bridge_name) + # Create the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this bridge already exists by this name it won't fail + # and will not modify the existing bridge + success=util.run_command("ovs-vsctl --may-exist add-br " + bridge_name) + return success + + def add_port(self,port, bridge_name): + LOGGER.info("Adding Port " + port + " to OVS Bridge: " + bridge_name) + # Add a port to the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this port already exists on the bridge and will not + # modify the existing bridge + success=util.run_command(f"""ovs-vsctl --may-exist + add-port {bridge_name} {port}""") + return success + + def create_net(self): + LOGGER.info("Creating baseline network") + + # Create data plane + self.add_bridge(DEVICE_BRIDGE) + + # Create control plane + self.add_bridge(INTERNET_BRIDGE) + + # Remove IP from internet adapter + self.set_interface_ip(self._int_intf,"0.0.0.0") + + # Add external interfaces to data and control plane + self.add_port(self._dev_intf,DEVICE_BRIDGE) + self.add_port(self._int_intf,INTERNET_BRIDGE) + + # # Set ports up + self.set_bridge_up(DEVICE_BRIDGE) + self.set_bridge_up(INTERNET_BRIDGE) + + def delete_bridge(self,bridge_name): + LOGGER.info("Deleting OVS Bridge: " + bridge_name) + # Delete the bridge using ovs-vsctl commands + # Uses the --if-exists option to prevent failures + # if this bridge does not exists + success=util.run_command("ovs-vsctl --if-exists del-br " + bridge_name) + return success + + def _load_config(self): + LOGGER.info("Loading Configuration: " + CONFIG_FILE) + config_json = json.load(open(CONFIG_FILE, "r", encoding="utf-8")) + self._int_intf = config_json["internet_intf"] + self._dev_intf = config_json["device_intf"] + LOGGER.info("Configuration Loaded") + LOGGER.info("Internet Interface: " + self._int_intf) + LOGGER.info("Device Interface: " + self._dev_intf) + + def restore_net(self): + LOGGER.info("Restoring Network...") + # Delete data plane + self.delete_bridge(DEVICE_BRIDGE) + + # Delete control plane + self.delete_bridge(INTERNET_BRIDGE) + + LOGGER.info("Network is restored") + + def show_config(self): + LOGGER.info("Show current config of OVS") + success=util.run_command("ovs-vsctl show") + return success + + def set_bridge_up(self,bridge_name): + LOGGER.info("Setting Bridge device to up state: " + bridge_name) + success=util.run_command("ip link set dev " + bridge_name + " up") + return success + + def set_interface_ip(self,interface, ip_addr): + LOGGER.info("Setting interface " + interface + " to " + ip_addr) + # Remove IP from internet adapter + util.run_command("ifconfig " + interface + " 0.0.0.0") + +if __name__ == "__main__": + ovs = OVSControl() + ovs.create_net() + ovs.show_config() + ovs.restore_net() + ovs.show_config() diff --git a/net_orc/network/modules/ovs/python/src/run.py b/net_orc/network/modules/ovs/python/src/run.py index 4c1474e74..f91c2dfeb 100644 --- a/net_orc/network/modules/ovs/python/src/run.py +++ b/net_orc/network/modules/ovs/python/src/run.py @@ -2,7 +2,8 @@ import logger import signal -import time +import sys +import time from ovs_control import OVSControl @@ -10,44 +11,45 @@ class OVSControlRun: - def __init__(self): + def __init__(self): - signal.signal(signal.SIGINT, self.handler) - signal.signal(signal.SIGTERM, self.handler) - signal.signal(signal.SIGABRT, self.handler) - signal.signal(signal.SIGQUIT, self.handler) + signal.signal(signal.SIGINT, self.handler) + signal.signal(signal.SIGTERM, self.handler) + signal.signal(signal.SIGABRT, self.handler) + signal.signal(signal.SIGQUIT, self.handler) - LOGGER.info("Starting OVS Control") + LOGGER.info("Starting OVS Control") - # Get all components ready - self._ovs_control = OVSControl() + # Get all components ready + self._ovs_control = OVSControl() - self._ovs_control.restore_net() + self._ovs_control.restore_net() - self._ovs_control.create_net() + self._ovs_control.create_net() - self._ovs_control.show_config() + self._ovs_control.show_config() - # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready. Waiting for device information...") + # Get network ready (via Network orchestrator) + LOGGER.info("Network is ready. Waiting for device information...") - #Loop forever until process is stopped - while True: - LOGGER.info("OVS Running") - time.sleep(1000) + #Loop forever until process is stopped + while True: + LOGGER.info("OVS Running") + time.sleep(1000) - # TODO: This time should be configurable (How long to hold before exiting, this could be infinite too) - #time.sleep(300) + # TODO: This time should be configurable (How long to hold before exiting, + # this could be infinite too) + #time.sleep(300) - # Tear down network - #self._ovs_control.shutdown() + # Tear down network + #self._ovs_control.shutdown() - def handler(self, signum, frame): - LOGGER.info("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.info("Exit signal received: " + str(signum)) - if (signum == 2 or signal == signal.SIGTERM): - LOGGER.info("Exit signal received. Restoring network...") - self._ovs_control.shutdown() - exit(1) + def handler(self, signum, frame): + LOGGER.info("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.info("Exit signal received: " + str(signum)) + if (signum == 2 or signal == signal.SIGTERM): + LOGGER.info("Exit signal received. Restoring network...") + self._ovs_control.shutdown() + sys.exit(1) ovs = OVSControlRun() diff --git a/net_orc/network/modules/ovs/python/src/util.py b/net_orc/network/modules/ovs/python/src/util.py index 8bb0439bc..c9eba39ff 100644 --- a/net_orc/network/modules/ovs/python/src/util.py +++ b/net_orc/network/modules/ovs/python/src/util.py @@ -3,17 +3,19 @@ def run_command(cmd): - success = False - LOGGER = logger.get_logger('util') - process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - if process.returncode !=0: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - LOGGER.error("Command Failed: " + cmd) - LOGGER.error("Error: " + err_msg) - else: - succ_msg = "%s. Code: %s" % (stdout.strip().decode('utf-8'), process.returncode) - LOGGER.info("Command Success: " + cmd) - LOGGER.info("Success: " + succ_msg) - success = True - return success \ No newline at end of file + success = False + LOGGER = logger.get_logger('util') + process = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode !=0: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + LOGGER.error("Command Failed: " + cmd) + LOGGER.error("Error: " + err_msg) + else: + succ_msg = "%s. Code: %s" % (stdout.strip().decode('utf-8'), process.returncode) + LOGGER.info("Command Success: " + cmd) + LOGGER.info("Success: " + succ_msg) + success = True + return success diff --git a/net_orc/network/modules/radius/conf/ca.crt b/net_orc/network/modules/radius/conf/ca.crt index d009cb1ab..bb8aadf6a 100644 --- a/net_orc/network/modules/radius/conf/ca.crt +++ b/net_orc/network/modules/radius/conf/ca.crt @@ -1,26 +1,30 @@ -----BEGIN CERTIFICATE----- -MIIEYTCCA0mgAwIBAgIUQJ4F8hBCnCp7ASPZqG/tNQgoUR4wDQYJKoZIhvcNAQEL -BQAwgb8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBIbWzN+TGVpY2VzdGVyc2hpcmUx -FTATBgNVBAcMDExvdWdoYm9yb3VnaDEUMBIGA1UECgwLRm9yZXN0IFJvY2sxDjAM -BgNVBAsMBUN5YmVyMR8wHQYDVQQDDBZjeWJlci5mb3Jlc3Ryb2NrLmNvLnVrMTUw -MwYJKoZIhvcNAQkBFiZjeWJlcnNlY3VyaXR5LnRlc3RpbmdAZm9yZXN0cm9jay5j -by51azAeFw0yMjAzMDQxMjEzMTBaFw0yNzAzMDMxMjEzMTBaMIG/MQswCQYDVQQG -EwJHQjEbMBkGA1UECAwSG1szfkxlaWNlc3RlcnNoaXJlMRUwEwYDVQQHDAxMb3Vn -aGJvcm91Z2gxFDASBgNVBAoMC0ZvcmVzdCBSb2NrMQ4wDAYDVQQLDAVDeWJlcjEf -MB0GA1UEAwwWY3liZXIuZm9yZXN0cm9jay5jby51azE1MDMGCSqGSIb3DQEJARYm -Y3liZXJzZWN1cml0eS50ZXN0aW5nQGZvcmVzdHJvY2suY28udWswggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDNz3vJiZ5nX8lohEhqXvxEme3srip8qF7 -r5ScIeQzsTKuPNAmoefx9TcU3SyA2BnREuDX+OCYMN62xxWG2PndOl0LNezAY22C -PJwHbaBntLKY/ZhxYSTyratM7zxKSVLtClamA/bJXBhdfZZKYOP3xlZQEQTygtzK -j5hZwDrpDARtjRZIMWPLqVcoaW9ow2urJVsdD4lYAhpQU2UIgiWo7BG3hJsUfcYX -EQyyrMKJ7xaCwzIU7Sem1PETrzeiWg4KhDijc7A0RMPWlU5ljf0CnY/IZwiDsMRl -hGmGBPvR+ddiWPZPtSKj6TPWpsaMUR9UwncLmSSrhf1otX4Mw0vbAgMBAAGjUzBR -MB0GA1UdDgQWBBR0Qxx2mDTPIfpnzO5YtycGs6t8ijAfBgNVHSMEGDAWgBR0Qxx2 -mDTPIfpnzO5YtycGs6t8ijAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQCpTMBMZGXF74WCxrIk23MUsu0OKzMs8B16Wy8BHz+7hInLZwbkx71Z0TP5 -rsMITetSANtM/k4jH7Vmr1xmzU7oSz5zKU1+7rIjKjGtih48WZdJay0uqfKe0K2s -vsRS0LVLY6IiTFWK9YrLC0QFSK7z5GDl1oc/D5yIZAkbsL6PRQJ5RQsYf5BhHfyB -PRV/KcF7c9iKVYW2vILJzbyYLHTDADTHbtfCe5+pAGxagswDjSMVkQu5iJNjbtUO -5iv7PRkgzUFru9Kk6q+LrXbzyPPCwlc3Xbh1q5jSkJLkcV3K26E7+uX5HI+Hxpeh -a8kOsdnw+N8wX6bc7eXIaGBDMine ------END CERTIFICATE----- +MIIFDzCCAvegAwIBAgIJAOb7lZzENM1TMA0GCSqGSIb3DQEBCwUAMB0xCzAJBgNV +BAYTAkZSMQ4wDAYDVQQKDAVDQVRUSzAgFw0yMjEwMDcwODIxNTVaGA8yMDcyMDky +NDA4MjE1NVowHTELMAkGA1UEBhMCRlIxDjAMBgNVBAoMBUNBVFRLMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsZ+wd41TfLs5Vh5Wz1ESqIxwzu3iHWjJ +KbOlpnPdI6uPo9DU5xdmhcH0jc/RVis+EVn1ylFyzN3l4uIACah1Dk3frFXN/LWc +EzN7DyyHO56HZ5IpOFazVMQn5xrRwsglRop6et+Azqm+3xDpBSoKg8YhBAUsezuT +N0XlpsN3BMLjVXfwrTV1ECKP0Emg3qP3EaKRm1EdQ0uVNRNe24q5EDWiLnqlD14a +X5w1hHAj0Rr9kmKo+fs9WL7vIzbgy6xccfkKE8Wk7IR/xabTNjC5x+/7Pscqthic +tGYQ+Rm4Z1XTYDKBgoFHdI2ouscmiceqxESu3hW/IBe3iLin84kGywRGrzjLcOFI +adAj+0y3lGGV7Vw2RI3bUA6oOM8V1zbFUsZLq6+ylmvw0HQLAUeBODo6Iwu8ACxT +8/A+LmBUZFk7copLfvqFUmt8vjP7XiDuYsGvVJrTc6MJWWOITqyirhAkcP/vPoNK +l8PXhLGo66xG+hC57gCm3d3IwkXNLW6UhCHIuUa6LTTTaTehy2unDEm7Rt4ghWlw +2JuDr7QcZrWrRj1OwVAiPNkjLCF30aKxnVQxc2JY9W3H+xRC0YlDNmOpdHHvuJfS +1y1tNUq+fZQGybubDsa0l0LHfoKRGfeFXnxT6tyvNnGEaJG9mkLPXPkEBuadrnvA +oZeymb/D440CAwEAAaNQME4wHQYDVR0OBBYEFHKNGWOtO3haPEkZSVfgnxbEbTs3 +MB8GA1UdIwQYMBaAFHKNGWOtO3haPEkZSVfgnxbEbTs3MAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggIBAGzuawd3tYwDCnO84eZ+NT0uqQk37ss1fdwgDY7G +dAaeSb0oW5+tejdiLzWsKgHL7g6ClEOS3f2Vvo65fsH7BA5Ppmhk+4U6ppqbBiGG +v5WqnRz7OpdMTqunFsmYhbgbu+OZX3TTg6hkIYxFHlHDMpcMwMYtlWsPRZSlTM09 +BbaWyhqTZjbUIxemwc3JW+5zRYoA2ii/Om/2/9iUbngVqEilmUrflMcfn81ddate +0XwMcm/qhyKU+CIAPXmmtLkTms66FSSXMfqy1HizzSsCFntozUA7mtPRm53IsGpR +TOdGTe5Y5jJ/dlXwmZ5dmWBR8qlyxLpG0iB7KWNxs+V7B6kCFU3BhiLPiS/BnDap +EE1JDKu1jktJhxeAhmSsrvZ10bCKZW+dQbSjqr3wScYok/f05daB97LaAs869jra +93uJ7dYA9gfUtkaqZW9oqPrIO3FNZLL5D1z6eWcGC2+3MLhrtNTov3fthFGJyWf7 +iCBdQYofeR4EA4nfI+QcM2HAHNtChGESZ/8p/eBSU4GQW7zURELIKJ5OeTJZGAgs +bMbNbqbiyzCSuM2CHTN+Nw0rMc9AXkqSV57scCu/2ui1z1GKWeI65hKhwc++IXP7 +lJWv710T4+9DOgoi5sFNNLbRcVmkUeodFje83PTs+U/hgvQHW1+RTJ4ESTPMqVf1 +VTyk +-----END CERTIFICATE----- \ No newline at end of file diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 39fd3339c..53a94b795 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -535,7 +535,7 @@ def start_network_services(self): LOGGER.info('All network services are running') self._check_network_services() - def _attach_test_module_to_network(self, test_module): + def attach_test_module_to_network(self, test_module): LOGGER.debug('Attaching test module ' + test_module.display_name + ' to device bridge') diff --git a/test_orc/modules/base/python/src/grpc/start_server.py b/test_orc/modules/base/python/src/grpc/start_server.py index 9ed31ffcf..970da67fc 100644 --- a/test_orc/modules/base/python/src/grpc/start_server.py +++ b/test_orc/modules/base/python/src/grpc/start_server.py @@ -3,32 +3,36 @@ import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 from network_service import NetworkService -import logging import sys import argparse -DEFAULT_PORT = '5001' +DEFAULT_PORT = "5001" + def serve(PORT): - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) - server.add_insecure_port('[::]:' + PORT) - server.start() - server.wait_for_termination() + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) + server.add_insecure_port("[::]:" + PORT) + server.start() + server.wait_for_termination() + def run(argv): - parser = argparse.ArgumentParser(description="GRPC Server for Network Module", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-p", "--port", default=DEFAULT_PORT, - help="Define the default port to run the server on.") + parser = argparse.ArgumentParser( + description="GRPC Server for Network Module", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-p", + "--port", + default=DEFAULT_PORT, + help="Define the default port to run the server on.") - args = parser.parse_args() + args = parser.parse_args() - PORT = args.port + PORT = args.port - print("gRPC server starting on port " + PORT) - serve(PORT) + print("gRPC server starting on port " + PORT) + serve(PORT) if __name__ == "__main__": - run(sys.argv) \ No newline at end of file + run(sys.argv) diff --git a/test_orc/modules/base/python/src/logger.py b/test_orc/modules/base/python/src/logger.py index 641aa16b4..42124beea 100644 --- a/test_orc/modules/base/python/src/logger.py +++ b/test_orc/modules/base/python/src/logger.py @@ -1,46 +1,48 @@ -#!/usr/bin/env python3 - +"""Sets up the logger to be used for the test modules.""" import json import logging import os LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' _DEFAULT_LEVEL = logging.INFO -_CONF_DIR = "conf" -_CONF_FILE_NAME = "system.json" -_LOG_DIR = "/runtime/output/" +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = '/runtime/output/' # Set log level try: - system_conf_json = json.load( - open(os.path.join(_CONF_DIR, _CONF_FILE_NAME))) - log_level_str = system_conf_json['log_level'] - log_level = logging.getLevelName(log_level_str) -except: - # TODO: Print out warning that log level is incorrect or missing - log_level = _DEFAULT_LEVEL + with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='UTF-8') as config_json_file: + system_conf_json = json.load(config_json_file) + + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except OSError: + # TODO: Print out warning that log level is incorrect or missing + log_level = _DEFAULT_LEVEL log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) -def add_file_handler(log, logFile): - handler = logging.FileHandler(_LOG_DIR+logFile+".log") - handler.setFormatter(log_format) - log.addHandler(handler) + +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) def add_stream_handler(log): - handler = logging.StreamHandler() - handler.setFormatter(log_format) - log.addHandler(handler) - - -def get_logger(name, logFile=None): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - LOGGERS[name].setLevel(log_level) - add_stream_handler(LOGGERS[name]) - if logFile is not None: - add_file_handler(LOGGERS[name], logFile) - return LOGGERS[name] + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, log_file=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 22b9e0773..34af4cbb4 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -5,109 +5,108 @@ from datetime import datetime LOGGER = None -RESULTS_DIR = "/runtime/output/" -CONF_FILE = "/testrun/conf/module_config.json" +RESULTS_DIR = '/runtime/output/' +CONF_FILE = '/testrun/conf/module_config.json' class TestModule: + """An example test module.""" - def __init__(self, module_name, log_name): - self._module_name = module_name - self._device_mac = os.environ['DEVICE_MAC'] - self._ipv4_subnet = os.environ['IPV4_SUBNET'] - self._ipv6_subnet = os.environ['IPV6_SUBNET'] - self._add_logger(log_name=log_name, module_name=module_name) - self._config = self._read_config() - self._device_ipv4_addr = None - self._device_ipv6_addr = None + def __init__(self, module_name, log_name): + self._module_name = module_name + self._device_mac = os.environ['DEVICE_MAC'] + self._ipv4_subnet = os.environ['IPV4_SUBNET'] + self._ipv6_subnet = os.environ['IPV6_SUBNET'] + self._add_logger(log_name=log_name, module_name=module_name) + self._config = self._read_config() + self._device_ipv4_addr = None + self._device_ipv6_addr = None - def _add_logger(self, log_name, module_name): - global LOGGER - LOGGER = logger.get_logger(log_name, module_name) + def _add_logger(self, log_name, module_name): + global LOGGER + LOGGER = logger.get_logger(log_name, module_name) - def _get_logger(self): - return LOGGER + def _get_logger(self): + return LOGGER - def _get_tests(self): - device_test_module = self._get_device_test_module() - return self._get_device_tests(device_test_module) + def _get_tests(self): + device_test_module = self._get_device_test_module() + return self._get_device_tests(device_test_module) - def _get_device_tests(self, device_test_module): - module_tests = self._config["config"]["tests"] - if device_test_module is None: - return module_tests - elif not device_test_module["enabled"]: - return [] - else: - for test in module_tests: - # Resolve device specific configurations for the test if it exists - # and update module test config with device config options - if test["name"] in device_test_module["tests"]: - dev_test_config = device_test_module["tests"][test["name"]] - if "config" in test: - test["config"].update(dev_test_config) - return module_tests + def _get_device_tests(self, device_test_module): + module_tests = self._config['config']['tests'] + if device_test_module is None: + return module_tests + elif not device_test_module['enabled']: + return [] + else: + for test in module_tests: + # Resolve device specific configurations for the test if it exists + # and update module test config with device config options + if test['name'] in device_test_module['tests']: + dev_test_config = device_test_module['tests'][test['name']] + if 'config' in test: + test['config'].update(dev_test_config) + return module_tests - def _get_device_test_module(self): - # TODO: Make DEVICE_TEST_MODULES a static string - if 'DEVICE_TEST_MODULES' in os.environ: - test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) - if self._module_name in test_modules: - return test_modules[self._module_name] - return None + def _get_device_test_module(self): + # TODO: Make DEVICE_TEST_MODULES a static string + if 'DEVICE_TEST_MODULES' in os.environ: + test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) + if self._module_name in test_modules: + return test_modules[self._module_name] + return None - def run_tests(self): - if self._config["config"]["network"]: - self._device_ipv4_addr = self._get_device_ipv4() - LOGGER.info("Device IP Resolved: " + str(self._device_ipv4_addr)) - tests = self._get_tests() - for test in tests: - test_method_name = "_" + test["name"].replace(".", "_") - result = None - if ("enabled" in test and test["enabled"]) or "enabled" not in test: - LOGGER.info("Attempting to run test: " + test["name"]) - test['start'] = datetime.now().isoformat() - # Resolve the correct python method by test name and run test - if hasattr(self, test_method_name): - if "config" in test: - result = getattr(self, test_method_name)( - config=test["config"]) - else: - result = getattr(self, test_method_name)() - else: - LOGGER.info("Test " + test["name"] + - " not resolved. Skipping") - result = None - else: - LOGGER.info("Test " + test["name"] + - " disabled. Skipping") - if result is not None: - test["result"] = "compliant" if result else "non-compliant" - else: - test["result"] = "skipped" - test['end'] = datetime.now().isoformat() - duration = datetime.fromisoformat(test['end']) - datetime.fromisoformat(test['start']) - test['duration'] = str(duration) - json_results = json.dumps({"results": tests}, indent=2) - self._write_results(json_results) + def run_tests(self): + if self._config['config']['network']: + self._device_ipv4_addr = self._get_device_ipv4() + LOGGER.info('Device IP Resolved: ' + str(self._device_ipv4_addr)) + tests = self._get_tests() + for test in tests: + test_method_name = '_' + test['name'].replace('.', '_') + result = None + if ('enabled' in test and test['enabled']) or 'enabled' not in test: + LOGGER.info('Attempting to run test: ' + test['name']) + test['start'] = datetime.now().isoformat() + # Resolve the correct python method by test name and run test + if hasattr(self, test_method_name): + if 'config' in test: + result = getattr(self, test_method_name)(config=test['config']) + else: + result = getattr(self, test_method_name)() + else: + LOGGER.info('Test ' + test['name'] + ' not resolved. Skipping') + result = None + else: + LOGGER.info('Test ' + test['name'] + ' disabled. Skipping') + if result is not None: + test['result'] = 'compliant' if result else 'non-compliant' + else: + test['result'] = 'skipped' + test['end'] = datetime.now().isoformat() + duration = datetime.fromisoformat(test['end']) - datetime.fromisoformat( + test['start']) + test['duration'] = str(duration) + json_results = json.dumps({'results': tests}, indent=2) + self._write_results(json_results) - def _read_config(self): - f = open(CONF_FILE, encoding="utf-8") - config = json.load(f) - f.close() - return config + def _read_config(self): + f = open(CONF_FILE, encoding='utf-8') + config = json.load(f) + f.close() + return config - def _write_results(self, results): - results_file = RESULTS_DIR + self._module_name + "-result.json" - LOGGER.info("Writing results to " + results_file) - f = open(results_file, "w", encoding="utf-8") - f.write(results) - f.close() + def _write_results(self, results): + results_file = RESULTS_DIR + self._module_name + '-result.json' + LOGGER.info('Writing results to ' + results_file) + f = open(results_file, 'w', encoding='utf-8') + f.write(results) + f.close() - def _get_device_ipv4(self): - command = '/testrun/bin/get_ipv4_addr {} {}'.format( - self._ipv4_subnet, self._device_mac.upper()) - text, err = util.run_command(command) - if text: - return text.split("\n")[0] - return None + def _get_device_ipv4(self): + command = f"""/testrun/bin/get_ipv4_addr {self._ipv4_subnet} + {self._device_mac.upper()}""" + text = util.run_command(command)[0] + if text: + return text.split('\n')[0] + return None diff --git a/test_orc/modules/base/python/src/util.py b/test_orc/modules/base/python/src/util.py index a2dcfbdb1..557f450a6 100644 --- a/test_orc/modules/base/python/src/util.py +++ b/test_orc/modules/base/python/src/util.py @@ -2,6 +2,7 @@ import shlex import logger + # Runs a process at the os level # By default, returns the standard output and error output # If the caller sets optional output parameter to False, @@ -9,17 +10,19 @@ # succesful in running the command. Failure is indicated # by any return code from the process other than zero. def run_command(cmd, output=True): - success = False - LOGGER = logger.get_logger('util') - process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - if process.returncode !=0 and output: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - LOGGER.error("Command Failed: " + cmd) - LOGGER.error("Error: " + err_msg) - else: - success = True - if output: - return stdout.strip().decode('utf-8'), stderr - else: - return success + success = False + LOGGER = logger.get_logger('util') + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode != 0 and output: + err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) + LOGGER.error("Command Failed: " + cmd) + LOGGER.error("Error: " + err_msg) + else: + success = True + if output: + return stdout.strip().decode("utf-8"), stderr + else: + return success diff --git a/test_orc/modules/baseline/python/src/baseline_module.py b/test_orc/modules/baseline/python/src/baseline_module.py index 80c04ef48..9816bd28a 100644 --- a/test_orc/modules/baseline/python/src/baseline_module.py +++ b/test_orc/modules/baseline/python/src/baseline_module.py @@ -5,27 +5,26 @@ LOG_NAME = "test_baseline" LOGGER = None + class BaselineModule(TestModule): + """An example testing module.""" + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + global LOGGER + LOGGER = self._get_logger() + + def _baseline_pass(self): + LOGGER.info("Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return True + + def _baseline_fail(self): + LOGGER.info("Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return False - def __init__(self, module): - super().__init__(module_name=module, log_name=LOG_NAME) - global LOGGER - LOGGER = self._get_logger() - - def _baseline_pass(self): - LOGGER.info( - "Running baseline pass test") - LOGGER.info("Baseline pass test finished") - return True - - def _baseline_fail(self): - LOGGER.info( - "Running baseline pass test") - LOGGER.info("Baseline pass test finished") - return False - - def _baseline_skip(self): - LOGGER.info( - "Running baseline pass test") - LOGGER.info("Baseline pass test finished") - return None \ No newline at end of file + def _baseline_skip(self): + LOGGER.info("Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return None diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py index 8b55484ae..89b3a08e4 100644 --- a/test_orc/modules/baseline/python/src/run.py +++ b/test_orc/modules/baseline/python/src/run.py @@ -10,40 +10,47 @@ LOGGER = logger.get_logger('test_module') RUNTIME = 1500 + class BaselineModuleRunner: + """An example runner class for test modules.""" + + def __init__(self, module): - def __init__(self,module): + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) - signal.signal(signal.SIGINT, self._handler) - signal.signal(signal.SIGTERM, self._handler) - signal.signal(signal.SIGABRT, self._handler) - signal.signal(signal.SIGQUIT, self._handler) + LOGGER.info("Starting Baseline Module") - LOGGER.info("Starting Baseline Module") + self._test_module = BaselineModule(module) + self._test_module.run_tests() - self._test_module = BaselineModule(module) - self._test_module.run_tests() + def _handler(self, signum, *other): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) - def _handler(self, signum, *other): - LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received. Stopping test module...") - LOGGER.info("Test module stopped") - sys.exit(1) def run(argv): - parser = argparse.ArgumentParser(description="Baseline Module Help", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + description="Baseline Module Help", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", + "--module", + help="Define the module name to be used to create the log file") - parser.add_argument( - "-m", "--module", help="Define the module name to be used to create the log file") + args = parser.parse_args() - args = parser.parse_args() + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + BaselineModuleRunner(args.module.strip()) - # For some reason passing in the args from bash adds an extra - # space before the argument so we'll just strip out extra space - BaselineModuleRunner(args.module.strip()) if __name__ == "__main__": - run(sys.argv) + run(sys.argv) diff --git a/test_orc/modules/dns/python/src/dns_module.py b/test_orc/modules/dns/python/src/dns_module.py index f1333ce14..b161805a5 100644 --- a/test_orc/modules/dns/python/src/dns_module.py +++ b/test_orc/modules/dns/python/src/dns_module.py @@ -7,71 +7,70 @@ CAPTURE_FILE = "/runtime/network/dns.pcap" LOGGER = None + class DNSModule(TestModule): - def __init__(self, module): - super().__init__(module_name=module, log_name=LOG_NAME) - self._dns_server = "10.10.10.4" - global LOGGER - LOGGER = self._get_logger() + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + self._dns_server = "10.10.10.4" + global LOGGER + LOGGER = self._get_logger() - def _check_dns_traffic(self, tcpdump_filter): - to_dns = self._exec_tcpdump(tcpdump_filter) - num_query_dns = len(to_dns) - LOGGER.info("DNS queries found: " + str(num_query_dns)) - dns_traffic_detected = len(to_dns) > 0 - LOGGER.info("DNS traffic detected: " + str(dns_traffic_detected)) - return dns_traffic_detected + def _check_dns_traffic(self, tcpdump_filter): + to_dns = self._exec_tcpdump(tcpdump_filter) + num_query_dns = len(to_dns) + LOGGER.info("DNS queries found: " + str(num_query_dns)) + dns_traffic_detected = len(to_dns) > 0 + LOGGER.info("DNS traffic detected: " + str(dns_traffic_detected)) + return dns_traffic_detected - def _dns_network_from_dhcp(self): - LOGGER.info( - "Checking DNS traffic for configured DHCP DNS server: " + self._dns_server) + def _dns_network_from_dhcp(self): + LOGGER.info("Checking DNS traffic for configured DHCP DNS server: " + + self._dns_server) - # Check if the device DNS traffic is to appropriate server - tcpdump_filter = 'dst port 53 and dst host {} and ether src {}'.format( - self._dns_server, self._device_mac) + # Check if the device DNS traffic is to appropriate server + tcpdump_filter = "dst port 53 and dst host {} and ether src {}".format( + self._dns_server, self._device_mac) - result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) - LOGGER.info( - "DNS traffic detected to configured DHCP DNS server: " + str(result)) - return result + LOGGER.info("DNS traffic detected to configured DHCP DNS server: " + + str(result)) + return result - def _dns_network_from_device(self): - LOGGER.info("Checking DNS traffic from device: " + self._device_mac) + def _dns_network_from_device(self): + LOGGER.info("Checking DNS traffic from device: " + self._device_mac) - # Check if the device DNS traffic is to appropriate server - tcpdump_filter = 'dst port 53 and ether src {}'.format( - self._device_mac) + # Check if the device DNS traffic is to appropriate server + tcpdump_filter = "dst port 53 and ether src {}".format(self._device_mac) - result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) - LOGGER.info("DNS traffic detected from device: " + str(result)) - return result + LOGGER.info("DNS traffic detected from device: " + str(result)) + return result - def _exec_tcpdump(self, tcpdump_filter): - """ - Args - tcpdump_filter: Filter to pass onto tcpdump file - capture_file: Optional capture file to look - Returns - List of packets matching the filter - """ - command = 'tcpdump -tttt -n -r {} {}'.format( - CAPTURE_FILE, tcpdump_filter) + def _exec_tcpdump(self, tcpdump_filter): + """ + Args + tcpdump_filter: Filter to pass onto tcpdump file + capture_file: Optional capture file to look + Returns + List of packets matching the filter + """ + command = "tcpdump -tttt -n -r {} {}".format(CAPTURE_FILE, tcpdump_filter) - LOGGER.debug("tcpdump command: " + command) + LOGGER.debug("tcpdump command: " + command) - process = subprocess.Popen(command, - universal_newlines=True, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - text = str(process.stdout.read()).rstrip() + process = subprocess.Popen(command, + universal_newlines=True, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + text = str(process.stdout.read()).rstrip() - LOGGER.debug("tcpdump response: " + text) + LOGGER.debug("tcpdump response: " + text) - if text: - return text.split("\n") + if text: + return text.split("\n") - return [] + return [] diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py index e5fedb67b..06b8aa571 100644 --- a/test_orc/modules/dns/python/src/run.py +++ b/test_orc/modules/dns/python/src/run.py @@ -4,7 +4,6 @@ import signal import sys import logger -import time from dns_module import DNSModule @@ -12,47 +11,53 @@ LOGGER = logger.get_logger(LOG_NAME) RUNTIME = 1500 + class DNSModuleRunner: - def __init__(self,module): + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + self.add_logger(module) - signal.signal(signal.SIGINT, self._handler) - signal.signal(signal.SIGTERM, self._handler) - signal.signal(signal.SIGABRT, self._handler) - signal.signal(signal.SIGQUIT, self._handler) - self.add_logger(module) + LOGGER.info("Starting DNS Test Module") - LOGGER.info("Starting DNS Test Module") + self._test_module = DNSModule(module) + self._test_module.run_tests() - self._test_module = DNSModule(module) - self._test_module.run_tests() + LOGGER.info("DNS Test Module Finished") - LOGGER.info("DNS Test Module Finished") + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) - def add_logger(self, module): - global LOGGER - LOGGER = logger.get_logger(LOG_NAME, module) + def _handler(self, signum, *other): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) - def _handler(self, signum, *other): - LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received. Stopping test module...") - LOGGER.info("Test module stopped") - sys.exit(1) def run(argv): - parser = argparse.ArgumentParser(description="Test Module DNS", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + description="Test Module DNS", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", + "--module", + help="Define the module name to be used to create the log file") - parser.add_argument( - "-m", "--module", help="Define the module name to be used to create the log file") + args = parser.parse_args() - args = parser.parse_args() + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + DNSModuleRunner(args.module.strip()) - # For some reason passing in the args from bash adds an extra - # space before the argument so we'll just strip out extra space - DNSModuleRunner(args.module.strip()) if __name__ == "__main__": - run(sys.argv) + run(sys.argv) diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py index 7d5bd3604..cd6ec276b 100644 --- a/test_orc/modules/nmap/python/src/nmap_module.py +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -12,216 +12,218 @@ class NmapModule(TestModule): - def __init__(self, module): - super().__init__(module_name=module, log_name=LOG_NAME) - self._unallowed_ports = [] - self._scan_tcp_results = None - self._udp_tcp_results = None - self._script_scan_results = None - global LOGGER - LOGGER = self._get_logger() - - def _security_nmap_ports(self, config): - LOGGER.info( - "Running security.nmap.ports test") - - # Delete the enabled key from the config if it exists - # to prevent it being treated as a test key - if "enabled" in config: - del config["enabled"] - - if self._device_ipv4_addr is not None: - # Run the monitor method asynchronously to keep this method non-blocking - self._tcp_scan_thread = threading.Thread( - target=self._scan_tcp_ports, args=(config,)) - self._udp_scan_thread = threading.Thread( - target=self._scan_udp_ports, args=(config,)) - self._script_scan_thread = threading.Thread( - target=self._scan_scripts, args=(config,)) - - self._tcp_scan_thread.daemon = True - self._udp_scan_thread.daemon = True - self._script_scan_thread.daemon = True - - self._tcp_scan_thread.start() - self._udp_scan_thread.start() - self._script_scan_thread.start() - - while self._tcp_scan_thread.is_alive() or self._udp_scan_thread.is_alive() or self._script_scan_thread.is_alive(): - time.sleep(1) - - LOGGER.debug("TCP scan results: " + str(self._scan_tcp_results)) - LOGGER.debug("UDP scan results: " + str(self._scan_udp_results)) - LOGGER.debug("Service scan results: " + - str(self._script_scan_results)) - self._process_port_results( - tests=config) - LOGGER.info("Unallowed Ports: " + str(self._unallowed_ports)) - LOGGER.info("Script scan results:\n" + - json.dumps(self._script_scan_results)) - return len(self._unallowed_ports) == 0 + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + self._unallowed_ports = [] + self._scan_tcp_results = None + self._udp_tcp_results = None + self._script_scan_results = None + global LOGGER + LOGGER = self._get_logger() + + def _security_nmap_ports(self, config): + LOGGER.info("Running security.nmap.ports test") + + # Delete the enabled key from the config if it exists + # to prevent it being treated as a test key + if "enabled" in config: + del config["enabled"] + + if self._device_ipv4_addr is not None: + # Run the monitor method asynchronously to keep this method non-blocking + self._tcp_scan_thread = threading.Thread(target=self._scan_tcp_ports, + args=(config, )) + self._udp_scan_thread = threading.Thread(target=self._scan_udp_ports, + args=(config, )) + self._script_scan_thread = threading.Thread(target=self._scan_scripts, + args=(config, )) + + self._tcp_scan_thread.daemon = True + self._udp_scan_thread.daemon = True + self._script_scan_thread.daemon = True + + self._tcp_scan_thread.start() + self._udp_scan_thread.start() + self._script_scan_thread.start() + + while self._tcp_scan_thread.is_alive() or self._udp_scan_thread.is_alive( + ) or self._script_scan_thread.is_alive(): + time.sleep(1) + + LOGGER.debug("TCP scan results: " + str(self._scan_tcp_results)) + LOGGER.debug("UDP scan results: " + str(self._scan_udp_results)) + LOGGER.debug("Service scan results: " + str(self._script_scan_results)) + self._process_port_results(tests=config) + LOGGER.info("Unallowed Ports: " + str(self._unallowed_ports)) + LOGGER.info("Script scan results:\n" + + json.dumps(self._script_scan_results)) + return len(self._unallowed_ports) == 0 + else: + LOGGER.info("Device ip address not resolved, skipping") + return None + + def _process_port_results(self, tests): + for test in tests: + LOGGER.info("Checking results for test: " + str(test)) + self._check_scan_results(test_config=tests[test]) + + def _check_scan_results(self, test_config): + port_config = {} + if "tcp_ports" in test_config: + port_config.update(test_config["tcp_ports"]) + elif "udp_ports" in test_config: + port_config.update(test_config["udp_ports"]) + + scan_results = {} + if self._scan_tcp_results is not None: + scan_results.update(self._scan_tcp_results) + if self._scan_udp_results is not None: + scan_results.update(self._scan_udp_results) + if self._script_scan_results is not None: + scan_results.update(self._script_scan_results) + if port_config is not None: + for port in port_config: + result = None + LOGGER.info("Checking port: " + str(port)) + LOGGER.debug("Port config: " + str(port_config[port])) + if port in scan_results: + if scan_results[port]["state"] == "open": + if not port_config[port]["allowed"]: + LOGGER.info("Unallowed port open") + self._unallowed_ports.append(str(port)) + result = False + else: + LOGGER.info("Allowed port open") + result = True + else: + LOGGER.info("Port is closed") + result = True else: - LOGGER.info("Device ip address not resolved, skipping") - return None - - def _process_port_results(self, tests): - for test in tests: - LOGGER.info("Checking results for test: " + str(test)) - self._check_scan_results(test_config=tests[test]) - - def _check_scan_results(self, test_config): - port_config = {} - if "tcp_ports" in test_config: - port_config.update(test_config["tcp_ports"]) - elif "udp_ports" in test_config: - port_config.update(test_config["udp_ports"]) - - scan_results = {} - if self._scan_tcp_results is not None: - scan_results.update(self._scan_tcp_results) - if self._scan_udp_results is not None: - scan_results.update(self._scan_udp_results) - if self._script_scan_results is not None: - scan_results.update(self._script_scan_results) - if port_config is not None: - for port in port_config: - result = None - LOGGER.info("Checking port: " + str(port)) - LOGGER.debug("Port config: " + str(port_config[port])) - if port in scan_results: - if scan_results[port]["state"] == "open": - if not port_config[port]["allowed"]: - LOGGER.info("Unallowed port open") - self._unallowed_ports.append(str(port)) - result = False - else: - LOGGER.info("Allowed port open") - result = True - else: - LOGGER.info("Port is closed") - result = True - else: - LOGGER.info("Port not detected, closed") - result = True - - if result is not None: - port_config[port]["result"] = "compliant" if result else "non-compliant" - else: - port_config[port]["result"] = "skipped" - - def _scan_scripts(self, tests): - scan_results = {} - LOGGER.info("Checing for scan scripts") - for test in tests: - test_config = tests[test] - if "tcp_ports" in test_config: - for port in test_config["tcp_ports"]: - port_config = test_config["tcp_ports"][port] - if "service_scan" in port_config: - LOGGER.info("Service Scan Detected for: " + str(port)) - svc = port_config["service_scan"] - scan_results.update( - self._scan_tcp_with_script(svc["script"])) - if "udp_ports" in test_config: - for port in test_config["udp_ports"]: - if "service_scan" in port: - LOGGER.info("Service Scan Detected for: " + str(port)) - svc = port["service_scan"] - self._scan_udp_with_script(svc["script"], port) - scan_results.update( - self._scan_tcp_with_script(svc["script"])) - self._script_scan_results = scan_results - - def _scan_tcp_with_script(self, script_name, ports=None): - LOGGER.info("Running TCP nmap scan with script " + script_name) - scan_options = " -v -n T3 --host-timeout=6m -A --script " + script_name - port_options = " --open " - if ports is None: - port_options += " -p- " - else: - port_options += " -p" + ports + " " - results_file = "/runtime/output/" + self._module_name + "-"+script_name+".log" - nmap_options = scan_options + port_options + " -oG " + results_file - nmap_results, err = util.run_command( - "nmap " + nmap_options + " " + self._device_ipv4_addr) - LOGGER.info("Nmap TCP script scan complete") - LOGGER.info("nmap script results\n" + str(nmap_results)) - return self._process_nmap_results(nmap_results=nmap_results) - - def _scan_udp_with_script(self, script_name, ports=None): - LOGGER.info("Running UDP nmap scan with script " + script_name) - scan_options = " --sU -Pn -n --script " + script_name - port_options = " --open " - if ports is None: - port_options += " -p- " + LOGGER.info("Port not detected, closed") + result = True + + if result is not None: + port_config[port][ + "result"] = "compliant" if result else "non-compliant" else: - port_options += " -p" + ports + " " - nmap_options = scan_options + port_options - nmap_results, err = util.run_command( - "nmap " + nmap_options + self._device_ipv4_addr) - LOGGER.info("Nmap UDP script scan complete") - LOGGER.info("nmap script results\n" + str(nmap_results)) - return self._process_nmap_results(nmap_results=nmap_results) - - def _scan_tcp_ports(self, tests): - max_port = 1000 - ports = [] - for test in tests: - test_config = tests[test] - if "tcp_ports" in test_config: - for port in test_config["tcp_ports"]: - if int(port) > max_port: - ports.append(port) - ports_to_scan = "1-" + str(max_port) - if len(ports) > 0: - ports_to_scan += "," + ','.join(ports) - LOGGER.info("Running nmap TCP port scan") - LOGGER.info("TCP ports: " + str(ports_to_scan)) - nmap_results, err = util.run_command( - "nmap -sT -sV -Pn -v -p " + ports_to_scan + " --version-intensity 7 -T4 " + self._device_ipv4_addr) - LOGGER.info("TCP port scan complete") - self._scan_tcp_results = self._process_nmap_results( - nmap_results=nmap_results) - - def _scan_udp_ports(self, tests): - ports = [] - for test in tests: - test_config = tests[test] - if "udp_ports" in test_config: - for port in test_config["udp_ports"]: - ports.append(port) - if len(ports) > 0: - port_list = ','.join(ports) - LOGGER.info("Running nmap UDP port scan") - LOGGER.info("UDP ports: " + str(port_list)) - nmap_results, err = util.run_command( - "nmap -sU -sV -p " + port_list + " " + self._device_ipv4_addr) - LOGGER.info("UDP port scan complete") - self._scan_udp_results = self._process_nmap_results( - nmap_results=nmap_results) - - def _process_nmap_results(self, nmap_results): - results = {} - LOGGER.info("nmap results\n" + str(nmap_results)) - if nmap_results: - if "Service Info" in nmap_results: - rows = nmap_results.split("PORT")[1].split( - "Service Info")[0].split("\n") - elif "PORT" in nmap_results: - rows = nmap_results.split("PORT")[1].split( - "MAC Address")[0].split("\n") - if rows: - for result in rows[1:-1]: # Iterate skipping the header and tail rows - cols = result.split() - port = cols[0].split("/")[0] - # If results don't start with a a port number, it's likely a bleed over - # from previous result so we need to ignore it - if port.isdigit(): - version = "" - if len(cols) > 3: - # recombine full version information that may contain spaces - version = ' '.join(cols[3:]) - port_result = {cols[0].split( - "/")[0]: {"state": cols[1], "service": cols[2], "version": version}} - results.update(port_result) - return results + port_config[port]["result"] = "skipped" + + def _scan_scripts(self, tests): + scan_results = {} + LOGGER.info("Checing for scan scripts") + for test in tests: + test_config = tests[test] + if "tcp_ports" in test_config: + for port in test_config["tcp_ports"]: + port_config = test_config["tcp_ports"][port] + if "service_scan" in port_config: + LOGGER.info("Service Scan Detected for: " + str(port)) + svc = port_config["service_scan"] + scan_results.update(self._scan_tcp_with_script(svc["script"])) + if "udp_ports" in test_config: + for port in test_config["udp_ports"]: + if "service_scan" in port: + LOGGER.info("Service Scan Detected for: " + str(port)) + svc = port["service_scan"] + self._scan_udp_with_script(svc["script"], port) + scan_results.update(self._scan_tcp_with_script(svc["script"])) + self._script_scan_results = scan_results + + def _scan_tcp_with_script(self, script_name, ports=None): + LOGGER.info("Running TCP nmap scan with script " + script_name) + scan_options = " -v -n T3 --host-timeout=6m -A --script " + script_name + port_options = " --open " + if ports is None: + port_options += " -p- " + else: + port_options += " -p" + ports + " " + results_file = f"/runtime/output/{self._module_name}-script_name.log" + nmap_options = scan_options + port_options + " -oG " + results_file + nmap_results = util.run_command("nmap " + nmap_options + " " + + self._device_ipv4_addr)[0] + LOGGER.info("Nmap TCP script scan complete") + LOGGER.info("nmap script results\n" + str(nmap_results)) + return self._process_nmap_results(nmap_results=nmap_results) + + def _scan_udp_with_script(self, script_name, ports=None): + LOGGER.info("Running UDP nmap scan with script " + script_name) + scan_options = " --sU -Pn -n --script " + script_name + port_options = " --open " + if ports is None: + port_options += " -p- " + else: + port_options += " -p" + ports + " " + nmap_options = scan_options + port_options + nmap_results = util.run_command("nmap " + nmap_options + + self._device_ipv4_addr)[0] + LOGGER.info("Nmap UDP script scan complete") + LOGGER.info("nmap script results\n" + str(nmap_results)) + return self._process_nmap_results(nmap_results=nmap_results) + + def _scan_tcp_ports(self, tests): + max_port = 1000 + ports = [] + for test in tests: + test_config = tests[test] + if "tcp_ports" in test_config: + for port in test_config["tcp_ports"]: + if int(port) > max_port: + ports.append(port) + ports_to_scan = "1-" + str(max_port) + if len(ports) > 0: + ports_to_scan += "," + ",".join(ports) + LOGGER.info("Running nmap TCP port scan") + LOGGER.info("TCP ports: " + str(ports_to_scan)) + nmap_results = util.run_command(f"""nmap -sT -sV -Pn -v -p {ports_to_scan} + --version-intensity 7 -T4 {self._device_ipv4_addr}""")[0] + LOGGER.info("TCP port scan complete") + self._scan_tcp_results = self._process_nmap_results( + nmap_results=nmap_results) + + def _scan_udp_ports(self, tests): + ports = [] + for test in tests: + test_config = tests[test] + if "udp_ports" in test_config: + for port in test_config["udp_ports"]: + ports.append(port) + if len(ports) > 0: + port_list = ",".join(ports) + LOGGER.info("Running nmap UDP port scan") + LOGGER.info("UDP ports: " + str(port_list)) + nmap_results = util.run_command( + f"nmap -sU -sV -p {port_list} {self._device_ipv4_addr}")[0] + LOGGER.info("UDP port scan complete") + self._scan_udp_results = self._process_nmap_results( + nmap_results=nmap_results) + + def _process_nmap_results(self, nmap_results): + results = {} + LOGGER.info("nmap results\n" + str(nmap_results)) + if nmap_results: + if "Service Info" in nmap_results: + rows = nmap_results.split("PORT")[1].split("Service Info")[0].split( + "\n") + elif "PORT" in nmap_results: + rows = nmap_results.split("PORT")[1].split("MAC Address")[0].split("\n") + if rows: + for result in rows[1:-1]: # Iterate skipping the header and tail rows + cols = result.split() + port = cols[0].split("/")[0] + # If results do not start with a a port number, + # it is likely a bleed over from previous result so + # we need to ignore it + if port.isdigit(): + version = "" + if len(cols) > 3: + # recombine full version information that may contain spaces + version = " ".join(cols[3:]) + port_result = { + cols[0].split("/")[0]: { + "state": cols[1], + "service": cols[2], + "version": version + } + } + results.update(port_result) + return results diff --git a/test_orc/modules/nmap/python/src/run.py b/test_orc/modules/nmap/python/src/run.py index 4c8294769..4ed1f533c 100644 --- a/test_orc/modules/nmap/python/src/run.py +++ b/test_orc/modules/nmap/python/src/run.py @@ -9,40 +9,47 @@ LOGGER = logger.get_logger('test_module') + class NmapModuleRunner: + """Run the NMAP module tests.""" + + def __init__(self, module): - def __init__(self,module): + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) - signal.signal(signal.SIGINT, self._handler) - signal.signal(signal.SIGTERM, self._handler) - signal.signal(signal.SIGABRT, self._handler) - signal.signal(signal.SIGQUIT, self._handler) + LOGGER.info("Starting nmap Module") - LOGGER.info("Starting nmap Module") + self._test_module = NmapModule(module) + self._test_module.run_tests() - self._test_module = NmapModule(module) - self._test_module.run_tests() + def _handler(self, signum, *other): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) - def _handler(self, signum, *other): - LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received. Stopping test module...") - LOGGER.info("Test module stopped") - sys.exit(1) def run(argv): - parser = argparse.ArgumentParser(description="Nmap Module Help", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + description="Nmap Module Help", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", + "--module", + help="Define the module name to be used to create the log file") - parser.add_argument( - "-m", "--module", help="Define the module name to be used to create the log file") + args = parser.parse_args() - args = parser.parse_args() + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + NmapModuleRunner(args.module.strip()) - # For some reason passing in the args from bash adds an extra - # space before the argument so we'll just strip out extra space - NmapModuleRunner(args.module.strip()) if __name__ == "__main__": - run(sys.argv) + run(sys.argv) diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py index 54f920fa1..72791f86e 100644 --- a/test_orc/python/src/module.py +++ b/test_orc/python/src/module.py @@ -2,8 +2,9 @@ from dataclasses import dataclass from docker.models.containers import Container + @dataclass -class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes +class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes """Represents a test module.""" name: str = None @@ -13,7 +14,7 @@ class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-att build_file: str = None container: Container = None container_name: str = None - image_name :str = None + image_name: str = None enable_container: bool = True network: bool = True diff --git a/test_orc/python/src/runner.py b/test_orc/python/src/runner.py index cc495bf8d..d82935057 100644 --- a/test_orc/python/src/runner.py +++ b/test_orc/python/src/runner.py @@ -4,6 +4,7 @@ LOGGER = logger.get_logger('runner') + class Runner: """Holds the state of the testing for one device.""" diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index f1e45e2f6..5cc14ae85 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -50,9 +50,9 @@ def run_test_modules(self, device): for module in self._test_modules: self._run_test_module(module, device) LOGGER.info("All tests complete") - LOGGER.info( - f"Completed running test modules on device with mac addr {device.mac_addr}") - results = self._generate_results(device) + LOGGER.info(f"""Completed running test modules on device + with mac addr {device.mac_addr}""") + self._generate_results(device) def _generate_results(self, device): results = {} @@ -63,31 +63,33 @@ def _generate_results(self, device): results["device"]["model"] = device.model results["device"]["mac_addr"] = device.mac_addr for module in self._test_modules: - if module.enable_container and self._is_module_enabled(module,device): + if module.enable_container and self._is_module_enabled(module, device): container_runtime_dir = os.path.join( - self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + - '/' + module.name) - results_file = container_runtime_dir + '/' + module.name + '-result.json' + self._root_path, "runtime/test/" + + device.mac_addr.replace(":", "") + "/" + module.name) + results_file = container_runtime_dir + "/" + module.name + "-result.json" try: - with open(results_file, 'r', encoding='UTF-8') as f: + with open(results_file, "r", encoding="UTF-8") as f: module_results = json.load(f) results[module.name] = module_results - except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: + except (FileNotFoundError, PermissionError, + json.JSONDecodeError) as results_error: LOGGER.error("Module Results Errror " + module.name) LOGGER.debug(results_error) out_file = os.path.join( - self._root_path, 'runtime/test/' + device.mac_addr.replace(':', '') + '/results.json') - with open(out_file, 'w') as f: - json.dump(results,f,indent=2) + self._root_path, + "runtime/test/" + device.mac_addr.replace(":", "") + "/results.json") + with open(out_file, "w", encoding="utf-8") as f: + json.dump(results, f, indent=2) return results - def _is_module_enabled(self,module,device): + def _is_module_enabled(self, module, device): enabled = True if device.test_modules is not None: test_modules = json.loads(device.test_modules) if module.name in test_modules: - if 'enabled' in test_modules[module.name]: + if "enabled" in test_modules[module.name]: enabled = test_modules[module.name]["enabled"] return enabled @@ -97,7 +99,7 @@ def _run_test_module(self, module, device): if module is None or not module.enable_container: return - if not self._is_module_enabled(module,device): + if not self._is_module_enabled(module, device): return LOGGER.info("Running test module " + module.name) @@ -122,10 +124,10 @@ def _run_test_module(self, module, device): mounts=[ Mount(target="/runtime/output", source=container_runtime_dir, - type='bind'), + type="bind"), Mount(target="/runtime/network", source=network_runtime_dir, - type='bind', + type="bind", read_only=True), ], environment={ @@ -144,13 +146,13 @@ def _run_test_module(self, module, device): # Mount the test container to the virtual network if requried if module.network: LOGGER.debug("Attaching test module to the network") - self._net_orc._attach_test_module_to_network(module) + self._net_orc.attach_test_module_to_network(module) # Determine the module timeout time test_module_timeout = time.time() + module.timeout status = self._get_module_status(module) - while time.time() < test_module_timeout and status == 'running': + while time.time() < test_module_timeout and status == "running": time.sleep(1) status = self._get_module_status(module) @@ -164,7 +166,9 @@ def _get_module_status(self, module): def _get_test_module(self, name): for test_module in self._test_modules: - if name == test_module.display_name or name == test_module.name or name == test_module.dir_name: + if name in [ + test_module.display_name, test_module.name, test_module.dir_name + ]: return test_module return None @@ -203,28 +207,28 @@ def _load_test_module(self, module_dir): # Load basic module information module = TestModule() with open(os.path.join(self._path, modules_dir, module_dir, MODULE_CONFIG), - encoding='UTF-8') as module_config_file: + encoding="UTF-8") as module_config_file: module_json = json.load(module_config_file) - module.name = module_json['config']['meta']['name'] - module.display_name = module_json['config']['meta']['display_name'] - module.description = module_json['config']['meta']['description'] + module.name = module_json["config"]["meta"]["name"] + module.display_name = module_json["config"]["meta"]["display_name"] + module.description = module_json["config"]["meta"]["description"] module.dir = os.path.join(self._path, modules_dir, module_dir) module.dir_name = module_dir module.build_file = module_dir + ".Dockerfile" module.container_name = "tr-ct-" + module.dir_name + "-test" module.image_name = "test-run/" + module.dir_name + "-test" - if 'timeout' in module_json['config']['docker']: - module.timeout = module_json['config']['docker']['timeout'] + if "timeout" in module_json["config"]["docker"]: + module.timeout = module_json["config"]["docker"]["timeout"] # Determine if this is a container or just an image/template - if "enable_container" in module_json['config']['docker']: - module.enable_container = module_json['config']['docker'][ - 'enable_container'] + if "enable_container" in module_json["config"]["docker"]: + module.enable_container = module_json["config"]["docker"][ + "enable_container"] - if "depends_on" in module_json['config']['docker']: - depends_on_module = module_json['config']['docker']['depends_on'] + if "depends_on" in module_json["config"]["docker"]: + depends_on_module = module_json["config"]["docker"]["depends_on"] if self._get_test_module(depends_on_module) is None: self._load_test_module(depends_on_module) diff --git a/testing/test_baseline.py b/testing/test_baseline.py index e8a257672..6f6240c27 100644 --- a/testing/test_baseline.py +++ b/testing/test_baseline.py @@ -1,7 +1,7 @@ import json import pytest import re -import os +import os NTP_SERVER = '10.10.10.5' DNS_SERVER = '10.10.10.4' @@ -10,42 +10,45 @@ @pytest.fixture def container_data(): - dir = os.path.dirname(os.path.abspath(__file__)) - with open(CI_BASELINE_OUT) as f: - return json.load(f) + dir = os.path.dirname(os.path.abspath(__file__)) + with open(CI_BASELINE_OUT, encoding='utf-8') as f: + return json.load(f) @pytest.fixture def validator_results(): - dir = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.join(dir, '../', 'runtime/validation/faux-dev/result.json')) as f: - return json.load(f) + dir = os.path.dirname(os.path.abspath(__file__)) + with open(os.path.join(dir, + '../', + 'runtime/validation/faux-dev/result.json'), + encoding='utf-8') as f: + return json.load(f) @pytest.mark.skip(reason="requires internet") def test_internet_connectivity(container_data): - assert container_data['network']['internet'] == 200 + assert container_data['network']['internet'] == 200 def test_dhcp_ntp_option(container_data): - """ Check DHCP gives NTP server as option """ - assert container_data['dhcp']['ntp-servers'] == NTP_SERVER + """ Check DHCP gives NTP server as option """ + assert container_data['dhcp']['ntp-servers'] == NTP_SERVER def test_dhcp_dns_option(container_data): - assert container_data['dhcp']['domain-name-servers'] == DNS_SERVER + assert container_data['dhcp']['domain-name-servers'] == DNS_SERVER def test_assigned_ipv4_address(container_data): - assert int(container_data['network']['ipv4'].split('.')[-1][:-3]) > 10 + assert int(container_data['network']['ipv4'].split('.')[-1][:-3]) > 10 def test_ntp_server_reachable(container_data): - assert not 'no servers' in container_data['ntp_offset'] + assert not 'no servers' in container_data['ntp_offset'] def test_dns_server_reachable(container_data): - assert not 'no servers' in container_data['dns_response'] + assert not 'no servers' in container_data['dns_response'] def test_dns_server_resolves(container_data): - assert re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', - container_data['dns_response']) + assert re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', + container_data['dns_response']) @pytest.mark.skip(reason="requires internet") def test_validator_results_compliant(validator_results): - results = [True if x['result'] == 'compliant' else False - for x in validator_results['results']] - assert all(results) + results = [True if x['result'] == 'compliant' else False + for x in validator_results['results']] + assert all(results) From b91fff541f95659ef9259df9a3f72e20cda9b6c0 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Tue, 30 May 2023 04:09:17 -0700 Subject: [PATCH 22/48] Pylint (#32) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting --------- Co-authored-by: Jacob Boddey --- framework/test_runner.py | 45 ++- net_orc/network/modules/ntp/ntp-server.py | 307 ------------------ .../network/modules/ovs/python/src/logger.py | 9 +- .../modules/ovs/python/src/ovs_control.py | 58 ++-- net_orc/network/modules/ovs/python/src/run.py | 19 +- .../network/modules/ovs/python/src/util.py | 22 +- net_orc/python/src/logger.py | 31 -- .../base/python/src/grpc/start_server.py | 29 +- .../modules/base/python/src/test_module.py | 13 +- test_orc/modules/base/python/src/util.py | 11 +- .../baseline/python/src/baseline_module.py | 4 +- test_orc/modules/baseline/python/src/run.py | 29 +- test_orc/modules/dns/python/src/dns_module.py | 36 +- test_orc/modules/dns/python/src/run.py | 11 +- .../modules/nmap/python/src/nmap_module.py | 18 +- test_orc/modules/nmap/python/src/run.py | 29 +- test_orc/python/src/test_orchestrator.py | 2 +- testing/test_baseline.py | 4 +- 18 files changed, 173 insertions(+), 504 deletions(-) delete mode 100644 net_orc/network/modules/ntp/ntp-server.py delete mode 100644 net_orc/python/src/logger.py diff --git a/framework/test_runner.py b/framework/test_runner.py index 95f3e4208..0733d4353 100644 --- a/framework/test_runner.py +++ b/framework/test_runner.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - """Wrapper for the TestRun that simplifies virtual testing procedure by allowing direct calling from the command line. @@ -16,11 +15,15 @@ LOGGER = logger.get_logger("runner") + class TestRunner: """Controls and starts the Test Run application.""" - def __init__(self, config_file=None, validate=True, - net_only=False, single_intf=False): + def __init__(self, + config_file=None, + validate=True, + net_only=False, + single_intf=False): self._register_exits() self.test_run = TestRun(config_file=config_file, validate=validate, @@ -50,22 +53,34 @@ def start(self): self.test_run.start() LOGGER.info("Test Run has finished") -def parse_args(argv): - parser = argparse.ArgumentParser(description="Test Run", - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-f", "--config-file", default=None, - help="Define the configuration file for Test Run and Network Orchestrator") - parser.add_argument("--no-validate", action="store_true", - help="Turn off the validation of the network after network boot") - parser.add_argument("-net", "--net-only", action="store_true", - help="Run the network only, do not run tests") - parser.add_argument("--single-intf", action="store_true", - help="Single interface mode (experimental)") + +def parse_args(): + parser = argparse.ArgumentParser( + description="Test Run", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + "-f", + "--config-file", + default=None, + help="Define the configuration file for Test Run and Network Orchestrator" + ) + parser.add_argument( + "--no-validate", + action="store_true", + help="Turn off the validation of the network after network boot") + parser.add_argument("-net", + "--net-only", + action="store_true", + help="Run the network only, do not run tests") + parser.add_argument("--single-intf", + action="store_true", + help="Single interface mode (experimental)") parsed_args = parser.parse_known_args()[0] return parsed_args + if __name__ == "__main__": - args = parse_args(sys.argv) + args = parse_args() runner = TestRunner(config_file=args.config_file, validate=not args.no_validate, net_only=args.net_only, diff --git a/net_orc/network/modules/ntp/ntp-server.py b/net_orc/network/modules/ntp/ntp-server.py deleted file mode 100644 index 9d6a6da8e..000000000 --- a/net_orc/network/modules/ntp/ntp-server.py +++ /dev/null @@ -1,307 +0,0 @@ -import datetime -import socket -import struct -import time -import queue - -import threading -import select - -taskQueue = queue.Queue() -stop_flag = False - -def system_to_ntp_time(timestamp): - """Convert a system time to a NTP time. - - Parameters: - timestamp -- timestamp in system time - - Returns: - corresponding NTP time - """ - return timestamp + NTP.NTP_DELTA - -def _to_int(timestamp): - """Return the integral part of a timestamp. - - Parameters: - timestamp -- NTP timestamp - - Retuns: - integral part - """ - return int(timestamp) - -def _to_frac(timestamp, n=32): - """Return the fractional part of a timestamp. - - Parameters: - timestamp -- NTP timestamp - n -- number of bits of the fractional part - - Retuns: - fractional part - """ - return int(abs(timestamp - _to_int(timestamp)) * 2**n) - -def _to_time(integ, frac, n=32): - """Return a timestamp from an integral and fractional part. - - Parameters: - integ -- integral part - frac -- fractional part - n -- number of bits of the fractional part - - Retuns: - timestamp - """ - return integ + float(frac)/2**n - -class NTPException(Exception): - """Exception raised by this module.""" - pass - -class NTP: - """Helper class defining constants.""" - - _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) - """system epoch""" - _NTP_EPOCH = datetime.date(1900, 1, 1) - """NTP epoch""" - NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 - """delta between system and NTP time""" - - REF_ID_TABLE = { - 'DNC': "DNC routing protocol", - 'NIST': "NIST public modem", - 'TSP': "TSP time protocol", - 'DTS': "Digital Time Service", - 'ATOM': "Atomic clock (calibrated)", - 'VLF': "VLF radio (OMEGA, etc)", - 'callsign': "Generic radio", - 'LORC': "LORAN-C radionavidation", - 'GOES': "GOES UHF environment satellite", - 'GPS': "GPS UHF satellite positioning", - } - """reference identifier table""" - - STRATUM_TABLE = { - 0: "unspecified", - 1: "primary reference", - } - """stratum table""" - - MODE_TABLE = { - 0: "unspecified", - 1: "symmetric active", - 2: "symmetric passive", - 3: "client", - 4: "server", - 5: "broadcast", - 6: "reserved for NTP control messages", - 7: "reserved for private use", - } - """mode table""" - - LEAP_TABLE = { - 0: "no warning", - 1: "last minute has 61 seconds", - 2: "last minute has 59 seconds", - 3: "alarm condition (clock not synchronized)", - } - """leap indicator table""" - -class NTPPacket: - """NTP packet class. - - This represents an NTP packet. - """ - - _PACKET_FORMAT = "!B B B b 11I" - """packet format to pack/unpack""" - - def __init__(self, version=4, mode=3, tx_timestamp=0): - """Constructor. - - Parameters: - version -- NTP version - mode -- packet mode (client, server) - tx_timestamp -- packet transmit timestamp - """ - self.leap = 0 - """leap second indicator""" - self.version = version - """version""" - self.mode = mode - """mode""" - self.stratum = 0 - """stratum""" - self.poll = 0 - """poll interval""" - self.precision = 0 - """precision""" - self.root_delay = 0 - """root delay""" - self.root_dispersion = 0 - """root dispersion""" - self.ref_id = 0 - """reference clock identifier""" - self.ref_timestamp = 0 - """reference timestamp""" - self.orig_timestamp = 0 - self.orig_timestamp_high = 0 - self.orig_timestamp_low = 0 - """originate timestamp""" - self.recv_timestamp = 0 - """receive timestamp""" - self.tx_timestamp = tx_timestamp - self.tx_timestamp_high = 0 - self.tx_timestamp_low = 0 - """tansmit timestamp""" - - def to_data(self): - """Convert this NTPPacket to a buffer that can be sent over a socket. - - Returns: - buffer representing this packet - - Raises: - NTPException -- in case of invalid field - """ - try: - packed = struct.pack(NTPPacket._PACKET_FORMAT, - (self.leap << 6 | self.version << 3 | self.mode), - self.stratum, - self.poll, - self.precision, - _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), - _to_int(self.root_dispersion) << 16 | - _to_frac(self.root_dispersion, 16), - self.ref_id, - _to_int(self.ref_timestamp), - _to_frac(self.ref_timestamp), - #Change by lichen, avoid loss of precision - self.orig_timestamp_high, - self.orig_timestamp_low, - _to_int(self.recv_timestamp), - _to_frac(self.recv_timestamp), - _to_int(self.tx_timestamp), - _to_frac(self.tx_timestamp)) - except struct.error: - raise NTPException("Invalid NTP packet fields.") - return packed - - def from_data(self, data): - """Populate this instance from a NTP packet payload received from - the network. - - Parameters: - data -- buffer payload - - Raises: - NTPException -- in case of invalid packet format - """ - try: - unpacked = struct.unpack(NTPPacket._PACKET_FORMAT, - data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) - except struct.error: - raise NTPException("Invalid NTP packet.") - - self.leap = unpacked[0] >> 6 & 0x3 - self.version = unpacked[0] >> 3 & 0x7 - self.mode = unpacked[0] & 0x7 - self.stratum = unpacked[1] - self.poll = unpacked[2] - self.precision = unpacked[3] - self.root_delay = float(unpacked[4])/2**16 - self.root_dispersion = float(unpacked[5])/2**16 - self.ref_id = unpacked[6] - self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) - self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) - self.orig_timestamp_high = unpacked[9] - self.orig_timestamp_low = unpacked[10] - self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) - self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) - self.tx_timestamp_high = unpacked[13] - self.tx_timestamp_low = unpacked[14] - - def GetTxTimeStamp(self): - return (self.tx_timestamp_high,self.tx_timestamp_low) - - def SetOriginTimeStamp(self,high,low): - self.orig_timestamp_high = high - self.orig_timestamp_low = low - -class RecvThread(threading.Thread): - - def __init__(self,socket): - threading.Thread.__init__(self) - self.socket = socket - - def run(self): - global t,stop_flag - while True: - if stop_flag == True: - print("RecvThread Ended") - break - rlist,wlist,elist = select.select([self.socket],[],[],1) - if len(rlist) != 0: - print("Received %d packets" % len(rlist)) - for tempSocket in rlist: - try: - data,addr = tempSocket.recvfrom(1024) - recvTimestamp = recvTimestamp = system_to_ntp_time(time.time()) - taskQueue.put((data,addr,recvTimestamp)) - except socket.error as msg: - print(msg) - -class WorkThread(threading.Thread): - - def __init__(self,socket): - threading.Thread.__init__(self) - self.socket = socket - - def run(self): - global taskQueue,stop_flag - while True: - if stop_flag is True: - print("WorkThread Ended") - break - try: - data,addr,recvTimestamp = taskQueue.get(timeout=1) - recvPacket = NTPPacket() - recvPacket.from_data(data) - timeStamp_high,timeStamp_low = recvPacket.GetTxTimeStamp() - sendPacket = NTPPacket(version=4,mode=4) - sendPacket.stratum = 2 - sendPacket.poll = 10 - sendPacket.ref_timestamp = recvTimestamp-5 - sendPacket.SetOriginTimeStamp(timeStamp_high,timeStamp_low) - sendPacket.recv_timestamp = recvTimestamp - sendPacket.tx_timestamp = system_to_ntp_time(time.time()) - socket.sendto(sendPacket.to_data(),addr) - print("Sent to %s:%d" % (addr[0],addr[1])) - except queue.Empty: - continue - -listen_ip = "0.0.0.0" -listen_port = 123 -socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) -socket.bind((listen_ip,listen_port)) -print(f"local socket: {socket.getsockname()}") -recvThread = RecvThread(socket) -recvThread.start() -workThread = WorkThread(socket) -workThread.start() - -while True: - try: - time.sleep(0.5) - except KeyboardInterrupt: - print("Exiting...") - stop_flag = True - recvThread.join() - workThread.join() - #socket.close() - print("Exited") - break diff --git a/net_orc/network/modules/ovs/python/src/logger.py b/net_orc/network/modules/ovs/python/src/logger.py index 566a5c75e..23e697e43 100644 --- a/net_orc/network/modules/ovs/python/src/logger.py +++ b/net_orc/network/modules/ovs/python/src/logger.py @@ -1,14 +1,13 @@ -#!/usr/bin/env python3 - +"""Sets up the logger to be used for the ovs modules.""" import logging LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' # Set level to debug if set as runtime flag -logging.basicConfig(format=_LOG_FORMAT, - datefmt=_DATE_FORMAT, +logging.basicConfig(format=_LOG_FORMAT, + datefmt=_DATE_FORMAT, level=logging.INFO) def get_logger(name): diff --git a/net_orc/network/modules/ovs/python/src/ovs_control.py b/net_orc/network/modules/ovs/python/src/ovs_control.py index 53406cef2..765c50f92 100644 --- a/net_orc/network/modules/ovs/python/src/ovs_control.py +++ b/net_orc/network/modules/ovs/python/src/ovs_control.py @@ -1,32 +1,31 @@ -#!/usr/bin/env python3 - +"""OVS Control Module""" import json import logger import util -CONFIG_FILE = "/ovs/conf/system.json" -DEVICE_BRIDGE = "tr-d" -INTERNET_BRIDGE = "tr-c" +CONFIG_FILE = '/ovs/conf/system.json' +DEVICE_BRIDGE = 'tr-d' +INTERNET_BRIDGE = 'tr-c' LOGGER = logger.get_logger('ovs_ctrl') class OVSControl: - + """OVS Control""" def __init__(self): self._int_intf = None self._dev_intf = None self._load_config() def add_bridge(self, bridge_name): - LOGGER.info("Adding OVS Bridge: " + bridge_name) + LOGGER.info('Adding OVS Bridge: ' + bridge_name) # Create the bridge using ovs-vsctl commands # Uses the --may-exist option to prevent failures # if this bridge already exists by this name it won't fail # and will not modify the existing bridge - success=util.run_command("ovs-vsctl --may-exist add-br " + bridge_name) + success=util.run_command('ovs-vsctl --may-exist add-br ' + bridge_name) return success def add_port(self,port, bridge_name): - LOGGER.info("Adding Port " + port + " to OVS Bridge: " + bridge_name) + LOGGER.info('Adding Port ' + port + ' to OVS Bridge: ' + bridge_name) # Add a port to the bridge using ovs-vsctl commands # Uses the --may-exist option to prevent failures # if this port already exists on the bridge and will not @@ -36,7 +35,7 @@ def add_port(self,port, bridge_name): return success def create_net(self): - LOGGER.info("Creating baseline network") + LOGGER.info('Creating baseline network') # Create data plane self.add_bridge(DEVICE_BRIDGE) @@ -45,7 +44,7 @@ def create_net(self): self.add_bridge(INTERNET_BRIDGE) # Remove IP from internet adapter - self.set_interface_ip(self._int_intf,"0.0.0.0") + self.set_interface_ip(self._int_intf,'0.0.0.0') # Add external interfaces to data and control plane self.add_port(self._dev_intf,DEVICE_BRIDGE) @@ -56,48 +55,49 @@ def create_net(self): self.set_bridge_up(INTERNET_BRIDGE) def delete_bridge(self,bridge_name): - LOGGER.info("Deleting OVS Bridge: " + bridge_name) + LOGGER.info('Deleting OVS Bridge: ' + bridge_name) # Delete the bridge using ovs-vsctl commands # Uses the --if-exists option to prevent failures # if this bridge does not exists - success=util.run_command("ovs-vsctl --if-exists del-br " + bridge_name) + success=util.run_command('ovs-vsctl --if-exists del-br ' + bridge_name) return success def _load_config(self): - LOGGER.info("Loading Configuration: " + CONFIG_FILE) - config_json = json.load(open(CONFIG_FILE, "r", encoding="utf-8")) - self._int_intf = config_json["internet_intf"] - self._dev_intf = config_json["device_intf"] - LOGGER.info("Configuration Loaded") - LOGGER.info("Internet Interface: " + self._int_intf) - LOGGER.info("Device Interface: " + self._dev_intf) + LOGGER.info('Loading Configuration: ' + CONFIG_FILE) + with open(CONFIG_FILE, 'r', encoding='utf-8') as conf_file: + config_json = json.load(conf_file) + self._int_intf = config_json['internet_intf'] + self._dev_intf = config_json['device_intf'] + LOGGER.info('Configuration Loaded') + LOGGER.info('Internet Interface: ' + self._int_intf) + LOGGER.info('Device Interface: ' + self._dev_intf) def restore_net(self): - LOGGER.info("Restoring Network...") + LOGGER.info('Restoring Network...') # Delete data plane self.delete_bridge(DEVICE_BRIDGE) # Delete control plane self.delete_bridge(INTERNET_BRIDGE) - LOGGER.info("Network is restored") + LOGGER.info('Network is restored') def show_config(self): - LOGGER.info("Show current config of OVS") - success=util.run_command("ovs-vsctl show") + LOGGER.info('Show current config of OVS') + success=util.run_command('ovs-vsctl show') return success def set_bridge_up(self,bridge_name): - LOGGER.info("Setting Bridge device to up state: " + bridge_name) - success=util.run_command("ip link set dev " + bridge_name + " up") + LOGGER.info('Setting Bridge device to up state: ' + bridge_name) + success=util.run_command('ip link set dev ' + bridge_name + ' up') return success def set_interface_ip(self,interface, ip_addr): - LOGGER.info("Setting interface " + interface + " to " + ip_addr) + LOGGER.info('Setting interface ' + interface + ' to ' + ip_addr) # Remove IP from internet adapter - util.run_command("ifconfig " + interface + " 0.0.0.0") + util.run_command('ifconfig ' + interface + ' 0.0.0.0') -if __name__ == "__main__": +if __name__ == '__main__': ovs = OVSControl() ovs.create_net() ovs.show_config() diff --git a/net_orc/network/modules/ovs/python/src/run.py b/net_orc/network/modules/ovs/python/src/run.py index f91c2dfeb..5787a74e6 100644 --- a/net_orc/network/modules/ovs/python/src/run.py +++ b/net_orc/network/modules/ovs/python/src/run.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 - +"""Run OVS module""" import logger import signal import sys @@ -10,7 +9,7 @@ LOGGER = logger.get_logger('ovs_control_run') class OVSControlRun: - + """Run the OVS module.""" def __init__(self): signal.signal(signal.SIGINT, self.handler) @@ -18,7 +17,7 @@ def __init__(self): signal.signal(signal.SIGABRT, self.handler) signal.signal(signal.SIGQUIT, self.handler) - LOGGER.info("Starting OVS Control") + LOGGER.info('Starting OVS Control') # Get all components ready self._ovs_control = OVSControl() @@ -30,11 +29,11 @@ def __init__(self): self._ovs_control.show_config() # Get network ready (via Network orchestrator) - LOGGER.info("Network is ready. Waiting for device information...") + LOGGER.info('Network is ready. Waiting for device information...') #Loop forever until process is stopped while True: - LOGGER.info("OVS Running") + LOGGER.info('OVS Running') time.sleep(1000) # TODO: This time should be configurable (How long to hold before exiting, @@ -44,11 +43,11 @@ def __init__(self): # Tear down network #self._ovs_control.shutdown() - def handler(self, signum, frame): - LOGGER.info("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.info("Exit signal received: " + str(signum)) + def handler(self, signum): + LOGGER.info('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.info('Exit signal received: ' + str(signum)) if (signum == 2 or signal == signal.SIGTERM): - LOGGER.info("Exit signal received. Restoring network...") + LOGGER.info('Exit signal received. Restoring network...') self._ovs_control.shutdown() sys.exit(1) diff --git a/net_orc/network/modules/ovs/python/src/util.py b/net_orc/network/modules/ovs/python/src/util.py index c9eba39ff..a3ebbb10a 100644 --- a/net_orc/network/modules/ovs/python/src/util.py +++ b/net_orc/network/modules/ovs/python/src/util.py @@ -1,21 +1,23 @@ +"""Provides basic utilities for a ovs module.""" import subprocess import logger +LOGGER = logger.get_logger('util') def run_command(cmd): success = False - LOGGER = logger.get_logger('util') - process = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, + process = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() - if process.returncode !=0: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - LOGGER.error("Command Failed: " + cmd) - LOGGER.error("Error: " + err_msg) + if process.returncode != 0: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) else: - succ_msg = "%s. Code: %s" % (stdout.strip().decode('utf-8'), process.returncode) - LOGGER.info("Command Success: " + cmd) - LOGGER.info("Success: " + succ_msg) + msg = stdout.strip().decode('utf-8') + succ_msg = f'{msg}. Code: {process.returncode}' + LOGGER.info('Command Success: ' + cmd) + LOGGER.info('Success: ' + succ_msg) success = True return success diff --git a/net_orc/python/src/logger.py b/net_orc/python/src/logger.py deleted file mode 100644 index aaf690c8a..000000000 --- a/net_orc/python/src/logger.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Sets up the logger to be used for the network orchestrator.""" -import json -import logging -import os - -LOGGERS = {} -_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' -_DATE_FORMAT = '%b %02d %H:%M:%S' -_DEFAULT_LEVEL = logging.INFO -_CONF_DIR = 'conf' -_CONF_FILE_NAME = 'system.json' - -# Set log level -try: - - with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), - encoding='UTF-8') as config_json_file: - system_conf_json = json.load(config_json_file) - - log_level_str = system_conf_json['log_level'] - LOG_LEVEL = logging.getLevelName(log_level_str) -except OSError: - LOG_LEVEL = _DEFAULT_LEVEL - -logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=LOG_LEVEL) - - -def get_logger(name): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - return LOGGERS[name] diff --git a/test_orc/modules/base/python/src/grpc/start_server.py b/test_orc/modules/base/python/src/grpc/start_server.py index 970da67fc..b4016c831 100644 --- a/test_orc/modules/base/python/src/grpc/start_server.py +++ b/test_orc/modules/base/python/src/grpc/start_server.py @@ -1,38 +1,37 @@ +"""Base class for starting the gRPC server for a network module.""" from concurrent import futures import grpc import proto.grpc_pb2_grpc as pb2_grpc -import proto.grpc_pb2 as pb2 from network_service import NetworkService -import sys import argparse -DEFAULT_PORT = "5001" +DEFAULT_PORT = '5001' -def serve(PORT): +def serve(port): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) - server.add_insecure_port("[::]:" + PORT) + server.add_insecure_port('[::]:' + port) server.start() server.wait_for_termination() -def run(argv): +def run(): parser = argparse.ArgumentParser( - description="GRPC Server for Network Module", + description='GRPC Server for Network Module', formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("-p", - "--port", + parser.add_argument('-p', + '--port', default=DEFAULT_PORT, - help="Define the default port to run the server on.") + help='Define the default port to run the server on.') args = parser.parse_args() - PORT = args.port + port = args.port - print("gRPC server starting on port " + PORT) - serve(PORT) + print('gRPC server starting on port ' + port) + serve(port) -if __name__ == "__main__": - run(sys.argv) +if __name__ == '__main__': + run() diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 34af4cbb4..8e10a3637 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -1,3 +1,4 @@ +"""Base class for all core test module functions""" import json import logger import os @@ -91,20 +92,18 @@ def run_tests(self): self._write_results(json_results) def _read_config(self): - f = open(CONF_FILE, encoding='utf-8') - config = json.load(f) - f.close() + with open(CONF_FILE, encoding='utf-8') as f: + config = json.load(f) return config def _write_results(self, results): results_file = RESULTS_DIR + self._module_name + '-result.json' LOGGER.info('Writing results to ' + results_file) - f = open(results_file, 'w', encoding='utf-8') - f.write(results) - f.close() + with open(results_file, 'w', encoding='utf-8') as f: + f.write(results) def _get_device_ipv4(self): - command = f"""/testrun/bin/get_ipv4_addr {self._ipv4_subnet} + command = f"""/testrun/bin/get_ipv4_addr {self._ipv4_subnet} {self._device_mac.upper()}""" text = util.run_command(command)[0] if text: diff --git a/test_orc/modules/base/python/src/util.py b/test_orc/modules/base/python/src/util.py index 557f450a6..d387db796 100644 --- a/test_orc/modules/base/python/src/util.py +++ b/test_orc/modules/base/python/src/util.py @@ -1,7 +1,9 @@ +"""Provides basic utilities for a test module.""" import subprocess import shlex import logger +LOGGER = logger.get_logger('util') # Runs a process at the os level # By default, returns the standard output and error output @@ -11,18 +13,17 @@ # by any return code from the process other than zero. def run_command(cmd, output=True): success = False - LOGGER = logger.get_logger('util') process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() if process.returncode != 0 and output: - err_msg = "%s. Code: %s" % (stderr.strip(), process.returncode) - LOGGER.error("Command Failed: " + cmd) - LOGGER.error("Error: " + err_msg) + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) else: success = True if output: - return stdout.strip().decode("utf-8"), stderr + return stdout.strip().decode('utf-8'), stderr else: return success diff --git a/test_orc/modules/baseline/python/src/baseline_module.py b/test_orc/modules/baseline/python/src/baseline_module.py index 9816bd28a..083123436 100644 --- a/test_orc/modules/baseline/python/src/baseline_module.py +++ b/test_orc/modules/baseline/python/src/baseline_module.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 - +"""Baseline test module""" from test_module import TestModule LOG_NAME = "test_baseline" @@ -27,4 +26,3 @@ def _baseline_fail(self): def _baseline_skip(self): LOGGER.info("Running baseline pass test") LOGGER.info("Baseline pass test finished") - return None diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py index 89b3a08e4..1892ed8ae 100644 --- a/test_orc/modules/baseline/python/src/run.py +++ b/test_orc/modules/baseline/python/src/run.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 - +"""Run Baseline module""" import argparse import signal import sys @@ -21,29 +20,29 @@ def __init__(self, module): signal.signal(signal.SIGABRT, self._handler) signal.signal(signal.SIGQUIT, self._handler) - LOGGER.info("Starting Baseline Module") + LOGGER.info('Starting Baseline Module') self._test_module = BaselineModule(module) self._test_module.run_tests() - def _handler(self, signum, *other): - LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.debug("Exit signal received: " + str(signum)) + def _handler(self, signum): + LOGGER.debug('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.debug('Exit signal received: ' + str(signum)) if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received. Stopping test module...") - LOGGER.info("Test module stopped") + LOGGER.info('Exit signal received. Stopping test module...') + LOGGER.info('Test module stopped') sys.exit(1) -def run(argv): +def run(): parser = argparse.ArgumentParser( - description="Baseline Module Help", + description='Baseline Module Help', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( - "-m", - "--module", - help="Define the module name to be used to create the log file") + '-m', + '--module', + help='Define the module name to be used to create the log file') args = parser.parse_args() @@ -52,5 +51,5 @@ def run(argv): BaselineModuleRunner(args.module.strip()) -if __name__ == "__main__": - run(sys.argv) +if __name__ == '__main__': + run() diff --git a/test_orc/modules/dns/python/src/dns_module.py b/test_orc/modules/dns/python/src/dns_module.py index b161805a5..58ce48123 100644 --- a/test_orc/modules/dns/python/src/dns_module.py +++ b/test_orc/modules/dns/python/src/dns_module.py @@ -1,52 +1,52 @@ -#!/usr/bin/env python3 - +"""DNS test module""" import subprocess from test_module import TestModule -LOG_NAME = "test_dns" -CAPTURE_FILE = "/runtime/network/dns.pcap" +LOG_NAME = 'test_dns' +CAPTURE_FILE = '/runtime/network/dns.pcap' LOGGER = None class DNSModule(TestModule): + """DNS Test module""" def __init__(self, module): super().__init__(module_name=module, log_name=LOG_NAME) - self._dns_server = "10.10.10.4" + self._dns_server = '10.10.10.4' global LOGGER LOGGER = self._get_logger() def _check_dns_traffic(self, tcpdump_filter): to_dns = self._exec_tcpdump(tcpdump_filter) num_query_dns = len(to_dns) - LOGGER.info("DNS queries found: " + str(num_query_dns)) + LOGGER.info('DNS queries found: ' + str(num_query_dns)) dns_traffic_detected = len(to_dns) > 0 - LOGGER.info("DNS traffic detected: " + str(dns_traffic_detected)) + LOGGER.info('DNS traffic detected: ' + str(dns_traffic_detected)) return dns_traffic_detected def _dns_network_from_dhcp(self): - LOGGER.info("Checking DNS traffic for configured DHCP DNS server: " + + LOGGER.info('Checking DNS traffic for configured DHCP DNS server: ' + self._dns_server) # Check if the device DNS traffic is to appropriate server - tcpdump_filter = "dst port 53 and dst host {} and ether src {}".format( - self._dns_server, self._device_mac) + tcpdump_filter = (f'dst port 53 and dst host {self._dns_server}', + f' and ether src {self._device_mac}') result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) - LOGGER.info("DNS traffic detected to configured DHCP DNS server: " + + LOGGER.info('DNS traffic detected to configured DHCP DNS server: ' + str(result)) return result def _dns_network_from_device(self): - LOGGER.info("Checking DNS traffic from device: " + self._device_mac) + LOGGER.info('Checking DNS traffic from device: ' + self._device_mac) # Check if the device DNS traffic is to appropriate server - tcpdump_filter = "dst port 53 and ether src {}".format(self._device_mac) + tcpdump_filter = f'dst port 53 and ether src {self._device_mac}' result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) - LOGGER.info("DNS traffic detected from device: " + str(result)) + LOGGER.info('DNS traffic detected from device: ' + str(result)) return result def _exec_tcpdump(self, tcpdump_filter): @@ -57,9 +57,9 @@ def _exec_tcpdump(self, tcpdump_filter): Returns List of packets matching the filter """ - command = "tcpdump -tttt -n -r {} {}".format(CAPTURE_FILE, tcpdump_filter) + command = f'tcpdump -tttt -n -r {CAPTURE_FILE} {tcpdump_filter}' - LOGGER.debug("tcpdump command: " + command) + LOGGER.debug('tcpdump command: ' + command) process = subprocess.Popen(command, universal_newlines=True, @@ -68,9 +68,9 @@ def _exec_tcpdump(self, tcpdump_filter): stderr=subprocess.PIPE) text = str(process.stdout.read()).rstrip() - LOGGER.debug("tcpdump response: " + text) + LOGGER.debug('tcpdump response: ' + text) if text: - return text.split("\n") + return text.split('\n') return [] diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py index 06b8aa571..4cd991804 100644 --- a/test_orc/modules/dns/python/src/run.py +++ b/test_orc/modules/dns/python/src/run.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 - +"""Run DNS test module""" import argparse import signal import sys @@ -13,7 +12,7 @@ class DNSModuleRunner: - + """Run the DNS module tests.""" def __init__(self, module): signal.signal(signal.SIGINT, self._handler) @@ -33,7 +32,7 @@ def add_logger(self, module): global LOGGER LOGGER = logger.get_logger(LOG_NAME, module) - def _handler(self, signum, *other): + def _handler(self, signum): LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) LOGGER.debug("Exit signal received: " + str(signum)) if signum in (2, signal.SIGTERM): @@ -42,7 +41,7 @@ def _handler(self, signum, *other): sys.exit(1) -def run(argv): +def run(): parser = argparse.ArgumentParser( description="Test Module DNS", formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -60,4 +59,4 @@ def run(argv): if __name__ == "__main__": - run(sys.argv) + run() diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py index cd6ec276b..876343a0f 100644 --- a/test_orc/modules/nmap/python/src/nmap_module.py +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 - +"""NMAP test module""" import time import util import json @@ -11,7 +10,7 @@ class NmapModule(TestModule): - + """NMAP Test module""" def __init__(self, module): super().__init__(module_name=module, log_name=LOG_NAME) self._unallowed_ports = [] @@ -82,13 +81,13 @@ def _check_scan_results(self, test_config): if self._script_scan_results is not None: scan_results.update(self._script_scan_results) if port_config is not None: - for port in port_config: + for port, config in port_config.items(): result = None LOGGER.info("Checking port: " + str(port)) - LOGGER.debug("Port config: " + str(port_config[port])) + LOGGER.debug("Port config: " + str(config)) if port in scan_results: if scan_results[port]["state"] == "open": - if not port_config[port]["allowed"]: + if not config["allowed"]: LOGGER.info("Unallowed port open") self._unallowed_ports.append(str(port)) result = False @@ -103,10 +102,9 @@ def _check_scan_results(self, test_config): result = True if result is not None: - port_config[port][ - "result"] = "compliant" if result else "non-compliant" + config["result"] = "compliant" if result else "non-compliant" else: - port_config[port]["result"] = "skipped" + config["result"] = "skipped" def _scan_scripts(self, tests): scan_results = {} @@ -174,7 +172,7 @@ def _scan_tcp_ports(self, tests): ports_to_scan += "," + ",".join(ports) LOGGER.info("Running nmap TCP port scan") LOGGER.info("TCP ports: " + str(ports_to_scan)) - nmap_results = util.run_command(f"""nmap -sT -sV -Pn -v -p {ports_to_scan} + nmap_results = util.run_command(f"""nmap -sT -sV -Pn -v -p {ports_to_scan} --version-intensity 7 -T4 {self._device_ipv4_addr}""")[0] LOGGER.info("TCP port scan complete") self._scan_tcp_results = self._process_nmap_results( diff --git a/test_orc/modules/nmap/python/src/run.py b/test_orc/modules/nmap/python/src/run.py index 4ed1f533c..959e30f87 100644 --- a/test_orc/modules/nmap/python/src/run.py +++ b/test_orc/modules/nmap/python/src/run.py @@ -1,5 +1,4 @@ -#!/usr/bin/env python3 - +"""Run NMAP test module""" import argparse import signal import sys @@ -20,29 +19,29 @@ def __init__(self, module): signal.signal(signal.SIGABRT, self._handler) signal.signal(signal.SIGQUIT, self._handler) - LOGGER.info("Starting nmap Module") + LOGGER.info('Starting nmap Module') self._test_module = NmapModule(module) self._test_module.run_tests() - def _handler(self, signum, *other): - LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) - LOGGER.debug("Exit signal received: " + str(signum)) + def _handler(self, signum): + LOGGER.debug('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.debug('Exit signal received: ' + str(signum)) if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received. Stopping test module...") - LOGGER.info("Test module stopped") + LOGGER.info('Exit signal received. Stopping test module...') + LOGGER.info('Test module stopped') sys.exit(1) -def run(argv): +def run(): parser = argparse.ArgumentParser( - description="Nmap Module Help", + description='Nmap Module Help', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( - "-m", - "--module", - help="Define the module name to be used to create the log file") + '-m', + '--module', + help='Define the module name to be used to create the log file') args = parser.parse_args() @@ -51,5 +50,5 @@ def run(argv): NmapModuleRunner(args.module.strip()) -if __name__ == "__main__": - run(sys.argv) +if __name__ == '__main__': + run() diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 5cc14ae85..4b65bae12 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -67,7 +67,7 @@ def _generate_results(self, device): container_runtime_dir = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/" + module.name) - results_file = container_runtime_dir + "/" + module.name + "-result.json" + results_file = f"{container_runtime_dir}/{module.name}-result.json" try: with open(results_file, "r", encoding="UTF-8") as f: module_results = json.load(f) diff --git a/testing/test_baseline.py b/testing/test_baseline.py index 6f6240c27..b356983dd 100644 --- a/testing/test_baseline.py +++ b/testing/test_baseline.py @@ -23,7 +23,7 @@ def validator_results(): encoding='utf-8') as f: return json.load(f) -@pytest.mark.skip(reason="requires internet") +@pytest.mark.skip(reason='requires internet') def test_internet_connectivity(container_data): assert container_data['network']['internet'] == 200 @@ -47,7 +47,7 @@ def test_dns_server_resolves(container_data): assert re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', container_data['dns_response']) -@pytest.mark.skip(reason="requires internet") +@pytest.mark.skip(reason='requires internet') def test_validator_results_compliant(validator_results): results = [True if x['result'] == 'compliant' else False for x in validator_results['results']] From b84a026c4fe537fc1b1b5be1b2ccf5727236395f Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Tue, 30 May 2023 15:01:16 -0700 Subject: [PATCH 23/48] Add license header (#36) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting * Add ovs control into network orchestrator * Add verification methods for the base network * Add network validation and misc logging updates * remove ovs module * add license header to all python files --------- Co-authored-by: Jacob Boddey Co-authored-by: SuperJonotron --- framework/device.py | 14 ++ framework/logger.py | 14 ++ framework/test_runner.py | 15 +- framework/testrun.py | 14 ++ net_orc/docker-compose.yml | 64 ------ .../devices/faux-dev/python/src/dhcp_check.py | 14 ++ .../devices/faux-dev/python/src/dns_check.py | 14 ++ .../faux-dev/python/src/gateway_check.py | 14 ++ .../devices/faux-dev/python/src/logger.py | 14 ++ .../devices/faux-dev/python/src/ntp_check.py | 14 ++ .../devices/faux-dev/python/src/run.py | 14 ++ .../devices/faux-dev/python/src/util.py | 14 ++ .../base/python/src/grpc/start_server.py | 14 ++ .../network/modules/base/python/src/logger.py | 14 ++ .../dhcp-1/python/src/grpc/dhcp_config.py | 14 ++ .../dhcp-1/python/src/grpc/network_service.py | 14 ++ .../dhcp-2/python/src/grpc/dhcp_config.py | 14 ++ .../dhcp-2/python/src/grpc/network_service.py | 14 ++ .../modules/ntp/python/src/ntp_server.py | 14 ++ .../modules/ovs/bin/start_network_service | 22 --- .../modules/ovs/conf/module_config.json | 24 --- net_orc/network/modules/ovs/ovs.Dockerfile | 20 -- .../modules/ovs/python/requirements.txt | 0 .../network/modules/ovs/python/src/logger.py | 16 -- .../modules/ovs/python/src/ovs_control.py | 105 ---------- net_orc/network/modules/ovs/python/src/run.py | 54 ----- .../network/modules/ovs/python/src/util.py | 23 --- .../radius/python/src/authenticator.py | 14 ++ .../template/python/src/template_main.py | 14 ++ net_orc/python/src/listener.py | 14 ++ net_orc/python/src/network_device.py | 14 ++ net_orc/python/src/network_event.py | 14 ++ net_orc/python/src/network_orchestrator.py | 109 +++++----- net_orc/python/src/network_validator.py | 14 ++ net_orc/python/src/ovs_control.py | 186 ++++++++++++++++++ net_orc/python/src/util.py | 14 ++ .../base/python/src/grpc/start_server.py | 14 ++ test_orc/modules/base/python/src/logger.py | 14 ++ .../modules/base/python/src/test_module.py | 14 ++ test_orc/modules/base/python/src/util.py | 14 ++ .../baseline/python/src/baseline_module.py | 14 ++ test_orc/modules/baseline/python/src/run.py | 14 ++ test_orc/modules/dns/python/src/dns_module.py | 14 ++ test_orc/modules/dns/python/src/run.py | 14 ++ .../modules/nmap/python/src/nmap_module.py | 14 ++ test_orc/modules/nmap/python/src/run.py | 14 ++ test_orc/python/src/module.py | 14 ++ test_orc/python/src/runner.py | 14 ++ test_orc/python/src/test_orchestrator.py | 14 ++ testing/test_baseline.py | 14 ++ 50 files changed, 784 insertions(+), 386 deletions(-) delete mode 100644 net_orc/docker-compose.yml delete mode 100644 net_orc/network/modules/ovs/bin/start_network_service delete mode 100644 net_orc/network/modules/ovs/conf/module_config.json delete mode 100644 net_orc/network/modules/ovs/ovs.Dockerfile delete mode 100644 net_orc/network/modules/ovs/python/requirements.txt delete mode 100644 net_orc/network/modules/ovs/python/src/logger.py delete mode 100644 net_orc/network/modules/ovs/python/src/ovs_control.py delete mode 100644 net_orc/network/modules/ovs/python/src/run.py delete mode 100644 net_orc/network/modules/ovs/python/src/util.py create mode 100644 net_orc/python/src/ovs_control.py diff --git a/framework/device.py b/framework/device.py index eef275d54..53263e6a6 100644 --- a/framework/device.py +++ b/framework/device.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Track device object information.""" from network_device import NetworkDevice diff --git a/framework/logger.py b/framework/logger.py index d4702cb38..cb71c9fdd 100644 --- a/framework/logger.py +++ b/framework/logger.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Manages stream and file loggers.""" import json import logging diff --git a/framework/test_runner.py b/framework/test_runner.py index 0733d4353..0ee5e8416 100644 --- a/framework/test_runner.py +++ b/framework/test_runner.py @@ -1,4 +1,17 @@ -#!/usr/bin/env python3 +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Wrapper for the TestRun that simplifies virtual testing procedure by allowing direct calling from the command line. diff --git a/framework/testrun.py b/framework/testrun.py index 94ad2ef9f..a818c9a45 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """The overall control of the Test Run application. This file provides the integration between all of the diff --git a/net_orc/docker-compose.yml b/net_orc/docker-compose.yml deleted file mode 100644 index 8c50d766a..000000000 --- a/net_orc/docker-compose.yml +++ /dev/null @@ -1,64 +0,0 @@ -version: "3.7" - -services: - - base: - build: - context: network/modules/base - dockerfile: base.Dockerfile - image: test-run/base - container_name: tr-ct-base - - ovs: - depends_on: - - base - build: - context: network/modules/ovs - dockerfile: ovs.Dockerfile - image: test-run/ovs - network_mode: host - container_name: tr-ct-ovs - stdin_open: true - privileged: true - volumes: - - $PWD/network/modules/ovs/python:/ovs/python - # Mount host open vswitch socket to allow container - # access to control open vswitch on the host - - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock - # Mount host network namespace to allow container - # access to assign proper namespaces to containers - - /var/run/netns:/var/run/netns - - netorch: - depends_on: - - base - build: - context: . - dockerfile: orchestrator.Dockerfile - image: test-run/orchestrator - network_mode: host - privileged: true - volumes: - - $PWD/cmd:/orchestrator/cmd - - $PWD/network:/orchestrator/network - - $PWD/python:/orchestrator/python - # Mount host docker socket to allow container access - # control docker containers on the host - - /var/run/docker.sock:/var/run/docker.sock - # Mount host open vswitch socket to allow container - # access to control open vswitch on the host - - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock - # Mount host network namespace to allow container - # access to assign proper namespaces to containers - - /var/run/netns:/var/run/netns - # Mount the host process information to allow container - # access to configure docker containers and namespaces properly - - /proc:/proc - container_name: network_orchestrator - stdin_open: true - working_dir: /orchestrator - #entrypoint: ["cmd/start"] - # Give more time for stopping so when we stop the container it has - # time to stop all network services gracefuly - stop_grace_period: 60s - entrypoint: ["python3","-u","python/src/run.py"] diff --git a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py index 82dd6e31f..565e33308 100644 --- a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py +++ b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Used to check if the DHCP server is functioning as expected""" import time diff --git a/net_orc/network/devices/faux-dev/python/src/dns_check.py b/net_orc/network/devices/faux-dev/python/src/dns_check.py index 73a72e8c8..be9c58d43 100644 --- a/net_orc/network/devices/faux-dev/python/src/dns_check.py +++ b/net_orc/network/devices/faux-dev/python/src/dns_check.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Used to check if the DNS server is functioning as expected""" import logger diff --git a/net_orc/network/devices/faux-dev/python/src/gateway_check.py b/net_orc/network/devices/faux-dev/python/src/gateway_check.py index 85fe35db0..a913993fc 100644 --- a/net_orc/network/devices/faux-dev/python/src/gateway_check.py +++ b/net_orc/network/devices/faux-dev/python/src/gateway_check.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Used to check if the Gateway server is functioning as expected""" import logger diff --git a/net_orc/network/devices/faux-dev/python/src/logger.py b/net_orc/network/devices/faux-dev/python/src/logger.py index 97d7f935a..a727ad7bb 100644 --- a/net_orc/network/devices/faux-dev/python/src/logger.py +++ b/net_orc/network/devices/faux-dev/python/src/logger.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Sets up the logger to be used for the faux-device.""" import json diff --git a/net_orc/network/devices/faux-dev/python/src/ntp_check.py b/net_orc/network/devices/faux-dev/python/src/ntp_check.py index ceef164c6..371e4464c 100644 --- a/net_orc/network/devices/faux-dev/python/src/ntp_check.py +++ b/net_orc/network/devices/faux-dev/python/src/ntp_check.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Used to check if the NTP server is functioning as expected""" import time import logger diff --git a/net_orc/network/devices/faux-dev/python/src/run.py b/net_orc/network/devices/faux-dev/python/src/run.py index 062a1a643..8f9733eb4 100644 --- a/net_orc/network/devices/faux-dev/python/src/run.py +++ b/net_orc/network/devices/faux-dev/python/src/run.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Used to run all the various validator modules for the faux-device""" import argparse diff --git a/net_orc/network/devices/faux-dev/python/src/util.py b/net_orc/network/devices/faux-dev/python/src/util.py index 6848206b4..920752217 100644 --- a/net_orc/network/devices/faux-dev/python/src/util.py +++ b/net_orc/network/devices/faux-dev/python/src/util.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Provides basic utilities for the faux-device.""" import subprocess import shlex diff --git a/net_orc/network/modules/base/python/src/grpc/start_server.py b/net_orc/network/modules/base/python/src/grpc/start_server.py index b4016c831..d372949e5 100644 --- a/net_orc/network/modules/base/python/src/grpc/start_server.py +++ b/net_orc/network/modules/base/python/src/grpc/start_server.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Base class for starting the gRPC server for a network module.""" from concurrent import futures import grpc diff --git a/net_orc/network/modules/base/python/src/logger.py b/net_orc/network/modules/base/python/src/logger.py index abec00f69..8893b1e8d 100644 --- a/net_orc/network/modules/base/python/src/logger.py +++ b/net_orc/network/modules/base/python/src/logger.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Sets up the logger to be used for the network modules.""" import json import logging diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py index 23e1b4047..99d6bdebd 100644 --- a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Contains all the necessary classes to maintain the DHCP server's configuration""" import re diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py index 49732b362..64aab8a07 100644 --- a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """gRPC Network Service for the DHCP Server network module""" import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py index 1d93c2d34..f6e79a2ec 100644 --- a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Contains all the necessary classes to maintain the DHCP server's configuration""" import re diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py index 49732b362..64aab8a07 100644 --- a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """gRPC Network Service for the DHCP Server network module""" import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 diff --git a/net_orc/network/modules/ntp/python/src/ntp_server.py b/net_orc/network/modules/ntp/python/src/ntp_server.py index 602585196..4eda2b13e 100644 --- a/net_orc/network/modules/ntp/python/src/ntp_server.py +++ b/net_orc/network/modules/ntp/python/src/ntp_server.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """NTP Server""" import datetime import socket diff --git a/net_orc/network/modules/ovs/bin/start_network_service b/net_orc/network/modules/ovs/bin/start_network_service deleted file mode 100644 index 7c38f484a..000000000 --- a/net_orc/network/modules/ovs/bin/start_network_service +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -e - -if [[ "$EUID" -ne 0 ]]; then - echo "Must run as root." - exit 1 -fi - -asyncRun() { - "$@" & - pid="$!" - trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM - - # A signal emitted while waiting will make the wait command return code > 128 - # Let's wrap it in a loop that doesn't end before the process is indeed stopped - while kill -0 $pid > /dev/null 2>&1; do - wait - done -} - -# -u flag allows python print statements -# to be logged by docker by running unbuffered -asyncRun exec python3 -u /ovs/python/src/run.py \ No newline at end of file diff --git a/net_orc/network/modules/ovs/conf/module_config.json b/net_orc/network/modules/ovs/conf/module_config.json deleted file mode 100644 index 8a440d0ae..000000000 --- a/net_orc/network/modules/ovs/conf/module_config.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "config": { - "meta": { - "name": "ovs", - "display_name": "OVS", - "description": "Setup and configure Open vSwitch" - }, - "network": { - "interface": "veth0", - "enable_wan": false, - "ip_index": 6, - "host": true - }, - "docker": { - "depends_on": "base", - "mounts": [ - { - "source": "runtime/network", - "target": "/runtime/network" - } - ] - } - } -} \ No newline at end of file diff --git a/net_orc/network/modules/ovs/ovs.Dockerfile b/net_orc/network/modules/ovs/ovs.Dockerfile deleted file mode 100644 index cd4710e66..000000000 --- a/net_orc/network/modules/ovs/ovs.Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Image name: test-run/orchestrator -FROM test-run/base:latest - -#Update and get all additional requirements not contained in the base image -RUN apt-get update --fix-missing - -#Install openvswitch -RUN apt-get install -y openvswitch-switch - -# Copy over all configuration files -COPY network/modules/ovs/conf /testrun/conf - -# Copy over all binary files -COPY network/modules/ovs/bin /testrun/bin - -# Copy over all python files -COPY network/modules/ovs/python /testrun/python - -#Install all python requirements for the module -RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git a/net_orc/network/modules/ovs/python/requirements.txt b/net_orc/network/modules/ovs/python/requirements.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/net_orc/network/modules/ovs/python/src/logger.py b/net_orc/network/modules/ovs/python/src/logger.py deleted file mode 100644 index 23e697e43..000000000 --- a/net_orc/network/modules/ovs/python/src/logger.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Sets up the logger to be used for the ovs modules.""" -import logging - -LOGGERS = {} -_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' -_DATE_FORMAT = '%b %02d %H:%M:%S' - -# Set level to debug if set as runtime flag -logging.basicConfig(format=_LOG_FORMAT, - datefmt=_DATE_FORMAT, - level=logging.INFO) - -def get_logger(name): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - return LOGGERS[name] diff --git a/net_orc/network/modules/ovs/python/src/ovs_control.py b/net_orc/network/modules/ovs/python/src/ovs_control.py deleted file mode 100644 index 765c50f92..000000000 --- a/net_orc/network/modules/ovs/python/src/ovs_control.py +++ /dev/null @@ -1,105 +0,0 @@ -"""OVS Control Module""" -import json -import logger -import util - -CONFIG_FILE = '/ovs/conf/system.json' -DEVICE_BRIDGE = 'tr-d' -INTERNET_BRIDGE = 'tr-c' -LOGGER = logger.get_logger('ovs_ctrl') - -class OVSControl: - """OVS Control""" - def __init__(self): - self._int_intf = None - self._dev_intf = None - self._load_config() - - def add_bridge(self, bridge_name): - LOGGER.info('Adding OVS Bridge: ' + bridge_name) - # Create the bridge using ovs-vsctl commands - # Uses the --may-exist option to prevent failures - # if this bridge already exists by this name it won't fail - # and will not modify the existing bridge - success=util.run_command('ovs-vsctl --may-exist add-br ' + bridge_name) - return success - - def add_port(self,port, bridge_name): - LOGGER.info('Adding Port ' + port + ' to OVS Bridge: ' + bridge_name) - # Add a port to the bridge using ovs-vsctl commands - # Uses the --may-exist option to prevent failures - # if this port already exists on the bridge and will not - # modify the existing bridge - success=util.run_command(f"""ovs-vsctl --may-exist - add-port {bridge_name} {port}""") - return success - - def create_net(self): - LOGGER.info('Creating baseline network') - - # Create data plane - self.add_bridge(DEVICE_BRIDGE) - - # Create control plane - self.add_bridge(INTERNET_BRIDGE) - - # Remove IP from internet adapter - self.set_interface_ip(self._int_intf,'0.0.0.0') - - # Add external interfaces to data and control plane - self.add_port(self._dev_intf,DEVICE_BRIDGE) - self.add_port(self._int_intf,INTERNET_BRIDGE) - - # # Set ports up - self.set_bridge_up(DEVICE_BRIDGE) - self.set_bridge_up(INTERNET_BRIDGE) - - def delete_bridge(self,bridge_name): - LOGGER.info('Deleting OVS Bridge: ' + bridge_name) - # Delete the bridge using ovs-vsctl commands - # Uses the --if-exists option to prevent failures - # if this bridge does not exists - success=util.run_command('ovs-vsctl --if-exists del-br ' + bridge_name) - return success - - def _load_config(self): - LOGGER.info('Loading Configuration: ' + CONFIG_FILE) - with open(CONFIG_FILE, 'r', encoding='utf-8') as conf_file: - config_json = json.load(conf_file) - self._int_intf = config_json['internet_intf'] - self._dev_intf = config_json['device_intf'] - LOGGER.info('Configuration Loaded') - LOGGER.info('Internet Interface: ' + self._int_intf) - LOGGER.info('Device Interface: ' + self._dev_intf) - - def restore_net(self): - LOGGER.info('Restoring Network...') - # Delete data plane - self.delete_bridge(DEVICE_BRIDGE) - - # Delete control plane - self.delete_bridge(INTERNET_BRIDGE) - - LOGGER.info('Network is restored') - - def show_config(self): - LOGGER.info('Show current config of OVS') - success=util.run_command('ovs-vsctl show') - return success - - def set_bridge_up(self,bridge_name): - LOGGER.info('Setting Bridge device to up state: ' + bridge_name) - success=util.run_command('ip link set dev ' + bridge_name + ' up') - return success - - def set_interface_ip(self,interface, ip_addr): - LOGGER.info('Setting interface ' + interface + ' to ' + ip_addr) - # Remove IP from internet adapter - util.run_command('ifconfig ' + interface + ' 0.0.0.0') - -if __name__ == '__main__': - ovs = OVSControl() - ovs.create_net() - ovs.show_config() - ovs.restore_net() - ovs.show_config() diff --git a/net_orc/network/modules/ovs/python/src/run.py b/net_orc/network/modules/ovs/python/src/run.py deleted file mode 100644 index 5787a74e6..000000000 --- a/net_orc/network/modules/ovs/python/src/run.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Run OVS module""" -import logger -import signal -import sys -import time - -from ovs_control import OVSControl - -LOGGER = logger.get_logger('ovs_control_run') - -class OVSControlRun: - """Run the OVS module.""" - def __init__(self): - - signal.signal(signal.SIGINT, self.handler) - signal.signal(signal.SIGTERM, self.handler) - signal.signal(signal.SIGABRT, self.handler) - signal.signal(signal.SIGQUIT, self.handler) - - LOGGER.info('Starting OVS Control') - - # Get all components ready - self._ovs_control = OVSControl() - - self._ovs_control.restore_net() - - self._ovs_control.create_net() - - self._ovs_control.show_config() - - # Get network ready (via Network orchestrator) - LOGGER.info('Network is ready. Waiting for device information...') - - #Loop forever until process is stopped - while True: - LOGGER.info('OVS Running') - time.sleep(1000) - - # TODO: This time should be configurable (How long to hold before exiting, - # this could be infinite too) - #time.sleep(300) - - # Tear down network - #self._ovs_control.shutdown() - - def handler(self, signum): - LOGGER.info('SigtermEnum: ' + str(signal.SIGTERM)) - LOGGER.info('Exit signal received: ' + str(signum)) - if (signum == 2 or signal == signal.SIGTERM): - LOGGER.info('Exit signal received. Restoring network...') - self._ovs_control.shutdown() - sys.exit(1) - -ovs = OVSControlRun() diff --git a/net_orc/network/modules/ovs/python/src/util.py b/net_orc/network/modules/ovs/python/src/util.py deleted file mode 100644 index a3ebbb10a..000000000 --- a/net_orc/network/modules/ovs/python/src/util.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Provides basic utilities for a ovs module.""" -import subprocess -import logger - -LOGGER = logger.get_logger('util') - -def run_command(cmd): - success = False - process = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - if process.returncode != 0: - err_msg = f'{stderr.strip()}. Code: {process.returncode}' - LOGGER.error('Command Failed: ' + cmd) - LOGGER.error('Error: ' + err_msg) - else: - msg = stdout.strip().decode('utf-8') - succ_msg = f'{msg}. Code: {process.returncode}' - LOGGER.info('Command Success: ' + cmd) - LOGGER.info('Success: ' + succ_msg) - success = True - return success diff --git a/net_orc/network/modules/radius/python/src/authenticator.py b/net_orc/network/modules/radius/python/src/authenticator.py index 32f4ac221..0cca1921a 100644 --- a/net_orc/network/modules/radius/python/src/authenticator.py +++ b/net_orc/network/modules/radius/python/src/authenticator.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Authenticator for the RADIUS Server""" from chewie.chewie import Chewie import logging diff --git a/net_orc/network/modules/template/python/src/template_main.py b/net_orc/network/modules/template/python/src/template_main.py index df2452550..ddf83e2c4 100644 --- a/net_orc/network/modules/template/python/src/template_main.py +++ b/net_orc/network/modules/template/python/src/template_main.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Python code for the template module.""" if __name__ == "__main__": diff --git a/net_orc/python/src/listener.py b/net_orc/python/src/listener.py index de7a07616..0bbd2b1c9 100644 --- a/net_orc/python/src/listener.py +++ b/net_orc/python/src/listener.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Intercepts network traffic between network services and the device under test.""" import threading diff --git a/net_orc/python/src/network_device.py b/net_orc/python/src/network_device.py index 1b856da16..f17ac0f0d 100644 --- a/net_orc/python/src/network_device.py +++ b/net_orc/python/src/network_device.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Track device object information.""" from dataclasses import dataclass diff --git a/net_orc/python/src/network_event.py b/net_orc/python/src/network_event.py index f56adf494..204c97a0a 100644 --- a/net_orc/python/src/network_event.py +++ b/net_orc/python/src/network_event.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Specify the various types of network events to be reported.""" from enum import Enum diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 53a94b795..ba16b6a9c 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Network orchestrator is responsible for managing all of the virtual network services""" import getpass @@ -18,6 +32,7 @@ from network_device import NetworkDevice from network_event import NetworkEvent from network_validator import NetworkValidator +from ovs_control import OVSControl LOGGER = logger.get_logger('net_orc') CONFIG_FILE = 'conf/system.json' @@ -73,6 +88,7 @@ def __init__(self, shutil.rmtree(os.path.join(os.getcwd(), NET_DIR), ignore_errors=True) self.network_config = NetworkConfig() self.load_config(config_file) + self._ovs = OVSControl() def start(self): """Start the network orchestrator.""" @@ -142,7 +158,7 @@ def load_config(self, config_file=None): if not os.path.isfile(self._config_file): LOGGER.error('Configuration file is not present at ' + config_file) - LOGGER.info('An example is present in '+ EXAMPLE_CONFIG_FILE) + LOGGER.info('An example is present in ' + EXAMPLE_CONFIG_FILE) sys.exit(1) LOGGER.info('Loading config file: ' + os.path.abspath(self._config_file)) @@ -153,7 +169,7 @@ def load_config(self, config_file=None): def _device_discovered(self, mac_addr): LOGGER.debug( - f'Discovered device {mac_addr}. Waiting for device to obtain IP') + f'Discovered device {mac_addr}. Waiting for device to obtain IP') device = self._get_device(mac_addr=mac_addr) os.makedirs( os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', @@ -169,12 +185,12 @@ def _device_discovered(self, mac_addr): if device.ip_addr is None: LOGGER.info( - f'Timed out whilst waiting for {mac_addr} to obtain an IP address') + f'Timed out whilst waiting for {mac_addr} to obtain an IP address') return LOGGER.info( - f'Device with mac addr {device.mac_addr} has obtained IP address ' - f'{device.ip_addr}') + f'Device with mac addr {device.mac_addr} has obtained IP address ' + f'{device.ip_addr}') self._start_device_monitor(device) @@ -186,9 +202,8 @@ def _dhcp_lease_ack(self, packet): def _start_device_monitor(self, device): """Start a timer until the steady state has been reached and callback the steady state method for this device.""" - LOGGER.info( - f'Monitoring device with mac addr {device.mac_addr} ' - f'for {str(self._monitor_period)} seconds') + LOGGER.info(f'Monitoring device with mac addr {device.mac_addr} ' + f'for {str(self._monitor_period)} seconds') packet_capture = sniff(iface=self._dev_intf, timeout=self._monitor_period) wrpcap( @@ -293,9 +308,8 @@ def _ci_post_network_create(self): util.run_command(f'ip link set dev {INTERNET_BRIDGE} up') util.run_command(f'dhclient {INTERNET_BRIDGE}') util.run_command('ip route del default via 10.1.0.1') - util.run_command( - f'ip route add default via {self._gateway} ' - f'src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}') + util.run_command(f'ip route add default via {self._gateway} ' + f'src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}') def create_net(self): LOGGER.info('Creating baseline network') @@ -309,28 +323,14 @@ def create_net(self): if self._single_intf: self._ci_pre_network_create() - # Create data plane - util.run_command('ovs-vsctl add-br ' + DEVICE_BRIDGE) - - # Create control plane - util.run_command('ovs-vsctl add-br ' + INTERNET_BRIDGE) - - # Add external interfaces to data and control plane - util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + - self._dev_intf) - util.run_command('ovs-vsctl add-port ' + INTERNET_BRIDGE + ' ' + - self._int_intf) - - # Enable forwarding of eapol packets - util.run_command('ovs-ofctl add-flow ' + DEVICE_BRIDGE + - ' \'table=0, dl_dst=01:80:c2:00:00:03, actions=flood\'') - # Remove IP from internet adapter util.run_command('ifconfig ' + self._int_intf + ' 0.0.0.0') - # Set ports up - util.run_command('ip link set dev ' + DEVICE_BRIDGE + ' up') - util.run_command('ip link set dev ' + INTERNET_BRIDGE + ' up') + # Setup the virtual network + if not self._ovs.create_baseline_net(verify=True): + LOGGER.error('Baseline network validation failed.') + self.stop() + sys.exit(1) if self._single_intf: self._ci_post_network_create() @@ -367,8 +367,9 @@ def _load_network_module(self, module_dir): # Load module information with open(os.path.join(self._path, net_modules_dir, module_dir, - NETWORK_MODULE_METADATA), 'r', - encoding='UTF-8') as module_file_open: + NETWORK_MODULE_METADATA), + 'r', + encoding='UTF-8') as module_file_open: net_module_json = json.load(module_file_open) net_module.name = net_module_json['config']['meta']['name'] @@ -443,13 +444,6 @@ def _get_network_module(self, name): return net_module return None - # Start the OVS network module - # This should always be called before loading all - # other modules to allow for a properly setup base - # network - def _start_ovs_module(self): - self._start_network_service(self._get_network_module('OVS')) - def _start_network_service(self, net_module): LOGGER.debug('Starting net service ' + net_module.display_name) @@ -521,16 +515,12 @@ def start_network_services(self): for net_module in self._net_modules: - # TODO: There should be a better way of doing this - # Do not try starting OVS module again, as it should already be running - if 'OVS' != net_module.display_name: - - # Network modules may just be Docker images, - # so we do not want to start them as containers - if not net_module.enable_container: - continue + # Network modules may just be Docker images, + # so we do not want to start them as containers + if not net_module.enable_container: + continue - self._start_network_service(net_module) + self._start_network_service(net_module) LOGGER.info('All network services are running') self._check_network_services() @@ -555,7 +545,7 @@ def attach_test_module_to_network(self, test_module): container_intf) # Add bridge interface to device bridge - util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + self._ovs.add_port(port=bridge_intf, bridge_name=DEVICE_BRIDGE) # Get PID for running container # TODO: Some error checking around missing PIDs might be required @@ -620,7 +610,11 @@ def _attach_service_to_network(self, net_module): container_intf) # Add bridge interface to device bridge - util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + if self._ovs.add_port(port=bridge_intf, bridge_name=DEVICE_BRIDGE): + if not self._ovs.port_exists(bridge_name=DEVICE_BRIDGE, port=bridge_intf): + LOGGER.error('Failed to add ' + net_module.name + ' to device bridge ' + + DEVICE_BRIDGE + '. Exiting.') + sys.exit(1) # Get PID for running container # TODO: Some error checking around missing PIDs might be required @@ -675,8 +669,12 @@ def _attach_service_to_network(self, net_module): container_intf) # Attach bridge interface to internet bridge - util.run_command('ovs-vsctl add-port ' + INTERNET_BRIDGE + ' ' + - bridge_intf) + if self._ovs.add_port(port=bridge_intf, bridge_name=INTERNET_BRIDGE): + if not self._ovs.port_exists(bridge_name=INTERNET_BRIDGE, + port=bridge_intf): + LOGGER.error('Failed to add ' + net_module.name + + ' to internet bridge ' + DEVICE_BRIDGE + '. Exiting.') + sys.exit(1) # Attach container interface to container network namespace util.run_command('ip link set ' + container_intf + ' netns ' + @@ -714,11 +712,8 @@ def restore_net(self): except Exception: # pylint: disable=W0703 continue - # Delete data plane - util.run_command('ovs-vsctl --if-exists del-br tr-d') - - # Delete control plane - util.run_command('ovs-vsctl --if-exists del-br tr-c') + # Clear the virtual network + self._ovs.restore_net() # Restart internet interface if util.interface_exists(self._int_intf): diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py index 83ca6f671..a90096f7d 100644 --- a/net_orc/python/src/network_validator.py +++ b/net_orc/python/src/network_validator.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Holds logic for validation of network services prior to runtime.""" import json import os diff --git a/net_orc/python/src/ovs_control.py b/net_orc/python/src/ovs_control.py new file mode 100644 index 000000000..4c989756b --- /dev/null +++ b/net_orc/python/src/ovs_control.py @@ -0,0 +1,186 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OVS Control Module""" +import json +import logger +import util +import os + +CONFIG_FILE = 'conf/system.json' +DEVICE_BRIDGE = 'tr-d' +INTERNET_BRIDGE = 'tr-c' +LOGGER = logger.get_logger('ovs_ctrl') + + +class OVSControl: + """OVS Control""" + + def __init__(self): + self._int_intf = None + self._dev_intf = None + self._load_config() + + def add_bridge(self, bridge_name): + LOGGER.debug('Adding OVS bridge: ' + bridge_name) + # Create the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this bridge already exists by this name it won't fail + # and will not modify the existing bridge + success = util.run_command('ovs-vsctl --may-exist add-br ' + bridge_name) + return success + + def add_flow(self, bridge_name, flow): + # Add a flow to the bridge using ovs-ofctl commands + LOGGER.debug(f'Adding flow {flow} to bridge: {bridge_name}') + success = util.run_command(f'ovs-ofctl add-flow {bridge_name} \'{flow}\'') + return success + + def add_port(self, port, bridge_name): + LOGGER.debug('Adding port ' + port + ' to OVS bridge: ' + bridge_name) + # Add a port to the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this port already exists on the bridge and will not + # modify the existing bridge + success = util.run_command(f"""ovs-vsctl --may-exist + add-port {bridge_name} {port}""") + return success + + def get_bridge_ports(self, bridge_name): + # Get a list of all the ports on a bridge + response = util.run_command(f'ovs-vsctl list-ports {bridge_name}', + output=True) + return response[0].splitlines() + + def bridge_exists(self, bridge_name): + # Check if a bridge exists by the name provided + LOGGER.debug(f'Checking if {bridge_name} exists') + success = util.run_command(f'ovs-vsctl br-exists {bridge_name}') + return success + + def port_exists(self, bridge_name, port): + # Check if a port exists on a specified bridge + LOGGER.debug(f'Checking if {bridge_name} exists') + resp = util.run_command(f'ovs-vsctl port-to-br {port}', True) + return resp[0] == bridge_name + + def validate_baseline_network(self): + # Verify the OVS setup of the virtual network + LOGGER.info('Validating baseline network') + + # Verify the device bridge + dev_bridge = self.verify_bridge(DEVICE_BRIDGE, [self._dev_intf]) + LOGGER.info('Device bridge verified: ' + str(dev_bridge)) + + # Verify the internet bridge + int_bridge = self.verify_bridge(INTERNET_BRIDGE, [self._int_intf]) + LOGGER.info('Internet bridge verified: ' + str(int_bridge)) + + return dev_bridge and int_bridge + + def verify_bridge(self, bridge_name, ports): + LOGGER.debug('Verifying bridge: ' + bridge_name) + verified = True + if self.bridge_exists(bridge_name): + bridge_ports = self.get_bridge_ports(bridge_name) + LOGGER.debug('Checking bridge for ports: ' + str(ports)) + for port in ports: + if port not in bridge_ports: + verified = False + break + else: + verified = False + return verified + + def create_baseline_net(self, verify=True): + LOGGER.debug('Creating baseline network') + + # Remove IP from internet adapter + self.set_interface_ip(interface=self._int_intf, ip_addr='0.0.0.0') + + # Create data plane + self.add_bridge(DEVICE_BRIDGE) + + # Create control plane + self.add_bridge(INTERNET_BRIDGE) + + # Remove IP from internet adapter + self.set_interface_ip(self._int_intf, '0.0.0.0') + + # Add external interfaces to data and control plane + self.add_port(self._dev_intf, DEVICE_BRIDGE) + self.add_port(self._int_intf, INTERNET_BRIDGE) + + # Enable forwarding of eapol packets + self.add_flow(bridge_name=DEVICE_BRIDGE, + flow='table=0, dl_dst=01:80:c2:00:00:03, actions=flood') + + # Set ports up + self.set_bridge_up(DEVICE_BRIDGE) + self.set_bridge_up(INTERNET_BRIDGE) + + self.show_config() + + if verify: + return self.validate_baseline_network() + else: + return None + + def delete_bridge(self, bridge_name): + LOGGER.debug('Deleting OVS Bridge: ' + bridge_name) + # Delete the bridge using ovs-vsctl commands + # Uses the --if-exists option to prevent failures + # if this bridge does not exists + success = util.run_command('ovs-vsctl --if-exists del-br ' + bridge_name) + return success + + def _load_config(self): + path = os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))))) + config_file = os.path.join(path, CONFIG_FILE) + LOGGER.debug('Loading configuration: ' + config_file) + with open(config_file, 'r', encoding='utf-8') as conf_file: + config_json = json.load(conf_file) + self._int_intf = config_json['network']['internet_intf'] + self._dev_intf = config_json['network']['device_intf'] + LOGGER.debug('Configuration loaded') + LOGGER.debug('Internet interface: ' + self._int_intf) + LOGGER.debug('Device interface: ' + self._dev_intf) + + def restore_net(self): + LOGGER.debug('Restoring network...') + # Delete data plane + self.delete_bridge(DEVICE_BRIDGE) + + # Delete control plane + self.delete_bridge(INTERNET_BRIDGE) + + LOGGER.debug('Network is restored') + + def show_config(self): + LOGGER.debug('Show current config of OVS') + success = util.run_command('ovs-vsctl show', output=True) + LOGGER.debug(f'OVS Config\n{success[0]}') + return success + + def set_bridge_up(self, bridge_name): + LOGGER.debug('Setting bridge device to up state: ' + bridge_name) + success = util.run_command('ip link set dev ' + bridge_name + ' up') + return success + + def set_interface_ip(self, interface, ip_addr): + LOGGER.debug('Setting interface ' + interface + ' to ' + ip_addr) + # Remove IP from internet adapter + util.run_command(f'ifconfig {interface} {ip_addr}') diff --git a/net_orc/python/src/util.py b/net_orc/python/src/util.py index a7b07ddf9..ba9527996 100644 --- a/net_orc/python/src/util.py +++ b/net_orc/python/src/util.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Provides basic utilities for the network orchestrator.""" import subprocess import shlex diff --git a/test_orc/modules/base/python/src/grpc/start_server.py b/test_orc/modules/base/python/src/grpc/start_server.py index b4016c831..d372949e5 100644 --- a/test_orc/modules/base/python/src/grpc/start_server.py +++ b/test_orc/modules/base/python/src/grpc/start_server.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Base class for starting the gRPC server for a network module.""" from concurrent import futures import grpc diff --git a/test_orc/modules/base/python/src/logger.py b/test_orc/modules/base/python/src/logger.py index 42124beea..64594c7b3 100644 --- a/test_orc/modules/base/python/src/logger.py +++ b/test_orc/modules/base/python/src/logger.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Sets up the logger to be used for the test modules.""" import json import logging diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 8e10a3637..57795a182 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Base class for all core test module functions""" import json import logger diff --git a/test_orc/modules/base/python/src/util.py b/test_orc/modules/base/python/src/util.py index d387db796..0f54c4298 100644 --- a/test_orc/modules/base/python/src/util.py +++ b/test_orc/modules/base/python/src/util.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Provides basic utilities for a test module.""" import subprocess import shlex diff --git a/test_orc/modules/baseline/python/src/baseline_module.py b/test_orc/modules/baseline/python/src/baseline_module.py index 083123436..22555d369 100644 --- a/test_orc/modules/baseline/python/src/baseline_module.py +++ b/test_orc/modules/baseline/python/src/baseline_module.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Baseline test module""" from test_module import TestModule diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py index 1892ed8ae..42eccbef4 100644 --- a/test_orc/modules/baseline/python/src/run.py +++ b/test_orc/modules/baseline/python/src/run.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Run Baseline module""" import argparse import signal diff --git a/test_orc/modules/dns/python/src/dns_module.py b/test_orc/modules/dns/python/src/dns_module.py index 58ce48123..cd7261da0 100644 --- a/test_orc/modules/dns/python/src/dns_module.py +++ b/test_orc/modules/dns/python/src/dns_module.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """DNS test module""" import subprocess from test_module import TestModule diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py index 4cd991804..2b924bbaf 100644 --- a/test_orc/modules/dns/python/src/run.py +++ b/test_orc/modules/dns/python/src/run.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Run DNS test module""" import argparse import signal diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py index 876343a0f..028471bb9 100644 --- a/test_orc/modules/nmap/python/src/nmap_module.py +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """NMAP test module""" import time import util diff --git a/test_orc/modules/nmap/python/src/run.py b/test_orc/modules/nmap/python/src/run.py index 959e30f87..ecb6cd028 100644 --- a/test_orc/modules/nmap/python/src/run.py +++ b/test_orc/modules/nmap/python/src/run.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Run NMAP test module""" import argparse import signal diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py index 72791f86e..185940dd8 100644 --- a/test_orc/python/src/module.py +++ b/test_orc/python/src/module.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Represemts a test module.""" from dataclasses import dataclass from docker.models.containers import Container diff --git a/test_orc/python/src/runner.py b/test_orc/python/src/runner.py index d82935057..363f800af 100644 --- a/test_orc/python/src/runner.py +++ b/test_orc/python/src/runner.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Provides high level management of the test orchestrator.""" import time import logger diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 4b65bae12..14b39720d 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Provides high level management of the test orchestrator.""" import getpass import os diff --git a/testing/test_baseline.py b/testing/test_baseline.py index b356983dd..246857581 100644 --- a/testing/test_baseline.py +++ b/testing/test_baseline.py @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import json import pytest import re From 38d71aae4895a34aa50dfc4f3afc979d3c881b04 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Wed, 31 May 2023 09:09:48 -0700 Subject: [PATCH 24/48] Ovs (#35) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting * Add ovs control into network orchestrator * Add verification methods for the base network * Add network validation and misc logging updates * remove ovs module --------- Co-authored-by: Jacob Boddey Co-authored-by: SuperJonotron From 00be9cbad0e55a38f91e7757cdac8d2c82697506 Mon Sep 17 00:00:00 2001 From: jhughesbiot Date: Wed, 31 May 2023 10:36:46 -0600 Subject: [PATCH 25/48] remove ovs files added back in during merge --- net_orc/docker-compose.yml | 64 ----------- .../modules/ovs/bin/start_network_service | 22 ---- .../modules/ovs/conf/module_config.json | 24 ---- net_orc/network/modules/ovs/ovs.Dockerfile | 20 ---- .../modules/ovs/python/requirements.txt | 0 .../network/modules/ovs/python/src/logger.py | 16 --- .../modules/ovs/python/src/ovs_control.py | 105 ------------------ net_orc/network/modules/ovs/python/src/run.py | 54 --------- .../network/modules/ovs/python/src/util.py | 23 ---- 9 files changed, 328 deletions(-) delete mode 100644 net_orc/docker-compose.yml delete mode 100644 net_orc/network/modules/ovs/bin/start_network_service delete mode 100644 net_orc/network/modules/ovs/conf/module_config.json delete mode 100644 net_orc/network/modules/ovs/ovs.Dockerfile delete mode 100644 net_orc/network/modules/ovs/python/requirements.txt delete mode 100644 net_orc/network/modules/ovs/python/src/logger.py delete mode 100644 net_orc/network/modules/ovs/python/src/ovs_control.py delete mode 100644 net_orc/network/modules/ovs/python/src/run.py delete mode 100644 net_orc/network/modules/ovs/python/src/util.py diff --git a/net_orc/docker-compose.yml b/net_orc/docker-compose.yml deleted file mode 100644 index 8c50d766a..000000000 --- a/net_orc/docker-compose.yml +++ /dev/null @@ -1,64 +0,0 @@ -version: "3.7" - -services: - - base: - build: - context: network/modules/base - dockerfile: base.Dockerfile - image: test-run/base - container_name: tr-ct-base - - ovs: - depends_on: - - base - build: - context: network/modules/ovs - dockerfile: ovs.Dockerfile - image: test-run/ovs - network_mode: host - container_name: tr-ct-ovs - stdin_open: true - privileged: true - volumes: - - $PWD/network/modules/ovs/python:/ovs/python - # Mount host open vswitch socket to allow container - # access to control open vswitch on the host - - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock - # Mount host network namespace to allow container - # access to assign proper namespaces to containers - - /var/run/netns:/var/run/netns - - netorch: - depends_on: - - base - build: - context: . - dockerfile: orchestrator.Dockerfile - image: test-run/orchestrator - network_mode: host - privileged: true - volumes: - - $PWD/cmd:/orchestrator/cmd - - $PWD/network:/orchestrator/network - - $PWD/python:/orchestrator/python - # Mount host docker socket to allow container access - # control docker containers on the host - - /var/run/docker.sock:/var/run/docker.sock - # Mount host open vswitch socket to allow container - # access to control open vswitch on the host - - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock - # Mount host network namespace to allow container - # access to assign proper namespaces to containers - - /var/run/netns:/var/run/netns - # Mount the host process information to allow container - # access to configure docker containers and namespaces properly - - /proc:/proc - container_name: network_orchestrator - stdin_open: true - working_dir: /orchestrator - #entrypoint: ["cmd/start"] - # Give more time for stopping so when we stop the container it has - # time to stop all network services gracefuly - stop_grace_period: 60s - entrypoint: ["python3","-u","python/src/run.py"] diff --git a/net_orc/network/modules/ovs/bin/start_network_service b/net_orc/network/modules/ovs/bin/start_network_service deleted file mode 100644 index 7c38f484a..000000000 --- a/net_orc/network/modules/ovs/bin/start_network_service +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -e - -if [[ "$EUID" -ne 0 ]]; then - echo "Must run as root." - exit 1 -fi - -asyncRun() { - "$@" & - pid="$!" - trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM - - # A signal emitted while waiting will make the wait command return code > 128 - # Let's wrap it in a loop that doesn't end before the process is indeed stopped - while kill -0 $pid > /dev/null 2>&1; do - wait - done -} - -# -u flag allows python print statements -# to be logged by docker by running unbuffered -asyncRun exec python3 -u /ovs/python/src/run.py \ No newline at end of file diff --git a/net_orc/network/modules/ovs/conf/module_config.json b/net_orc/network/modules/ovs/conf/module_config.json deleted file mode 100644 index 8a440d0ae..000000000 --- a/net_orc/network/modules/ovs/conf/module_config.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "config": { - "meta": { - "name": "ovs", - "display_name": "OVS", - "description": "Setup and configure Open vSwitch" - }, - "network": { - "interface": "veth0", - "enable_wan": false, - "ip_index": 6, - "host": true - }, - "docker": { - "depends_on": "base", - "mounts": [ - { - "source": "runtime/network", - "target": "/runtime/network" - } - ] - } - } -} \ No newline at end of file diff --git a/net_orc/network/modules/ovs/ovs.Dockerfile b/net_orc/network/modules/ovs/ovs.Dockerfile deleted file mode 100644 index cd4710e66..000000000 --- a/net_orc/network/modules/ovs/ovs.Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Image name: test-run/orchestrator -FROM test-run/base:latest - -#Update and get all additional requirements not contained in the base image -RUN apt-get update --fix-missing - -#Install openvswitch -RUN apt-get install -y openvswitch-switch - -# Copy over all configuration files -COPY network/modules/ovs/conf /testrun/conf - -# Copy over all binary files -COPY network/modules/ovs/bin /testrun/bin - -# Copy over all python files -COPY network/modules/ovs/python /testrun/python - -#Install all python requirements for the module -RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git a/net_orc/network/modules/ovs/python/requirements.txt b/net_orc/network/modules/ovs/python/requirements.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/net_orc/network/modules/ovs/python/src/logger.py b/net_orc/network/modules/ovs/python/src/logger.py deleted file mode 100644 index 23e697e43..000000000 --- a/net_orc/network/modules/ovs/python/src/logger.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Sets up the logger to be used for the ovs modules.""" -import logging - -LOGGERS = {} -_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' -_DATE_FORMAT = '%b %02d %H:%M:%S' - -# Set level to debug if set as runtime flag -logging.basicConfig(format=_LOG_FORMAT, - datefmt=_DATE_FORMAT, - level=logging.INFO) - -def get_logger(name): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - return LOGGERS[name] diff --git a/net_orc/network/modules/ovs/python/src/ovs_control.py b/net_orc/network/modules/ovs/python/src/ovs_control.py deleted file mode 100644 index 765c50f92..000000000 --- a/net_orc/network/modules/ovs/python/src/ovs_control.py +++ /dev/null @@ -1,105 +0,0 @@ -"""OVS Control Module""" -import json -import logger -import util - -CONFIG_FILE = '/ovs/conf/system.json' -DEVICE_BRIDGE = 'tr-d' -INTERNET_BRIDGE = 'tr-c' -LOGGER = logger.get_logger('ovs_ctrl') - -class OVSControl: - """OVS Control""" - def __init__(self): - self._int_intf = None - self._dev_intf = None - self._load_config() - - def add_bridge(self, bridge_name): - LOGGER.info('Adding OVS Bridge: ' + bridge_name) - # Create the bridge using ovs-vsctl commands - # Uses the --may-exist option to prevent failures - # if this bridge already exists by this name it won't fail - # and will not modify the existing bridge - success=util.run_command('ovs-vsctl --may-exist add-br ' + bridge_name) - return success - - def add_port(self,port, bridge_name): - LOGGER.info('Adding Port ' + port + ' to OVS Bridge: ' + bridge_name) - # Add a port to the bridge using ovs-vsctl commands - # Uses the --may-exist option to prevent failures - # if this port already exists on the bridge and will not - # modify the existing bridge - success=util.run_command(f"""ovs-vsctl --may-exist - add-port {bridge_name} {port}""") - return success - - def create_net(self): - LOGGER.info('Creating baseline network') - - # Create data plane - self.add_bridge(DEVICE_BRIDGE) - - # Create control plane - self.add_bridge(INTERNET_BRIDGE) - - # Remove IP from internet adapter - self.set_interface_ip(self._int_intf,'0.0.0.0') - - # Add external interfaces to data and control plane - self.add_port(self._dev_intf,DEVICE_BRIDGE) - self.add_port(self._int_intf,INTERNET_BRIDGE) - - # # Set ports up - self.set_bridge_up(DEVICE_BRIDGE) - self.set_bridge_up(INTERNET_BRIDGE) - - def delete_bridge(self,bridge_name): - LOGGER.info('Deleting OVS Bridge: ' + bridge_name) - # Delete the bridge using ovs-vsctl commands - # Uses the --if-exists option to prevent failures - # if this bridge does not exists - success=util.run_command('ovs-vsctl --if-exists del-br ' + bridge_name) - return success - - def _load_config(self): - LOGGER.info('Loading Configuration: ' + CONFIG_FILE) - with open(CONFIG_FILE, 'r', encoding='utf-8') as conf_file: - config_json = json.load(conf_file) - self._int_intf = config_json['internet_intf'] - self._dev_intf = config_json['device_intf'] - LOGGER.info('Configuration Loaded') - LOGGER.info('Internet Interface: ' + self._int_intf) - LOGGER.info('Device Interface: ' + self._dev_intf) - - def restore_net(self): - LOGGER.info('Restoring Network...') - # Delete data plane - self.delete_bridge(DEVICE_BRIDGE) - - # Delete control plane - self.delete_bridge(INTERNET_BRIDGE) - - LOGGER.info('Network is restored') - - def show_config(self): - LOGGER.info('Show current config of OVS') - success=util.run_command('ovs-vsctl show') - return success - - def set_bridge_up(self,bridge_name): - LOGGER.info('Setting Bridge device to up state: ' + bridge_name) - success=util.run_command('ip link set dev ' + bridge_name + ' up') - return success - - def set_interface_ip(self,interface, ip_addr): - LOGGER.info('Setting interface ' + interface + ' to ' + ip_addr) - # Remove IP from internet adapter - util.run_command('ifconfig ' + interface + ' 0.0.0.0') - -if __name__ == '__main__': - ovs = OVSControl() - ovs.create_net() - ovs.show_config() - ovs.restore_net() - ovs.show_config() diff --git a/net_orc/network/modules/ovs/python/src/run.py b/net_orc/network/modules/ovs/python/src/run.py deleted file mode 100644 index 5787a74e6..000000000 --- a/net_orc/network/modules/ovs/python/src/run.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Run OVS module""" -import logger -import signal -import sys -import time - -from ovs_control import OVSControl - -LOGGER = logger.get_logger('ovs_control_run') - -class OVSControlRun: - """Run the OVS module.""" - def __init__(self): - - signal.signal(signal.SIGINT, self.handler) - signal.signal(signal.SIGTERM, self.handler) - signal.signal(signal.SIGABRT, self.handler) - signal.signal(signal.SIGQUIT, self.handler) - - LOGGER.info('Starting OVS Control') - - # Get all components ready - self._ovs_control = OVSControl() - - self._ovs_control.restore_net() - - self._ovs_control.create_net() - - self._ovs_control.show_config() - - # Get network ready (via Network orchestrator) - LOGGER.info('Network is ready. Waiting for device information...') - - #Loop forever until process is stopped - while True: - LOGGER.info('OVS Running') - time.sleep(1000) - - # TODO: This time should be configurable (How long to hold before exiting, - # this could be infinite too) - #time.sleep(300) - - # Tear down network - #self._ovs_control.shutdown() - - def handler(self, signum): - LOGGER.info('SigtermEnum: ' + str(signal.SIGTERM)) - LOGGER.info('Exit signal received: ' + str(signum)) - if (signum == 2 or signal == signal.SIGTERM): - LOGGER.info('Exit signal received. Restoring network...') - self._ovs_control.shutdown() - sys.exit(1) - -ovs = OVSControlRun() diff --git a/net_orc/network/modules/ovs/python/src/util.py b/net_orc/network/modules/ovs/python/src/util.py deleted file mode 100644 index a3ebbb10a..000000000 --- a/net_orc/network/modules/ovs/python/src/util.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Provides basic utilities for a ovs module.""" -import subprocess -import logger - -LOGGER = logger.get_logger('util') - -def run_command(cmd): - success = False - process = subprocess.Popen(cmd.split(), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - if process.returncode != 0: - err_msg = f'{stderr.strip()}. Code: {process.returncode}' - LOGGER.error('Command Failed: ' + cmd) - LOGGER.error('Error: ' + err_msg) - else: - msg = stdout.strip().decode('utf-8') - succ_msg = f'{msg}. Code: {process.returncode}' - LOGGER.info('Command Success: ' + cmd) - LOGGER.info('Success: ' + succ_msg) - success = True - return success From f33123959cb4c9419c11cb941a82f209f9f3382f Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Mon, 5 Jun 2023 08:34:45 -0700 Subject: [PATCH 26/48] Nmap (#38) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting * Add ovs control into network orchestrator * Add verification methods for the base network * Add network validation and misc logging updates * remove ovs module * add license header to all python files * Update tcp scans to speed up full port range scan Add version checking Implement ssh version checking * Add unknown port checks Match unknown ports to existing services Add unknown ports without existing services to results file --------- Co-authored-by: Jacob Boddey Co-authored-by: SuperJonotron --- net_orc/python/src/network_orchestrator.py | 1 + test_orc/modules/nmap/conf/module_config.json | 3 +- .../modules/nmap/python/src/nmap_module.py | 170 +++++++++++++++--- 3 files changed, 146 insertions(+), 28 deletions(-) diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index bb8d77f3d..2852f1565 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -190,6 +190,7 @@ def _device_discovered(self, mac_addr): LOGGER.info( f'Device with mac addr {device.mac_addr} has obtained IP address ' f'{device.ip_addr}') + self._start_device_monitor(device) def _dhcp_lease_ack(self, packet): diff --git a/test_orc/modules/nmap/conf/module_config.json b/test_orc/modules/nmap/conf/module_config.json index 5449327a1..aafde4c03 100644 --- a/test_orc/modules/nmap/conf/module_config.json +++ b/test_orc/modules/nmap/conf/module_config.json @@ -34,7 +34,8 @@ "tcp_ports": { "22": { "allowed": true, - "description": "Secure Shell (SSH) server" + "description": "Secure Shell (SSH) server", + "version": "2.0" } }, "description": "Check TELNET port 23 is disabled and TELNET is not running on any port", diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py index 028471bb9..6b5477489 100644 --- a/test_orc/modules/nmap/python/src/nmap_module.py +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -25,6 +25,7 @@ class NmapModule(TestModule): """NMAP Test module""" + def __init__(self, module): super().__init__(module_name=module, log_name=LOG_NAME) self._unallowed_ports = [] @@ -67,33 +68,87 @@ def _security_nmap_ports(self, config): LOGGER.debug("UDP scan results: " + str(self._scan_udp_results)) LOGGER.debug("Service scan results: " + str(self._script_scan_results)) self._process_port_results(tests=config) + LOGGER.info("Unallowed Ports Detected: " + str(self._unallowed_ports)) + self._check_unallowed_port(self._unallowed_ports,config) LOGGER.info("Unallowed Ports: " + str(self._unallowed_ports)) - LOGGER.info("Script scan results:\n" + - json.dumps(self._script_scan_results)) return len(self._unallowed_ports) == 0 else: LOGGER.info("Device ip address not resolved, skipping") return None def _process_port_results(self, tests): + scan_results = {} + if self._scan_tcp_results is not None: + scan_results.update(self._scan_tcp_results) + if self._scan_udp_results is not None: + scan_results.update(self._scan_udp_results) + if self._script_scan_results is not None: + scan_results.update(self._script_scan_results) + + self._check_unknown_ports(tests=tests,scan_results=scan_results) + for test in tests: LOGGER.info("Checking results for test: " + str(test)) - self._check_scan_results(test_config=tests[test]) + self._check_scan_results(test_config=tests[test],scan_results=scan_results) + + def _check_unknown_ports(self,tests,scan_results): + """ Check if any of the open ports detected are not defined + in the test configurations. If an open port is detected + without a configuration associated with it, the default behavior + is to mark it as an unallowed port. + """ + known_ports = [] + for test in tests: + if "tcp_ports" in tests[test]: + for port in tests[test]['tcp_ports']: + known_ports.append(port) + if "udp_ports" in tests[test]: + for port in tests[test]['udp_ports']: + known_ports.append(port) + + for port_result in scan_results: + if not port_result in known_ports: + LOGGER.info("Unknown port detected: " + port_result) + unallowed_port = {'port':port_result, + 'service':scan_results[port_result]['service'], + 'tcp_udp':scan_results[port_result]['tcp_udp']} + #self._unallowed_ports.append(unallowed_port) + self._add_unknown_ports(tests,unallowed_port) + + def _add_unknown_ports(self,tests,unallowed_port): + known_service = False + result = {'description':"Undefined port",'allowed':False} + if unallowed_port['tcp_udp'] == 'tcp': + port_style = 'tcp_ports' + elif unallowed_port['tcp_udp'] == 'udp': + port_style = 'udp_ports' + for test in tests: + if unallowed_port['service'] in test: + known_service=True + for test_port in tests[test][port_style]: + if "version" in tests[test][port_style][test_port]: + result['version'] = tests[test][port_style][test_port]['version'] + if "description" in tests[test][port_style][test_port]: + result['description'] = tests[test][port_style][test_port]['description'] + result['inherited_from'] = test_port + if tests[test][port_style][test_port]['allowed']: + result['allowed'] = True + break + + tests[test][port_style][unallowed_port['port']]=result + + if not known_service: + service_name = "security.services.unknown." + str(unallowed_port['port']) + unknown_service = {port_style:{unallowed_port['port']:result}} + tests[service_name]=unknown_service - def _check_scan_results(self, test_config): + def _check_scan_results(self, test_config,scan_results): port_config = {} if "tcp_ports" in test_config: port_config.update(test_config["tcp_ports"]) elif "udp_ports" in test_config: port_config.update(test_config["udp_ports"]) - scan_results = {} - if self._scan_tcp_results is not None: - scan_results.update(self._scan_tcp_results) - if self._scan_udp_results is not None: - scan_results.update(self._scan_udp_results) - if self._script_scan_results is not None: - scan_results.update(self._script_scan_results) if port_config is not None: for port, config in port_config.items(): result = None @@ -103,11 +158,23 @@ def _check_scan_results(self, test_config): if scan_results[port]["state"] == "open": if not config["allowed"]: LOGGER.info("Unallowed port open") - self._unallowed_ports.append(str(port)) + self._unallowed_ports.append( + {"port":str(port), + "service":str(scan_results[port]["service"]), + 'tcp_udp':scan_results[port]['tcp_udp']} + ) result = False else: LOGGER.info("Allowed port open") - result = True + if "version" in config and "version" in scan_results[port]: + version_check = self._check_version(scan_results[port]["service"], + scan_results[port]["version"],config["version"]) + if version_check is not None: + result = version_check + else: + result = True + else: + result = True else: LOGGER.info("Port is closed") result = True @@ -120,6 +187,64 @@ def _check_scan_results(self, test_config): else: config["result"] = "skipped" + def _check_unallowed_port(self,unallowed_ports,tests): + service_allowed=False + allowed = False + version = None + service = None + for port in unallowed_ports: + LOGGER.info('Checking unallowed port: ' + port['port']) + LOGGER.info('Looking for service: ' + port['service']) + LOGGER.info('Unallowed Port Config: ' + str(port)) + if port['tcp_udp'] == 'tcp': + port_style = 'tcp_ports' + elif port['tcp_udp'] == 'udp': + port_style = 'udp_ports' + for test in tests: + LOGGER.info('Checking test: ' + str(test)) + if port['service'] in test: + service_config = tests[test] + service = port['service'] + for service_port in service_config[port_style]: + port_config = service_config[port_style][service_port] + service_allowed |= port_config['allowed'] + version = port_config['version'] if 'version' in port_config else None + if service_allowed: + LOGGER.info("Unallowed port detected for allowed service: " + service) + if version is not None: + allowed = self._check_version(service=service, + version_detected=self._scan_tcp_results[port['port']]['version'], + version_expected=version) + else: + allowed = True + if allowed: + LOGGER.info("Unallowed port exception for approved service: " + port['port']) + for u_port in self._unallowed_ports: + if port['port'] in u_port['port']: + self._unallowed_ports.remove(u_port) + break + break + + def _check_version(self,service,version_detected,version_expected): + """Check if the version specified for the service matches what was + detected by nmap. Since there is no consistency in how nmap service + results are returned, each service that needs a checked must be + implemented individually. If a service version is requested + that is not implemented, this test will provide a skip (None) + result. + """ + LOGGER.info("Checking version for service: " + service) + LOGGER.info("NMAP Version Detected: " + version_detected) + LOGGER.info("Version Expected: " + version_expected) + version_check = None + match service: + case "ssh": + version_check = f"protocol {version_expected}" in version_detected + case _: + LOGGER.info("No version check implemented for service: " + service + ". Skipping") + LOGGER.info("Version check result: " + str(version_check)) + return version_check + def _scan_scripts(self, tests): scan_results = {} LOGGER.info("Checing for scan scripts") @@ -169,25 +294,15 @@ def _scan_udp_with_script(self, script_name, ports=None): nmap_results = util.run_command("nmap " + nmap_options + self._device_ipv4_addr)[0] LOGGER.info("Nmap UDP script scan complete") - LOGGER.info("nmap script results\n" + str(nmap_results)) return self._process_nmap_results(nmap_results=nmap_results) def _scan_tcp_ports(self, tests): max_port = 1000 - ports = [] - for test in tests: - test_config = tests[test] - if "tcp_ports" in test_config: - for port in test_config["tcp_ports"]: - if int(port) > max_port: - ports.append(port) - ports_to_scan = "1-" + str(max_port) - if len(ports) > 0: - ports_to_scan += "," + ",".join(ports) LOGGER.info("Running nmap TCP port scan") - LOGGER.info("TCP ports: " + str(ports_to_scan)) - nmap_results = util.run_command(f"""nmap -sT -sV -Pn -v -p {ports_to_scan} + nmap_results = util.run_command( + f"""nmap --open -sT -sV -Pn -v -p 1-{max_port} --version-intensity 7 -T4 {self._device_ipv4_addr}""")[0] + LOGGER.info("TCP port scan complete") self._scan_tcp_results = self._process_nmap_results( nmap_results=nmap_results) @@ -213,7 +328,7 @@ def _process_nmap_results(self, nmap_results): results = {} LOGGER.info("nmap results\n" + str(nmap_results)) if nmap_results: - if "Service Info" in nmap_results: + if "Service Info" in nmap_results and "MAC Address" not in nmap_results: rows = nmap_results.split("PORT")[1].split("Service Info")[0].split( "\n") elif "PORT" in nmap_results: @@ -232,6 +347,7 @@ def _process_nmap_results(self, nmap_results): version = " ".join(cols[3:]) port_result = { cols[0].split("/")[0]: { + "tcp_udp":cols[0].split("/")[1], "state": cols[1], "service": cols[2], "version": version From 2a68fba9d68afe796a076a6ec29727d2c414a12a Mon Sep 17 00:00:00 2001 From: J Boddey Date: Mon, 5 Jun 2023 18:33:28 +0100 Subject: [PATCH 27/48] Create startup capture (#37) --- framework/testrun.py | 19 +++++------ net_orc/python/src/network_orchestrator.py | 34 ++++++++++++------- net_orc/python/src/network_validator.py | 6 ++-- net_orc/python/src/ovs_control.py | 6 ++-- .../{Template => template}/device_config.json | 0 test_orc/python/src/test_orchestrator.py | 16 ++++++--- 6 files changed, 47 insertions(+), 34 deletions(-) rename resources/devices/{Template => template}/device_config.json (100%) diff --git a/framework/testrun.py b/framework/testrun.py index a818c9a45..25232f90c 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -48,7 +48,7 @@ LOGGER = logger.get_logger('test_run') CONFIG_FILE = 'conf/system.json' EXAMPLE_CONFIG_FILE = 'conf/system.json.example' -RUNTIME = 1500 +RUNTIME = 120 LOCAL_DEVICES_DIR = 'local/devices' RESOURCE_DEVICES_DIR = 'resources/devices' @@ -109,12 +109,17 @@ def start(self): [NetworkEvent.DEVICE_DISCOVERED] ) + self._net_orc.start_listener() LOGGER.info('Waiting for devices on the network...') - # Check timeout and whether testing is currently - # in progress before stopping time.sleep(RUNTIME) + if not self._test_orc.test_in_progress(): + LOGGER.info('Timed out whilst waiting for device') + else: + while self._test_orc.test_in_progress(): + time.sleep(5) + self.stop() def stop(self, kill=False): @@ -146,14 +151,6 @@ def _start_network(self): # Start the network orchestrator self._net_orc.start() - def _run_tests(self, device): - """Iterate through and start all test modules.""" - - # To Do: Make this configurable - time.sleep(60) # Let device bootup - - self._test_orc.run_test_modules(device) - def _stop_network(self, kill=False): self._net_orc.stop(kill=kill) diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 2852f1565..726eef3b9 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -38,7 +38,7 @@ CONFIG_FILE = 'conf/system.json' EXAMPLE_CONFIG_FILE = 'conf/system.json.example' RUNTIME_DIR = 'runtime' -DEVICES_DIR = 'devices' +TEST_DIR = 'test' MONITOR_PCAP = 'monitor.pcap' NET_DIR = 'runtime/network' NETWORK_MODULES_DIR = 'network/modules' @@ -93,7 +93,7 @@ def __init__(self, def start(self): """Start the network orchestrator.""" - LOGGER.info('Starting Network Orchestrator') + LOGGER.debug('Starting network orchestrator') # Get all components ready self.load_network_modules() @@ -125,6 +125,9 @@ def start_network(self): # Get network ready (via Network orchestrator) LOGGER.info('Network is ready.') + def start_listener(self): + self.listener.start_listener() + def stop(self, kill=False): """Stop the network orchestrator.""" self.stop_validator(kill=kill) @@ -172,16 +175,16 @@ def _device_discovered(self, mac_addr): f'Discovered device {mac_addr}. Waiting for device to obtain IP') device = self._get_device(mac_addr=mac_addr) os.makedirs( - os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', - ''))) - - timeout = time.time() + self._startup_timeout + os.path.join(RUNTIME_DIR, + TEST_DIR, + device.mac_addr.replace(':', ''))) - while time.time() < timeout: - if device.ip_addr is None: - time.sleep(3) - else: - break + packet_capture = sniff(iface=self._dev_intf, + timeout=self._startup_timeout, + stop_filter=self._device_has_ip) + wrpcap( + os.path.join(RUNTIME_DIR, TEST_DIR, device.mac_addr.replace(':', ''), + 'startup.pcap'), packet_capture) if device.ip_addr is None: LOGGER.info( @@ -193,6 +196,12 @@ def _device_discovered(self, mac_addr): self._start_device_monitor(device) + def _device_has_ip(self, packet): + device = self._get_device(mac_addr=packet.src) + if device is None or device.ip_addr is None: + return False + return True + def _dhcp_lease_ack(self, packet): mac_addr = packet[BOOTP].chaddr.hex(':')[0:17] device = self._get_device(mac_addr=mac_addr) @@ -206,7 +215,7 @@ def _start_device_monitor(self, device): packet_capture = sniff(iface=self._dev_intf, timeout=self._monitor_period) wrpcap( - os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', ''), + os.path.join(RUNTIME_DIR, TEST_DIR, device.mac_addr.replace(':', ''), 'monitor.pcap'), packet_capture) self.listener.call_callback(NetworkEvent.DEVICE_STABLE, device.mac_addr) @@ -341,7 +350,6 @@ def create_net(self): [NetworkEvent.DEVICE_DISCOVERED]) self.listener.register_callback(self._dhcp_lease_ack, [NetworkEvent.DHCP_LEASE_ACK]) - self.listener.start_listener() def load_network_modules(self): """Load network modules from module_config.json.""" diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py index a90096f7d..e76e49a5c 100644 --- a/net_orc/python/src/network_validator.py +++ b/net_orc/python/src/network_validator.py @@ -47,16 +47,16 @@ def __init__(self): def start(self): """Start the network validator.""" - LOGGER.info('Starting validator') + LOGGER.debug('Starting validator') self._load_devices() self._build_network_devices() self._start_network_devices() def stop(self, kill=False): """Stop the network validator.""" - LOGGER.info('Stopping validator') + LOGGER.debug('Stopping validator') self._stop_network_devices(kill) - LOGGER.info('Validator stopped') + LOGGER.debug('Validator stopped') def _build_network_devices(self): LOGGER.debug('Building network validators...') diff --git a/net_orc/python/src/ovs_control.py b/net_orc/python/src/ovs_control.py index 4c989756b..ce316dba7 100644 --- a/net_orc/python/src/ovs_control.py +++ b/net_orc/python/src/ovs_control.py @@ -77,15 +77,15 @@ def port_exists(self, bridge_name, port): def validate_baseline_network(self): # Verify the OVS setup of the virtual network - LOGGER.info('Validating baseline network') + LOGGER.debug('Validating baseline network') # Verify the device bridge dev_bridge = self.verify_bridge(DEVICE_BRIDGE, [self._dev_intf]) - LOGGER.info('Device bridge verified: ' + str(dev_bridge)) + LOGGER.debug('Device bridge verified: ' + str(dev_bridge)) # Verify the internet bridge int_bridge = self.verify_bridge(INTERNET_BRIDGE, [self._int_intf]) - LOGGER.info('Internet bridge verified: ' + str(int_bridge)) + LOGGER.debug('Internet bridge verified: ' + str(int_bridge)) return dev_bridge and int_bridge diff --git a/resources/devices/Template/device_config.json b/resources/devices/template/device_config.json similarity index 100% rename from resources/devices/Template/device_config.json rename to resources/devices/template/device_config.json diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 14b39720d..08b720150 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -37,6 +37,7 @@ def __init__(self, net_orc): self._test_modules = [] self._module_config = None self._net_orc = net_orc + self._test_in_progress = False self._path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) @@ -49,7 +50,7 @@ def __init__(self, net_orc): os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) def start(self): - LOGGER.info("Starting Test Orchestrator") + LOGGER.debug("Starting test orchestrator") self._load_test_modules() self.build_test_modules() @@ -59,14 +60,18 @@ def stop(self): def run_test_modules(self, device): """Iterates through each test module and starts the container.""" + self._test_in_progress = True LOGGER.info( f"Running test modules on device with mac addr {device.mac_addr}") for module in self._test_modules: self._run_test_module(module, device) LOGGER.info("All tests complete") - LOGGER.info(f"""Completed running test modules on device - with mac addr {device.mac_addr}""") + LOGGER.info( + f"""Completed running test \ +modules on device with mac \ +addr {device.mac_addr}""") self._generate_results(device) + self._test_in_progress = False def _generate_results(self, device): results = {} @@ -88,7 +93,7 @@ def _generate_results(self, device): results[module.name] = module_results except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: - LOGGER.error("Module Results Errror " + module.name) + LOGGER.error("Error occured whilst running module " + module.name) LOGGER.debug(results_error) out_file = os.path.join( @@ -98,6 +103,9 @@ def _generate_results(self, device): json.dump(results, f, indent=2) return results + def test_in_progress(self): + return self._test_in_progress + def _is_module_enabled(self, module, device): enabled = True if device.test_modules is not None: From 8e8e154623c4aee5286d4003ef9f08c069e113d6 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Wed, 7 Jun 2023 11:10:16 -0700 Subject: [PATCH 28/48] Connection (#40) * Initial add of connection test module with ping test * Update host user resolving * Update host user resolving for validator * add get user method to validator --- net_orc/python/src/network_orchestrator.py | 42 +++++++++++- net_orc/python/src/network_validator.py | 42 +++++++++++- test_orc/modules/conn/bin/start_test_module | 39 +++++++++++ test_orc/modules/conn/conf/module_config.json | 22 ++++++ test_orc/modules/conn/conn.Dockerfile | 11 +++ .../conn/python/src/connection_module.py | 49 +++++++++++++ test_orc/modules/conn/python/src/run.py | 68 +++++++++++++++++++ test_orc/python/src/test_orchestrator.py | 43 +++++++++++- 8 files changed, 313 insertions(+), 3 deletions(-) create mode 100644 test_orc/modules/conn/bin/start_test_module create mode 100644 test_orc/modules/conn/conf/module_config.json create mode 100644 test_orc/modules/conn/conn.Dockerfile create mode 100644 test_orc/modules/conn/python/src/connection_module.py create mode 100644 test_orc/modules/conn/python/src/run.py diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 726eef3b9..77af509f2 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -469,7 +469,7 @@ def _start_network_service(self, net_module): privileged=True, detach=True, mounts=net_module.mounts, - environment={'HOST_USER': getpass.getuser()}) + environment={'HOST_USER': self._get_host_user()}) except docker.errors.ContainerError as error: LOGGER.error('Container run error') LOGGER.error(error) @@ -477,6 +477,46 @@ def _start_network_service(self, net_module): if network != 'host': self._attach_service_to_network(net_module) + def _get_host_user(self): + user = self._get_os_user() + + # If primary method failed, try secondary + if user is None: + user = self._get_user() + + LOGGER.debug("Network orchestrator host user: " + user) + return user + + def _get_os_user(self): + user = None + try: + user = os.getlogin() + except OSError as e: + # Handle the OSError exception + LOGGER.error("An OS error occurred while retrieving the login name.") + except Exception as e: + # Catch any other unexpected exceptions + LOGGER.error("An exception occurred:", e) + return user + + def _get_user(self): + user = None + try: + user = getpass.getuser() + except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: + # Handle specific exceptions individually + if isinstance(e, KeyError): + LOGGER.error("USER environment variable not set or unavailable.") + elif isinstance(e, ImportError): + LOGGER.error("Unable to import the getpass module.") + elif isinstance(e, ModuleNotFoundError): + LOGGER.error("The getpass module was not found.") + elif isinstance(e, OSError): + LOGGER.error("An OS error occurred while retrieving the username.") + else: + LOGGER.error("An exception occurred:", e) + return user + def _stop_service_module(self, net_module, kill=False): LOGGER.debug('Stopping Service container ' + net_module.container_name) try: diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py index e76e49a5c..4a3a2a080 100644 --- a/net_orc/python/src/network_validator.py +++ b/net_orc/python/src/network_validator.py @@ -150,7 +150,7 @@ def _start_network_device(self, device): privileged=True, detach=True, mounts=device.mounts, - environment={'HOST_USER': getpass.getuser()}) + environment={'HOST_USER': self._get_host_user()}) except docker.errors.ContainerError as error: LOGGER.error('Container run error') LOGGER.error(error) @@ -167,6 +167,46 @@ def _start_network_device(self, device): LOGGER.info('Validation device ' + device.name + ' has finished') + def _get_host_user(self): + user = self._get_os_user() + + # If primary method failed, try secondary + if user is None: + user = self._get_user() + + LOGGER.debug("Network validator host user: " + user) + return user + + def _get_os_user(self): + user = None + try: + user = os.getlogin() + except OSError as e: + # Handle the OSError exception + LOGGER.error("An OS error occurred while retrieving the login name.") + except Exception as e: + # Catch any other unexpected exceptions + LOGGER.error("An exception occurred:", e) + return user + + def _get_user(self): + user = None + try: + user = getpass.getuser() + except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: + # Handle specific exceptions individually + if isinstance(e, KeyError): + LOGGER.error("USER environment variable not set or unavailable.") + elif isinstance(e, ImportError): + LOGGER.error("Unable to import the getpass module.") + elif isinstance(e, ModuleNotFoundError): + LOGGER.error("The getpass module was not found.") + elif isinstance(e, OSError): + LOGGER.error("An OS error occurred while retrieving the username.") + else: + LOGGER.error("An exception occurred:", e) + return user + def _get_device_status(self, module): container = self._get_device_container(module) if container is not None: diff --git a/test_orc/modules/conn/bin/start_test_module b/test_orc/modules/conn/bin/start_test_module new file mode 100644 index 000000000..4550849ce --- /dev/null +++ b/test_orc/modules/conn/bin/start_test_module @@ -0,0 +1,39 @@ +#!/bin/bash + +# Setup and start the connection test module + +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/conn/conf/module_config.json b/test_orc/modules/conn/conf/module_config.json new file mode 100644 index 000000000..e73846340 --- /dev/null +++ b/test_orc/modules/conn/conf/module_config.json @@ -0,0 +1,22 @@ +{ + "config": { + "meta": { + "name": "connection", + "display_name": "Connection", + "description": "Connection tests" + }, + "network": true, + "docker": { + "depends_on": "base", + "enable_container": true, + "timeout": 30 + }, + "tests":[ + { + "name": "connection.target_ping", + "description": "The device under test responds to an ICMP echo (ping) request.", + "expected_behavior": "The device under test responds to an ICMP echo (ping) request." + } + ] + } +} \ No newline at end of file diff --git a/test_orc/modules/conn/conn.Dockerfile b/test_orc/modules/conn/conn.Dockerfile new file mode 100644 index 000000000..f6a2c86b4 --- /dev/null +++ b/test_orc/modules/conn/conn.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/conn-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/conn/conf /testrun/conf + +# Load device binary files +COPY modules/conn/bin /testrun/bin + +# Copy over all python files +COPY modules/conn/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/conn/python/src/connection_module.py b/test_orc/modules/conn/python/src/connection_module.py new file mode 100644 index 000000000..086f32a04 --- /dev/null +++ b/test_orc/modules/conn/python/src/connection_module.py @@ -0,0 +1,49 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Connection test module""" +import util +import sys +from test_module import TestModule + +LOG_NAME = "test_connection" +LOGGER = None + + +class ConnectionModule(TestModule): + """Connection Test module""" + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + global LOGGER + LOGGER = self._get_logger() + + def _connection_target_ping(self): + LOGGER.info("Running connection.target_ping") + + # If the ipv4 address wasn't resolved yet, try again + if self._device_ipv4_addr is None: + self._device_ipv4_addr = self._get_device_ipv4(self) + + if self._device_ipv4_addr is None: + LOGGER.error("No device IP could be resolved") + sys.exit(1) + else: + return self._ping(self._device_ipv4_addr) + + + def _ping(self, host): + cmd = 'ping -c 1 ' + str(host) + success = util.run_command(cmd, output=False) + return success \ No newline at end of file diff --git a/test_orc/modules/conn/python/src/run.py b/test_orc/modules/conn/python/src/run.py new file mode 100644 index 000000000..5165b58c6 --- /dev/null +++ b/test_orc/modules/conn/python/src/run.py @@ -0,0 +1,68 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run NMAP test module""" +import argparse +import signal +import sys +import logger + +from connection_module import ConnectionModule + +LOGGER = logger.get_logger('connection_module') + + +class ConnectionModuleRunner: + """Run the Connection module tests.""" + + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + LOGGER.info('Starting connection module') + + self._test_module = ConnectionModule(module) + self._test_module.run_tests() + + def _handler(self, signum): + LOGGER.debug('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.debug('Exit signal received: ' + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info('Exit signal received. Stopping connection test module...') + LOGGER.info('Test module stopped') + sys.exit(1) + + +def run(): + parser = argparse.ArgumentParser( + description='Connection Module Help', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + '-m', + '--module', + help='Define the module name to be used to create the log file') + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + ConnectionModuleRunner(args.module.strip()) + + +if __name__ == '__main__': + run() diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index 08b720150..e122221f5 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -153,7 +153,7 @@ def _run_test_module(self, module, device): read_only=True), ], environment={ - "HOST_USER": getpass.getuser(), + "HOST_USER": self._get_host_user(), "DEVICE_MAC": device.mac_addr, "DEVICE_TEST_MODULES": device.test_modules, "IPV4_SUBNET": self._net_orc.network_config.ipv4_network, @@ -206,6 +206,47 @@ def _get_module_container(self, module): LOGGER.error(error) return container + def _get_host_user(self): + user = self._get_os_user() + + # If primary method failed, try secondary + if user is None: + user = self._get_user() + + LOGGER.debug("Test orchestrator host user: " + user) + return user + + def _get_os_user(self): + user = None + try: + user = os.getlogin() + except OSError as e: + # Handle the OSError exception + LOGGER.error("An OS error occurred while retrieving the login name.") + except Exception as e: + # Catch any other unexpected exceptions + LOGGER.error("An exception occurred:", e) + return user + + def _get_user(self): + user = None + try: + user = getpass.getuser() + except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: + # Handle specific exceptions individually + if isinstance(e, KeyError): + LOGGER.error("USER environment variable not set or unavailable.") + elif isinstance(e, ImportError): + LOGGER.error("Unable to import the getpass module.") + elif isinstance(e, ModuleNotFoundError): + LOGGER.error("The getpass module was not found.") + elif isinstance(e, OSError): + LOGGER.error("An OS error occurred while retrieving the username.") + else: + LOGGER.error("An exception occurred:", e) + return user + + def _load_test_modules(self): """Load network modules from module_config.json.""" LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR) From 6ff220b59bceb3a843ebae7ab4942116b0ed601d Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 8 Jun 2023 01:17:28 -0700 Subject: [PATCH 29/48] Conn mac oui (#42) * Initial add of connection test module with ping test * Update host user resolving * Update host user resolving for validator * add get user method to validator * Add mac_oui test Add option to return test result and details of test for reporting --- .../modules/base/python/src/test_module.py | 7 +- test_orc/modules/conn/conf/module_config.json | 5 + test_orc/modules/conn/conn.Dockerfile | 8 +- .../conn/python/src/connection_module.py | 21 ++++ test_orc/python/src/runner.py | 110 +++++++++--------- 5 files changed, 94 insertions(+), 57 deletions(-) diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py index 57795a182..f29668bb2 100644 --- a/test_orc/modules/base/python/src/test_module.py +++ b/test_orc/modules/base/python/src/test_module.py @@ -95,7 +95,12 @@ def run_tests(self): else: LOGGER.info('Test ' + test['name'] + ' disabled. Skipping') if result is not None: - test['result'] = 'compliant' if result else 'non-compliant' + success = None + if isinstance(result,bool): + test['result'] = 'compliant' if result else 'non-compliant' + else: + test['result'] = 'compliant' if result[0] else 'non-compliant' + test['result_details'] = result[1] else: test['result'] = 'skipped' test['end'] = datetime.now().isoformat() diff --git a/test_orc/modules/conn/conf/module_config.json b/test_orc/modules/conn/conf/module_config.json index e73846340..505cc9e78 100644 --- a/test_orc/modules/conn/conf/module_config.json +++ b/test_orc/modules/conn/conf/module_config.json @@ -16,6 +16,11 @@ "name": "connection.target_ping", "description": "The device under test responds to an ICMP echo (ping) request.", "expected_behavior": "The device under test responds to an ICMP echo (ping) request." + }, + { + "name": "connection.mac_oui", + "description": "The device under test hs a MAC address prefix that is registered against a known manufacturer.", + "expected_behavior": "The MAC address prefix is registered in the IEEE Organizationally Unique Identifier database." } ] } diff --git a/test_orc/modules/conn/conn.Dockerfile b/test_orc/modules/conn/conn.Dockerfile index f6a2c86b4..10130933d 100644 --- a/test_orc/modules/conn/conn.Dockerfile +++ b/test_orc/modules/conn/conn.Dockerfile @@ -1,6 +1,12 @@ # Image name: test-run/conn-test FROM test-run/base-test:latest +# Install all necessary packages +RUN apt-get install -y wget + +#Update the oui.txt file from ieee +RUN wget http://standards-oui.ieee.org/oui.txt -P /usr/local/etc/ + # Copy over all configuration files COPY modules/conn/conf /testrun/conf @@ -8,4 +14,4 @@ COPY modules/conn/conf /testrun/conf COPY modules/conn/bin /testrun/bin # Copy over all python files -COPY modules/conn/python /testrun/python \ No newline at end of file +COPY modules/conn/python /testrun/python diff --git a/test_orc/modules/conn/python/src/connection_module.py b/test_orc/modules/conn/python/src/connection_module.py index 086f32a04..28d41638c 100644 --- a/test_orc/modules/conn/python/src/connection_module.py +++ b/test_orc/modules/conn/python/src/connection_module.py @@ -19,6 +19,7 @@ LOG_NAME = "test_connection" LOGGER = None +OUI_FILE="/usr/local/etc/oui.txt" class ConnectionModule(TestModule): @@ -42,6 +43,26 @@ def _connection_target_ping(self): else: return self._ping(self._device_ipv4_addr) + def _connection_mac_oui(self): + LOGGER.info("Running connection.mac_oui") + manufacturer = self._get_oui_manufacturer(self._device_mac) + if manufacturer is not None: + LOGGER.info("OUI Manufacturer found: " + manufacturer) + return True, "OUI Manufacturer found: " + manufacturer + else: + LOGGER.info("No OUI Manufacturer found for: " + self._device_mac) + return False, "No OUI Manufacturer found for: " + self._device_mac + + def _get_oui_manufacturer(self,mac_address): + # Do some quick fixes on the format of the mac_address + # to match the oui file pattern + mac_address = mac_address.replace(":","-").upper() + with open(OUI_FILE, "r") as file: + for line in file: + if mac_address.startswith(line[:8]): + start = line.index("(hex)") + len("(hex)") + return line[start:].strip() # Extract the company name + return None def _ping(self, host): cmd = 'ping -c 1 ' + str(host) diff --git a/test_orc/python/src/runner.py b/test_orc/python/src/runner.py index 363f800af..ed3b9059a 100644 --- a/test_orc/python/src/runner.py +++ b/test_orc/python/src/runner.py @@ -1,55 +1,55 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Provides high level management of the test orchestrator.""" -import time -import logger - -LOGGER = logger.get_logger('runner') - - -class Runner: - """Holds the state of the testing for one device.""" - - def __init__(self, test_orc, device): - self._test_orc = test_orc - self._device = device - - def run(self): - self._run_test_modules() - - def _run_test_modules(self): - """Iterates through each test module and starts the container.""" - LOGGER.info('Running test modules...') - for module in self._test_modules: - self.run_test_module(module) - LOGGER.info('All tests complete') - - def run_test_module(self, module): - """Start the test container and extract the results.""" - - if module is None or not module.enable_container: - return - - self._test_orc.start_test_module(module) - - # Determine the module timeout time - test_module_timeout = time.time() + module.timeout - status = self._test_orc.get_module_status(module) - - while time.time() < test_module_timeout and status == 'running': - time.sleep(1) - status = self._test_orc.get_module_status(module) - - LOGGER.info(f'Test module {module.display_name} has finished') +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides high level management of the test orchestrator.""" +import time +import logger + +LOGGER = logger.get_logger('runner') + + +class Runner: + """Holds the state of the testing for one device.""" + + def __init__(self, test_orc, device): + self._test_orc = test_orc + self._device = device + + def run(self): + self._run_test_modules() + + def _run_test_modules(self): + """Iterates through each test module and starts the container.""" + LOGGER.info('Running test modules...') + for module in self._test_modules: + self.run_test_module(module) + LOGGER.info('All tests complete') + + def run_test_module(self, module): + """Start the test container and extract the results.""" + + if module is None or not module.enable_container: + return + + self._test_orc.start_test_module(module) + + # Determine the module timeout time + test_module_timeout = time.time() + module.timeout + status = self._test_orc.get_module_status(module) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._test_orc.get_module_status(module) + + LOGGER.info(f'Test module {module.display_name} has finished') From 4ca8f442cf5b73b561456a42576c02bc69ee57a7 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 8 Jun 2023 01:51:05 -0700 Subject: [PATCH 30/48] Con mac address (#43) * Initial add of connection test module with ping test * Update host user resolving * Update host user resolving for validator * add get user method to validator * Add mac_oui test Add option to return test result and details of test for reporting * Add connection.mac_address test --- test_orc/modules/conn/conf/module_config.json | 12 +++++++++++- .../modules/conn/python/src/connection_module.py | 12 +++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/test_orc/modules/conn/conf/module_config.json b/test_orc/modules/conn/conf/module_config.json index 505cc9e78..25145980e 100644 --- a/test_orc/modules/conn/conf/module_config.json +++ b/test_orc/modules/conn/conf/module_config.json @@ -11,7 +11,17 @@ "enable_container": true, "timeout": 30 }, - "tests":[ + "tests": [ + { + "name": "connection.mac_address", + "description": "Check and note device physical address.", + "expected_behavior": "N/A" + }, + { + "name": "connection.mac_oui", + "description": "The device under test hs a MAC address prefix that is registered against a known manufacturer.", + "expected_behavior": "The MAC address prefix is registered in the IEEE Organizationally Unique Identifier database." + }, { "name": "connection.target_ping", "description": "The device under test responds to an ICMP echo (ping) request.", diff --git a/test_orc/modules/conn/python/src/connection_module.py b/test_orc/modules/conn/python/src/connection_module.py index 28d41638c..48d134584 100644 --- a/test_orc/modules/conn/python/src/connection_module.py +++ b/test_orc/modules/conn/python/src/connection_module.py @@ -43,6 +43,15 @@ def _connection_target_ping(self): else: return self._ping(self._device_ipv4_addr) + def _connection_mac_address(self): + LOGGER.info("Running connection.mac_address") + if self._device_mac is not None: + LOGGER.info("MAC address found: " + self._device_mac) + return True, "MAC address found: " + self._device_mac + else: + LOGGER.info("No MAC address found: " + self._device_mac) + return False, "No MAC address found." + def _connection_mac_oui(self): LOGGER.info("Running connection.mac_oui") manufacturer = self._get_oui_manufacturer(self._device_mac) @@ -67,4 +76,5 @@ def _get_oui_manufacturer(self,mac_address): def _ping(self, host): cmd = 'ping -c 1 ' + str(host) success = util.run_command(cmd, output=False) - return success \ No newline at end of file + return success + \ No newline at end of file From ff04f4393574cfa148c790d5a01c7442a4651ea3 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 8 Jun 2023 13:15:26 -0700 Subject: [PATCH 31/48] Dns (#44) * Add MDNS test * Update existing mdns logging to be more consistent with other tests * Add startup and monitor captures --- test_orc/modules/dns/conf/module_config.json | 4 ++ test_orc/modules/dns/python/src/dns_module.py | 37 ++++++++++++++++--- test_orc/python/src/test_orchestrator.py | 17 +++++++++ 3 files changed, 52 insertions(+), 6 deletions(-) diff --git a/test_orc/modules/dns/conf/module_config.json b/test_orc/modules/dns/conf/module_config.json index b8ff36c97..177537b69 100644 --- a/test_orc/modules/dns/conf/module_config.json +++ b/test_orc/modules/dns/conf/module_config.json @@ -21,6 +21,10 @@ "name": "dns.network.from_dhcp", "description": "Verify the device allows for a DNS server to be entered automatically", "expected_behavior": "The device sends DNS requests to the DNS server provided by the DHCP server" + }, + { + "name": "dns.mdns", + "description": "If the device has MDNS (or any kind of IP multicast), can it be disabled" } ] } diff --git a/test_orc/modules/dns/python/src/dns_module.py b/test_orc/modules/dns/python/src/dns_module.py index cd7261da0..8d32d4dfb 100644 --- a/test_orc/modules/dns/python/src/dns_module.py +++ b/test_orc/modules/dns/python/src/dns_module.py @@ -17,7 +17,9 @@ from test_module import TestModule LOG_NAME = 'test_dns' -CAPTURE_FILE = '/runtime/network/dns.pcap' +DNS_SERVER_CAPTURE_FILE = '/runtime/network/dns.pcap' +STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap' +MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap' LOGGER = None @@ -31,14 +33,24 @@ def __init__(self, module): LOGGER = self._get_logger() def _check_dns_traffic(self, tcpdump_filter): - to_dns = self._exec_tcpdump(tcpdump_filter) - num_query_dns = len(to_dns) + dns_server_queries = self._exec_tcpdump(tcpdump_filter,DNS_SERVER_CAPTURE_FILE) + LOGGER.info('DNS Server queries found: ' + str(len(dns_server_queries))) + + dns_startup_queries = self._exec_tcpdump(tcpdump_filter,STARTUP_CAPTURE_FILE) + LOGGER.info('Startup DNS queries found: ' + str(len(dns_startup_queries))) + + dns_monitor_queries = self._exec_tcpdump(tcpdump_filter,MONITOR_CAPTURE_FILE) + LOGGER.info('Monitor DNS queries found: ' + str(len(dns_monitor_queries))) + + num_query_dns = len(dns_server_queries) + len(dns_startup_queries) + len(dns_monitor_queries) + LOGGER.info('DNS queries found: ' + str(num_query_dns)) - dns_traffic_detected = len(to_dns) > 0 + dns_traffic_detected = num_query_dns > 0 LOGGER.info('DNS traffic detected: ' + str(dns_traffic_detected)) return dns_traffic_detected def _dns_network_from_dhcp(self): + LOGGER.info("Running dns.network.from_dhcp") LOGGER.info('Checking DNS traffic for configured DHCP DNS server: ' + self._dns_server) @@ -53,6 +65,7 @@ def _dns_network_from_dhcp(self): return result def _dns_network_from_device(self): + LOGGER.info("Running dns.network.from_device") LOGGER.info('Checking DNS traffic from device: ' + self._device_mac) # Check if the device DNS traffic is to appropriate server @@ -63,7 +76,19 @@ def _dns_network_from_device(self): LOGGER.info('DNS traffic detected from device: ' + str(result)) return result - def _exec_tcpdump(self, tcpdump_filter): + def _dns_mdns(self): + LOGGER.info("Running dns.mdns") + + # Check if the device sends any MDNS traffic + tcpdump_filter = f'udp port 5353 and ether src {self._device_mac}' + + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + + LOGGER.info('MDNS traffic detected from device: ' + str(result)) + return not result + + + def _exec_tcpdump(self, tcpdump_filter, capture_file): """ Args tcpdump_filter: Filter to pass onto tcpdump file @@ -71,7 +96,7 @@ def _exec_tcpdump(self, tcpdump_filter): Returns List of packets matching the filter """ - command = f'tcpdump -tttt -n -r {CAPTURE_FILE} {tcpdump_filter}' + command = f'tcpdump -tttt -n -r {capture_file} {tcpdump_filter}' LOGGER.debug('tcpdump command: ' + command) diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index e122221f5..b8b7a3af2 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -133,6 +133,15 @@ def _run_test_module(self, module, device): network_runtime_dir = os.path.join(self._root_path, "runtime/network") os.makedirs(container_runtime_dir) + device_startup_capture = os.path.join( + self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + + "/startup.pcap") + + device_monitor_capture = os.path.join( + self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + + "/monitor.pcap") + + client = docker.from_env() module.container = client.containers.run( @@ -151,6 +160,14 @@ def _run_test_module(self, module, device): source=network_runtime_dir, type="bind", read_only=True), + Mount(target="/runtime/device/startup.pcap", + source=device_startup_capture, + type="bind", + read_only=True), + Mount(target="/runtime/device/monitor.pcap", + source=device_monitor_capture, + type="bind", + read_only=True), ], environment={ "HOST_USER": self._get_host_user(), From 752f7017f488c3f6ac92a38fb39c89b168996dc9 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Fri, 9 Jun 2023 02:10:26 -0700 Subject: [PATCH 32/48] File permissions (#45) * Fix validator file permissions * Fix test module permissions * Fix device capture file permissions * Fix device results permissions --- .../devices/faux-dev/bin/start_network_service | 8 ++++---- net_orc/python/src/network_orchestrator.py | 13 +++++++++---- net_orc/python/src/network_validator.py | 6 ++++++ test_orc/modules/base/bin/start_module | 6 ++++++ test_orc/modules/dns/python/src/run.py | 8 ++++---- test_orc/modules/nmap/python/src/run.py | 13 ++++++++++--- test_orc/python/src/test_orchestrator.py | 16 ++++++++++++---- 7 files changed, 51 insertions(+), 19 deletions(-) diff --git a/net_orc/network/devices/faux-dev/bin/start_network_service b/net_orc/network/devices/faux-dev/bin/start_network_service index b727d2091..13e2f6baf 100644 --- a/net_orc/network/devices/faux-dev/bin/start_network_service +++ b/net_orc/network/devices/faux-dev/bin/start_network_service @@ -22,12 +22,12 @@ else fi #Create and set permissions on the output files -LOG_FILE=/runtime/validation/$MODULE_NAME.log -RESULT_FILE=/runtime/validation/result.json +OUTPUT_DIR=/runtime/validation/ +LOG_FILE=$OUTPUT_DIR/$MODULE_NAME.log +RESULT_FILE=$OUTPUT_DIR/result.json touch $LOG_FILE touch $RESULT_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE -chown $HOST_USER:$HOST_USER $RESULT_FILE +chown -R $HOST_USER:$HOST_USER $OUTPUT_DIR # Start dhclient $BIN_DIR/start_dhcp_client $INTF diff --git a/net_orc/python/src/network_orchestrator.py b/net_orc/python/src/network_orchestrator.py index 77af509f2..f53b17d15 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/net_orc/python/src/network_orchestrator.py @@ -94,6 +94,9 @@ def start(self): """Start the network orchestrator.""" LOGGER.debug('Starting network orchestrator') + + self._host_user = self._get_host_user() + # Get all components ready self.load_network_modules() @@ -174,10 +177,12 @@ def _device_discovered(self, mac_addr): LOGGER.debug( f'Discovered device {mac_addr}. Waiting for device to obtain IP') device = self._get_device(mac_addr=mac_addr) - os.makedirs( - os.path.join(RUNTIME_DIR, + + device_runtime_dir = os.path.join(RUNTIME_DIR, TEST_DIR, - device.mac_addr.replace(':', ''))) + device.mac_addr.replace(':', '')) + os.makedirs(device_runtime_dir) + util.run_command(f'chown -R {self._host_user}:{self._host_user} {device_runtime_dir}') packet_capture = sniff(iface=self._dev_intf, timeout=self._startup_timeout, @@ -469,7 +474,7 @@ def _start_network_service(self, net_module): privileged=True, detach=True, mounts=net_module.mounts, - environment={'HOST_USER': self._get_host_user()}) + environment={'HOST_USER': self._host_user}) except docker.errors.ContainerError as error: LOGGER.error('Container run error') LOGGER.error(error) diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py index 4a3a2a080..832a154e3 100644 --- a/net_orc/python/src/network_validator.py +++ b/net_orc/python/src/network_validator.py @@ -48,6 +48,12 @@ def __init__(self): def start(self): """Start the network validator.""" LOGGER.debug('Starting validator') + + # Setup the output directory + host_user = self._get_host_user() + os.makedirs(OUTPUT_DIR, exist_ok=True) + util.run_command(f'chown -R {host_user}:{host_user} {OUTPUT_DIR}') + self._load_devices() self._build_network_devices() self._start_network_devices() diff --git a/test_orc/modules/base/bin/start_module b/test_orc/modules/base/bin/start_module index 6adc53f58..c179668ba 100644 --- a/test_orc/modules/base/bin/start_module +++ b/test_orc/modules/base/bin/start_module @@ -1,5 +1,8 @@ #!/bin/bash +# Define the local mount point to store local files to +OUTPUT_DIR="/runtime/output" + # Directory where all binaries will be loaded BIN_DIR="/testrun/bin" @@ -11,6 +14,9 @@ IFACE=veth0 # HOST_USER mapped in via docker container environemnt variables useradd $HOST_USER +# Set permissions on the output files +chown -R $HOST_USER:$HOST_USER $OUTPUT_DIR + # Enable IPv6 for all containers sysctl net.ipv6.conf.all.disable_ipv6=0 sysctl -p diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py index 2b924bbaf..4803f63cd 100644 --- a/test_orc/modules/dns/python/src/run.py +++ b/test_orc/modules/dns/python/src/run.py @@ -20,7 +20,7 @@ from dns_module import DNSModule -LOG_NAME = "dns_module" +LOG_NAME = "dns_runner" LOGGER = logger.get_logger(LOG_NAME) RUNTIME = 1500 @@ -35,12 +35,12 @@ def __init__(self, module): signal.signal(signal.SIGQUIT, self._handler) self.add_logger(module) - LOGGER.info("Starting DNS Test Module") + LOGGER.info("Starting DNS test module") self._test_module = DNSModule(module) self._test_module.run_tests() - LOGGER.info("DNS Test Module Finished") + LOGGER.info("DNS test module finished") def add_logger(self, module): global LOGGER @@ -57,7 +57,7 @@ def _handler(self, signum): def run(): parser = argparse.ArgumentParser( - description="Test Module DNS", + description="DNS Module Help", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( diff --git a/test_orc/modules/nmap/python/src/run.py b/test_orc/modules/nmap/python/src/run.py index ecb6cd028..5e33451d9 100644 --- a/test_orc/modules/nmap/python/src/run.py +++ b/test_orc/modules/nmap/python/src/run.py @@ -20,8 +20,8 @@ from nmap_module import NmapModule -LOGGER = logger.get_logger('test_module') - +LOG_NAME = "nmap_runner" +LOGGER = logger.get_logger(LOG_NAME) class NmapModuleRunner: """Run the NMAP module tests.""" @@ -32,12 +32,19 @@ def __init__(self, module): signal.signal(signal.SIGTERM, self._handler) signal.signal(signal.SIGABRT, self._handler) signal.signal(signal.SIGQUIT, self._handler) + self.add_logger(module) - LOGGER.info('Starting nmap Module') + LOGGER.info('Starting nmap module') self._test_module = NmapModule(module) self._test_module.run_tests() + LOGGER.info("nmap test module finished") + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + def _handler(self, signum): LOGGER.debug('SigtermEnum: ' + str(signal.SIGTERM)) LOGGER.debug('Exit signal received: ' + str(signum)) diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py index b8b7a3af2..9f0f100ab 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/test_orc/python/src/test_orchestrator.py @@ -22,10 +22,11 @@ from docker.types import Mount import logger from module import TestModule +import util LOG_NAME = "test_orc" LOGGER = logger.get_logger("test_orc") -RUNTIME_DIR = "runtime" +RUNTIME_DIR = "runtime/test" TEST_MODULES_DIR = "modules" MODULE_CONFIG = "conf/module_config.json" @@ -47,10 +48,15 @@ def __init__(self, net_orc): shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), ignore_errors=True) - os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) def start(self): LOGGER.debug("Starting test orchestrator") + + # Setup the output directory + self._host_user = self._get_host_user() + os.makedirs(RUNTIME_DIR, exist_ok=True) + util.run_command(f'chown -R {self._host_user}:{self._host_user} {RUNTIME_DIR}') + self._load_test_modules() self.build_test_modules() @@ -101,6 +107,7 @@ def _generate_results(self, device): "runtime/test/" + device.mac_addr.replace(":", "") + "/results.json") with open(out_file, "w", encoding="utf-8") as f: json.dump(results, f, indent=2) + util.run_command(f'chown -R {self._host_user}:{self._host_user} {out_file}') return results def test_in_progress(self): @@ -136,11 +143,12 @@ def _run_test_module(self, module, device): device_startup_capture = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/startup.pcap") + util.run_command(f'chown -R {self._host_user}:{self._host_user} {device_startup_capture}') device_monitor_capture = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/monitor.pcap") - + util.run_command(f'chown -R {self._host_user}:{self._host_user} {device_monitor_capture}') client = docker.from_env() @@ -170,7 +178,7 @@ def _run_test_module(self, module, device): read_only=True), ], environment={ - "HOST_USER": self._get_host_user(), + "HOST_USER": self._host_user, "DEVICE_MAC": device.mac_addr, "DEVICE_TEST_MODULES": device.test_modules, "IPV4_SUBNET": self._net_orc.network_config.ipv4_network, From f6e4e93c5263fc66d578ff11dbb197ca278fa72d Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Mon, 12 Jun 2023 05:20:42 -0700 Subject: [PATCH 33/48] Add connection single ip test (#47) --- test_orc/modules/conn/conf/module_config.json | 10 +-- test_orc/modules/conn/conn.Dockerfile | 6 ++ test_orc/modules/conn/python/requirements.txt | 1 + .../conn/python/src/connection_module.py | 63 +++++++++++++++---- 4 files changed, 62 insertions(+), 18 deletions(-) create mode 100644 test_orc/modules/conn/python/requirements.txt diff --git a/test_orc/modules/conn/conf/module_config.json b/test_orc/modules/conn/conf/module_config.json index 25145980e..0f599c5d3 100644 --- a/test_orc/modules/conn/conf/module_config.json +++ b/test_orc/modules/conn/conf/module_config.json @@ -22,15 +22,15 @@ "description": "The device under test hs a MAC address prefix that is registered against a known manufacturer.", "expected_behavior": "The MAC address prefix is registered in the IEEE Organizationally Unique Identifier database." }, + { + "name": "connection.single_ip", + "description": "The network switch port connected to the device reports only one IP address for the device under test.", + "expected_behavior": "The device under test does not behave as a network switch and only requets one IP address. This test is to avoid that devices implement network switches that allow connecting strings of daisy chained devices to one single network port, as this would not make 802.1x port based authentication possible." + }, { "name": "connection.target_ping", "description": "The device under test responds to an ICMP echo (ping) request.", "expected_behavior": "The device under test responds to an ICMP echo (ping) request." - }, - { - "name": "connection.mac_oui", - "description": "The device under test hs a MAC address prefix that is registered against a known manufacturer.", - "expected_behavior": "The MAC address prefix is registered in the IEEE Organizationally Unique Identifier database." } ] } diff --git a/test_orc/modules/conn/conn.Dockerfile b/test_orc/modules/conn/conn.Dockerfile index 10130933d..cf25d0f02 100644 --- a/test_orc/modules/conn/conn.Dockerfile +++ b/test_orc/modules/conn/conn.Dockerfile @@ -7,6 +7,12 @@ RUN apt-get install -y wget #Update the oui.txt file from ieee RUN wget http://standards-oui.ieee.org/oui.txt -P /usr/local/etc/ +#Load the requirements file +COPY modules/conn/python/requirements.txt /testrun/python + +#Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + # Copy over all configuration files COPY modules/conn/conf /testrun/conf diff --git a/test_orc/modules/conn/python/requirements.txt b/test_orc/modules/conn/python/requirements.txt new file mode 100644 index 000000000..93b351f44 --- /dev/null +++ b/test_orc/modules/conn/python/requirements.txt @@ -0,0 +1 @@ +scapy \ No newline at end of file diff --git a/test_orc/modules/conn/python/src/connection_module.py b/test_orc/modules/conn/python/src/connection_module.py index 48d134584..196c335d8 100644 --- a/test_orc/modules/conn/python/src/connection_module.py +++ b/test_orc/modules/conn/python/src/connection_module.py @@ -15,11 +15,15 @@ """Connection test module""" import util import sys +from scapy.all import * from test_module import TestModule LOG_NAME = "test_connection" LOGGER = None OUI_FILE="/usr/local/etc/oui.txt" +DHCP_SERVER_CAPTURE_FILE = '/runtime/network/dhcp-1.pcap' +STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap' +MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap' class ConnectionModule(TestModule): @@ -30,19 +34,6 @@ def __init__(self, module): global LOGGER LOGGER = self._get_logger() - def _connection_target_ping(self): - LOGGER.info("Running connection.target_ping") - - # If the ipv4 address wasn't resolved yet, try again - if self._device_ipv4_addr is None: - self._device_ipv4_addr = self._get_device_ipv4(self) - - if self._device_ipv4_addr is None: - LOGGER.error("No device IP could be resolved") - sys.exit(1) - else: - return self._ping(self._device_ipv4_addr) - def _connection_mac_address(self): LOGGER.info("Running connection.mac_address") if self._device_mac is not None: @@ -62,6 +53,52 @@ def _connection_mac_oui(self): LOGGER.info("No OUI Manufacturer found for: " + self._device_mac) return False, "No OUI Manufacturer found for: " + self._device_mac + def _connection_single_ip(self): + LOGGER.info("Running connection.single_ip") + + result = None + if self._device_mac is None: + LOGGER.info("No MAC address found: ") + return result, "No MAC address found." + + # Read all the pcap files containing DHCP packet information + packets = rdpcap(DHCP_SERVER_CAPTURE_FILE) + packets.append(rdpcap(STARTUP_CAPTURE_FILE)) + packets.append(rdpcap(MONITOR_CAPTURE_FILE)) + + # Extract MAC addresses from DHCP packets + mac_addresses = set() + LOGGER.info("Inspecting: " + str(len(packets)) + " packets") + for packet in packets: + # Option[1] = message-type, option 3 = DHCPREQUEST + if DHCP in packet and packet[DHCP].options[0][1] == 3: + mac_address = packet[Ether].src + mac_addresses.add(mac_address.upper()) + + # Check if the device mac address is in the list of DHCPREQUESTs + result = self._device_mac.upper() in mac_addresses + LOGGER.info("DHCPREQUEST detected from device: " + str(result)) + + # Check the unique MAC addresses to see if they match the device + for mac_address in mac_addresses: + LOGGER.info("DHCPREQUEST from MAC address: " + mac_address) + result &= self._device_mac.upper() == mac_address + return result + + + def _connection_target_ping(self): + LOGGER.info("Running connection.target_ping") + + # If the ipv4 address wasn't resolved yet, try again + if self._device_ipv4_addr is None: + self._device_ipv4_addr = self._get_device_ipv4(self) + + if self._device_ipv4_addr is None: + LOGGER.error("No device IP could be resolved") + sys.exit(1) + else: + return self._ping(self._device_ipv4_addr) + def _get_oui_manufacturer(self,mac_address): # Do some quick fixes on the format of the mac_address # to match the oui file pattern From bca0db853ee1f83767663cb586b3e60ba91c6dcd Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 15 Jun 2023 08:35:32 -0700 Subject: [PATCH 34/48] Nmap results (#49) * Update processing of nmap results to use xml output and json conversions for stability * Update matching with regex to prevent wrong service matches and duplicate processing for partial matches * Update max port scan range --- test_orc/modules/nmap/nmap.Dockerfile | 6 + test_orc/modules/nmap/python/requirements.txt | 1 + .../modules/nmap/python/src/nmap_module.py | 126 +++++++++++------- 3 files changed, 82 insertions(+), 51 deletions(-) create mode 100644 test_orc/modules/nmap/python/requirements.txt diff --git a/test_orc/modules/nmap/nmap.Dockerfile b/test_orc/modules/nmap/nmap.Dockerfile index 12f23dde7..3a8728d9f 100644 --- a/test_orc/modules/nmap/nmap.Dockerfile +++ b/test_orc/modules/nmap/nmap.Dockerfile @@ -1,6 +1,12 @@ # Image name: test-run/baseline-test FROM test-run/base-test:latest +#Load the requirements file +COPY modules/nmap/python/requirements.txt /testrun/python + +#Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + # Copy over all configuration files COPY modules/nmap/conf /testrun/conf diff --git a/test_orc/modules/nmap/python/requirements.txt b/test_orc/modules/nmap/python/requirements.txt new file mode 100644 index 000000000..42669b12c --- /dev/null +++ b/test_orc/modules/nmap/python/requirements.txt @@ -0,0 +1 @@ +xmltodict \ No newline at end of file diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py index 6b5477489..ea013f413 100644 --- a/test_orc/modules/nmap/python/src/nmap_module.py +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -17,6 +17,8 @@ import util import json import threading +import xmltodict +import re from test_module import TestModule LOG_NAME = "test_nmap" @@ -35,6 +37,7 @@ def __init__(self, module): global LOGGER LOGGER = self._get_logger() + def _security_nmap_ports(self, config): LOGGER.info("Running security.nmap.ports test") @@ -88,7 +91,7 @@ def _process_port_results(self, tests): self._check_unknown_ports(tests=tests,scan_results=scan_results) for test in tests: - LOGGER.info("Checking results for test: " + str(test)) + LOGGER.info("Checking scan results for test: " + str(test)) self._check_scan_results(test_config=tests[test],scan_results=scan_results) def _check_unknown_ports(self,tests,scan_results): @@ -122,8 +125,16 @@ def _add_unknown_ports(self,tests,unallowed_port): port_style = 'tcp_ports' elif unallowed_port['tcp_udp'] == 'udp': port_style = 'udp_ports' + + LOGGER.info("Unknown Port Service: " + unallowed_port['service']) for test in tests: - if unallowed_port['service'] in test: + LOGGER.debug("Checking for known service: " + test) + # Create a regular expression pattern to match the variable at the + # end of the string + port_service = r"\b" + re.escape(unallowed_port['service']) + r"\b$" + service_match = re.search(port_service, test) + if service_match: + LOGGER.info("Service Matched: " + test) known_service=True for test_port in tests[test][port_style]: if "version" in tests[test][port_style][test_port]: @@ -134,8 +145,8 @@ def _add_unknown_ports(self,tests,unallowed_port): if tests[test][port_style][test_port]['allowed']: result['allowed'] = True break - tests[test][port_style][unallowed_port['port']]=result + break if not known_service: service_name = "security.services.unknown." + str(unallowed_port['port']) @@ -195,14 +206,19 @@ def _check_unallowed_port(self,unallowed_ports,tests): for port in unallowed_ports: LOGGER.info('Checking unallowed port: ' + port['port']) LOGGER.info('Looking for service: ' + port['service']) - LOGGER.info('Unallowed Port Config: ' + str(port)) + LOGGER.debug('Unallowed Port Config: ' + str(port)) if port['tcp_udp'] == 'tcp': port_style = 'tcp_ports' elif port['tcp_udp'] == 'udp': port_style = 'udp_ports' for test in tests: - LOGGER.info('Checking test: ' + str(test)) - if port['service'] in test: + LOGGER.debug('Checking test: ' + str(test)) + # Create a regular expression pattern to match the variable at the + # end of the string + port_service = r"\b" + re.escape(port['service']) + r"\b$" + service_match = re.search(port_service, test) + if service_match: + LOGGER.info("Service Matched: " + test) service_config = tests[test] service = port['service'] for service_port in service_config[port_style]: @@ -247,7 +263,7 @@ def _check_version(self,service,version_detected,version_expected): def _scan_scripts(self, tests): scan_results = {} - LOGGER.info("Checing for scan scripts") + LOGGER.info("Checking for scan scripts") for test in tests: test_config = tests[test] if "tcp_ports" in test_config: @@ -256,14 +272,15 @@ def _scan_scripts(self, tests): if "service_scan" in port_config: LOGGER.info("Service Scan Detected for: " + str(port)) svc = port_config["service_scan"] - scan_results.update(self._scan_tcp_with_script(svc["script"])) + result = self._scan_tcp_with_script(svc["script"]) + scan_results.update(result) if "udp_ports" in test_config: for port in test_config["udp_ports"]: if "service_scan" in port: LOGGER.info("Service Scan Detected for: " + str(port)) svc = port["service_scan"] - self._scan_udp_with_script(svc["script"], port) - scan_results.update(self._scan_tcp_with_script(svc["script"])) + result = self._scan_udp_with_script(svc["script"], port) + scan_results.update(result) self._script_scan_results = scan_results def _scan_tcp_with_script(self, script_name, ports=None): @@ -275,12 +292,12 @@ def _scan_tcp_with_script(self, script_name, ports=None): else: port_options += " -p" + ports + " " results_file = f"/runtime/output/{self._module_name}-script_name.log" - nmap_options = scan_options + port_options + " -oG " + results_file + nmap_options = scan_options + port_options + " " + results_file + " -oX -" nmap_results = util.run_command("nmap " + nmap_options + " " + self._device_ipv4_addr)[0] LOGGER.info("Nmap TCP script scan complete") - LOGGER.info("nmap script results\n" + str(nmap_results)) - return self._process_nmap_results(nmap_results=nmap_results) + nmap_results_json = self._nmap_results_to_json(nmap_results) + return self._process_nmap_json_results(nmap_results_json=nmap_results_json) def _scan_udp_with_script(self, script_name, ports=None): LOGGER.info("Running UDP nmap scan with script " + script_name) @@ -290,22 +307,24 @@ def _scan_udp_with_script(self, script_name, ports=None): port_options += " -p- " else: port_options += " -p" + ports + " " - nmap_options = scan_options + port_options + nmap_options = scan_options + port_options + " -oX - " nmap_results = util.run_command("nmap " + nmap_options + self._device_ipv4_addr)[0] LOGGER.info("Nmap UDP script scan complete") - return self._process_nmap_results(nmap_results=nmap_results) + nmap_results_json = self._nmap_results_to_json(nmap_results) + return self._process_nmap_json_results(nmap_results_json=nmap_results_json) def _scan_tcp_ports(self, tests): - max_port = 1000 + max_port = 65535 LOGGER.info("Running nmap TCP port scan") nmap_results = util.run_command( f"""nmap --open -sT -sV -Pn -v -p 1-{max_port} - --version-intensity 7 -T4 {self._device_ipv4_addr}""")[0] + --version-intensity 7 -T4 -oX - {self._device_ipv4_addr}""")[0] LOGGER.info("TCP port scan complete") - self._scan_tcp_results = self._process_nmap_results( - nmap_results=nmap_results) + nmap_results_json = self._nmap_results_to_json(nmap_results) + self._scan_tcp_results = self._process_nmap_json_results( + nmap_results_json=nmap_results_json) def _scan_udp_ports(self, tests): ports = [] @@ -319,39 +338,44 @@ def _scan_udp_ports(self, tests): LOGGER.info("Running nmap UDP port scan") LOGGER.info("UDP ports: " + str(port_list)) nmap_results = util.run_command( - f"nmap -sU -sV -p {port_list} {self._device_ipv4_addr}")[0] + f"nmap -sU -sV -p {port_list} -oX - {self._device_ipv4_addr}")[0] LOGGER.info("UDP port scan complete") - self._scan_udp_results = self._process_nmap_results( - nmap_results=nmap_results) + nmap_results_json = self._nmap_results_to_json(nmap_results) + self._scan_udp_results = self._process_nmap_json_results( + nmap_results_json=nmap_results_json) - def _process_nmap_results(self, nmap_results): + def _nmap_results_to_json(self,nmap_results): + try: + xml_data = xmltodict.parse(nmap_results) + json_data = json.dumps(xml_data, indent=4) + return json.loads(json_data) + + except Exception as e: + LOGGER.error(f"Error parsing Nmap output: {e}") + + def _process_nmap_json_results(self,nmap_results_json): + LOGGER.debug("nmap results\n" + json.dumps(nmap_results_json,indent=2)) results = {} - LOGGER.info("nmap results\n" + str(nmap_results)) - if nmap_results: - if "Service Info" in nmap_results and "MAC Address" not in nmap_results: - rows = nmap_results.split("PORT")[1].split("Service Info")[0].split( - "\n") - elif "PORT" in nmap_results: - rows = nmap_results.split("PORT")[1].split("MAC Address")[0].split("\n") - if rows: - for result in rows[1:-1]: # Iterate skipping the header and tail rows - cols = result.split() - port = cols[0].split("/")[0] - # If results do not start with a a port number, - # it is likely a bleed over from previous result so - # we need to ignore it - if port.isdigit(): - version = "" - if len(cols) > 3: - # recombine full version information that may contain spaces - version = " ".join(cols[3:]) - port_result = { - cols[0].split("/")[0]: { - "tcp_udp":cols[0].split("/")[1], - "state": cols[1], - "service": cols[2], - "version": version - } - } - results.update(port_result) + if "ports" in nmap_results_json["nmaprun"]["host"]: + ports = nmap_results_json["nmaprun"]["host"]["ports"] + # Checking if an object is a JSON object + if isinstance(ports["port"], dict): + results.update(self._json_port_to_dict(ports["port"])) + elif isinstance(ports["port"], list): + for port in ports["port"]: + results.update(self._json_port_to_dict(port)) return results + + def _json_port_to_dict(self,port_json): + port_result = {} + port = {} + port["tcp_udp"] = port_json["@protocol"] + port["state"] = port_json["state"]["@state"] + port["service"] = port_json["service"]["@name"] + port["version"] = "" + if "@version" in port_json["service"]: + port["version"] += port_json["service"]["@version"] + if "@extrainfo" in port_json["service"]: + port["version"] += " " + port_json["service"]["@extrainfo"] + port_result = {port_json["@portid"]:port} + return port_result \ No newline at end of file From 5b56a793502f4fd71700f2562ccacd98d6c6458a Mon Sep 17 00:00:00 2001 From: J Boddey Date: Thu, 15 Jun 2023 17:50:42 +0100 Subject: [PATCH 35/48] Framework restructure (#50) * Restructure framework and modules * Fix CI paths * Fix base module * Add build script * Remove build logs * Update base and template docker files to fit the new format Implement a template option on network modules Fix skipping of base image build * remove base image build in ci * Remove group from chown --------- Co-authored-by: jhughesbiot --- .gitignore | 3 +- cmd/install | 4 - cmd/start | 4 +- framework/{ => python/src/common}/logger.py | 123 ++++++++-------- .../python/src/common}/util.py | 110 +++++++-------- framework/{ => python/src/core}/device.py | 2 +- .../{ => python/src/core}/test_runner.py | 2 +- framework/{ => python/src/core}/testrun.py | 24 ++-- .../python/src/net_orc}/listener.py | 4 +- .../python/src/net_orc}/network_device.py | 0 .../python/src/net_orc}/network_event.py | 0 .../src/net_orc}/network_orchestrator.py | 36 +++-- .../python/src/net_orc}/network_validator.py | 29 ++-- .../python/src/net_orc}/ovs_control.py | 10 +- .../python/src/test_orc}/module.py | 0 .../python/src/test_orc}/runner.py | 0 .../python/src/test_orc}/test_orchestrator.py | 30 ++-- framework/requirements.txt | 9 +- .../devices/faux-dev/bin/get_default_gateway | 0 .../devices/faux-dev/bin/start_dhcp_client | 0 .../faux-dev/bin/start_network_service | 2 +- .../devices/faux-dev/conf/module_config.json | 0 .../devices/faux-dev/faux-dev.Dockerfile | 11 +- .../devices/faux-dev/python/src/dhcp_check.py | 0 .../devices/faux-dev/python/src/dns_check.py | 0 .../faux-dev/python/src/gateway_check.py | 0 .../devices/faux-dev/python/src/logger.py | 0 .../devices/faux-dev/python/src/ntp_check.py | 0 .../devices/faux-dev/python/src/run.py | 0 .../devices/faux-dev/python/src/util.py | 0 .../network}/base/base.Dockerfile | 9 +- .../network}/base/bin/capture | 2 +- .../network}/base/bin/setup_binaries | 0 .../network}/base/bin/start_grpc | 0 .../network}/base/bin/start_module | 0 .../network}/base/bin/start_network_service | 0 .../network}/base/bin/wait_for_interface | 0 .../network}/base/conf/module_config.json | 0 .../network}/base/python/requirements.txt | 0 .../base/python/src/grpc/start_server.py | 0 .../network}/base/python/src/logger.py | 0 .../network}/dhcp-1/bin/start_network_service | 4 +- .../network}/dhcp-1/conf/dhcpd.conf | 0 .../network}/dhcp-1/conf/module_config.json | 0 .../network}/dhcp-1/conf/radvd.conf | 0 .../network/dhcp-1/dhcp-1.Dockerfile | 9 +- .../dhcp-1/python/src/grpc/__init__.py | 0 .../dhcp-1/python/src/grpc/dhcp_config.py | 0 .../dhcp-1/python/src/grpc/network_service.py | 0 .../dhcp-1/python/src/grpc/proto/grpc.proto | 0 .../network}/dhcp-2/bin/start_network_service | 4 +- .../network}/dhcp-2/conf/dhcpd.conf | 0 .../network}/dhcp-2/conf/module_config.json | 0 .../network}/dhcp-2/conf/radvd.conf | 0 .../network/dhcp-2/dhcp-2.Dockerfile | 12 +- .../dhcp-2/python/src/grpc/__init__.py | 0 .../dhcp-2/python/src/grpc/dhcp_config.py | 0 .../dhcp-2/python/src/grpc/network_service.py | 0 .../dhcp-2/python/src/grpc/proto/grpc.proto | 0 .../network}/dns/bin/start_network_service | 0 .../network}/dns/conf/dnsmasq.conf | 0 .../network}/dns/conf/module_config.json | 0 .../network}/dns/dns.Dockerfile | 7 +- .../gateway/bin/start_network_service | 0 .../network}/gateway/conf/module_config.json | 0 .../network}/gateway/gateway.Dockerfile | 7 +- .../network}/ntp/bin/start_network_service | 2 +- .../network}/ntp/conf/module_config.json | 0 modules/network/ntp/ntp.Dockerfile | 16 +++ .../network}/ntp/python/src/ntp_server.py | 0 .../network}/radius/bin/start_network_service | 2 +- .../network}/radius/conf/ca.crt | 0 .../network}/radius/conf/eap | 0 .../network}/radius/conf/module_config.json | 0 .../network}/radius/python/requirements.txt | 0 .../radius/python/src/authenticator.py | 0 .../network}/radius/radius.Dockerfile | 9 +- .../template/bin/start_network_service | 0 .../network}/template/conf/module_config.json | 1 + .../template/python/src/template_main.py | 0 modules/network/template/template.Dockerfile | 14 ++ .../test}/base/base.Dockerfile | 9 +- .../modules => modules/test}/base/bin/capture | 2 +- .../test}/base/bin/get_ipv4_addr | 0 .../test}/base/bin/setup_binaries | 0 .../test}/base/bin/start_grpc | 0 .../test}/base/bin/start_module | 2 +- .../test}/base/bin/wait_for_interface | 0 .../test}/base/conf/module_config.json | 0 .../test}/base/python/requirements.txt | 0 .../base/python/src/grpc/start_server.py | 0 .../test}/base/python/src/logger.py | 0 .../test}/base/python/src/test_module.py | 0 .../test}/base/python/src/util.py | 0 modules/test/baseline/baseline.Dockerfile | 14 ++ .../test}/baseline/bin/start_test_module | 4 +- .../test}/baseline/conf/module_config.json | 0 .../baseline/python/src/baseline_module.py | 0 .../test}/baseline/python/src/run.py | 0 .../test}/conn/bin/start_test_module | 4 +- .../test}/conn/conf/module_config.json | 0 .../test}/conn/conn.Dockerfile | 13 +- .../test}/conn/python/requirements.txt | 0 .../conn/python/src/connection_module.py | 0 .../test}/conn/python/src/run.py | 0 .../test}/dns/bin/start_test_module | 4 +- .../test}/dns/conf/module_config.json | 0 modules/test/dns/dns.Dockerfile | 14 ++ .../test}/dns/python/src/dns_module.py | 0 .../test}/dns/python/src/run.py | 0 .../test}/nmap/bin/start_test_module | 4 +- .../test}/nmap/conf/module_config.json | 0 modules/test/nmap/nmap.Dockerfile | 20 +++ .../test}/nmap/python/requirements.txt | 0 .../test}/nmap/python/src/nmap_module.py | 0 .../test}/nmap/python/src/run.py | 0 net_orc/.gitignore | 133 ------------------ net_orc/network/modules/ntp/ntp.Dockerfile | 13 -- .../modules/template/template.Dockerfile | 11 -- net_orc/orchestrator.Dockerfile | 22 --- net_orc/python/requirements.txt | 4 - test_orc/modules/baseline/baseline.Dockerfile | 11 -- test_orc/modules/dns/dns.Dockerfile | 11 -- test_orc/modules/nmap/nmap.Dockerfile | 17 --- test_orc/python/requirements.txt | 0 testing/test_baseline | 5 +- 126 files changed, 357 insertions(+), 461 deletions(-) rename framework/{ => python/src/common}/logger.py (57%) rename {net_orc/python/src => framework/python/src/common}/util.py (95%) rename framework/{ => python/src/core}/device.py (91%) rename framework/{ => python/src/core}/test_runner.py (96%) rename framework/{ => python/src/core}/testrun.py (85%) rename {net_orc/python/src => framework/python/src/net_orc}/listener.py (97%) rename {net_orc/python/src => framework/python/src/net_orc}/network_device.py (100%) rename {net_orc/python/src => framework/python/src/net_orc}/network_event.py (100%) rename {net_orc/python/src => framework/python/src/net_orc}/network_orchestrator.py (94%) rename {net_orc/python/src => framework/python/src/net_orc}/network_validator.py (91%) rename {net_orc/python/src => framework/python/src/net_orc}/ovs_control.py (95%) rename {test_orc/python/src => framework/python/src/test_orc}/module.py (100%) rename {test_orc/python/src => framework/python/src/test_orc}/runner.py (100%) rename {test_orc/python/src => framework/python/src/test_orc}/test_orchestrator.py (91%) rename {net_orc/network => modules}/devices/faux-dev/bin/get_default_gateway (100%) rename {net_orc/network => modules}/devices/faux-dev/bin/start_dhcp_client (100%) rename {net_orc/network => modules}/devices/faux-dev/bin/start_network_service (91%) rename {net_orc/network => modules}/devices/faux-dev/conf/module_config.json (100%) rename {net_orc/network => modules}/devices/faux-dev/faux-dev.Dockerfile (65%) rename {net_orc/network => modules}/devices/faux-dev/python/src/dhcp_check.py (100%) rename {net_orc/network => modules}/devices/faux-dev/python/src/dns_check.py (100%) rename {net_orc/network => modules}/devices/faux-dev/python/src/gateway_check.py (100%) rename {net_orc/network => modules}/devices/faux-dev/python/src/logger.py (100%) rename {net_orc/network => modules}/devices/faux-dev/python/src/ntp_check.py (100%) rename {net_orc/network => modules}/devices/faux-dev/python/src/run.py (100%) rename {net_orc/network => modules}/devices/faux-dev/python/src/util.py (100%) rename {net_orc/network/modules => modules/network}/base/base.Dockerfile (74%) rename {net_orc/network/modules => modules/network}/base/bin/capture (90%) rename {net_orc/network/modules => modules/network}/base/bin/setup_binaries (100%) rename {net_orc/network/modules => modules/network}/base/bin/start_grpc (100%) rename {net_orc/network/modules => modules/network}/base/bin/start_module (100%) rename {net_orc/network/modules => modules/network}/base/bin/start_network_service (100%) rename {net_orc/network/modules => modules/network}/base/bin/wait_for_interface (100%) rename {net_orc/network/modules => modules/network}/base/conf/module_config.json (100%) rename {net_orc/network/modules => modules/network}/base/python/requirements.txt (100%) rename {net_orc/network/modules => modules/network}/base/python/src/grpc/start_server.py (100%) rename {net_orc/network/modules => modules/network}/base/python/src/logger.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-1/bin/start_network_service (91%) rename {net_orc/network/modules => modules/network}/dhcp-1/conf/dhcpd.conf (100%) rename {net_orc/network/modules => modules/network}/dhcp-1/conf/module_config.json (100%) rename {net_orc/network/modules => modules/network}/dhcp-1/conf/radvd.conf (100%) rename net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile => modules/network/dhcp-1/dhcp-1.Dockerfile (56%) rename {net_orc/network/modules => modules/network}/dhcp-1/python/src/grpc/__init__.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-1/python/src/grpc/dhcp_config.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-1/python/src/grpc/network_service.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-1/python/src/grpc/proto/grpc.proto (100%) rename {net_orc/network/modules => modules/network}/dhcp-2/bin/start_network_service (91%) rename {net_orc/network/modules => modules/network}/dhcp-2/conf/dhcpd.conf (100%) rename {net_orc/network/modules => modules/network}/dhcp-2/conf/module_config.json (100%) rename {net_orc/network/modules => modules/network}/dhcp-2/conf/radvd.conf (100%) rename net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile => modules/network/dhcp-2/dhcp-2.Dockerfile (55%) rename {net_orc/network/modules => modules/network}/dhcp-2/python/src/grpc/__init__.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-2/python/src/grpc/dhcp_config.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-2/python/src/grpc/network_service.py (100%) rename {net_orc/network/modules => modules/network}/dhcp-2/python/src/grpc/proto/grpc.proto (100%) rename {net_orc/network/modules => modules/network}/dns/bin/start_network_service (100%) rename {net_orc/network/modules => modules/network}/dns/conf/dnsmasq.conf (100%) rename {net_orc/network/modules => modules/network}/dns/conf/module_config.json (100%) rename {net_orc/network/modules => modules/network}/dns/dns.Dockerfile (67%) rename {net_orc/network/modules => modules/network}/gateway/bin/start_network_service (100%) rename {net_orc/network/modules => modules/network}/gateway/conf/module_config.json (100%) rename {net_orc/network/modules => modules/network}/gateway/gateway.Dockerfile (59%) rename {net_orc/network/modules => modules/network}/ntp/bin/start_network_service (82%) rename {net_orc/network/modules => modules/network}/ntp/conf/module_config.json (100%) create mode 100644 modules/network/ntp/ntp.Dockerfile rename {net_orc/network/modules => modules/network}/ntp/python/src/ntp_server.py (100%) rename {net_orc/network/modules => modules/network}/radius/bin/start_network_service (89%) rename {net_orc/network/modules => modules/network}/radius/conf/ca.crt (100%) rename {net_orc/network/modules => modules/network}/radius/conf/eap (100%) rename {net_orc/network/modules => modules/network}/radius/conf/module_config.json (100%) rename {net_orc/network/modules => modules/network}/radius/python/requirements.txt (100%) rename {net_orc/network/modules => modules/network}/radius/python/src/authenticator.py (100%) rename {net_orc/network/modules => modules/network}/radius/radius.Dockerfile (74%) rename {net_orc/network/modules => modules/network}/template/bin/start_network_service (100%) rename {net_orc/network/modules => modules/network}/template/conf/module_config.json (91%) rename {net_orc/network/modules => modules/network}/template/python/src/template_main.py (100%) create mode 100644 modules/network/template/template.Dockerfile rename {test_orc/modules => modules/test}/base/base.Dockerfile (74%) rename {test_orc/modules => modules/test}/base/bin/capture (88%) rename {test_orc/modules => modules/test}/base/bin/get_ipv4_addr (100%) rename {test_orc/modules => modules/test}/base/bin/setup_binaries (100%) rename {test_orc/modules => modules/test}/base/bin/start_grpc (100%) rename {test_orc/modules => modules/test}/base/bin/start_module (97%) rename {test_orc/modules => modules/test}/base/bin/wait_for_interface (100%) rename {test_orc/modules => modules/test}/base/conf/module_config.json (100%) rename {test_orc/modules => modules/test}/base/python/requirements.txt (100%) rename {test_orc/modules => modules/test}/base/python/src/grpc/start_server.py (100%) rename {test_orc/modules => modules/test}/base/python/src/logger.py (100%) rename {test_orc/modules => modules/test}/base/python/src/test_module.py (100%) rename {test_orc/modules => modules/test}/base/python/src/util.py (100%) create mode 100644 modules/test/baseline/baseline.Dockerfile rename {test_orc/modules => modules/test}/baseline/bin/start_test_module (90%) rename {test_orc/modules => modules/test}/baseline/conf/module_config.json (100%) rename {test_orc/modules => modules/test}/baseline/python/src/baseline_module.py (100%) rename {test_orc/modules => modules/test}/baseline/python/src/run.py (100%) rename {test_orc/modules => modules/test}/conn/bin/start_test_module (92%) rename {test_orc/modules => modules/test}/conn/conf/module_config.json (100%) rename {test_orc/modules => modules/test}/conn/conn.Dockerfile (59%) rename {test_orc/modules => modules/test}/conn/python/requirements.txt (100%) rename {test_orc/modules => modules/test}/conn/python/src/connection_module.py (100%) rename {test_orc/modules => modules/test}/conn/python/src/run.py (100%) rename {test_orc/modules => modules/test}/dns/bin/start_test_module (90%) rename {test_orc/modules => modules/test}/dns/conf/module_config.json (100%) create mode 100644 modules/test/dns/dns.Dockerfile rename {test_orc/modules => modules/test}/dns/python/src/dns_module.py (100%) rename {test_orc/modules => modules/test}/dns/python/src/run.py (100%) rename {test_orc/modules => modules/test}/nmap/bin/start_test_module (93%) rename {test_orc/modules => modules/test}/nmap/conf/module_config.json (100%) create mode 100644 modules/test/nmap/nmap.Dockerfile rename {test_orc/modules => modules/test}/nmap/python/requirements.txt (100%) rename {test_orc/modules => modules/test}/nmap/python/src/nmap_module.py (100%) rename {test_orc/modules => modules/test}/nmap/python/src/run.py (100%) delete mode 100644 net_orc/.gitignore delete mode 100644 net_orc/network/modules/ntp/ntp.Dockerfile delete mode 100644 net_orc/network/modules/template/template.Dockerfile delete mode 100644 net_orc/orchestrator.Dockerfile delete mode 100644 net_orc/python/requirements.txt delete mode 100644 test_orc/modules/baseline/baseline.Dockerfile delete mode 100644 test_orc/modules/dns/dns.Dockerfile delete mode 100644 test_orc/modules/nmap/nmap.Dockerfile delete mode 100644 test_orc/python/requirements.txt diff --git a/.gitignore b/.gitignore index 5dfc1f6f9..ad8f26d34 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,5 @@ venv/ error pylint.out local/ -__pycache__/ \ No newline at end of file +__pycache__/ +build/ \ No newline at end of file diff --git a/cmd/install b/cmd/install index 23e463158..37c03e113 100755 --- a/cmd/install +++ b/cmd/install @@ -6,8 +6,4 @@ source venv/bin/activate pip3 install -r framework/requirements.txt -pip3 install -r net_orc/python/requirements.txt - -pip3 install -r test_orc/python/requirements.txt - deactivate diff --git a/cmd/start b/cmd/start index d146f413d..55d2e52eb 100755 --- a/cmd/start +++ b/cmd/start @@ -18,7 +18,9 @@ rm -rf runtime source venv/bin/activate # TODO: Execute python code -python -u framework/test_runner.py $@ +# Set the PYTHONPATH to include the "src" directory +export PYTHONPATH="$PWD/framework/python/src" +python -u framework/python/src/core/test_runner.py $@ # TODO: Work in progress code for containerization of OVS module # asyncRun() { diff --git a/framework/logger.py b/framework/python/src/common/logger.py similarity index 57% rename from framework/logger.py rename to framework/python/src/common/logger.py index cb71c9fdd..539767f53 100644 --- a/framework/logger.py +++ b/framework/python/src/common/logger.py @@ -1,63 +1,60 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Manages stream and file loggers.""" -import json -import logging -import os - -LOGGERS = {} -_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' -_DATE_FORMAT = '%b %02d %H:%M:%S' -_DEFAULT_LOG_LEVEL = logging.INFO -_LOG_LEVEL = logging.INFO -_CONF_DIR = 'conf' -_CONF_FILE_NAME = 'system.json' -_LOG_DIR = 'runtime/testing/' - -# Set log level -with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), - encoding='utf-8') as system_conf_file: - system_conf_json = json.load(system_conf_file) -log_level_str = system_conf_json['log_level'] - -temp_log = logging.getLogger('temp') -try: - temp_log.setLevel(logging.getLevelName(log_level_str)) - _LOG_LEVEL = logging.getLevelName(log_level_str) -except ValueError: - print('Invalid log level set in ' + _CONF_DIR + '/' + _CONF_FILE_NAME + - '. Using INFO as log level') - _LOG_LEVEL = _DEFAULT_LOG_LEVEL - -log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) - -def add_file_handler(log, log_file): - handler = logging.FileHandler(_LOG_DIR + log_file + '.log') - handler.setFormatter(log_format) - log.addHandler(handler) - -def add_stream_handler(log): - handler = logging.StreamHandler() - handler.setFormatter(log_format) - log.addHandler(handler) - -def get_logger(name, log_file=None): - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - LOGGERS[name].setLevel(_LOG_LEVEL) - add_stream_handler(LOGGERS[name]) - if log_file is not None: - add_file_handler(LOGGERS[name], log_file) - return LOGGERS[name] +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Sets up the logger to be used for the test modules.""" +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' + +# Set log level +try: + with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='UTF-8') as config_json_file: + system_conf_json = json.load(config_json_file) + + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except OSError: + # TODO: Print out warning that log level is incorrect or missing + log_level = _DEFAULT_LEVEL + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + +def add_file_handler(log, log_file, log_dir): + handler = logging.FileHandler(log_dir + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) + + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, log_file=None, log_dir=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None and log_dir is not None: + add_file_handler(LOGGERS[name], log_file, log_dir) + return LOGGERS[name] diff --git a/net_orc/python/src/util.py b/framework/python/src/common/util.py similarity index 95% rename from net_orc/python/src/util.py rename to framework/python/src/common/util.py index ba9527996..1ffe70651 100644 --- a/net_orc/python/src/util.py +++ b/framework/python/src/common/util.py @@ -1,55 +1,55 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Provides basic utilities for the network orchestrator.""" -import subprocess -import shlex -import logger -import netifaces - -LOGGER = logger.get_logger('util') - - -def run_command(cmd, output=True): - """Runs a process at the os level - By default, returns the standard output and error output - If the caller sets optional output parameter to False, - will only return a boolean result indicating if it was - succesful in running the command. Failure is indicated - by any return code from the process other than zero.""" - - success = False - process = subprocess.Popen(shlex.split(cmd), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - - if process.returncode != 0 and output: - err_msg = f'{stderr.strip()}. Code: {process.returncode}' - LOGGER.error('Command Failed: ' + cmd) - LOGGER.error('Error: ' + err_msg) - else: - success = True - if output: - return stdout.strip().decode('utf-8'), stderr - else: - return success - - -def interface_exists(interface): - return interface in netifaces.interfaces() - - -def prettify(mac_string): - return ':'.join([f'{ord(b):02x}' for b in mac_string]) +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides basic utilities for the network orchestrator.""" +import subprocess +import shlex +from common import logger +import netifaces + +LOGGER = logger.get_logger('util') + + +def run_command(cmd, output=True): + """Runs a process at the os level + By default, returns the standard output and error output + If the caller sets optional output parameter to False, + will only return a boolean result indicating if it was + succesful in running the command. Failure is indicated + by any return code from the process other than zero.""" + + success = False + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + + if process.returncode != 0 and output: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) + else: + success = True + if output: + return stdout.strip().decode('utf-8'), stderr + else: + return success + + +def interface_exists(interface): + return interface in netifaces.interfaces() + + +def prettify(mac_string): + return ':'.join([f'{ord(b):02x}' for b in mac_string]) diff --git a/framework/device.py b/framework/python/src/core/device.py similarity index 91% rename from framework/device.py rename to framework/python/src/core/device.py index 53263e6a6..44f275bdf 100644 --- a/framework/device.py +++ b/framework/python/src/core/device.py @@ -14,7 +14,7 @@ """Track device object information.""" -from network_device import NetworkDevice +from net_orc.network_device import NetworkDevice from dataclasses import dataclass diff --git a/framework/test_runner.py b/framework/python/src/core/test_runner.py similarity index 96% rename from framework/test_runner.py rename to framework/python/src/core/test_runner.py index 0ee5e8416..226f874cc 100644 --- a/framework/test_runner.py +++ b/framework/python/src/core/test_runner.py @@ -23,7 +23,7 @@ import argparse import sys from testrun import TestRun -import logger +from common import logger import signal LOGGER = logger.get_logger("runner") diff --git a/framework/testrun.py b/framework/python/src/core/testrun.py similarity index 85% rename from framework/testrun.py rename to framework/python/src/core/testrun.py index 25232f90c..e59b7cda2 100644 --- a/framework/testrun.py +++ b/framework/python/src/core/testrun.py @@ -25,25 +25,18 @@ import json import signal import time -import logger +from common import logger # Locate parent directory current_dir = os.path.dirname(os.path.realpath(__file__)) -parent_dir = os.path.dirname(current_dir) -# Add net_orc to Python path -net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') -sys.path.append(net_orc_dir) +# Locate the test-run root directory, 4 levels, src->python->framework->test-run +root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))) -# Add test_orc to Python path -test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') -sys.path.append(test_orc_dir) - -from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel -import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel -import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel - -from device import Device # pylint: disable=wrong-import-position,import-outside-toplevel +from net_orc.listener import NetworkEvent +from test_orc import test_orchestrator as test_orc +from net_orc import network_orchestrator as net_orc +from device import Device LOGGER = logger.get_logger('test_run') CONFIG_FILE = 'conf/system.json' @@ -58,7 +51,6 @@ DEVICE_MAC_ADDR = 'mac_addr' DEVICE_TEST_MODULES = 'test_modules' - class TestRun: # pylint: disable=too-few-public-methods """Test Run controller. @@ -142,7 +134,7 @@ def _exit_handler(self, signum, arg): # pylint: disable=unused-argument def _get_config_abs(self, config_file=None): if config_file is None: # If not defined, use relative pathing to local file - config_file = os.path.join(parent_dir, CONFIG_FILE) + config_file = os.path.join(root_dir, CONFIG_FILE) # Expand the config file to absolute pathing return os.path.abspath(config_file) diff --git a/net_orc/python/src/listener.py b/framework/python/src/net_orc/listener.py similarity index 97% rename from net_orc/python/src/listener.py rename to framework/python/src/net_orc/listener.py index 0bbd2b1c9..4f8e1961f 100644 --- a/net_orc/python/src/listener.py +++ b/framework/python/src/net_orc/listener.py @@ -16,8 +16,8 @@ under test.""" import threading from scapy.all import AsyncSniffer, DHCP, get_if_hwaddr -import logger -from network_event import NetworkEvent +from net_orc.network_event import NetworkEvent +from common import logger LOGGER = logger.get_logger('listener') diff --git a/net_orc/python/src/network_device.py b/framework/python/src/net_orc/network_device.py similarity index 100% rename from net_orc/python/src/network_device.py rename to framework/python/src/net_orc/network_device.py diff --git a/net_orc/python/src/network_event.py b/framework/python/src/net_orc/network_event.py similarity index 100% rename from net_orc/python/src/network_event.py rename to framework/python/src/net_orc/network_event.py diff --git a/net_orc/python/src/network_orchestrator.py b/framework/python/src/net_orc/network_orchestrator.py similarity index 94% rename from net_orc/python/src/network_orchestrator.py rename to framework/python/src/net_orc/network_orchestrator.py index f53b17d15..f1f479742 100644 --- a/net_orc/python/src/network_orchestrator.py +++ b/framework/python/src/net_orc/network_orchestrator.py @@ -26,13 +26,14 @@ import threading import docker from docker.types import Mount -import logger -import util -from listener import Listener -from network_device import NetworkDevice -from network_event import NetworkEvent -from network_validator import NetworkValidator -from ovs_control import OVSControl +from collections import OrderedDict +from common import logger +from common import util +from net_orc.listener import Listener +from net_orc.network_device import NetworkDevice +from net_orc.network_event import NetworkEvent +from net_orc.network_validator import NetworkValidator +from net_orc.ovs_control import OVSControl LOGGER = logger.get_logger('net_orc') CONFIG_FILE = 'conf/system.json' @@ -41,7 +42,8 @@ TEST_DIR = 'test' MONITOR_PCAP = 'monitor.pcap' NET_DIR = 'runtime/network' -NETWORK_MODULES_DIR = 'network/modules' +#NETWORK_MODULES_DIR = 'network/modules' +NETWORK_MODULES_DIR = 'modules/network' NETWORK_MODULE_METADATA = 'conf/module_config.json' DEVICE_BRIDGE = 'tr-d' INTERNET_BRIDGE = 'tr-c' @@ -81,8 +83,9 @@ def __init__(self, self.validate = validate self.async_monitor = async_monitor - self._path = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + self._path = os.path.dirname(os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) self.validator = NetworkValidator() shutil.rmtree(os.path.join(os.getcwd(), NET_DIR), ignore_errors=True) @@ -182,7 +185,7 @@ def _device_discovered(self, mac_addr): TEST_DIR, device.mac_addr.replace(':', '')) os.makedirs(device_runtime_dir) - util.run_command(f'chown -R {self._host_user}:{self._host_user} {device_runtime_dir}') + util.run_command(f'chown -R {self._host_user} {device_runtime_dir}') packet_capture = sniff(iface=self._dev_intf, timeout=self._startup_timeout, @@ -413,6 +416,11 @@ def _load_network_module(self, module_dir): net_module.enable_container = net_module_json['config']['docker'][ 'enable_container'] + # Determine if this is a template + if 'template' in net_module_json['config']['docker']: + net_module.template = net_module_json['config']['docker'][ + 'template'] + # Load network service networking configuration if net_module.enable_container: @@ -432,13 +440,14 @@ def _load_network_module(self, module_dir): net_module.net_config.ip_index] net_module.net_config.ipv6_network = self.network_config.ipv6_network - self._net_modules.append(net_module) + self._net_modules.append(net_module) return net_module def build_network_modules(self): LOGGER.info('Building network modules...') for net_module in self._net_modules: - self._build_module(net_module) + if not net_module.template: + self._build_module(net_module) def _build_module(self, net_module): LOGGER.debug('Building network module ' + net_module.dir_name) @@ -786,6 +795,7 @@ def __init__(self): self.container = None self.container_name = None self.image_name = None + self.template = False # Absolute path self.dir = None diff --git a/net_orc/python/src/network_validator.py b/framework/python/src/net_orc/network_validator.py similarity index 91% rename from net_orc/python/src/network_validator.py rename to framework/python/src/net_orc/network_validator.py index 832a154e3..4ee46124d 100644 --- a/net_orc/python/src/network_validator.py +++ b/framework/python/src/net_orc/network_validator.py @@ -20,12 +20,12 @@ import docker from docker.types import Mount import getpass -import logger -import util +from common import logger +from common import util LOGGER = logger.get_logger('validator') OUTPUT_DIR = 'runtime/validation' -DEVICES_DIR = 'network/devices' +DEVICES_DIR = 'modules/devices' DEVICE_METADATA = 'conf/module_config.json' DEVICE_BRIDGE = 'tr-d' CONF_DIR = 'conf' @@ -38,8 +38,9 @@ class NetworkValidator: def __init__(self): self._net_devices = [] - self._path = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + self._path = os.path.dirname(os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) self._device_dir = os.path.join(self._path, DEVICES_DIR) @@ -48,11 +49,11 @@ def __init__(self): def start(self): """Start the network validator.""" LOGGER.debug('Starting validator') - + # Setup the output directory host_user = self._get_host_user() os.makedirs(OUTPUT_DIR, exist_ok=True) - util.run_command(f'chown -R {host_user}:{host_user} {OUTPUT_DIR}') + util.run_command(f'chown -R {host_user} {OUTPUT_DIR}') self._load_devices() self._build_network_devices() @@ -85,7 +86,7 @@ def _build_device(self, net_device): def _load_devices(self): - LOGGER.info(f'Loading validators from {DEVICES_DIR}') + LOGGER.info(f'Loading validators from {self._device_dir}') loaded_devices = 'Loaded the following validators: ' @@ -175,24 +176,24 @@ def _start_network_device(self, device): def _get_host_user(self): user = self._get_os_user() - + # If primary method failed, try secondary if user is None: user = self._get_user() - LOGGER.debug("Network validator host user: " + user) + LOGGER.debug(f'Network validator host user: {user}') return user def _get_os_user(self): user = None try: user = os.getlogin() - except OSError as e: + except OSError: # Handle the OSError exception - LOGGER.error("An OS error occurred while retrieving the login name.") - except Exception as e: + LOGGER.error('An OS error occurred while retrieving the login name.') + except Exception as error: # Catch any other unexpected exceptions - LOGGER.error("An exception occurred:", e) + LOGGER.error('An exception occurred:', error) return user def _get_user(self): diff --git a/net_orc/python/src/ovs_control.py b/framework/python/src/net_orc/ovs_control.py similarity index 95% rename from net_orc/python/src/ovs_control.py rename to framework/python/src/net_orc/ovs_control.py index ce316dba7..3c950d4af 100644 --- a/net_orc/python/src/ovs_control.py +++ b/framework/python/src/net_orc/ovs_control.py @@ -14,9 +14,9 @@ """OVS Control Module""" import json -import logger -import util import os +from common import logger +from common import util CONFIG_FILE = 'conf/system.json' DEVICE_BRIDGE = 'tr-d' @@ -146,9 +146,9 @@ def delete_bridge(self, bridge_name): return success def _load_config(self): - path = os.path.dirname( - os.path.dirname( - os.path.dirname(os.path.dirname(os.path.realpath(__file__))))) + path = os.path.dirname(os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) config_file = os.path.join(path, CONFIG_FILE) LOGGER.debug('Loading configuration: ' + config_file) with open(config_file, 'r', encoding='utf-8') as conf_file: diff --git a/test_orc/python/src/module.py b/framework/python/src/test_orc/module.py similarity index 100% rename from test_orc/python/src/module.py rename to framework/python/src/test_orc/module.py diff --git a/test_orc/python/src/runner.py b/framework/python/src/test_orc/runner.py similarity index 100% rename from test_orc/python/src/runner.py rename to framework/python/src/test_orc/runner.py diff --git a/test_orc/python/src/test_orchestrator.py b/framework/python/src/test_orc/test_orchestrator.py similarity index 91% rename from test_orc/python/src/test_orchestrator.py rename to framework/python/src/test_orc/test_orchestrator.py index 9f0f100ab..58c1944f8 100644 --- a/test_orc/python/src/test_orchestrator.py +++ b/framework/python/src/test_orc/test_orchestrator.py @@ -20,14 +20,14 @@ import shutil import docker from docker.types import Mount -import logger -from module import TestModule -import util +from common import logger +from test_orc.module import TestModule +from common import util LOG_NAME = "test_orc" LOGGER = logger.get_logger("test_orc") RUNTIME_DIR = "runtime/test" -TEST_MODULES_DIR = "modules" +TEST_MODULES_DIR = "modules/test" MODULE_CONFIG = "conf/module_config.json" @@ -40,22 +40,28 @@ def __init__(self, net_orc): self._net_orc = net_orc self._test_in_progress = False - self._path = os.path.dirname( - os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + self._path = os.path.dirname(os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) # Resolve the path to the test-run folder - self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) + #self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) + + + self._root_path = os.path.dirname(os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), ignore_errors=True) def start(self): LOGGER.debug("Starting test orchestrator") - + # Setup the output directory self._host_user = self._get_host_user() os.makedirs(RUNTIME_DIR, exist_ok=True) - util.run_command(f'chown -R {self._host_user}:{self._host_user} {RUNTIME_DIR}') + util.run_command(f'chown -R {self._host_user} {RUNTIME_DIR}') self._load_test_modules() self.build_test_modules() @@ -107,7 +113,7 @@ def _generate_results(self, device): "runtime/test/" + device.mac_addr.replace(":", "") + "/results.json") with open(out_file, "w", encoding="utf-8") as f: json.dump(results, f, indent=2) - util.run_command(f'chown -R {self._host_user}:{self._host_user} {out_file}') + util.run_command(f'chown -R {self._host_user} {out_file}') return results def test_in_progress(self): @@ -143,12 +149,12 @@ def _run_test_module(self, module, device): device_startup_capture = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/startup.pcap") - util.run_command(f'chown -R {self._host_user}:{self._host_user} {device_startup_capture}') + util.run_command(f'chown -R {self._host_user} {device_startup_capture}') device_monitor_capture = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/monitor.pcap") - util.run_command(f'chown -R {self._host_user}:{self._host_user} {device_monitor_capture}') + util.run_command(f'chown -R {self._host_user} {device_monitor_capture}') client = docker.from_env() diff --git a/framework/requirements.txt b/framework/requirements.txt index ca56948f4..03eab9796 100644 --- a/framework/requirements.txt +++ b/framework/requirements.txt @@ -1 +1,8 @@ -requests<2.29.0 \ No newline at end of file +# Requirements for the core module +requests<2.29.0 + +# Requirements for the net_orc module +docker +ipaddress +netifaces +scapy \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/bin/get_default_gateway b/modules/devices/faux-dev/bin/get_default_gateway similarity index 100% rename from net_orc/network/devices/faux-dev/bin/get_default_gateway rename to modules/devices/faux-dev/bin/get_default_gateway diff --git a/net_orc/network/devices/faux-dev/bin/start_dhcp_client b/modules/devices/faux-dev/bin/start_dhcp_client similarity index 100% rename from net_orc/network/devices/faux-dev/bin/start_dhcp_client rename to modules/devices/faux-dev/bin/start_dhcp_client diff --git a/net_orc/network/devices/faux-dev/bin/start_network_service b/modules/devices/faux-dev/bin/start_network_service similarity index 91% rename from net_orc/network/devices/faux-dev/bin/start_network_service rename to modules/devices/faux-dev/bin/start_network_service index 13e2f6baf..80a587684 100644 --- a/net_orc/network/devices/faux-dev/bin/start_network_service +++ b/modules/devices/faux-dev/bin/start_network_service @@ -27,7 +27,7 @@ LOG_FILE=$OUTPUT_DIR/$MODULE_NAME.log RESULT_FILE=$OUTPUT_DIR/result.json touch $LOG_FILE touch $RESULT_FILE -chown -R $HOST_USER:$HOST_USER $OUTPUT_DIR +chown -R $HOST_USER $OUTPUT_DIR # Start dhclient $BIN_DIR/start_dhcp_client $INTF diff --git a/net_orc/network/devices/faux-dev/conf/module_config.json b/modules/devices/faux-dev/conf/module_config.json similarity index 100% rename from net_orc/network/devices/faux-dev/conf/module_config.json rename to modules/devices/faux-dev/conf/module_config.json diff --git a/net_orc/network/devices/faux-dev/faux-dev.Dockerfile b/modules/devices/faux-dev/faux-dev.Dockerfile similarity index 65% rename from net_orc/network/devices/faux-dev/faux-dev.Dockerfile rename to modules/devices/faux-dev/faux-dev.Dockerfile index 1686341b5..0a4f02f38 100644 --- a/net_orc/network/devices/faux-dev/faux-dev.Dockerfile +++ b/modules/devices/faux-dev/faux-dev.Dockerfile @@ -1,6 +1,9 @@ # Image name: test-run/faux-dev FROM test-run/base:latest +ARG MODULE_NAME=faux-dev +ARG MODULE_DIR=modules/devices/$MODULE_NAME + #Update and get all additional requirements not contained in the base image RUN apt-get update --fix-missing @@ -11,10 +14,10 @@ ARG DEBIAN_FRONTEND=noninteractive RUN apt-get install -y isc-dhcp-client ntp ntpdate # Copy over all configuration files -COPY network/devices/faux-dev/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf -# Load device binary files -COPY network/devices/faux-dev/bin /testrun/bin +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY network/devices/faux-dev/python /testrun/python \ No newline at end of file +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py b/modules/devices/faux-dev/python/src/dhcp_check.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/dhcp_check.py rename to modules/devices/faux-dev/python/src/dhcp_check.py diff --git a/net_orc/network/devices/faux-dev/python/src/dns_check.py b/modules/devices/faux-dev/python/src/dns_check.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/dns_check.py rename to modules/devices/faux-dev/python/src/dns_check.py diff --git a/net_orc/network/devices/faux-dev/python/src/gateway_check.py b/modules/devices/faux-dev/python/src/gateway_check.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/gateway_check.py rename to modules/devices/faux-dev/python/src/gateway_check.py diff --git a/net_orc/network/devices/faux-dev/python/src/logger.py b/modules/devices/faux-dev/python/src/logger.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/logger.py rename to modules/devices/faux-dev/python/src/logger.py diff --git a/net_orc/network/devices/faux-dev/python/src/ntp_check.py b/modules/devices/faux-dev/python/src/ntp_check.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/ntp_check.py rename to modules/devices/faux-dev/python/src/ntp_check.py diff --git a/net_orc/network/devices/faux-dev/python/src/run.py b/modules/devices/faux-dev/python/src/run.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/run.py rename to modules/devices/faux-dev/python/src/run.py diff --git a/net_orc/network/devices/faux-dev/python/src/util.py b/modules/devices/faux-dev/python/src/util.py similarity index 100% rename from net_orc/network/devices/faux-dev/python/src/util.py rename to modules/devices/faux-dev/python/src/util.py diff --git a/net_orc/network/modules/base/base.Dockerfile b/modules/network/base/base.Dockerfile similarity index 74% rename from net_orc/network/modules/base/base.Dockerfile rename to modules/network/base/base.Dockerfile index 2400fd1c6..d14713c59 100644 --- a/net_orc/network/modules/base/base.Dockerfile +++ b/modules/network/base/base.Dockerfile @@ -1,17 +1,20 @@ # Image name: test-run/base FROM ubuntu:jammy +ARG MODULE_NAME=base +ARG MODULE_DIR=modules/network/$MODULE_NAME + # Install common software RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix -#Setup the base python requirements -COPY network/modules/base/python /testrun/python +# Setup the base python requirements +COPY $MODULE_DIR/python /testrun/python # Install all python requirements for the module RUN pip3 install -r /testrun/python/requirements.txt # Add the bin files -COPY network/modules/base/bin /testrun/bin +COPY $MODULE_DIR/bin /testrun/bin # Remove incorrect line endings RUN dos2unix /testrun/bin/* diff --git a/net_orc/network/modules/base/bin/capture b/modules/network/base/bin/capture similarity index 90% rename from net_orc/network/modules/base/bin/capture rename to modules/network/base/bin/capture index 8a8430feb..bc6c425e5 100644 --- a/net_orc/network/modules/base/bin/capture +++ b/modules/network/base/bin/capture @@ -23,7 +23,7 @@ fi # Create the output directory and start the capture mkdir -p $PCAP_DIR -chown $HOST_USER:$HOST_USER $PCAP_DIR +chown $HOST_USER $PCAP_DIR tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & #Small pause to let the capture to start diff --git a/net_orc/network/modules/base/bin/setup_binaries b/modules/network/base/bin/setup_binaries similarity index 100% rename from net_orc/network/modules/base/bin/setup_binaries rename to modules/network/base/bin/setup_binaries diff --git a/net_orc/network/modules/base/bin/start_grpc b/modules/network/base/bin/start_grpc similarity index 100% rename from net_orc/network/modules/base/bin/start_grpc rename to modules/network/base/bin/start_grpc diff --git a/net_orc/network/modules/base/bin/start_module b/modules/network/base/bin/start_module similarity index 100% rename from net_orc/network/modules/base/bin/start_module rename to modules/network/base/bin/start_module diff --git a/net_orc/network/modules/base/bin/start_network_service b/modules/network/base/bin/start_network_service similarity index 100% rename from net_orc/network/modules/base/bin/start_network_service rename to modules/network/base/bin/start_network_service diff --git a/net_orc/network/modules/base/bin/wait_for_interface b/modules/network/base/bin/wait_for_interface similarity index 100% rename from net_orc/network/modules/base/bin/wait_for_interface rename to modules/network/base/bin/wait_for_interface diff --git a/net_orc/network/modules/base/conf/module_config.json b/modules/network/base/conf/module_config.json similarity index 100% rename from net_orc/network/modules/base/conf/module_config.json rename to modules/network/base/conf/module_config.json diff --git a/net_orc/network/modules/base/python/requirements.txt b/modules/network/base/python/requirements.txt similarity index 100% rename from net_orc/network/modules/base/python/requirements.txt rename to modules/network/base/python/requirements.txt diff --git a/net_orc/network/modules/base/python/src/grpc/start_server.py b/modules/network/base/python/src/grpc/start_server.py similarity index 100% rename from net_orc/network/modules/base/python/src/grpc/start_server.py rename to modules/network/base/python/src/grpc/start_server.py diff --git a/net_orc/network/modules/base/python/src/logger.py b/modules/network/base/python/src/logger.py similarity index 100% rename from net_orc/network/modules/base/python/src/logger.py rename to modules/network/base/python/src/logger.py diff --git a/net_orc/network/modules/dhcp-1/bin/start_network_service b/modules/network/dhcp-1/bin/start_network_service similarity index 91% rename from net_orc/network/modules/dhcp-1/bin/start_network_service rename to modules/network/dhcp-1/bin/start_network_service index e8e0ad06c..a60806684 100644 --- a/net_orc/network/modules/dhcp-1/bin/start_network_service +++ b/modules/network/dhcp-1/bin/start_network_service @@ -21,8 +21,8 @@ mkdir /var/run/radvd #Create and set permissions on the log files touch $DHCP_LOG_FILE touch $RA_LOG_FILE -chown $HOST_USER:$HOST_USER $DHCP_LOG_FILE -chown $HOST_USER:$HOST_USER $RA_LOG_FILE +chown $HOST_USER $DHCP_LOG_FILE +chown $HOST_USER $RA_LOG_FILE #Move the config files to the correct location diff --git a/net_orc/network/modules/dhcp-1/conf/dhcpd.conf b/modules/network/dhcp-1/conf/dhcpd.conf similarity index 100% rename from net_orc/network/modules/dhcp-1/conf/dhcpd.conf rename to modules/network/dhcp-1/conf/dhcpd.conf diff --git a/net_orc/network/modules/dhcp-1/conf/module_config.json b/modules/network/dhcp-1/conf/module_config.json similarity index 100% rename from net_orc/network/modules/dhcp-1/conf/module_config.json rename to modules/network/dhcp-1/conf/module_config.json diff --git a/net_orc/network/modules/dhcp-1/conf/radvd.conf b/modules/network/dhcp-1/conf/radvd.conf similarity index 100% rename from net_orc/network/modules/dhcp-1/conf/radvd.conf rename to modules/network/dhcp-1/conf/radvd.conf diff --git a/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile b/modules/network/dhcp-1/dhcp-1.Dockerfile similarity index 56% rename from net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile rename to modules/network/dhcp-1/dhcp-1.Dockerfile index 989992570..766f18c57 100644 --- a/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile +++ b/modules/network/dhcp-1/dhcp-1.Dockerfile @@ -1,14 +1,17 @@ # Image name: test-run/dhcp-primary FROM test-run/base:latest +ARG MODULE_NAME=dhcp-1 +ARG MODULE_DIR=modules/network/$MODULE_NAME + # Install dhcp server RUN apt-get install -y isc-dhcp-server radvd # Copy over all configuration files -COPY network/modules/dhcp-2/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf # Copy over all binary files -COPY network/modules/dhcp-2/bin /testrun/bin +COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY network/modules/dhcp-2/python /testrun/python +COPY $MODULE_DIR/python /testrun/python diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py b/modules/network/dhcp-1/python/src/grpc/__init__.py similarity index 100% rename from net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py rename to modules/network/dhcp-1/python/src/grpc/__init__.py diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py b/modules/network/dhcp-1/python/src/grpc/dhcp_config.py similarity index 100% rename from net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py rename to modules/network/dhcp-1/python/src/grpc/dhcp_config.py diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py b/modules/network/dhcp-1/python/src/grpc/network_service.py similarity index 100% rename from net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py rename to modules/network/dhcp-1/python/src/grpc/network_service.py diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto b/modules/network/dhcp-1/python/src/grpc/proto/grpc.proto similarity index 100% rename from net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto rename to modules/network/dhcp-1/python/src/grpc/proto/grpc.proto diff --git a/net_orc/network/modules/dhcp-2/bin/start_network_service b/modules/network/dhcp-2/bin/start_network_service similarity index 91% rename from net_orc/network/modules/dhcp-2/bin/start_network_service rename to modules/network/dhcp-2/bin/start_network_service index d58174695..ad5ff09e7 100644 --- a/net_orc/network/modules/dhcp-2/bin/start_network_service +++ b/modules/network/dhcp-2/bin/start_network_service @@ -21,8 +21,8 @@ mkdir /var/run/radvd #Create and set permissions on the log files touch $DHCP_LOG_FILE touch $RA_LOG_FILE -chown $HOST_USER:$HOST_USER $DHCP_LOG_FILE -chown $HOST_USER:$HOST_USER $RA_LOG_FILE +chown $HOST_USER $DHCP_LOG_FILE +chown $HOST_USER $RA_LOG_FILE #Move the config files to the correct location diff --git a/net_orc/network/modules/dhcp-2/conf/dhcpd.conf b/modules/network/dhcp-2/conf/dhcpd.conf similarity index 100% rename from net_orc/network/modules/dhcp-2/conf/dhcpd.conf rename to modules/network/dhcp-2/conf/dhcpd.conf diff --git a/net_orc/network/modules/dhcp-2/conf/module_config.json b/modules/network/dhcp-2/conf/module_config.json similarity index 100% rename from net_orc/network/modules/dhcp-2/conf/module_config.json rename to modules/network/dhcp-2/conf/module_config.json diff --git a/net_orc/network/modules/dhcp-2/conf/radvd.conf b/modules/network/dhcp-2/conf/radvd.conf similarity index 100% rename from net_orc/network/modules/dhcp-2/conf/radvd.conf rename to modules/network/dhcp-2/conf/radvd.conf diff --git a/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile b/modules/network/dhcp-2/dhcp-2.Dockerfile similarity index 55% rename from net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile rename to modules/network/dhcp-2/dhcp-2.Dockerfile index 99804e0e3..231d0c558 100644 --- a/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile +++ b/modules/network/dhcp-2/dhcp-2.Dockerfile @@ -1,14 +1,18 @@ # Image name: test-run/dhcp-primary FROM test-run/base:latest +ARG MODULE_NAME=dhcp-2 +ARG MODULE_DIR=modules/network/$MODULE_NAME + # Install dhcp server RUN apt-get install -y isc-dhcp-server radvd # Copy over all configuration files -COPY network/modules/dhcp-1/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf # Copy over all binary files -COPY network/modules/dhcp-1/bin /testrun/bin - +COPY $MODULE_DIR/bin /testrun/bin + # Copy over all python files -COPY network/modules/dhcp-1/python /testrun/python +COPY $MODULE_DIR/python /testrun/python + diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py b/modules/network/dhcp-2/python/src/grpc/__init__.py similarity index 100% rename from net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py rename to modules/network/dhcp-2/python/src/grpc/__init__.py diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py b/modules/network/dhcp-2/python/src/grpc/dhcp_config.py similarity index 100% rename from net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py rename to modules/network/dhcp-2/python/src/grpc/dhcp_config.py diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py b/modules/network/dhcp-2/python/src/grpc/network_service.py similarity index 100% rename from net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py rename to modules/network/dhcp-2/python/src/grpc/network_service.py diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto b/modules/network/dhcp-2/python/src/grpc/proto/grpc.proto similarity index 100% rename from net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto rename to modules/network/dhcp-2/python/src/grpc/proto/grpc.proto diff --git a/net_orc/network/modules/dns/bin/start_network_service b/modules/network/dns/bin/start_network_service similarity index 100% rename from net_orc/network/modules/dns/bin/start_network_service rename to modules/network/dns/bin/start_network_service diff --git a/net_orc/network/modules/dns/conf/dnsmasq.conf b/modules/network/dns/conf/dnsmasq.conf similarity index 100% rename from net_orc/network/modules/dns/conf/dnsmasq.conf rename to modules/network/dns/conf/dnsmasq.conf diff --git a/net_orc/network/modules/dns/conf/module_config.json b/modules/network/dns/conf/module_config.json similarity index 100% rename from net_orc/network/modules/dns/conf/module_config.json rename to modules/network/dns/conf/module_config.json diff --git a/net_orc/network/modules/dns/dns.Dockerfile b/modules/network/dns/dns.Dockerfile similarity index 67% rename from net_orc/network/modules/dns/dns.Dockerfile rename to modules/network/dns/dns.Dockerfile index 84c1c7eb1..edfd4dd03 100644 --- a/net_orc/network/modules/dns/dns.Dockerfile +++ b/modules/network/dns/dns.Dockerfile @@ -1,6 +1,9 @@ # Image name: test-run/dns FROM test-run/base:latest +ARG MODULE_NAME=dns +ARG MODULE_DIR=modules/network/$MODULE_NAME + #Update and get all additional requirements not contained in the base image RUN apt-get update --fix-missing @@ -8,7 +11,7 @@ RUN apt-get update --fix-missing RUN apt-get install -y dnsmasq # Copy over all configuration files -COPY network/modules/dns/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf # Copy over all binary files -COPY network/modules/dns/bin /testrun/bin +COPY $MODULE_DIR/bin /testrun/bin diff --git a/net_orc/network/modules/gateway/bin/start_network_service b/modules/network/gateway/bin/start_network_service similarity index 100% rename from net_orc/network/modules/gateway/bin/start_network_service rename to modules/network/gateway/bin/start_network_service diff --git a/net_orc/network/modules/gateway/conf/module_config.json b/modules/network/gateway/conf/module_config.json similarity index 100% rename from net_orc/network/modules/gateway/conf/module_config.json rename to modules/network/gateway/conf/module_config.json diff --git a/net_orc/network/modules/gateway/gateway.Dockerfile b/modules/network/gateway/gateway.Dockerfile similarity index 59% rename from net_orc/network/modules/gateway/gateway.Dockerfile rename to modules/network/gateway/gateway.Dockerfile index b7085ebac..9bfa77dae 100644 --- a/net_orc/network/modules/gateway/gateway.Dockerfile +++ b/modules/network/gateway/gateway.Dockerfile @@ -1,11 +1,14 @@ # Image name: test-run/gateway FROM test-run/base:latest +ARG MODULE_NAME=gateway +ARG MODULE_DIR=modules/network/$MODULE_NAME + # Install required packages RUN apt-get install -y iptables isc-dhcp-client # Copy over all configuration files -COPY network/modules/gateway/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf # Copy over all binary files -COPY network/modules/gateway/bin /testrun/bin +COPY $MODULE_DIR/bin /testrun/bin diff --git a/net_orc/network/modules/ntp/bin/start_network_service b/modules/network/ntp/bin/start_network_service similarity index 82% rename from net_orc/network/modules/ntp/bin/start_network_service rename to modules/network/ntp/bin/start_network_service index 4c0c5dc74..b20cf8831 100644 --- a/net_orc/network/modules/ntp/bin/start_network_service +++ b/modules/network/ntp/bin/start_network_service @@ -7,7 +7,7 @@ echo Starting ntp #Create and set permissions on the log file touch $LOG_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER $LOG_FILE #Start the NTP server python3 -u $PYTHON_SRC_DIR/ntp_server.py > $LOG_FILE diff --git a/net_orc/network/modules/ntp/conf/module_config.json b/modules/network/ntp/conf/module_config.json similarity index 100% rename from net_orc/network/modules/ntp/conf/module_config.json rename to modules/network/ntp/conf/module_config.json diff --git a/modules/network/ntp/ntp.Dockerfile b/modules/network/ntp/ntp.Dockerfile new file mode 100644 index 000000000..1add3178e --- /dev/null +++ b/modules/network/ntp/ntp.Dockerfile @@ -0,0 +1,16 @@ +# Image name: test-run/ntp +FROM test-run/base:latest + +ARG MODULE_NAME=ntp +ARG MODULE_DIR=modules/network/$MODULE_NAME + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python + +EXPOSE 123/udp diff --git a/net_orc/network/modules/ntp/python/src/ntp_server.py b/modules/network/ntp/python/src/ntp_server.py similarity index 100% rename from net_orc/network/modules/ntp/python/src/ntp_server.py rename to modules/network/ntp/python/src/ntp_server.py diff --git a/net_orc/network/modules/radius/bin/start_network_service b/modules/network/radius/bin/start_network_service similarity index 89% rename from net_orc/network/modules/radius/bin/start_network_service rename to modules/network/radius/bin/start_network_service index e27a828dd..399a90ae5 100644 --- a/net_orc/network/modules/radius/bin/start_network_service +++ b/modules/network/radius/bin/start_network_service @@ -15,6 +15,6 @@ python3 -u $PYTHON_SRC_DIR/authenticator.py & #Create and set permissions on the log file touch $LOG_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER $LOG_FILE freeradius -f -X &> $LOG_FILE \ No newline at end of file diff --git a/net_orc/network/modules/radius/conf/ca.crt b/modules/network/radius/conf/ca.crt similarity index 100% rename from net_orc/network/modules/radius/conf/ca.crt rename to modules/network/radius/conf/ca.crt diff --git a/net_orc/network/modules/radius/conf/eap b/modules/network/radius/conf/eap similarity index 100% rename from net_orc/network/modules/radius/conf/eap rename to modules/network/radius/conf/eap diff --git a/net_orc/network/modules/radius/conf/module_config.json b/modules/network/radius/conf/module_config.json similarity index 100% rename from net_orc/network/modules/radius/conf/module_config.json rename to modules/network/radius/conf/module_config.json diff --git a/net_orc/network/modules/radius/python/requirements.txt b/modules/network/radius/python/requirements.txt similarity index 100% rename from net_orc/network/modules/radius/python/requirements.txt rename to modules/network/radius/python/requirements.txt diff --git a/net_orc/network/modules/radius/python/src/authenticator.py b/modules/network/radius/python/src/authenticator.py similarity index 100% rename from net_orc/network/modules/radius/python/src/authenticator.py rename to modules/network/radius/python/src/authenticator.py diff --git a/net_orc/network/modules/radius/radius.Dockerfile b/modules/network/radius/radius.Dockerfile similarity index 74% rename from net_orc/network/modules/radius/radius.Dockerfile rename to modules/network/radius/radius.Dockerfile index a72313826..c44c5f0cc 100644 --- a/net_orc/network/modules/radius/radius.Dockerfile +++ b/modules/network/radius/radius.Dockerfile @@ -1,6 +1,9 @@ # Image name: test-run/radius FROM test-run/base:latest +ARG MODULE_NAME=radius +ARG MODULE_DIR=modules/network/$MODULE_NAME + # Install radius and git RUN apt-get update && apt-get install -y openssl freeradius git @@ -14,13 +17,13 @@ EXPOSE 1812/udp EXPOSE 1813/udp # Copy over all configuration files -COPY network/modules/radius/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf # Copy over all binary files -COPY network/modules/radius/bin /testrun/bin +COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY network/modules/radius/python /testrun/python +COPY $MODULE_DIR/python /testrun/python # Install all python requirements for the module RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git a/net_orc/network/modules/template/bin/start_network_service b/modules/network/template/bin/start_network_service similarity index 100% rename from net_orc/network/modules/template/bin/start_network_service rename to modules/network/template/bin/start_network_service diff --git a/net_orc/network/modules/template/conf/module_config.json b/modules/network/template/conf/module_config.json similarity index 91% rename from net_orc/network/modules/template/conf/module_config.json rename to modules/network/template/conf/module_config.json index c767c9ad6..e702e1804 100644 --- a/net_orc/network/modules/template/conf/module_config.json +++ b/modules/network/template/conf/module_config.json @@ -15,6 +15,7 @@ }, "docker": { "enable_container": false, + "template":true, "depends_on": "base", "mounts": [ { diff --git a/net_orc/network/modules/template/python/src/template_main.py b/modules/network/template/python/src/template_main.py similarity index 100% rename from net_orc/network/modules/template/python/src/template_main.py rename to modules/network/template/python/src/template_main.py diff --git a/modules/network/template/template.Dockerfile b/modules/network/template/template.Dockerfile new file mode 100644 index 000000000..9efbfb230 --- /dev/null +++ b/modules/network/template/template.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/template +FROM test-run/base:latest + +ARG MODULE_NAME=template +ARG MODULE_DIR=modules/network/$MODULE_NAME + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/base/base.Dockerfile b/modules/test/base/base.Dockerfile similarity index 74% rename from test_orc/modules/base/base.Dockerfile rename to modules/test/base/base.Dockerfile index a508caef7..b8398eae9 100644 --- a/test_orc/modules/base/base.Dockerfile +++ b/modules/test/base/base.Dockerfile @@ -1,17 +1,20 @@ # Image name: test-run/base-test FROM ubuntu:jammy +ARG MODULE_NAME=base +ARG MODULE_DIR=modules/test/$MODULE_NAME + # Install common software RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix nmap --fix-missing # Setup the base python requirements -COPY modules/base/python /testrun/python +COPY $MODULE_DIR/python /testrun/python # Install all python requirements for the module RUN pip3 install -r /testrun/python/requirements.txt -# Add the bin files -COPY modules/base/bin /testrun/bin +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin # Remove incorrect line endings RUN dos2unix /testrun/bin/* diff --git a/test_orc/modules/base/bin/capture b/modules/test/base/bin/capture similarity index 88% rename from test_orc/modules/base/bin/capture rename to modules/test/base/bin/capture index facb6acf7..45cfcd42f 100644 --- a/test_orc/modules/base/bin/capture +++ b/modules/test/base/bin/capture @@ -12,7 +12,7 @@ INTERFACE=$2 # Create the output directory and start the capture mkdir -p $PCAP_DIR -chown $HOST_USER:$HOST_USER $PCAP_DIR +chown $HOST_USER $PCAP_DIR tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & # Small pause to let the capture to start diff --git a/test_orc/modules/base/bin/get_ipv4_addr b/modules/test/base/bin/get_ipv4_addr similarity index 100% rename from test_orc/modules/base/bin/get_ipv4_addr rename to modules/test/base/bin/get_ipv4_addr diff --git a/test_orc/modules/base/bin/setup_binaries b/modules/test/base/bin/setup_binaries similarity index 100% rename from test_orc/modules/base/bin/setup_binaries rename to modules/test/base/bin/setup_binaries diff --git a/test_orc/modules/base/bin/start_grpc b/modules/test/base/bin/start_grpc similarity index 100% rename from test_orc/modules/base/bin/start_grpc rename to modules/test/base/bin/start_grpc diff --git a/test_orc/modules/base/bin/start_module b/modules/test/base/bin/start_module similarity index 97% rename from test_orc/modules/base/bin/start_module rename to modules/test/base/bin/start_module index c179668ba..3e4737d8b 100644 --- a/test_orc/modules/base/bin/start_module +++ b/modules/test/base/bin/start_module @@ -15,7 +15,7 @@ IFACE=veth0 useradd $HOST_USER # Set permissions on the output files -chown -R $HOST_USER:$HOST_USER $OUTPUT_DIR +chown -R $HOST_USER $OUTPUT_DIR # Enable IPv6 for all containers sysctl net.ipv6.conf.all.disable_ipv6=0 diff --git a/test_orc/modules/base/bin/wait_for_interface b/modules/test/base/bin/wait_for_interface similarity index 100% rename from test_orc/modules/base/bin/wait_for_interface rename to modules/test/base/bin/wait_for_interface diff --git a/test_orc/modules/base/conf/module_config.json b/modules/test/base/conf/module_config.json similarity index 100% rename from test_orc/modules/base/conf/module_config.json rename to modules/test/base/conf/module_config.json diff --git a/test_orc/modules/base/python/requirements.txt b/modules/test/base/python/requirements.txt similarity index 100% rename from test_orc/modules/base/python/requirements.txt rename to modules/test/base/python/requirements.txt diff --git a/test_orc/modules/base/python/src/grpc/start_server.py b/modules/test/base/python/src/grpc/start_server.py similarity index 100% rename from test_orc/modules/base/python/src/grpc/start_server.py rename to modules/test/base/python/src/grpc/start_server.py diff --git a/test_orc/modules/base/python/src/logger.py b/modules/test/base/python/src/logger.py similarity index 100% rename from test_orc/modules/base/python/src/logger.py rename to modules/test/base/python/src/logger.py diff --git a/test_orc/modules/base/python/src/test_module.py b/modules/test/base/python/src/test_module.py similarity index 100% rename from test_orc/modules/base/python/src/test_module.py rename to modules/test/base/python/src/test_module.py diff --git a/test_orc/modules/base/python/src/util.py b/modules/test/base/python/src/util.py similarity index 100% rename from test_orc/modules/base/python/src/util.py rename to modules/test/base/python/src/util.py diff --git a/modules/test/baseline/baseline.Dockerfile b/modules/test/baseline/baseline.Dockerfile new file mode 100644 index 000000000..c2b32e7b7 --- /dev/null +++ b/modules/test/baseline/baseline.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +ARG MODULE_NAME=baseline +ARG MODULE_DIR=modules/test/$MODULE_NAME + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/baseline/bin/start_test_module b/modules/test/baseline/bin/start_test_module similarity index 90% rename from test_orc/modules/baseline/bin/start_test_module rename to modules/test/baseline/bin/start_test_module index 2938eb0f8..a09349cf9 100644 --- a/test_orc/modules/baseline/bin/start_test_module +++ b/modules/test/baseline/bin/start_test_module @@ -31,8 +31,8 @@ LOG_FILE=/runtime/output/$MODULE_NAME.log RESULT_FILE=/runtime/output/$MODULE_NAME-result.json touch $LOG_FILE touch $RESULT_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE -chown $HOST_USER:$HOST_USER $RESULT_FILE +chown $HOST_USER $LOG_FILE +chown $HOST_USER $RESULT_FILE # Run the python scrip that will execute the tests for this module # -u flag allows python print statements diff --git a/test_orc/modules/baseline/conf/module_config.json b/modules/test/baseline/conf/module_config.json similarity index 100% rename from test_orc/modules/baseline/conf/module_config.json rename to modules/test/baseline/conf/module_config.json diff --git a/test_orc/modules/baseline/python/src/baseline_module.py b/modules/test/baseline/python/src/baseline_module.py similarity index 100% rename from test_orc/modules/baseline/python/src/baseline_module.py rename to modules/test/baseline/python/src/baseline_module.py diff --git a/test_orc/modules/baseline/python/src/run.py b/modules/test/baseline/python/src/run.py similarity index 100% rename from test_orc/modules/baseline/python/src/run.py rename to modules/test/baseline/python/src/run.py diff --git a/test_orc/modules/conn/bin/start_test_module b/modules/test/conn/bin/start_test_module similarity index 92% rename from test_orc/modules/conn/bin/start_test_module rename to modules/test/conn/bin/start_test_module index 4550849ce..8290c0764 100644 --- a/test_orc/modules/conn/bin/start_test_module +++ b/modules/test/conn/bin/start_test_module @@ -28,8 +28,8 @@ LOG_FILE=/runtime/output/$MODULE_NAME.log RESULT_FILE=/runtime/output/$MODULE_NAME-result.json touch $LOG_FILE touch $RESULT_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE -chown $HOST_USER:$HOST_USER $RESULT_FILE +chown $HOST_USER $LOG_FILE +chown $HOST_USER $RESULT_FILE # Run the python scrip that will execute the tests for this module # -u flag allows python print statements diff --git a/test_orc/modules/conn/conf/module_config.json b/modules/test/conn/conf/module_config.json similarity index 100% rename from test_orc/modules/conn/conf/module_config.json rename to modules/test/conn/conf/module_config.json diff --git a/test_orc/modules/conn/conn.Dockerfile b/modules/test/conn/conn.Dockerfile similarity index 59% rename from test_orc/modules/conn/conn.Dockerfile rename to modules/test/conn/conn.Dockerfile index cf25d0f02..2526b0046 100644 --- a/test_orc/modules/conn/conn.Dockerfile +++ b/modules/test/conn/conn.Dockerfile @@ -1,6 +1,9 @@ # Image name: test-run/conn-test FROM test-run/base-test:latest +ARG MODULE_NAME=conn +ARG MODULE_DIR=modules/test/$MODULE_NAME + # Install all necessary packages RUN apt-get install -y wget @@ -8,16 +11,16 @@ RUN apt-get install -y wget RUN wget http://standards-oui.ieee.org/oui.txt -P /usr/local/etc/ #Load the requirements file -COPY modules/conn/python/requirements.txt /testrun/python +COPY $MODULE_DIR/python/requirements.txt /testrun/python #Install all python requirements for the module RUN pip3 install -r /testrun/python/requirements.txt # Copy over all configuration files -COPY modules/conn/conf /testrun/conf +COPY $MODULE_DIR/conf /testrun/conf -# Load device binary files -COPY modules/conn/bin /testrun/bin +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY modules/conn/python /testrun/python +COPY $MODULE_DIR/python /testrun/python diff --git a/test_orc/modules/conn/python/requirements.txt b/modules/test/conn/python/requirements.txt similarity index 100% rename from test_orc/modules/conn/python/requirements.txt rename to modules/test/conn/python/requirements.txt diff --git a/test_orc/modules/conn/python/src/connection_module.py b/modules/test/conn/python/src/connection_module.py similarity index 100% rename from test_orc/modules/conn/python/src/connection_module.py rename to modules/test/conn/python/src/connection_module.py diff --git a/test_orc/modules/conn/python/src/run.py b/modules/test/conn/python/src/run.py similarity index 100% rename from test_orc/modules/conn/python/src/run.py rename to modules/test/conn/python/src/run.py diff --git a/test_orc/modules/dns/bin/start_test_module b/modules/test/dns/bin/start_test_module similarity index 90% rename from test_orc/modules/dns/bin/start_test_module rename to modules/test/dns/bin/start_test_module index 2938eb0f8..a09349cf9 100644 --- a/test_orc/modules/dns/bin/start_test_module +++ b/modules/test/dns/bin/start_test_module @@ -31,8 +31,8 @@ LOG_FILE=/runtime/output/$MODULE_NAME.log RESULT_FILE=/runtime/output/$MODULE_NAME-result.json touch $LOG_FILE touch $RESULT_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE -chown $HOST_USER:$HOST_USER $RESULT_FILE +chown $HOST_USER $LOG_FILE +chown $HOST_USER $RESULT_FILE # Run the python scrip that will execute the tests for this module # -u flag allows python print statements diff --git a/test_orc/modules/dns/conf/module_config.json b/modules/test/dns/conf/module_config.json similarity index 100% rename from test_orc/modules/dns/conf/module_config.json rename to modules/test/dns/conf/module_config.json diff --git a/modules/test/dns/dns.Dockerfile b/modules/test/dns/dns.Dockerfile new file mode 100644 index 000000000..f831d0e2b --- /dev/null +++ b/modules/test/dns/dns.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/conn-test +FROM test-run/base-test:latest + +ARG MODULE_NAME=dns +ARG MODULE_DIR=modules/test/$MODULE_NAME + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/dns/python/src/dns_module.py b/modules/test/dns/python/src/dns_module.py similarity index 100% rename from test_orc/modules/dns/python/src/dns_module.py rename to modules/test/dns/python/src/dns_module.py diff --git a/test_orc/modules/dns/python/src/run.py b/modules/test/dns/python/src/run.py similarity index 100% rename from test_orc/modules/dns/python/src/run.py rename to modules/test/dns/python/src/run.py diff --git a/test_orc/modules/nmap/bin/start_test_module b/modules/test/nmap/bin/start_test_module similarity index 93% rename from test_orc/modules/nmap/bin/start_test_module rename to modules/test/nmap/bin/start_test_module index 4bb7e9f96..333566342 100644 --- a/test_orc/modules/nmap/bin/start_test_module +++ b/modules/test/nmap/bin/start_test_module @@ -31,8 +31,8 @@ LOG_FILE=/runtime/output/$MODULE_NAME.log RESULT_FILE=/runtime/output/$MODULE_NAME-result.json touch $LOG_FILE touch $RESULT_FILE -chown $HOST_USER:$HOST_USER $LOG_FILE -chown $HOST_USER:$HOST_USER $RESULT_FILE +chown $HOST_USER $LOG_FILE +chown $HOST_USER $RESULT_FILE # Run the python scrip that will execute the tests for this module # -u flag allows python print statements diff --git a/test_orc/modules/nmap/conf/module_config.json b/modules/test/nmap/conf/module_config.json similarity index 100% rename from test_orc/modules/nmap/conf/module_config.json rename to modules/test/nmap/conf/module_config.json diff --git a/modules/test/nmap/nmap.Dockerfile b/modules/test/nmap/nmap.Dockerfile new file mode 100644 index 000000000..c1a2f96ce --- /dev/null +++ b/modules/test/nmap/nmap.Dockerfile @@ -0,0 +1,20 @@ +# Image name: test-run/nmap-test +FROM test-run/base-test:latest + +ARG MODULE_NAME=nmap +ARG MODULE_DIR=modules/test/$MODULE_NAME + +#Load the requirements file +COPY $MODULE_DIR/python/requirements.txt /testrun/python + +#Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/nmap/python/requirements.txt b/modules/test/nmap/python/requirements.txt similarity index 100% rename from test_orc/modules/nmap/python/requirements.txt rename to modules/test/nmap/python/requirements.txt diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/modules/test/nmap/python/src/nmap_module.py similarity index 100% rename from test_orc/modules/nmap/python/src/nmap_module.py rename to modules/test/nmap/python/src/nmap_module.py diff --git a/test_orc/modules/nmap/python/src/run.py b/modules/test/nmap/python/src/run.py similarity index 100% rename from test_orc/modules/nmap/python/src/run.py rename to modules/test/nmap/python/src/run.py diff --git a/net_orc/.gitignore b/net_orc/.gitignore deleted file mode 100644 index 2d77147eb..000000000 --- a/net_orc/.gitignore +++ /dev/null @@ -1,133 +0,0 @@ -# Runtime folder -runtime/ -.vscode/ - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ diff --git a/net_orc/network/modules/ntp/ntp.Dockerfile b/net_orc/network/modules/ntp/ntp.Dockerfile deleted file mode 100644 index 3474a504e..000000000 --- a/net_orc/network/modules/ntp/ntp.Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -# Image name: test-run/ntp -FROM test-run/base:latest - -# Copy over all configuration files -COPY network/modules/ntp/conf /testrun/conf - -# Copy over all binary files -COPY network/modules/ntp/bin /testrun/bin - -# Copy over all python files -COPY network/modules/ntp/python /testrun/python - -EXPOSE 123/udp diff --git a/net_orc/network/modules/template/template.Dockerfile b/net_orc/network/modules/template/template.Dockerfile deleted file mode 100644 index 45f9da6d9..000000000 --- a/net_orc/network/modules/template/template.Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -# Image name: test-run/template -FROM test-run/base:latest - -# Copy over all configuration files -COPY network/modules/template/conf /testrun/conf - -# Load device binary files -COPY network/modules/template/bin /testrun/bin - -# Copy over all python files -COPY network/modules/template/python /testrun/python \ No newline at end of file diff --git a/net_orc/orchestrator.Dockerfile b/net_orc/orchestrator.Dockerfile deleted file mode 100644 index f062a33d4..000000000 --- a/net_orc/orchestrator.Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# Image name: test-run/orchestrator -FROM test-run/base:latest - -#Update and get all additional requirements not contained in the base image -RUN apt-get update - -RUN apt-get install -y python3-pip curl openvswitch-switch - -#Download and install docker client -ENV DOCKERVERSION=20.10.2 -RUN curl -fsSLO https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKERVERSION}.tgz \ - && tar xzvf docker-${DOCKERVERSION}.tgz --strip 1 -C /usr/local/bin docker/docker \ - && rm docker-${DOCKERVERSION}.tgz - -#Create a directory to load all the app files into -RUN mkdir /python - -#Load the requirements file -COPY python/requirements.txt /python - -#Install all python requirements for the module -RUN pip3 install -r python/requirements.txt diff --git a/net_orc/python/requirements.txt b/net_orc/python/requirements.txt deleted file mode 100644 index 5d8f29214..000000000 --- a/net_orc/python/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -docker -ipaddress -netifaces -scapy \ No newline at end of file diff --git a/test_orc/modules/baseline/baseline.Dockerfile b/test_orc/modules/baseline/baseline.Dockerfile deleted file mode 100644 index 5b634e6ee..000000000 --- a/test_orc/modules/baseline/baseline.Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -# Image name: test-run/baseline-test -FROM test-run/base-test:latest - -# Copy over all configuration files -COPY modules/baseline/conf /testrun/conf - -# Load device binary files -COPY modules/baseline/bin /testrun/bin - -# Copy over all python files -COPY modules/baseline/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/dns/dns.Dockerfile b/test_orc/modules/dns/dns.Dockerfile deleted file mode 100644 index 7c3497bc3..000000000 --- a/test_orc/modules/dns/dns.Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -# Image name: test-run/baseline-test -FROM test-run/base-test:latest - -# Copy over all configuration files -COPY modules/dns/conf /testrun/conf - -# Load device binary files -COPY modules/dns/bin /testrun/bin - -# Copy over all python files -COPY modules/dns/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/nmap/nmap.Dockerfile b/test_orc/modules/nmap/nmap.Dockerfile deleted file mode 100644 index 3a8728d9f..000000000 --- a/test_orc/modules/nmap/nmap.Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -# Image name: test-run/baseline-test -FROM test-run/base-test:latest - -#Load the requirements file -COPY modules/nmap/python/requirements.txt /testrun/python - -#Install all python requirements for the module -RUN pip3 install -r /testrun/python/requirements.txt - -# Copy over all configuration files -COPY modules/nmap/conf /testrun/conf - -# Load device binary files -COPY modules/nmap/bin /testrun/bin - -# Copy over all python files -COPY modules/nmap/python /testrun/python \ No newline at end of file diff --git a/test_orc/python/requirements.txt b/test_orc/python/requirements.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/testing/test_baseline b/testing/test_baseline index d7fc1e5c5..bf191b88f 100755 --- a/testing/test_baseline +++ b/testing/test_baseline @@ -3,6 +3,8 @@ TESTRUN_OUT=/tmp/testrun.log +ifconfig + # Setup requirements sudo apt-get update sudo apt-get install openvswitch-common openvswitch-switch tcpdump jq moreutils coreutils @@ -18,9 +20,6 @@ sudo docker network create -d macvlan -o parent=endev0b endev0 # Start OVS sudo /usr/share/openvswitch/scripts/ovs-ctl start -# Fix due to ordering -sudo docker build ./net_orc/ -t test-run/base -f net_orc/network/modules/base/base.Dockerfile - # Build Test Container sudo docker build ./testing/docker/ci_baseline -t ci1 -f ./testing/docker/ci_baseline/Dockerfile From 7bb93664219e2575dc6934e4fcf3d383636508b1 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Tue, 20 Jun 2023 03:03:32 -0700 Subject: [PATCH 36/48] Ip control (#51) * Add initial work for ip control module * Implement ip control module with additional cleanup methods * Update link check to not use error stream * Add error checking around container network configurations * Add network cleanup for namespaces and links * formatting --- framework/python/src/net_orc/ip_control.py | 220 ++++++++++++++++++ .../src/net_orc/network_orchestrator.py | 114 +++------ 2 files changed, 257 insertions(+), 77 deletions(-) create mode 100644 framework/python/src/net_orc/ip_control.py diff --git a/framework/python/src/net_orc/ip_control.py b/framework/python/src/net_orc/ip_control.py new file mode 100644 index 000000000..eb683c46b --- /dev/null +++ b/framework/python/src/net_orc/ip_control.py @@ -0,0 +1,220 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""IP Control Module""" +from common import logger +from common import util +import re + +LOGGER = logger.get_logger('ip_ctrl') + + +class IPControl: + """IP Control""" + + def __init__(self): + """Initialize the IPControl object""" + + def add_link(self, interface_name, peer_name): + """Create an ip link with a peer""" + success = util.run_command('ip link add ' + interface_name + + ' type veth peer name ' + peer_name) + return success + + def add_namespace(self, namespace): + """Add a network namespace""" + exists = self.namespace_exists(namespace) + LOGGER.info("Namespace exists: " + str(exists)) + if exists: + return True + else: + success = util.run_command('ip netns add ' + namespace) + return success + + def delete_link(self, interface_name): + """Delete an ip link""" + success = util.run_command('ip link delete ' + interface_name) + return success + + def delete_namespace(self, interface_name): + """Delete an ip namespace""" + success = util.run_command('ip netns delete ' + interface_name) + return success + + def link_exists(self, link_name): + links = self.get_links() + return link_name in links + + def namespace_exists(self, namespace): + """Check if a namespace already exists""" + namespaces = self.get_namespaces() + if namespace in namespaces: + return True + else: + return False + + def get_links(self): + stdout, stderr = util.run_command('ip link list') + links = stdout.strip().split('\n') + netns_links = [] + for link in links: + match = re.search(r'\d+:\s+(\S+)', link) + if match: + interface_name = match.group(1) + name_match = re.search(r'(.*)@', interface_name) + if name_match: + interface_name = name_match.group(1) + netns_links.append(interface_name.strip()) + return netns_links + + def get_namespaces(self): + stdout, stderr = util.run_command('ip netns list') + #Strip ID's from the namespace results + namespaces = re.findall(r'(\S+)(?:\s+\(id: \d+\))?', stdout) + return namespaces + + def set_namespace(self, interface_name, namespace): + """Attach an interface to a network namespace""" + success = util.run_command('ip link set ' + interface_name + ' netns ' + + namespace) + return success + + def rename_interface(self, interface_name, namespace, new_name): + """Rename an interface""" + success = util.run_command('ip netns exec ' + namespace + + ' ip link set dev ' + interface_name + ' name ' + + new_name) + return success + + def set_interface_mac(self, interface_name, namespace, mac_addr): + """Set MAC address of an interface""" + success = util.run_command('ip netns exec ' + namespace + + ' ip link set dev ' + interface_name + + ' address ' + mac_addr) + return success + + def set_interface_ip(self, interface_name, namespace, ipaddr): + """Set IP address of an interface""" + success = util.run_command('ip netns exec ' + namespace + ' ip addr add ' + + ipaddr + ' dev ' + interface_name) + return success + + def set_interface_up(self, interface_name, namespace=None): + """Set the interface to the up state""" + if namespace is None: + success = util.run_command('ip link set dev ' + interface_name + ' up') + else: + success = util.run_command('ip netns exec ' + namespace + + ' ip link set dev ' + interface_name + ' up') + return success + + def clean_all(self): + """Cleanup all existing test run interfaces and namespaces""" + + # Delete all namesapces that start with tr + namespaces = self.get_namespaces() + for ns in namespaces: + if 'tr' in ns: + self.delete_namespace(ns) + + # Delete all namespaces that start with tr + links = self.get_links() + for link in links: + if 'tr' in link: + self.delete_link(link) + + def cleanup(self, interface=None, namespace=None): + """Cleanup existing link and namespace if they still exist""" + + link_clean = True + if interface is not None: + if self.link_exists(interface): + link_clean = self.delete_link(interface) + + ns_clean = True + if namespace is not None: + if self.namespace_exists(namespace): + ns_clean = self.delete_namespace + return link_clean and ns_clean + + def configure_container_interface(self, + bridge_intf, + container_intf, + namespace_intf, + namespace, + mac_addr, + container_name=None, + ipv4_addr=None, + ipv6_addr=None): + + # Cleanup old interface and namespaces + self.cleanup(bridge_intf, namespace) + + # Create interface pair + self.add_link(bridge_intf, container_intf) + + if container_name is not None: + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + container_name)[0] + if not container_pid.isdigit(): + LOGGER.error(f'Failed to resolve pid for {container_name}') + return False + + # Create symlink for container network namespace + if not util.run_command('ln -sf /proc/' + container_pid + + '/ns/net /var/run/netns/' + namespace, + output=False): + LOGGER.error( + f'Failed to link {container_name} to namespace {namespace_intf}') + return False + + # Attach container interface to container network namespace + if not self.set_namespace(container_intf, namespace): + LOGGER.error(f'Failed to set namespace {namespace} for {container_intf}') + return False + + # Rename container interface name + if not self.rename_interface(container_intf, namespace, namespace_intf): + LOGGER.error( + f'Failed to rename container interface {container_intf} to {namespace_intf}' + ) + return False + + # Set MAC address of container interface + if not self.set_interface_mac(namespace_intf, namespace, mac_addr): + LOGGER.error( + f'Failed to set MAC address for {namespace_intf} to {mac_addr}') + return False + + # Set IP address of container interface + if ipv4_addr is not None: + if not self.set_interface_ip(namespace_intf, namespace, ipv4_addr): + LOGGER.error( + f'Failed to set IPv4 address for {namespace_intf} to {ipv4_addr}') + return False + if ipv6_addr is not None: + if not self.set_interface_ip(namespace_intf, namespace, ipv6_addr): + LOGGER.error( + f'Failed to set IPv6 address for {namespace_intf} to {ipv6_addr}') + return False + + # Set interfaces up + if not self.set_interface_up(bridge_intf): + LOGGER.error(f'Failed to set interface up {bridge_intf}') + return False + if not self.set_interface_up(namespace_intf, namespace): + LOGGER.error(f'Failed to set interface up {namespace_intf}') + return False + return True diff --git a/framework/python/src/net_orc/network_orchestrator.py b/framework/python/src/net_orc/network_orchestrator.py index f1f479742..f3c07e8e4 100644 --- a/framework/python/src/net_orc/network_orchestrator.py +++ b/framework/python/src/net_orc/network_orchestrator.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Network orchestrator is responsible for managing all of the virtual network services""" import getpass @@ -34,6 +33,7 @@ from net_orc.network_event import NetworkEvent from net_orc.network_validator import NetworkValidator from net_orc.ovs_control import OVSControl +from net_orc.ip_control import IPControl LOGGER = logger.get_logger('net_orc') CONFIG_FILE = 'conf/system.json' @@ -83,15 +83,17 @@ def __init__(self, self.validate = validate self.async_monitor = async_monitor - self._path = os.path.dirname(os.path.dirname( - os.path.dirname( - os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) + self._path = os.path.dirname( + os.path.dirname( + os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))) self.validator = NetworkValidator() shutil.rmtree(os.path.join(os.getcwd(), NET_DIR), ignore_errors=True) self.network_config = NetworkConfig() self.load_config(config_file) self._ovs = OVSControl() + self._ip_ctrl = IPControl() def start(self): """Start the network orchestrator.""" @@ -181,9 +183,8 @@ def _device_discovered(self, mac_addr): f'Discovered device {mac_addr}. Waiting for device to obtain IP') device = self._get_device(mac_addr=mac_addr) - device_runtime_dir = os.path.join(RUNTIME_DIR, - TEST_DIR, - device.mac_addr.replace(':', '')) + device_runtime_dir = os.path.join(RUNTIME_DIR, TEST_DIR, + device.mac_addr.replace(':', '')) os.makedirs(device_runtime_dir) util.run_command(f'chown -R {self._host_user} {device_runtime_dir}') @@ -201,7 +202,7 @@ def _device_discovered(self, mac_addr): LOGGER.info( f'Device with mac addr {device.mac_addr} has obtained IP address ' f'{device.ip_addr}') - + self._start_device_monitor(device) def _device_has_ip(self, packet): @@ -418,8 +419,7 @@ def _load_network_module(self, module_dir): # Determine if this is a template if 'template' in net_module_json['config']['docker']: - net_module.template = net_module_json['config']['docker'][ - 'template'] + net_module.template = net_module_json['config']['docker']['template'] # Load network service networking configuration if net_module.enable_container: @@ -493,7 +493,7 @@ def _start_network_service(self, net_module): def _get_host_user(self): user = self._get_os_user() - + # If primary method failed, try secondary if user is None: user = self._get_user() @@ -510,7 +510,7 @@ def _get_os_user(self): LOGGER.error("An OS error occurred while retrieving the login name.") except Exception as e: # Catch any other unexpected exceptions - LOGGER.error("An exception occurred:", e) + LOGGER.error("An exception occurred:", e) return user def _get_user(self): @@ -520,15 +520,15 @@ def _get_user(self): except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: # Handle specific exceptions individually if isinstance(e, KeyError): - LOGGER.error("USER environment variable not set or unavailable.") + LOGGER.error("USER environment variable not set or unavailable.") elif isinstance(e, ImportError): - LOGGER.error("Unable to import the getpass module.") + LOGGER.error("Unable to import the getpass module.") elif isinstance(e, ModuleNotFoundError): - LOGGER.error("The getpass module was not found.") + LOGGER.error("The getpass module was not found.") elif isinstance(e, OSError): - LOGGER.error("An OS error occurred while retrieving the username.") + LOGGER.error("An OS error occurred while retrieving the username.") else: - LOGGER.error("An exception occurred:", e) + LOGGER.error("An exception occurred:", e) return user def _stop_service_module(self, net_module, kill=False): @@ -666,9 +666,18 @@ def _attach_service_to_network(self, net_module): # Container network namespace name container_net_ns = 'tr-ctns-' + net_module.dir_name - # Create interface pair - util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + - container_intf) + # Resolve the interface information + mac_addr = '9a:02:57:1e:8f:' + str(net_module.net_config.ip_index) + ipv4_addr = net_module.net_config.get_ipv4_addr_with_prefix() + ipv6_addr = net_module.net_config.get_ipv6_addr_with_prefix() + + # Add and configure the interface container + if not self._ip_ctrl.configure_container_interface( + bridge_intf, container_intf, "veth0", container_net_ns, mac_addr, + net_module.container_name, ipv4_addr, ipv6_addr): + LOGGER.error('Failed to configure local networking for ' + + net_module.name + '. Exiting.') + sys.exit(1) # Add bridge interface to device bridge if self._ovs.add_port(port=bridge_intf, bridge_name=DEVICE_BRIDGE): @@ -677,42 +686,6 @@ def _attach_service_to_network(self, net_module): DEVICE_BRIDGE + '. Exiting.') sys.exit(1) - # Get PID for running container - # TODO: Some error checking around missing PIDs might be required - container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + - net_module.container_name)[0] - - # Create symlink for container network namespace - util.run_command('ln -sf /proc/' + container_pid + - '/ns/net /var/run/netns/' + container_net_ns) - - # Attach container interface to container network namespace - util.run_command('ip link set ' + container_intf + ' netns ' + - container_net_ns) - - # Rename container interface name to veth0 - util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + - container_intf + ' name veth0') - - # Set MAC address of container interface - util.run_command('ip netns exec ' + container_net_ns + - ' ip link set dev veth0 address 9a:02:57:1e:8f:' + - str(net_module.net_config.ip_index)) - - # Set IP address of container interface - util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + - net_module.net_config.get_ipv4_addr_with_prefix() + - ' dev veth0') - - util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + - net_module.net_config.get_ipv6_addr_with_prefix() + - ' dev veth0') - - # Set interfaces up - util.run_command('ip link set dev ' + bridge_intf + ' up') - util.run_command('ip netns exec ' + container_net_ns + - ' ip link set dev veth0 up') - if net_module.net_config.enable_wan: LOGGER.debug('Attaching net service ' + net_module.display_name + ' to internet bridge') @@ -725,9 +698,11 @@ def _attach_service_to_network(self, net_module): # tr-cti-dhcp (Test Run Container Interface for DHCP container) container_intf = 'tr-cti-' + net_module.dir_name - # Create interface pair - util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + - container_intf) + if not self._ip_ctrl.configure_container_interface( + bridge_intf, container_intf, "eth1", container_net_ns, mac_addr): + LOGGER.error('Failed to configure internet networking for ' + + net_module.name + '. Exiting.') + sys.exit(1) # Attach bridge interface to internet bridge if self._ovs.add_port(port=bridge_intf, bridge_name=INTERNET_BRIDGE): @@ -737,24 +712,6 @@ def _attach_service_to_network(self, net_module): ' to internet bridge ' + DEVICE_BRIDGE + '. Exiting.') sys.exit(1) - # Attach container interface to container network namespace - util.run_command('ip link set ' + container_intf + ' netns ' + - container_net_ns) - - # Rename container interface name to eth1 - util.run_command('ip netns exec ' + container_net_ns + - ' ip link set dev ' + container_intf + ' name eth1') - - # Set MAC address of container interface - util.run_command('ip netns exec ' + container_net_ns + - ' ip link set dev eth1 address 9a:02:57:1e:8f:0' + - str(net_module.net_config.ip_index)) - - # Set interfaces up - util.run_command('ip link set dev ' + bridge_intf + ' up') - util.run_command('ip netns exec ' + container_net_ns + - ' ip link set dev eth1 up') - def restore_net(self): LOGGER.info('Clearing baseline network') @@ -776,6 +733,9 @@ def restore_net(self): # Clear the virtual network self._ovs.restore_net() + # Clean up any existing network artifacts + self._ip_ctrl.clean_all() + # Restart internet interface if util.interface_exists(self._int_intf): util.run_command('ip link set ' + self._int_intf + ' down') From b0d14c2ac75278fcede8fe2f889105bf2b8f9774 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Fri, 23 Jun 2023 13:28:41 +0100 Subject: [PATCH 37/48] Move config to /local (#52) * Move config to /local * Fix testing config * Fix ovs_control config location * Fix faux dev config location --- .gitignore | 1 - conf/.gitignore | 1 - framework/python/src/core/testrun.py | 4 ++-- framework/python/src/net_orc/network_orchestrator.py | 5 ++--- framework/python/src/net_orc/network_validator.py | 2 +- framework/python/src/net_orc/ovs_control.py | 2 +- local/.gitignore | 2 ++ {conf => local}/system.json.example | 0 testing/test_baseline | 2 +- 9 files changed, 9 insertions(+), 10 deletions(-) delete mode 100644 conf/.gitignore create mode 100644 local/.gitignore rename {conf => local}/system.json.example (100%) diff --git a/.gitignore b/.gitignore index ad8f26d34..e168ec07a 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,5 @@ venv/ .vscode/ error pylint.out -local/ __pycache__/ build/ \ No newline at end of file diff --git a/conf/.gitignore b/conf/.gitignore deleted file mode 100644 index 41b89ceb1..000000000 --- a/conf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -system.json \ No newline at end of file diff --git a/framework/python/src/core/testrun.py b/framework/python/src/core/testrun.py index e59b7cda2..d613410e9 100644 --- a/framework/python/src/core/testrun.py +++ b/framework/python/src/core/testrun.py @@ -39,8 +39,8 @@ from device import Device LOGGER = logger.get_logger('test_run') -CONFIG_FILE = 'conf/system.json' -EXAMPLE_CONFIG_FILE = 'conf/system.json.example' +CONFIG_FILE = 'local/system.json' +EXAMPLE_CONFIG_FILE = 'local/system.json.example' RUNTIME = 120 LOCAL_DEVICES_DIR = 'local/devices' diff --git a/framework/python/src/net_orc/network_orchestrator.py b/framework/python/src/net_orc/network_orchestrator.py index f3c07e8e4..643dc4def 100644 --- a/framework/python/src/net_orc/network_orchestrator.py +++ b/framework/python/src/net_orc/network_orchestrator.py @@ -25,7 +25,6 @@ import threading import docker from docker.types import Mount -from collections import OrderedDict from common import logger from common import util from net_orc.listener import Listener @@ -36,8 +35,8 @@ from net_orc.ip_control import IPControl LOGGER = logger.get_logger('net_orc') -CONFIG_FILE = 'conf/system.json' -EXAMPLE_CONFIG_FILE = 'conf/system.json.example' +CONFIG_FILE = 'local/system.json' +EXAMPLE_CONFIG_FILE = 'local/system.json.example' RUNTIME_DIR = 'runtime' TEST_DIR = 'test' MONITOR_PCAP = 'monitor.pcap' diff --git a/framework/python/src/net_orc/network_validator.py b/framework/python/src/net_orc/network_validator.py index 4ee46124d..a4c51eb2d 100644 --- a/framework/python/src/net_orc/network_validator.py +++ b/framework/python/src/net_orc/network_validator.py @@ -28,7 +28,7 @@ DEVICES_DIR = 'modules/devices' DEVICE_METADATA = 'conf/module_config.json' DEVICE_BRIDGE = 'tr-d' -CONF_DIR = 'conf' +CONF_DIR = 'local' CONF_FILE = 'system.json' diff --git a/framework/python/src/net_orc/ovs_control.py b/framework/python/src/net_orc/ovs_control.py index 3c950d4af..83823e8fa 100644 --- a/framework/python/src/net_orc/ovs_control.py +++ b/framework/python/src/net_orc/ovs_control.py @@ -18,7 +18,7 @@ from common import logger from common import util -CONFIG_FILE = 'conf/system.json' +CONFIG_FILE = 'local/system.json' DEVICE_BRIDGE = 'tr-d' INTERNET_BRIDGE = 'tr-c' LOGGER = logger.get_logger('ovs_ctrl') diff --git a/local/.gitignore b/local/.gitignore new file mode 100644 index 000000000..4fb365c03 --- /dev/null +++ b/local/.gitignore @@ -0,0 +1,2 @@ +system.json +devices \ No newline at end of file diff --git a/conf/system.json.example b/local/system.json.example similarity index 100% rename from conf/system.json.example rename to local/system.json.example diff --git a/testing/test_baseline b/testing/test_baseline index bf191b88f..36d21fa5e 100755 --- a/testing/test_baseline +++ b/testing/test_baseline @@ -23,7 +23,7 @@ sudo /usr/share/openvswitch/scripts/ovs-ctl start # Build Test Container sudo docker build ./testing/docker/ci_baseline -t ci1 -f ./testing/docker/ci_baseline/Dockerfile -cat <conf/system.json +cat <local/system.json { "network": { "device_intf": "endev0a", From 94e937fb657954c77c364f20adc1d56a5f15c975 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Fri, 23 Jun 2023 14:18:42 +0100 Subject: [PATCH 38/48] Add documentation (#53) --- docs/configure_device.md | 41 +++++++++++++++++++++++++++++++ docs/get_started.md | 53 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 docs/configure_device.md create mode 100644 docs/get_started.md diff --git a/docs/configure_device.md b/docs/configure_device.md new file mode 100644 index 000000000..ad58521a4 --- /dev/null +++ b/docs/configure_device.md @@ -0,0 +1,41 @@ +# Device Configuration + +The device configuration file allows you to customize the testing behavior for a specific device. This file is located at `local/devices/{Device Name}/device_config.json`. Below is an overview of how to configure the device tests. + +## Device Information + +The device information section includes the manufacturer, model, and MAC address of the device. These details help identify the specific device being tested. + +## Test Modules + +Test modules are groups of tests that can be enabled or disabled as needed. You can choose which test modules to include for your device. The device configuration file contains the following test module: + +- DNS Test Module + +### Enabling and Disabling Test Modules + +To enable or disable a test module, modify the `enabled` field within the respective module. Setting it to `true` enables the module, while setting it to `false` disables the module. + +## Individual Tests + +Within the DNS test module, there are individual tests that can be enabled or disabled. These tests focus on specific aspects of network behavior. You can customize the tests based on your device and testing requirements. + +### Enabling and Disabling Tests + +To enable or disable an individual test, modify the `enabled` field within the respective test. Setting it to `true` enables the test, while setting it to `false` disables the test. + +> Note: The example device configuration file (`resources/devices/template/device_config.json`) provides a complete usage example, including the structure and configuration options for the DNS test module and its tests. You can refer to this file to understand how to configure your device tests effectively. + +## Customizing the Device Configuration + +To customize the device configuration for your specific device, follow these steps: + +1. Copy the default configuration file provided in the `resources/devices/template` folder. + - Create a new folder for your device under `local/devices` directory. + - Copy the `device_config.json` file from `resources/devices/template` to the newly created device folder. + +This ensures that you have a copy of the default configuration file, which you can then modify for your specific device. + +> Note: Ensure that the device configuration file is properly formatted, and the changes made align with the intended test behavior. Incorrect settings or syntax may lead to unexpected results during testing. + +If you encounter any issues or need assistance with the device configuration, refer to the Test Run documentation or ask a question on the Issues page. diff --git a/docs/get_started.md b/docs/get_started.md new file mode 100644 index 000000000..7b8cf9e13 --- /dev/null +++ b/docs/get_started.md @@ -0,0 +1,53 @@ +# Getting Started + +## Prerequisites + +### Hardware + +Before starting with Test Run, ensure you have the following hardware: + +- PC running Ubuntu LTS (laptop or desktop) +- 2x USB Ethernet adapter (one may be a built-in Ethernet port) +- Internet connection + +### Software + +Ensure the following software is installed on your Ubuntu LTS PC: + +- Python 3 (already available on Ubuntu LTS) +- Docker - Installation Guide: [https://docs.docker.com/engine/install/](https://docs.docker.com/engine/install/) +- Open vSwitch ``sudo apt-get install openvswitch-common openvswitch-switch`` + +## Installation + +1. Download Test Run from the releases page or the appropriate source. + +2. Run the install script. + +## Configuration + +1. Copy the default configuration file. + +2. Open the `local/system.json` file and modify the configuration as needed. Specify the interface names for the internet and device interfaces. + +## Test Your Device + +1. Attach network interfaces: + + - Connect one USB Ethernet adapter to the internet source (e.g., router or switch) using an Ethernet cable. + - Connect the other USB Ethernet adapter directly to the IoT device you want to test using an Ethernet cable. + +2. Start Test Run. + + - To run Test Run in network-only mode (without running any tests), use the `--net-only` option. + + - To skip network validation before use and not launch the faux device on startup, use the `--no-validate` option. + +# Troubleshooting + +If you encounter any issues or need assistance, consider the following: + +- Ensure that all hardware and software prerequisites are met. +- Verify that the network interfaces are connected correctly. +- Check the configuration in the `local/system.json` file. +- Refer to the Test Run documentation or ask for further assistance from the support team. From 098de20e3774db3a381dafb564190561e40f5270 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Wed, 28 Jun 2023 11:22:59 +0100 Subject: [PATCH 39/48] Sync dev to main (#56) * Merge dev into main (Sprint 7 and 8) (#33) * Implement test orchestrator (#4) * Initial work on test-orchestrator * Ignore runtime folder * Update runtime directory for test modules * Fix logging Add initial framework for running tests * logging and misc cleanup * logging changes * Add a stop hook after all tests complete * Refactor test_orc code * Add arg passing Add option to use locally cloned via install or remote via main project network orchestrator * Fix baseline module Fix orchestrator exiting only after timeout * Add result file to baseline test module Change result format to match closer to design doc * Refactor pylint * Skip test module if it failed to start * Refactor * Check for valid log level --------- Co-authored-by: Jacob Boddey * Add issue report templates (#7) * Add issue templates * Update README.md * Discover devices on the network (#5) * Test run sync (#8) * Initial work on test-orchestrator * Ignore runtime folder * Update runtime directory for test modules * Fix logging Add initial framework for running tests * logging and misc cleanup * logging changes * Add a stop hook after all tests complete * Refactor test_orc code * Add arg passing Add option to use locally cloned via install or remote via main project network orchestrator * Fix baseline module Fix orchestrator exiting only after timeout * Add result file to baseline test module Change result format to match closer to design doc * Refactor pylint * Skip test module if it failed to start * Refactor * Check for valid log level * Add config file arg Misc changes to network start procedure * fix merge issues * Update runner and test orch procedure Add useful runtiem args * Restructure test run startup process Misc updates to work with net orch updates * Refactor --------- * Quick refactor (#9) * Fix duplicate sleep calls * Add net orc (#11) * Add network orchestrator repository * cleanup duplicate start and install scripts * Temporary fix for python dependencies * Remove duplicate python requirements * remove duplicate conf files * remove remote-net option * cleanp unecessary files * Add the DNS test module (#12) * Add network orchestrator repository * cleanup duplicate start and install scripts * Temporary fix for python dependencies * Remove duplicate python requirements * remove duplicate conf files * remove remote-net option * cleanp unecessary files * Add dns test module Fix test module build process * Add mac address of device under test to test container Update dns test to use mac address filter * Update dns module tests * Change result output * logging update * Update test module for better reusability * Load in module config to test module * logging cleanup * Update baseline module to new template Misc cleanup * Add ability to disable individual tests * remove duplicate readme * Update device directories * Remove local folder * Update device template Update test module to work with new device config file format * Change test module network config options Do not start network services for modules not configured for network * Refactor --------- * Add baseline and pylint tests (#25) * Discover devices on the network (#22) * Discover devices on the network * Add defaults when missing from config Implement monitor wait period from config * Add steady state monitor Remove duplicate callback registrations * Load devices into network orchestrator during testrun start --------- Co-authored-by: jhughesbiot * Build dependencies first (#21) * Build dependencies first * Remove debug message * Add depend on option to test modules * Re-add single interface option * Import subprocess --------- Co-authored-by: jhughesbiot * Port scan test module (#23) * Add network orchestrator repository * cleanup duplicate start and install scripts * Temporary fix for python dependencies * Remove duplicate python requirements * remove duplicate conf files * remove remote-net option * cleanp unecessary files * Add dns test module Fix test module build process * Add mac address of device under test to test container Update dns test to use mac address filter * Update dns module tests * Change result output * logging update * Update test module for better reusability * Load in module config to test module * logging cleanup * Update baseline module to new template Misc cleanup * Add ability to disable individual tests * remove duplicate readme * Update device directories * Remove local folder * Update device template Update test module to work with new device config file format * Change test module network config options Do not start network services for modules not configured for network * Initial nmap test module add Add device ip resolving to base module Add network mounting for test modules * Update ipv4 device resolving in test modules * Map in ip subnets and remove hard coded references * Add ftp port test * Add ability to pass config for individual tests within a module Update nmap module scan to run tests based on config * Add full module check for compliance * Add all tcp port scans to config * Update nmap commands to match existing DAQ tests Add udp scanning and tests * logging cleanup * Update TCP port scanning range Update logging * Merge device config into module config Update device template * fix merge issues * Update timeouts Add multi-threading for multiple scanns to run simultaneously Add option to use scan scripts for services * Fix merge issues * Fix device configs * Remove unecessary files * Cleanup duplicate properties * Cleanup install script * Formatting (#26) * Fix pylint issues in net orc * more pylint fixes * fix listener lint issues * fix logger lint issues * fix validator lint issues * fix util lint issues * Update base network module linting issues * Cleanup linter issues for dhcp modules Remove old code testing code * change to single quote delimeter * Cleanup linter issues for ntp module * Cleanup linter issues for radius module * Cleanup linter issues for template module * fix linter issues with faux-dev * Test results (#27) * Collect all module test results * Fix test modules without config options * Add timestamp to test results * Test results (#28) * Collect all module test results * Fix test modules without config options * Add timestamp to test results * Add attempt timing and device info to test results * Ignore disabled test containers when generating results * Fully skip modules that are disabled * Fix pylint test and skip internet tests so CI passes (#29) * disable internet checks for pass * fix pylint test * Increase pylint score (#31) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger --------- Co-authored-by: jhughesbiot * Pylint (#32) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting --------- Co-authored-by: Jacob Boddey * Add license header (#36) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting * Add ovs control into network orchestrator * Add verification methods for the base network * Add network validation and misc logging updates * remove ovs module * add license header to all python files --------- Co-authored-by: Jacob Boddey Co-authored-by: SuperJonotron * Ovs (#35) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting * Add ovs control into network orchestrator * Add verification methods for the base network * Add network validation and misc logging updates * remove ovs module --------- Co-authored-by: Jacob Boddey Co-authored-by: SuperJonotron * remove ovs files added back in during merge * Nmap (#38) * More formatting fixes * More formatting fixes * More formatting fixes * More formatting fixes * Misc pylint fixes Fix test module logger * remove unused files * more formatting * revert breaking pylint changes * more formatting * fix results file * More formatting * ovs module formatting * Add ovs control into network orchestrator * Add verification methods for the base network * Add network validation and misc logging updates * remove ovs module * add license header to all python files * Update tcp scans to speed up full port range scan Add version checking Implement ssh version checking * Add unknown port checks Match unknown ports to existing services Add unknown ports without existing services to results file --------- Co-authored-by: Jacob Boddey Co-authored-by: SuperJonotron * Create startup capture (#37) * Connection (#40) * Initial add of connection test module with ping test * Update host user resolving * Update host user resolving for validator * add get user method to validator * Conn mac oui (#42) * Initial add of connection test module with ping test * Update host user resolving * Update host user resolving for validator * add get user method to validator * Add mac_oui test Add option to return test result and details of test for reporting * Con mac address (#43) * Initial add of connection test module with ping test * Update host user resolving * Update host user resolving for validator * add get user method to validator * Add mac_oui test Add option to return test result and details of test for reporting * Add connection.mac_address test * Dns (#44) * Add MDNS test * Update existing mdns logging to be more consistent with other tests * Add startup and monitor captures * File permissions (#45) * Fix validator file permissions * Fix test module permissions * Fix device capture file permissions * Fix device results permissions * Add connection single ip test (#47) * Nmap results (#49) * Update processing of nmap results to use xml output and json conversions for stability * Update matching with regex to prevent wrong service matches and duplicate processing for partial matches * Update max port scan range * Framework restructure (#50) * Restructure framework and modules * Fix CI paths * Fix base module * Add build script * Remove build logs * Update base and template docker files to fit the new format Implement a template option on network modules Fix skipping of base image build * remove base image build in ci * Remove group from chown --------- Co-authored-by: jhughesbiot * Ip control (#51) * Add initial work for ip control module * Implement ip control module with additional cleanup methods * Update link check to not use error stream * Add error checking around container network configurations * Add network cleanup for namespaces and links * formatting * Move config to /local (#52) * Move config to /local * Fix testing config * Fix ovs_control config location * Fix faux dev config location * Add documentation (#53) --------- Co-authored-by: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Co-authored-by: jhughesbiot Co-authored-by: Noureddine Co-authored-by: SuperJonotron * Sprint 8 Hotfix (#54) * Fix connection results.json * Re add try/catch * Fix log level * Debug test module load order * Add depends on to nmap module * Remove logging change --------- Co-authored-by: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Co-authored-by: jhughesbiot Co-authored-by: Noureddine Co-authored-by: SuperJonotron --- cmd/start | 21 ----- framework/python/src/common/logger.py | 2 +- framework/python/src/common/util.py | 44 ++++++++++- framework/python/src/core/device.py | 2 +- framework/python/src/core/testrun.py | 37 ++++++--- .../src/net_orc/network_orchestrator.py | 76 +++---------------- .../python/src/test_orc/test_orchestrator.py | 61 +++------------ modules/test/base/python/src/test_module.py | 1 - modules/test/nmap/conf/module_config.json | 1 + resources/devices/template/device_config.json | 11 ++- 10 files changed, 98 insertions(+), 158 deletions(-) diff --git a/cmd/start b/cmd/start index 55d2e52eb..17bc2af6c 100755 --- a/cmd/start +++ b/cmd/start @@ -22,25 +22,4 @@ source venv/bin/activate export PYTHONPATH="$PWD/framework/python/src" python -u framework/python/src/core/test_runner.py $@ -# TODO: Work in progress code for containerization of OVS module -# asyncRun() { -# "$@" & -# pid="$!" -# echo "PID Running: " $pid -# trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM - -# sleep 10 - -# # A signal emitted while waiting will make the wait command return code > 128 -# # Let's wrap it in a loop that doesn't end before the process is indeed stopped -# while kill -0 $pid > /dev/null 2>&1; do -# #while $(kill -0 $pid 2>/dev/null); do -# wait -# done -# } - -# # -u flag allows python print statements -# # to be logged by docker by running unbuffered -# asyncRun python3 -u python/src/run.py $@ - deactivate \ No newline at end of file diff --git a/framework/python/src/common/logger.py b/framework/python/src/common/logger.py index 539767f53..8dd900fea 100644 --- a/framework/python/src/common/logger.py +++ b/framework/python/src/common/logger.py @@ -21,7 +21,7 @@ _LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' _DEFAULT_LEVEL = logging.INFO -_CONF_DIR = 'conf' +_CONF_DIR = 'local' _CONF_FILE_NAME = 'system.json' # Set log level diff --git a/framework/python/src/common/util.py b/framework/python/src/common/util.py index 1ffe70651..441b93224 100644 --- a/framework/python/src/common/util.py +++ b/framework/python/src/common/util.py @@ -13,6 +13,8 @@ # limitations under the License. """Provides basic utilities for the network orchestrator.""" +import getpass +import os import subprocess import shlex from common import logger @@ -37,7 +39,7 @@ def run_command(cmd, output=True): if process.returncode != 0 and output: err_msg = f'{stderr.strip()}. Code: {process.returncode}' - LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Command failed: ' + cmd) LOGGER.error('Error: ' + err_msg) else: success = True @@ -50,6 +52,44 @@ def run_command(cmd, output=True): def interface_exists(interface): return interface in netifaces.interfaces() - def prettify(mac_string): return ':'.join([f'{ord(b):02x}' for b in mac_string]) + +def get_host_user(): + user = get_os_user() + + # If primary method failed, try secondary + if user is None: + user = get_user() + + return user + +def get_os_user(): + user = None + try: + user = os.getlogin() + except OSError: + # Handle the OSError exception + LOGGER.error('An OS error occured whilst calling os.getlogin()') + except Exception: + # Catch any other unexpected exceptions + LOGGER.error('An unknown exception occured whilst calling os.getlogin()') + return user + +def get_user(): + user = None + try: + user = getpass.getuser() + except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: + # Handle specific exceptions individually + if isinstance(e, KeyError): + LOGGER.error('USER environment variable not set or unavailable.') + elif isinstance(e, ImportError): + LOGGER.error('Unable to import the getpass module.') + elif isinstance(e, ModuleNotFoundError): + LOGGER.error('The getpass module was not found.') + elif isinstance(e, OSError): + LOGGER.error('An OS error occurred while retrieving the username.') + else: + LOGGER.error('An exception occurred:', e) + return user diff --git a/framework/python/src/core/device.py b/framework/python/src/core/device.py index 44f275bdf..efce2dba1 100644 --- a/framework/python/src/core/device.py +++ b/framework/python/src/core/device.py @@ -22,6 +22,6 @@ class Device(NetworkDevice): """Represents a physical device and it's configuration.""" - make: str = None + manufacturer: str = None model: str = None test_modules: str = None diff --git a/framework/python/src/core/testrun.py b/framework/python/src/core/testrun.py index d613410e9..a91736e95 100644 --- a/framework/python/src/core/testrun.py +++ b/framework/python/src/core/testrun.py @@ -25,7 +25,7 @@ import json import signal import time -from common import logger +from common import logger, util # Locate parent directory current_dir = os.path.dirname(os.path.realpath(__file__)) @@ -46,7 +46,7 @@ LOCAL_DEVICES_DIR = 'local/devices' RESOURCE_DEVICES_DIR = 'resources/devices' DEVICE_CONFIG = 'device_config.json' -DEVICE_MAKE = 'make' +DEVICE_MANUFACTURER = 'manufacturer' DEVICE_MODEL = 'model' DEVICE_MAC_ADDR = 'mac_addr' DEVICE_TEST_MODULES = 'test_modules' @@ -76,7 +76,6 @@ def __init__(self, self._net_orc = net_orc.NetworkOrchestrator( config_file=config_file_abs, validate=validate, - async_monitor=not self._net_only, single_intf = self._single_intf) self._test_orc = test_orc.TestOrchestrator(self._net_orc) @@ -85,17 +84,30 @@ def start(self): self._load_all_devices() + self._start_network() + if self._net_only: LOGGER.info('Network only option configured, no tests will be run') - self._start_network() + + self._net_orc.listener.register_callback( + self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED] + ) + + self._net_orc.start_listener() + LOGGER.info('Waiting for devices on the network...') + + while True: + time.sleep(RUNTIME) + else: - self._start_network() self._test_orc.start() self._net_orc.listener.register_callback( self._device_stable, [NetworkEvent.DEVICE_STABLE] ) + self._net_orc.listener.register_callback( self._device_discovered, [NetworkEvent.DEVICE_DISCOVERED] @@ -106,13 +118,13 @@ def start(self): time.sleep(RUNTIME) - if not self._test_orc.test_in_progress(): - LOGGER.info('Timed out whilst waiting for device') + if not (self._test_orc.test_in_progress() or self._net_orc.monitor_in_progress()): + LOGGER.info('Timed out whilst waiting for device or stopping due to test completion') else: - while self._test_orc.test_in_progress(): + while self._test_orc.test_in_progress() or self._net_orc.monitor_in_progress(): time.sleep(5) - self.stop() + self.stop() def stop(self, kill=False): self._stop_tests() @@ -157,18 +169,19 @@ def _load_devices(self, device_dir): LOGGER.debug('Loading devices from ' + device_dir) os.makedirs(device_dir, exist_ok=True) + util.run_command(f'chown -R {util.get_host_user()} {device_dir}') for device_folder in os.listdir(device_dir): with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), encoding='utf-8') as device_config_file: device_config_json = json.load(device_config_file) - device_make = device_config_json.get(DEVICE_MAKE) + device_manufacturer = device_config_json.get(DEVICE_MANUFACTURER) device_model = device_config_json.get(DEVICE_MODEL) mac_addr = device_config_json.get(DEVICE_MAC_ADDR) test_modules = device_config_json.get(DEVICE_TEST_MODULES) - device = Device(make=device_make, + device = Device(manufacturer=device_manufacturer, model=device_model, mac_addr=mac_addr, test_modules=json.dumps(test_modules)) @@ -184,7 +197,7 @@ def _device_discovered(self, mac_addr): device = self.get_device(mac_addr) if device is not None: LOGGER.info( - f'Discovered {device.make} {device.model} on the network') + f'Discovered {device.manufacturer} {device.model} on the network') else: device = Device(mac_addr=mac_addr) self._devices.append(device) diff --git a/framework/python/src/net_orc/network_orchestrator.py b/framework/python/src/net_orc/network_orchestrator.py index 643dc4def..499ce954b 100644 --- a/framework/python/src/net_orc/network_orchestrator.py +++ b/framework/python/src/net_orc/network_orchestrator.py @@ -21,8 +21,6 @@ import shutil import subprocess import sys -import time -import threading import docker from docker.types import Mount from common import logger @@ -41,7 +39,6 @@ TEST_DIR = 'test' MONITOR_PCAP = 'monitor.pcap' NET_DIR = 'runtime/network' -#NETWORK_MODULES_DIR = 'network/modules' NETWORK_MODULES_DIR = 'modules/network' NETWORK_MODULE_METADATA = 'conf/module_config.json' DEVICE_BRIDGE = 'tr-d' @@ -56,21 +53,18 @@ DEFAULT_RUNTIME = 1200 DEFAULT_MONITOR_PERIOD = 300 -RUNTIME = 1500 - - class NetworkOrchestrator: """Manage and controls a virtual testing network.""" def __init__(self, config_file=CONFIG_FILE, validate=True, - async_monitor=False, single_intf=False): self._runtime = DEFAULT_RUNTIME self._startup_timeout = DEFAULT_STARTUP_TIMEOUT self._monitor_period = DEFAULT_MONITOR_PERIOD + self._monitor_in_progress = False self._int_intf = None self._dev_intf = None @@ -80,7 +74,6 @@ def __init__(self, self._net_modules = [] self._devices = [] self.validate = validate - self.async_monitor = async_monitor self._path = os.path.dirname( os.path.dirname( @@ -99,7 +92,7 @@ def start(self): LOGGER.debug('Starting network orchestrator') - self._host_user = self._get_host_user() + self._host_user = util.get_host_user() # Get all components ready self.load_network_modules() @@ -109,14 +102,6 @@ def start(self): self.start_network() - if self.async_monitor: - # Run the monitor method asynchronously to keep this method non-blocking - self._monitor_thread = threading.Thread(target=self.monitor_network) - self._monitor_thread.daemon = True - self._monitor_thread.start() - else: - self.monitor_network() - def start_network(self): """Start the virtual testing network.""" LOGGER.info('Starting network') @@ -130,7 +115,7 @@ def start_network(self): self.validator.start() # Get network ready (via Network orchestrator) - LOGGER.info('Network is ready.') + LOGGER.debug('Network is ready') def start_listener(self): self.listener.start_listener() @@ -151,13 +136,6 @@ def stop_network(self, kill=False): self.stop_networking_services(kill=kill) self.restore_net() - def monitor_network(self): - # TODO: This time should be configurable (How long to hold before exiting, - # this could be infinite too) - time.sleep(RUNTIME) - - self.stop() - def load_config(self, config_file=None): if config_file is None: # If not defined, use relative pathing to local file @@ -178,8 +156,11 @@ def load_config(self, config_file=None): def _device_discovered(self, mac_addr): + self._monitor_in_progress = True + LOGGER.debug( f'Discovered device {mac_addr}. Waiting for device to obtain IP') + device = self._get_device(mac_addr=mac_addr) device_runtime_dir = os.path.join(RUNTIME_DIR, TEST_DIR, @@ -204,6 +185,9 @@ def _device_discovered(self, mac_addr): self._start_device_monitor(device) + def monitor_in_progress(self): + return self._monitor_in_progress + def _device_has_ip(self, packet): device = self._get_device(mac_addr=packet.src) if device is None or device.ip_addr is None: @@ -225,6 +209,8 @@ def _start_device_monitor(self, device): wrpcap( os.path.join(RUNTIME_DIR, TEST_DIR, device.mac_addr.replace(':', ''), 'monitor.pcap'), packet_capture) + + self._monitor_in_progress = False self.listener.call_callback(NetworkEvent.DEVICE_STABLE, device.mac_addr) def _get_device(self, mac_addr): @@ -490,46 +476,6 @@ def _start_network_service(self, net_module): if network != 'host': self._attach_service_to_network(net_module) - def _get_host_user(self): - user = self._get_os_user() - - # If primary method failed, try secondary - if user is None: - user = self._get_user() - - LOGGER.debug("Network orchestrator host user: " + user) - return user - - def _get_os_user(self): - user = None - try: - user = os.getlogin() - except OSError as e: - # Handle the OSError exception - LOGGER.error("An OS error occurred while retrieving the login name.") - except Exception as e: - # Catch any other unexpected exceptions - LOGGER.error("An exception occurred:", e) - return user - - def _get_user(self): - user = None - try: - user = getpass.getuser() - except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: - # Handle specific exceptions individually - if isinstance(e, KeyError): - LOGGER.error("USER environment variable not set or unavailable.") - elif isinstance(e, ImportError): - LOGGER.error("Unable to import the getpass module.") - elif isinstance(e, ModuleNotFoundError): - LOGGER.error("The getpass module was not found.") - elif isinstance(e, OSError): - LOGGER.error("An OS error occurred while retrieving the username.") - else: - LOGGER.error("An exception occurred:", e) - return user - def _stop_service_module(self, net_module, kill=False): LOGGER.debug('Stopping Service container ' + net_module.container_name) try: diff --git a/framework/python/src/test_orc/test_orchestrator.py b/framework/python/src/test_orc/test_orchestrator.py index 58c1944f8..4bc9fc003 100644 --- a/framework/python/src/test_orc/test_orchestrator.py +++ b/framework/python/src/test_orc/test_orchestrator.py @@ -59,7 +59,7 @@ def start(self): LOGGER.debug("Starting test orchestrator") # Setup the output directory - self._host_user = self._get_host_user() + self._host_user = util.get_host_user() os.makedirs(RUNTIME_DIR, exist_ok=True) util.run_command(f'chown -R {self._host_user} {RUNTIME_DIR}') @@ -78,19 +78,16 @@ def run_test_modules(self, device): for module in self._test_modules: self._run_test_module(module, device) LOGGER.info("All tests complete") - LOGGER.info( - f"""Completed running test \ -modules on device with mac \ -addr {device.mac_addr}""") + self._generate_results(device) self._test_in_progress = False def _generate_results(self, device): results = {} results["device"] = {} - if device.make is not None: - results["device"]["make"] = device.make - if device.make is not None: + if device.manufacturer is not None: + results["device"]["manufacturer"] = device.manufacturer + if device.model is not None: results["device"]["model"] = device.model results["device"]["mac_addr"] = device.mac_addr for module in self._test_modules: @@ -100,12 +97,12 @@ def _generate_results(self, device): device.mac_addr.replace(":", "") + "/" + module.name) results_file = f"{container_runtime_dir}/{module.name}-result.json" try: - with open(results_file, "r", encoding="UTF-8") as f: + with open(results_file, "r", encoding="utf-8-sig") as f: module_results = json.load(f) results[module.name] = module_results except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: - LOGGER.error("Error occured whilst running module " + module.name) + LOGGER.error("Error occured whilst obbtaining results for module " + module.name) LOGGER.debug(results_error) out_file = os.path.join( @@ -237,47 +234,6 @@ def _get_module_container(self, module): LOGGER.error(error) return container - def _get_host_user(self): - user = self._get_os_user() - - # If primary method failed, try secondary - if user is None: - user = self._get_user() - - LOGGER.debug("Test orchestrator host user: " + user) - return user - - def _get_os_user(self): - user = None - try: - user = os.getlogin() - except OSError as e: - # Handle the OSError exception - LOGGER.error("An OS error occurred while retrieving the login name.") - except Exception as e: - # Catch any other unexpected exceptions - LOGGER.error("An exception occurred:", e) - return user - - def _get_user(self): - user = None - try: - user = getpass.getuser() - except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: - # Handle specific exceptions individually - if isinstance(e, KeyError): - LOGGER.error("USER environment variable not set or unavailable.") - elif isinstance(e, ImportError): - LOGGER.error("Unable to import the getpass module.") - elif isinstance(e, ModuleNotFoundError): - LOGGER.error("The getpass module was not found.") - elif isinstance(e, OSError): - LOGGER.error("An OS error occurred while retrieving the username.") - else: - LOGGER.error("An exception occurred:", e) - return user - - def _load_test_modules(self): """Load network modules from module_config.json.""" LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR) @@ -296,6 +252,8 @@ def _load_test_modules(self): def _load_test_module(self, module_dir): """Import module configuration from module_config.json.""" + LOGGER.debug("Loading test module " + module_dir) + modules_dir = os.path.join(self._path, TEST_MODULES_DIR) # Load basic module information @@ -337,6 +295,7 @@ def build_test_modules(self): def _build_test_module(self, module): LOGGER.debug("Building docker image for module " + module.dir_name) + client = docker.from_env() try: client.images.build( diff --git a/modules/test/base/python/src/test_module.py b/modules/test/base/python/src/test_module.py index f29668bb2..5342e36f8 100644 --- a/modules/test/base/python/src/test_module.py +++ b/modules/test/base/python/src/test_module.py @@ -65,7 +65,6 @@ def _get_device_tests(self, device_test_module): return module_tests def _get_device_test_module(self): - # TODO: Make DEVICE_TEST_MODULES a static string if 'DEVICE_TEST_MODULES' in os.environ: test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) if self._module_name in test_modules: diff --git a/modules/test/nmap/conf/module_config.json b/modules/test/nmap/conf/module_config.json index aafde4c03..292eced8b 100644 --- a/modules/test/nmap/conf/module_config.json +++ b/modules/test/nmap/conf/module_config.json @@ -7,6 +7,7 @@ }, "network": true, "docker": { + "depends_on": "base", "enable_container": true, "timeout": 600 }, diff --git a/resources/devices/template/device_config.json b/resources/devices/template/device_config.json index 7a3d4441c..3bb804b22 100644 --- a/resources/devices/template/device_config.json +++ b/resources/devices/template/device_config.json @@ -1,5 +1,5 @@ { - "make": "Manufacturer X", + "manufacturer": "Manufacturer X", "model": "Device X", "mac_addr": "aa:bb:cc:dd:ee:ff", "test_modules": { @@ -15,9 +15,9 @@ } }, "baseline": { - "enabled": true, + "enabled": false, "tests": { - "baseline.passe": { + "baseline.non-compliant": { "enabled": true }, "baseline.pass": { @@ -74,6 +74,9 @@ "tcp_ports": { "80": { "allowed": false + }, + "443": { + "allowed": true } } }, @@ -144,4 +147,4 @@ } } } -} \ No newline at end of file +} From f185bb15018391368156b6ff0bd5753da2c1d8f6 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 29 Jun 2023 08:48:03 -0700 Subject: [PATCH 40/48] Fix missing results on udp tests when tcp ports are also defined (#59) --- modules/test/nmap/python/src/nmap_module.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/modules/test/nmap/python/src/nmap_module.py b/modules/test/nmap/python/src/nmap_module.py index ea013f413..f998f302a 100644 --- a/modules/test/nmap/python/src/nmap_module.py +++ b/modules/test/nmap/python/src/nmap_module.py @@ -153,13 +153,17 @@ def _add_unknown_ports(self,tests,unallowed_port): unknown_service = {port_style:{unallowed_port['port']:result}} tests[service_name]=unknown_service - def _check_scan_results(self, test_config,scan_results): - port_config = {} - if "tcp_ports" in test_config: - port_config.update(test_config["tcp_ports"]) - elif "udp_ports" in test_config: - port_config.update(test_config["udp_ports"]) + def _check_scan_results(self,test_config,scan_results): + if "tcp_ports" in test_config: + port_config = test_config["tcp_ports"] + self._check_scan_result(port_config=port_config,scan_results=scan_results) + if "udp_ports" in test_config: + port_config = test_config["udp_ports"] + self._check_scan_result(port_config=port_config,scan_results=scan_results) + + + def _check_scan_result(self,port_config,scan_results): if port_config is not None: for port, config in port_config.items(): result = None From 355c838112a10b1eab31f15f2db76281836b481a Mon Sep 17 00:00:00 2001 From: J Boddey Date: Mon, 3 Jul 2023 12:04:49 +0100 Subject: [PATCH 41/48] Add licence header (#61) --- cmd/install | 14 ++++++++++++++ cmd/start | 14 ++++++++++++++ modules/devices/faux-dev/bin/get_default_gateway | 14 ++++++++++++++ modules/devices/faux-dev/bin/start_dhcp_client | 14 ++++++++++++++ .../devices/faux-dev/bin/start_network_service | 14 ++++++++++++++ modules/devices/faux-dev/faux-dev.Dockerfile | 14 ++++++++++++++ modules/network/base/base.Dockerfile | 14 ++++++++++++++ modules/network/base/bin/capture | 14 ++++++++++++++ modules/network/base/bin/setup_binaries | 14 ++++++++++++++ modules/network/base/bin/start_grpc | 14 ++++++++++++++ modules/network/base/bin/start_module | 14 ++++++++++++++ modules/network/base/bin/start_network_service | 14 ++++++++++++++ modules/network/base/bin/wait_for_interface | 14 ++++++++++++++ modules/network/dhcp-1/bin/start_network_service | 14 ++++++++++++++ modules/network/dhcp-1/dhcp-1.Dockerfile | 14 ++++++++++++++ modules/network/dhcp-2/bin/start_network_service | 14 ++++++++++++++ modules/network/dhcp-2/dhcp-2.Dockerfile | 14 ++++++++++++++ modules/network/dns/bin/start_network_service | 14 ++++++++++++++ modules/network/dns/dns.Dockerfile | 14 ++++++++++++++ .../network/gateway/bin/start_network_service | 14 ++++++++++++++ modules/network/gateway/gateway.Dockerfile | 14 ++++++++++++++ modules/network/ntp/bin/start_network_service | 14 ++++++++++++++ modules/network/ntp/ntp.Dockerfile | 14 ++++++++++++++ modules/network/radius/bin/start_network_service | 14 ++++++++++++++ modules/network/radius/radius.Dockerfile | 14 ++++++++++++++ .../network/template/bin/start_network_service | 14 ++++++++++++++ modules/network/template/template.Dockerfile | 14 ++++++++++++++ modules/test/base/base.Dockerfile | 14 ++++++++++++++ modules/test/base/bin/capture | 14 ++++++++++++++ modules/test/base/bin/get_ipv4_addr | 14 ++++++++++++++ modules/test/base/bin/setup_binaries | 14 ++++++++++++++ modules/test/base/bin/start_grpc | 14 ++++++++++++++ modules/test/base/bin/start_module | 14 ++++++++++++++ modules/test/base/bin/wait_for_interface | 14 ++++++++++++++ modules/test/baseline/baseline.Dockerfile | 14 ++++++++++++++ modules/test/baseline/bin/start_test_module | 14 ++++++++++++++ modules/test/conn/bin/start_test_module | 14 ++++++++++++++ modules/test/conn/conn.Dockerfile | 14 ++++++++++++++ modules/test/dns/bin/start_test_module | 14 ++++++++++++++ modules/test/dns/dns.Dockerfile | 14 ++++++++++++++ modules/test/nmap/bin/start_test_module | 14 ++++++++++++++ modules/test/nmap/nmap.Dockerfile | 14 ++++++++++++++ testing/docker/ci_baseline/Dockerfile | 16 +++++++++++++++- testing/docker/ci_baseline/entrypoint.sh | 14 ++++++++++++++ testing/test_baseline | 15 ++++++++++++++- testing/test_pylint | 14 ++++++++++++++ 46 files changed, 645 insertions(+), 2 deletions(-) diff --git a/cmd/install b/cmd/install index 37c03e113..4e8639a66 100755 --- a/cmd/install +++ b/cmd/install @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + python3 -m venv venv source venv/bin/activate diff --git a/cmd/start b/cmd/start index 17bc2af6c..64ac197eb 100755 --- a/cmd/start +++ b/cmd/start @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + if [[ "$EUID" -ne 0 ]]; then echo "Must run as root. Use sudo cmd/start" exit 1 diff --git a/modules/devices/faux-dev/bin/get_default_gateway b/modules/devices/faux-dev/bin/get_default_gateway index f6f1e2a0d..f4d1a4a23 100644 --- a/modules/devices/faux-dev/bin/get_default_gateway +++ b/modules/devices/faux-dev/bin/get_default_gateway @@ -1,3 +1,17 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + route | grep default | awk '{print $2}' \ No newline at end of file diff --git a/modules/devices/faux-dev/bin/start_dhcp_client b/modules/devices/faux-dev/bin/start_dhcp_client index de9270c82..90362c4a4 100644 --- a/modules/devices/faux-dev/bin/start_dhcp_client +++ b/modules/devices/faux-dev/bin/start_dhcp_client @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Fetch the interface INTF=$1 diff --git a/modules/devices/faux-dev/bin/start_network_service b/modules/devices/faux-dev/bin/start_network_service index 80a587684..d4bb8a92d 100644 --- a/modules/devices/faux-dev/bin/start_network_service +++ b/modules/devices/faux-dev/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Directory where all binaries will be loaded BIN_DIR="/testrun/bin" diff --git a/modules/devices/faux-dev/faux-dev.Dockerfile b/modules/devices/faux-dev/faux-dev.Dockerfile index 0a4f02f38..ecfdfc5c2 100644 --- a/modules/devices/faux-dev/faux-dev.Dockerfile +++ b/modules/devices/faux-dev/faux-dev.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/faux-dev FROM test-run/base:latest diff --git a/modules/network/base/base.Dockerfile b/modules/network/base/base.Dockerfile index d14713c59..f8fa43c57 100644 --- a/modules/network/base/base.Dockerfile +++ b/modules/network/base/base.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/base FROM ubuntu:jammy diff --git a/modules/network/base/bin/capture b/modules/network/base/bin/capture index bc6c425e5..59ffb4118 100644 --- a/modules/network/base/bin/capture +++ b/modules/network/base/bin/capture @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Fetch module name MODULE_NAME=$1 diff --git a/modules/network/base/bin/setup_binaries b/modules/network/base/bin/setup_binaries index 3535ead3c..6af744693 100644 --- a/modules/network/base/bin/setup_binaries +++ b/modules/network/base/bin/setup_binaries @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Directory where all binaries will be loaded BIN_DIR=$1 diff --git a/modules/network/base/bin/start_grpc b/modules/network/base/bin/start_grpc index 9792b4bd4..56f915db7 100644 --- a/modules/network/base/bin/start_grpc +++ b/modules/network/base/bin/start_grpc @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + GRPC_DIR="/testrun/python/src/grpc" GRPC_PROTO_DIR="proto" GRPC_PROTO_FILE="grpc.proto" diff --git a/modules/network/base/bin/start_module b/modules/network/base/bin/start_module index 7fdcbc404..e00747b43 100644 --- a/modules/network/base/bin/start_module +++ b/modules/network/base/bin/start_module @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Directory where all binaries will be loaded BIN_DIR="/testrun/bin" diff --git a/modules/network/base/bin/start_network_service b/modules/network/base/bin/start_network_service index 7d13750b8..9cd0a70c2 100644 --- a/modules/network/base/bin/start_network_service +++ b/modules/network/base/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Place holder function for testing and validation # Each network module should include a start_networkig_service # file that overwrites this one to boot all of the its specific diff --git a/modules/network/base/bin/wait_for_interface b/modules/network/base/bin/wait_for_interface index 1377705d8..a0c8a63b8 100644 --- a/modules/network/base/bin/wait_for_interface +++ b/modules/network/base/bin/wait_for_interface @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Default interface should be veth0 for all containers DEFAULT_IFACE=veth0 diff --git a/modules/network/dhcp-1/bin/start_network_service b/modules/network/dhcp-1/bin/start_network_service index a60806684..fbeede871 100644 --- a/modules/network/dhcp-1/bin/start_network_service +++ b/modules/network/dhcp-1/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + CONFIG_FILE=/etc/dhcp/dhcpd.conf DHCP_PID_FILE=/var/run/dhcpd.pid DHCP_LOG_FILE=/runtime/network/dhcp1-dhcpd.log diff --git a/modules/network/dhcp-1/dhcp-1.Dockerfile b/modules/network/dhcp-1/dhcp-1.Dockerfile index 766f18c57..a4eb8d90a 100644 --- a/modules/network/dhcp-1/dhcp-1.Dockerfile +++ b/modules/network/dhcp-1/dhcp-1.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/dhcp-primary FROM test-run/base:latest diff --git a/modules/network/dhcp-2/bin/start_network_service b/modules/network/dhcp-2/bin/start_network_service index ad5ff09e7..550854d49 100644 --- a/modules/network/dhcp-2/bin/start_network_service +++ b/modules/network/dhcp-2/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + CONFIG_FILE=/etc/dhcp/dhcpd.conf DHCP_PID_FILE=/var/run/dhcpd.pid DHCP_LOG_FILE=/runtime/network/dhcp2-dhcpd.log diff --git a/modules/network/dhcp-2/dhcp-2.Dockerfile b/modules/network/dhcp-2/dhcp-2.Dockerfile index 231d0c558..df77cb811 100644 --- a/modules/network/dhcp-2/dhcp-2.Dockerfile +++ b/modules/network/dhcp-2/dhcp-2.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/dhcp-primary FROM test-run/base:latest diff --git a/modules/network/dns/bin/start_network_service b/modules/network/dns/bin/start_network_service index 4537033c0..98e75ccff 100644 --- a/modules/network/dns/bin/start_network_service +++ b/modules/network/dns/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + CONFIG_FILE=/etc/dnsmasq.conf PID_FILE=/var/run/dnsmasq.pid LOG_FILE=/runtime/network/dns.log diff --git a/modules/network/dns/dns.Dockerfile b/modules/network/dns/dns.Dockerfile index edfd4dd03..d59b8a391 100644 --- a/modules/network/dns/dns.Dockerfile +++ b/modules/network/dns/dns.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/dns FROM test-run/base:latest diff --git a/modules/network/gateway/bin/start_network_service b/modules/network/gateway/bin/start_network_service index b1b31d335..dc456d380 100644 --- a/modules/network/gateway/bin/start_network_service +++ b/modules/network/gateway/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + LOCAL_INTF=veth0 EXT_INTF=eth1 diff --git a/modules/network/gateway/gateway.Dockerfile b/modules/network/gateway/gateway.Dockerfile index 9bfa77dae..d15d31610 100644 --- a/modules/network/gateway/gateway.Dockerfile +++ b/modules/network/gateway/gateway.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/gateway FROM test-run/base:latest diff --git a/modules/network/ntp/bin/start_network_service b/modules/network/ntp/bin/start_network_service index b20cf8831..91129b18f 100644 --- a/modules/network/ntp/bin/start_network_service +++ b/modules/network/ntp/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + PYTHON_SRC_DIR=/testrun/python/src LOG_FILE="/runtime/network/ntp.log" diff --git a/modules/network/ntp/ntp.Dockerfile b/modules/network/ntp/ntp.Dockerfile index 1add3178e..cfd78c05e 100644 --- a/modules/network/ntp/ntp.Dockerfile +++ b/modules/network/ntp/ntp.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/ntp FROM test-run/base:latest diff --git a/modules/network/radius/bin/start_network_service b/modules/network/radius/bin/start_network_service index 399a90ae5..d285c20d9 100644 --- a/modules/network/radius/bin/start_network_service +++ b/modules/network/radius/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + PYTHON_SRC_DIR=/testrun/python/src CONF_DIR="/testrun/conf" LOG_FILE="/runtime/network/radius.log" diff --git a/modules/network/radius/radius.Dockerfile b/modules/network/radius/radius.Dockerfile index c44c5f0cc..4c8f8fac5 100644 --- a/modules/network/radius/radius.Dockerfile +++ b/modules/network/radius/radius.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/radius FROM test-run/base:latest diff --git a/modules/network/template/bin/start_network_service b/modules/network/template/bin/start_network_service index 94ae0def9..f184338a0 100644 --- a/modules/network/template/bin/start_network_service +++ b/modules/network/template/bin/start_network_service @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Place holder function for testing and validation # Each network module should include a start_networkig_service # file that overwrites this one to boot all of the its specific diff --git a/modules/network/template/template.Dockerfile b/modules/network/template/template.Dockerfile index 9efbfb230..1c3060496 100644 --- a/modules/network/template/template.Dockerfile +++ b/modules/network/template/template.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/template FROM test-run/base:latest diff --git a/modules/test/base/base.Dockerfile b/modules/test/base/base.Dockerfile index b8398eae9..9c7f2bac2 100644 --- a/modules/test/base/base.Dockerfile +++ b/modules/test/base/base.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/base-test FROM ubuntu:jammy diff --git a/modules/test/base/bin/capture b/modules/test/base/bin/capture index 45cfcd42f..69fa916c3 100644 --- a/modules/test/base/bin/capture +++ b/modules/test/base/bin/capture @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Fetch module name MODULE_NAME=$1 diff --git a/modules/test/base/bin/get_ipv4_addr b/modules/test/base/bin/get_ipv4_addr index 09a19bc13..c244b157d 100644 --- a/modules/test/base/bin/get_ipv4_addr +++ b/modules/test/base/bin/get_ipv4_addr @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + NET=$1 MAC=$2 diff --git a/modules/test/base/bin/setup_binaries b/modules/test/base/bin/setup_binaries index 3535ead3c..6af744693 100644 --- a/modules/test/base/bin/setup_binaries +++ b/modules/test/base/bin/setup_binaries @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Directory where all binaries will be loaded BIN_DIR=$1 diff --git a/modules/test/base/bin/start_grpc b/modules/test/base/bin/start_grpc index 917381e89..7852b8ae3 100644 --- a/modules/test/base/bin/start_grpc +++ b/modules/test/base/bin/start_grpc @@ -1,5 +1,19 @@ #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + GRPC_DIR="/testrun/python/src/grpc" GRPC_PROTO_DIR="proto" GRPC_PROTO_FILE="grpc.proto" diff --git a/modules/test/base/bin/start_module b/modules/test/base/bin/start_module index 3e4737d8b..5f6e1ee35 100644 --- a/modules/test/base/bin/start_module +++ b/modules/test/base/bin/start_module @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Define the local mount point to store local files to OUTPUT_DIR="/runtime/output" diff --git a/modules/test/base/bin/wait_for_interface b/modules/test/base/bin/wait_for_interface index c9c1682f0..4c336c8fb 100644 --- a/modules/test/base/bin/wait_for_interface +++ b/modules/test/base/bin/wait_for_interface @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Allow a user to define an interface by passing it into this script INTF=$1 diff --git a/modules/test/baseline/baseline.Dockerfile b/modules/test/baseline/baseline.Dockerfile index c2b32e7b7..f7d21f8c8 100644 --- a/modules/test/baseline/baseline.Dockerfile +++ b/modules/test/baseline/baseline.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/baseline-test FROM test-run/base-test:latest diff --git a/modules/test/baseline/bin/start_test_module b/modules/test/baseline/bin/start_test_module index a09349cf9..a529c2fcf 100644 --- a/modules/test/baseline/bin/start_test_module +++ b/modules/test/baseline/bin/start_test_module @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # An example startup script that does the bare minimum to start # a test module via a pyhon script. Each test module should include a # start_test_module file that overwrites this one to boot all of its diff --git a/modules/test/conn/bin/start_test_module b/modules/test/conn/bin/start_test_module index 8290c0764..0df510b86 100644 --- a/modules/test/conn/bin/start_test_module +++ b/modules/test/conn/bin/start_test_module @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Setup and start the connection test module # Define where the python source files are located diff --git a/modules/test/conn/conn.Dockerfile b/modules/test/conn/conn.Dockerfile index 2526b0046..1714f49f2 100644 --- a/modules/test/conn/conn.Dockerfile +++ b/modules/test/conn/conn.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/conn-test FROM test-run/base-test:latest diff --git a/modules/test/dns/bin/start_test_module b/modules/test/dns/bin/start_test_module index a09349cf9..a529c2fcf 100644 --- a/modules/test/dns/bin/start_test_module +++ b/modules/test/dns/bin/start_test_module @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # An example startup script that does the bare minimum to start # a test module via a pyhon script. Each test module should include a # start_test_module file that overwrites this one to boot all of its diff --git a/modules/test/dns/dns.Dockerfile b/modules/test/dns/dns.Dockerfile index f831d0e2b..b832c2afb 100644 --- a/modules/test/dns/dns.Dockerfile +++ b/modules/test/dns/dns.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/conn-test FROM test-run/base-test:latest diff --git a/modules/test/nmap/bin/start_test_module b/modules/test/nmap/bin/start_test_module index 333566342..d8cede486 100644 --- a/modules/test/nmap/bin/start_test_module +++ b/modules/test/nmap/bin/start_test_module @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # An example startup script that does the bare minimum to start # a test module via a pyhon script. Each test module should include a # start_test_module file that overwrites this one to boot all of its diff --git a/modules/test/nmap/nmap.Dockerfile b/modules/test/nmap/nmap.Dockerfile index c1a2f96ce..1789da382 100644 --- a/modules/test/nmap/nmap.Dockerfile +++ b/modules/test/nmap/nmap.Dockerfile @@ -1,3 +1,17 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Image name: test-run/nmap-test FROM test-run/base-test:latest diff --git a/testing/docker/ci_baseline/Dockerfile b/testing/docker/ci_baseline/Dockerfile index 7c3c1eebd..468c6f7a0 100644 --- a/testing/docker/ci_baseline/Dockerfile +++ b/testing/docker/ci_baseline/Dockerfile @@ -1,6 +1,20 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + FROM ubuntu:jammy -#Update and get all additional requirements not contained in the base image +# Update and get all additional requirements not contained in the base image RUN apt-get update && apt-get -y upgrade RUN apt-get install -y isc-dhcp-client ntpdate coreutils moreutils inetutils-ping curl jq dnsutils diff --git a/testing/docker/ci_baseline/entrypoint.sh b/testing/docker/ci_baseline/entrypoint.sh index bc2da3ec2..0f3301cd8 100755 --- a/testing/docker/ci_baseline/entrypoint.sh +++ b/testing/docker/ci_baseline/entrypoint.sh @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + OUT=/out/testrun_ci.json NTP_SERVER=10.10.10.5 diff --git a/testing/test_baseline b/testing/test_baseline index 36d21fa5e..ac47a5cfa 100755 --- a/testing/test_baseline +++ b/testing/test_baseline @@ -1,6 +1,19 @@ - #!/bin/bash -e +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + TESTRUN_OUT=/tmp/testrun.log ifconfig diff --git a/testing/test_pylint b/testing/test_pylint index e3ade62b5..5cd1dff73 100755 --- a/testing/test_pylint +++ b/testing/test_pylint @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + ERROR_LIMIT=1100 sudo cmd/install From 8d653860caf4b9d954255308a38d9c1f28d19c58 Mon Sep 17 00:00:00 2001 From: Jacob Boddey Date: Mon, 3 Jul 2023 12:31:08 +0100 Subject: [PATCH 42/48] Resolve merge conflict --- modules/network/dns/dns.Dockerfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/network/dns/dns.Dockerfile b/modules/network/dns/dns.Dockerfile index b68129f7c..d59b8a391 100644 --- a/modules/network/dns/dns.Dockerfile +++ b/modules/network/dns/dns.Dockerfile @@ -1,4 +1,3 @@ -<<<<<<< HEAD # Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -======= ->>>>>>> main # Image name: test-run/dns FROM test-run/base:latest From 26f8c5b89162083a67978436a2affd10e4400b68 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Tue, 4 Jul 2023 09:32:03 +0100 Subject: [PATCH 43/48] Add network docs (#63) * Add network docs * Rename to readme * Add link to template module --- docs/network/README.md | 41 ++++++++++++++ docs/network/add_new_service.md | 94 +++++++++++++++++++++++++++++++++ docs/network/addresses.md | 18 +++++++ 3 files changed, 153 insertions(+) create mode 100644 docs/network/README.md create mode 100644 docs/network/add_new_service.md create mode 100644 docs/network/addresses.md diff --git a/docs/network/README.md b/docs/network/README.md new file mode 100644 index 000000000..2d66d3e6a --- /dev/null +++ b/docs/network/README.md @@ -0,0 +1,41 @@ +# Network Overview + +## Table of Contents +1) Network Overview (this page) +2) [Addresses](addresses.md) +3) [Add a new network service](add_new_service.md) + +Test Run provides several built-in network services that can be utilized for testing purposes. These services are already available and can be used without any additional configuration. + +The following network services are provided: + +### Internet Connectivity (Gateway Service) + +The gateway service provides internet connectivity to the test network. It allows devices in the network to access external resources and communicate with the internet. + +### DHCPv4 Service + +The DHCPv4 service provides Dynamic Host Configuration Protocol (DHCP) functionality for IPv4 addressing. It includes the following components: + +- Primary DHCP Server: A primary DHCP server is available to assign IPv4 addresses to DHCP clients in the network. +- Secondary DHCP Server (Failover Configuration): A secondary DHCP server operates in failover configuration with the primary server to provide high availability and redundancy. + +#### Configuration + +The configuration of the DHCPv4 service can be modified using the provided GRPC (gRPC Remote Procedure Call) service. + +### IPv6 SLAAC Addressing + +The primary DHCP server also provides IPv6 Stateless Address Autoconfiguration (SLAAC) addressing for devices in the network. IPv6 addresses are automatically assigned to devices using SLAAC where test devices support it. + +### NTP Service + +The Network Time Protocol (NTP) service provides time synchronization for devices in the network. It ensures that all devices have accurate and synchronized time information. + +### DNS Service + +The DNS (Domain Name System) service resolves domain names to their corresponding IP addresses. It allows devices in the network to access external resources using domain names. + +### 802.1x Authentication (Radius Module) + +The radius module provides 802.1x authentication for devices in the network. It ensures secure and authenticated access to the network. The issuing CA (Certificate Authority) certificate can be specified by the user if required. \ No newline at end of file diff --git a/docs/network/add_new_service.md b/docs/network/add_new_service.md new file mode 100644 index 000000000..1ad07b60d --- /dev/null +++ b/docs/network/add_new_service.md @@ -0,0 +1,94 @@ +# Adding a New Network Service + +The Test Run framework allows users to add their own network services with ease. A template network service can be used to get started quickly, this can be found at [modules/network/template](../../modules/network/template). Otherwise, see below for details of the requirements for new network services. + +To add a new network service to Test Run, follow the procedure below: + +1. Create a folder under `modules/network/` with the name of the network service in lowercase, using only alphanumeric characters and hyphens (`-`). +2. Inside the created folder, include the following files and folders: + - `{module}.Dockerfile`: Dockerfile for building the network service image. Replace `{module}` with the name of the module. + - `conf/`: Folder containing the module configuration files. + - `bin/`: Folder containing the startup script for the network service. + - Any additional application code can be placed in its own folder. + +### Example `module_config.json` + +```json +{ + "config": { + "meta": { + "name": "{module}", + "display_name": "Network Service Name", + "description": "Description of the network service" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 2 + }, + "grpc": { + "port": 5001 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} +``` + +### Example of {module}.Dockerfile + +```Dockerfile +# Image name: test-run/{module} +FROM test-run/base:latest + +ARG MODULE_NAME={module} +ARG MODULE_DIR=modules/network/$MODULE_NAME + +# Install network service dependencies +# ... + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python + +# Do not specify a CMD or Entrypoint as Test Run will automatically start your service as required +``` + +### Example of start_network_service script + +```bash +#!/bin/bash + +CONFIG_FILE=/etc/network_service/config.conf +# ... + +echo "Starting Network Service..." + +# Perform any required setup steps +# ... + +# Start the network service +# ... + +# Monitor for changes in the config file +# ... + +# Restart the network service when the config changes +# ... +``` + + + + diff --git a/docs/network/addresses.md b/docs/network/addresses.md new file mode 100644 index 000000000..ecaacfd36 --- /dev/null +++ b/docs/network/addresses.md @@ -0,0 +1,18 @@ +# Network Addresses + +Each network service is configured with an IPv4 and IPv6 address. For IPv4 addressing, the last number in the IPv4 address is fixed (ensuring the IP is unique). See below for a table of network addresses: + +| Name | Mac address | IPv4 address | IPv6 address | +|---------------------|----------------------|--------------|------------------------------| +| Internet gateway | 9a:02:57:1e:8f:01 | 10.10.10.1 | fd10:77be:4186::1 | +| DHCP primary | 9a:02:57:1e:8f:02 | 10.10.10.2 | fd10:77be:4186::2 | +| DHCP secondary | 9a:02:57:1e:8f:03 | 10.10.10.3 | fd10:77be:4186::3 | +| DNS server | 9a:02:57:1e:8f:04 | 10.10.10.4 | fd10:77be:4186::4 | +| NTP server | 9a:02:57:1e:8f:05 | 10.10.10.5 | fd10:77be:4186::5 | +| Radius authenticator| 9a:02:57:1e:8f:07 | 10.10.10.7 | fd10:77be:4186::7 | +| Active test module | 9a:02:57:1e:8f:09 | 10.10.10.9 | fd10:77be:4186::9 | + + +The default network range is 10.10.10.0/24 and devices will be assigned addresses in that range via DHCP. The range may change when requested by a test module. In which case, network services will be restarted and accessible on the new range, with the same final host ID. The default IPv6 network is fd10:77be:4186::/64 and addresses will be assigned to devices on the network using IPv6 SLAAC. + +When creating a new network module, please ensure that the ip_index value in the module_config.json is unique otherwise unexpected behaviour will occur. \ No newline at end of file From 4a5c1eaa532ba9970391c75ade52c6befb8719bd Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Wed, 5 Jul 2023 07:26:08 -0700 Subject: [PATCH 44/48] Dhcp (#64) * Add initial work for ip control module * Implement ip control module with additional cleanup methods * Update link check to not use error stream * Add error checking around container network configurations * Add network cleanup for namespaces and links * formatting * initial work on adding grpc functions for dhcp tests * rework code to allow for better usage and unit testing * working poc for test containers and grpc client to dhcp-1 * Move grpc client code into base image * Move grpc proto builds outside of dockerfile into module startup script * Setup pythonpath var in test module base startup process misc cleanup * pylinting and logging updates * Add python path resolving to network modules Update grpc path to prevent conflicts misc pylinting * Change lease resolving method to fix pylint issue * cleanup unit tests * cleanup unit tests * Add grpc updates to dhcp2 module Update dhcp_config to deal with missing optional variables * Add grpc updates to dhcp2 module Update dhcp_config to deal with missing optional variables * fix line endings * misc cleanup --- local/system.json.example | 18 +- modules/network/base/base.Dockerfile | 4 + modules/network/base/bin/setup_python_path | 25 + modules/network/base/bin/start_grpc | 6 +- modules/network/base/bin/start_module | 12 +- modules/network/base/python/requirements.txt | 3 +- .../src/{grpc => grpc_server}/start_server.py | 0 modules/network/base/python/src/logger.py | 2 +- .../network/dhcp-1/bin/start_network_service | 2 +- modules/network/dhcp-1/conf/dhcpd.conf | 54 +- modules/network/dhcp-1/dhcp-1.Dockerfile | 8 +- .../dhcp-1/python/src/grpc/dhcp_config.py | 303 ----------- .../dhcp-1/python/src/grpc/network_service.py | 58 --- .../dhcp-1/python/src/grpc/proto/grpc.proto | 36 -- .../src/{grpc => grpc_server}/__init__.py | 0 .../python/src/grpc_server/dhcp_config.py | 493 ++++++++++++++++++ .../src/grpc_server/dhcp_config_test.py | 103 ++++ .../python/src/grpc_server/dhcp_lease.py | 75 +++ .../python/src/grpc_server/dhcp_leases.py | 107 ++++ .../python/src/grpc_server/network_service.py | 157 ++++++ .../python/src/grpc_server/proto/grpc.proto | 59 +++ .../network/dhcp-2/bin/start_network_service | 2 +- modules/network/dhcp-2/conf/dhcpd.conf | 35 +- .../dhcp-2/python/src/grpc/dhcp_config.py | 303 ----------- .../dhcp-2/python/src/grpc/network_service.py | 58 --- .../dhcp-2/python/src/grpc/proto/grpc.proto | 36 -- .../src/{grpc => grpc_server}/__init__.py | 0 .../python/src/grpc_server/dhcp_config.py | 493 ++++++++++++++++++ .../src/grpc_server/dhcp_config_test.py | 103 ++++ .../python/src/grpc_server/dhcp_lease.py | 75 +++ .../python/src/grpc_server/dhcp_leases.py | 107 ++++ .../python/src/grpc_server/network_service.py | 157 ++++++ .../python/src/grpc_server/proto/grpc.proto | 59 +++ modules/test/base/base.Dockerfile | 8 + modules/test/base/bin/setup_grpc_clients | 34 ++ modules/test/base/bin/setup_python_path | 25 + modules/test/base/bin/start_module | 17 +- .../python/src/grpc/proto/dhcp1/client.py | 98 ++++ modules/test/conn/conn.Dockerfile | 4 +- .../test/conn/python/src/connection_module.py | 29 ++ testing/test_baseline | 2 +- testing/unit_test/run_tests.sh | 18 + 42 files changed, 2326 insertions(+), 862 deletions(-) create mode 100644 modules/network/base/bin/setup_python_path rename modules/network/base/python/src/{grpc => grpc_server}/start_server.py (100%) delete mode 100644 modules/network/dhcp-1/python/src/grpc/dhcp_config.py delete mode 100644 modules/network/dhcp-1/python/src/grpc/network_service.py delete mode 100644 modules/network/dhcp-1/python/src/grpc/proto/grpc.proto rename modules/network/dhcp-1/python/src/{grpc => grpc_server}/__init__.py (100%) create mode 100644 modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py create mode 100644 modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py create mode 100644 modules/network/dhcp-1/python/src/grpc_server/dhcp_lease.py create mode 100644 modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py create mode 100644 modules/network/dhcp-1/python/src/grpc_server/network_service.py create mode 100644 modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto delete mode 100644 modules/network/dhcp-2/python/src/grpc/dhcp_config.py delete mode 100644 modules/network/dhcp-2/python/src/grpc/network_service.py delete mode 100644 modules/network/dhcp-2/python/src/grpc/proto/grpc.proto rename modules/network/dhcp-2/python/src/{grpc => grpc_server}/__init__.py (100%) create mode 100644 modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py create mode 100644 modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py create mode 100644 modules/network/dhcp-2/python/src/grpc_server/dhcp_lease.py create mode 100644 modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py create mode 100644 modules/network/dhcp-2/python/src/grpc_server/network_service.py create mode 100644 modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto create mode 100644 modules/test/base/bin/setup_grpc_clients create mode 100644 modules/test/base/bin/setup_python_path create mode 100644 modules/test/base/python/src/grpc/proto/dhcp1/client.py create mode 100644 testing/unit_test/run_tests.sh diff --git a/local/system.json.example b/local/system.json.example index ecf480104..e99e013f3 100644 --- a/local/system.json.example +++ b/local/system.json.example @@ -1,10 +1,10 @@ -{ - "network": { - "device_intf": "enx123456789123", - "internet_intf": "enx123456789124" - }, - "log_level": "INFO", - "startup_timeout": 60, - "monitor_period": 300, - "runtime": 1200 +{ + "network": { + "device_intf": "enx123456789123", + "internet_intf": "enx123456789124" + }, + "log_level": "INFO", + "startup_timeout": 60, + "monitor_period": 300, + "runtime": 1200 } \ No newline at end of file diff --git a/modules/network/base/base.Dockerfile b/modules/network/base/base.Dockerfile index f8fa43c57..ac964a99d 100644 --- a/modules/network/base/base.Dockerfile +++ b/modules/network/base/base.Dockerfile @@ -17,10 +17,14 @@ FROM ubuntu:jammy ARG MODULE_NAME=base ARG MODULE_DIR=modules/network/$MODULE_NAME +ARG COMMON_DIR=framework/python/src/common # Install common software RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix +# Install common python modules +COPY $COMMON_DIR/ /testrun/python/src/common + # Setup the base python requirements COPY $MODULE_DIR/python /testrun/python diff --git a/modules/network/base/bin/setup_python_path b/modules/network/base/bin/setup_python_path new file mode 100644 index 000000000..3e30e965d --- /dev/null +++ b/modules/network/base/bin/setup_python_path @@ -0,0 +1,25 @@ +#!/bin/bash + +ROOT_DIRECTORY="/testrun/python/src" + +# Function to recursively add subdirectories to PYTHONPATH +add_subdirectories_to_pythonpath() { + local directory=$1 + local subdirectories=( "$directory"/* ) + local subdirectory + + for subdirectory in "${subdirectories[@]}"; do + if [[ -d "$subdirectory" && ! "$subdirectory" = *'__pycache__' ]]; then + export PYTHONPATH="$PYTHONPATH:$subdirectory" + add_subdirectories_to_pythonpath "$subdirectory" + fi + done +} + +# Set PYTHONPATH initially to an empty string +export PYTHONPATH="$ROOT_DIRECTORY" + +# Add all subdirectories to PYTHONPATH +add_subdirectories_to_pythonpath "$ROOT_DIRECTORY" + +echo "$PYTHONPATH" \ No newline at end of file diff --git a/modules/network/base/bin/start_grpc b/modules/network/base/bin/start_grpc index 56f915db7..840bea65f 100644 --- a/modules/network/base/bin/start_grpc +++ b/modules/network/base/bin/start_grpc @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -GRPC_DIR="/testrun/python/src/grpc" +GRPC_DIR="/testrun/python/src/grpc_server" GRPC_PROTO_DIR="proto" GRPC_PROTO_FILE="grpc.proto" @@ -22,10 +22,10 @@ GRPC_PROTO_FILE="grpc.proto" pushd $GRPC_DIR >/dev/null 2>&1 #Build the grpc proto file every time before starting server -python3 -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. +python3 -u -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. popd >/dev/null 2>&1 #Start the grpc server -python3 -u $GRPC_DIR/start_server.py $@ +python3 -u $GRPC_DIR/start_server.py $@ & diff --git a/modules/network/base/bin/start_module b/modules/network/base/bin/start_module index e00747b43..6de62f1a5 100644 --- a/modules/network/base/bin/start_module +++ b/modules/network/base/bin/start_module @@ -60,10 +60,16 @@ else INTF=$DEFINED_IFACE fi -echo "Starting module $MODULE_NAME on local interface $INTF..." +# Setup the PYTHONPATH so all imports work as expected +echo "Setting up PYTHONPATH..." +export PYTHONPATH=$($BIN_DIR/setup_python_path) +echo "PYTHONPATH: $PYTHONPATH" +echo "Configuring binary files..." $BIN_DIR/setup_binaries $BIN_DIR +echo "Starting module $MODULE_NAME on local interface $INTF..." + # Wait for interface to become ready $BIN_DIR/wait_for_interface $INTF @@ -80,9 +86,9 @@ then if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]] then echo "gRPC port resolved from config: $GRPC_PORT" - $BIN_DIR/start_grpc "-p $GRPC_PORT" & + $BIN_DIR/start_grpc "-p $GRPC_PORT" else - $BIN_DIR/start_grpc & + $BIN_DIR/start_grpc fi fi diff --git a/modules/network/base/python/requirements.txt b/modules/network/base/python/requirements.txt index 9c4e2b056..9d9473d74 100644 --- a/modules/network/base/python/requirements.txt +++ b/modules/network/base/python/requirements.txt @@ -1,2 +1,3 @@ grpcio -grpcio-tools \ No newline at end of file +grpcio-tools +netifaces \ No newline at end of file diff --git a/modules/network/base/python/src/grpc/start_server.py b/modules/network/base/python/src/grpc_server/start_server.py similarity index 100% rename from modules/network/base/python/src/grpc/start_server.py rename to modules/network/base/python/src/grpc_server/start_server.py diff --git a/modules/network/base/python/src/logger.py b/modules/network/base/python/src/logger.py index 8893b1e8d..998a4aaae 100644 --- a/modules/network/base/python/src/logger.py +++ b/modules/network/base/python/src/logger.py @@ -35,7 +35,7 @@ log_level = logging.getLevelName(log_level_str) except OSError: # TODO: Print out warning that log level is incorrect or missing - LOG_LEVEL = _DEFAULT_LEVEL + log_level = _DEFAULT_LEVEL log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) diff --git a/modules/network/dhcp-1/bin/start_network_service b/modules/network/dhcp-1/bin/start_network_service index fbeede871..9f4a3dc51 100644 --- a/modules/network/dhcp-1/bin/start_network_service +++ b/modules/network/dhcp-1/bin/start_network_service @@ -20,7 +20,7 @@ DHCP_LOG_FILE=/runtime/network/dhcp1-dhcpd.log RA_PID_FILE=/var/run/radvd/radvd.pid RA_LOG_FILE=/runtime/network/dhcp1-radvd.log -echo "Starrting Network Service..." +echo "Starting Network Service..." #Enable IPv6 Forwarding sysctl net.ipv6.conf.all.forwarding=1 diff --git a/modules/network/dhcp-1/conf/dhcpd.conf b/modules/network/dhcp-1/conf/dhcpd.conf index 9f4fe1c28..ee171279c 100644 --- a/modules/network/dhcp-1/conf/dhcpd.conf +++ b/modules/network/dhcp-1/conf/dhcpd.conf @@ -1,26 +1,28 @@ -default-lease-time 300; - -failover peer "failover-peer" { - primary; - address 10.10.10.2; - port 847; - peer address 10.10.10.3; - peer port 647; - max-response-delay 60; - max-unacked-updates 10; - mclt 3600; - split 128; - load balance max seconds 3; -} - -subnet 10.10.10.0 netmask 255.255.255.0 { - option ntp-servers 10.10.10.5; - option subnet-mask 255.255.255.0; - option broadcast-address 10.10.10.255; - option routers 10.10.10.1; - option domain-name-servers 10.10.10.4; - pool { - failover peer "failover-peer"; - range 10.10.10.10 10.10.10.20; - } -} +default-lease-time 300; + +failover peer "failover-peer" { + primary; + address 10.10.10.2; + port 847; + peer address 10.10.10.3; + peer port 647; + max-response-delay 60; + max-unacked-updates 10; + mclt 3600; + split 128; + load balance max seconds 3; +} + +subnet 10.10.10.0 netmask 255.255.255.0 { + option ntp-servers 10.10.10.5; + option subnet-mask 255.255.255.0; + option broadcast-address 10.10.10.255; + option routers 10.10.10.1; + option domain-name-servers 10.10.10.4; + interface veth0; + authoritative; + pool { + failover peer "failover-peer"; + range 10.10.10.10 10.10.10.20; + } +} \ No newline at end of file diff --git a/modules/network/dhcp-1/dhcp-1.Dockerfile b/modules/network/dhcp-1/dhcp-1.Dockerfile index a4eb8d90a..b47378045 100644 --- a/modules/network/dhcp-1/dhcp-1.Dockerfile +++ b/modules/network/dhcp-1/dhcp-1.Dockerfile @@ -18,6 +18,12 @@ FROM test-run/base:latest ARG MODULE_NAME=dhcp-1 ARG MODULE_DIR=modules/network/$MODULE_NAME +# Install all necessary packages +RUN apt-get install -y wget + +#Update the oui.txt file from ieee +RUN wget http://standards-oui.ieee.org/oui.txt -P /usr/local/etc/ + # Install dhcp server RUN apt-get install -y isc-dhcp-server radvd @@ -28,4 +34,4 @@ COPY $MODULE_DIR/conf /testrun/conf COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY $MODULE_DIR/python /testrun/python +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/modules/network/dhcp-1/python/src/grpc/dhcp_config.py b/modules/network/dhcp-1/python/src/grpc/dhcp_config.py deleted file mode 100644 index 99d6bdebd..000000000 --- a/modules/network/dhcp-1/python/src/grpc/dhcp_config.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Contains all the necessary classes to maintain the -DHCP server's configuration""" -import re - -CONFIG_FILE = '/etc/dhcp/dhcpd.conf' -CONFIG_FILE_TEST = 'network/modules/dhcp-1/conf/dhcpd.conf' - -DEFAULT_LEASE_TIME_KEY = 'default-lease-time' - - -class DHCPConfig: - """Represents the DHCP Servers configuration and gives access to modify it""" - - def __init__(self): - self._default_lease_time = 300 - self.subnets = [] - self._peer = None - - def write_config(self): - conf = str(self) - print('Writing config: \n' + conf) - with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: - conf_file.write(conf) - - def resolve_config(self): - with open(CONFIG_FILE, 'r', encoding='UTF-8') as f: - conf = f.read() - self.resolve_subnets(conf) - self._peer = DHCPFailoverPeer(conf) - - def resolve_subnets(self, conf): - self.subnets = [] - regex = r'(subnet.*)' - subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) - for subnet in subnets: - dhcp_subnet = DHCPSubnet(subnet) - self.subnets.append(dhcp_subnet) - - def set_range(self, start, end, subnet=0, pool=0): - print('Setting Range for pool ') - print(self.subnets[subnet].pools[pool]) - self.subnets[subnet].pools[pool].range_start = start - self.subnets[subnet].pools[pool].range_end = end - - # def resolve_settings(self, conf): - # lines = conf.split('\n') - # for line in lines: - # if DEFAULT_LEASE_TIME_KEY in line: - # self._default_lease_time = line.strip().split( - # DEFAULT_LEASE_TIME_KEY)[1].strip().split(';')[0] - - # self.peer = peer - - def __str__(self): - - config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" - - config = config.format(length='multi-line', - DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, - DEFAULT_LEASE_TIME=self._default_lease_time) - - config += '\n\n' + str(self.peer) - for subnet in self._subnets: - config += '\n\n' + str(subnet) - return str(config) - - -FAILOVER_PEER_KEY = 'failover peer' -PRIMARY_KEY = 'primary' -ADDRESS_KEY = 'address' -PORT_KEY = 'port' -PEER_ADDRESS_KEY = 'peer address' -PEER_PORT_KEY = 'peer port' -MAX_RESPONSE_DELAY_KEY = 'max-response-delay' -MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' -MCLT_KEY = 'mclt' -SPLIT_KEY = 'split' -LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' - - -class DHCPFailoverPeer: - """Contains all information to define the DHCP failover peer""" - - def __init__(self, config): - self.name = None - self.primary = False - self.address = None - self.port = None - self.peer_address = None - self.peer_port = None - self.max_response_delay = None - self.max_unacked_updates = None - self.mclt = None - self.split = None - self.load_balance_max_seconds = None - self.peer = None - - self.resolve_peer(config) - - def __str__(self): - config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' - config += '\tprimary;' if self.primary else 'secondary;' - config += """\n\t{ADDRESS_KEY} {ADDRESS}; - {PORT_KEY} {PORT}; - {PEER_ADDRESS_KEY} {PEER_ADDRESS}; - {PEER_PORT_KEY} {PEER_PORT}; - {MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY}; - {MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES}; - {MCLT_KEY} {MCLT}; - {SPLIT_KEY} {SPLIT}; - {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; - \r}}""" - - return config.format( - length='multi-line', - FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, - FAILOVER_PEER=self.name, - ADDRESS_KEY=ADDRESS_KEY, - ADDRESS=self.address, - PORT_KEY=PORT_KEY, - PORT=self.port, - PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, - PEER_ADDRESS=self.peer_address, - PEER_PORT_KEY=PEER_PORT_KEY, - PEER_PORT=self.peer_port, - MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, - MAX_RESPONSE_DELAY=self.max_response_delay, - MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, - MAX_UNACKED_UPDATES=self.max_unacked_updates, - MCLT_KEY=MCLT_KEY, - MCLT=self.mclt, - SPLIT_KEY=SPLIT_KEY, - SPLIT=self.split, - LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, - LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) - - def resolve_peer(self, conf): - peer = '' - lines = conf.split('\n') - for line in lines: - if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: - if len(peer) <= 0: - self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( - '{')[0].split('\"')[1] - peer += line + '\n' - if PRIMARY_KEY in line: - self.primary = True - elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: - self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( - ';')[0] - elif PORT_KEY in line and PEER_PORT_KEY not in line: - self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] - elif PEER_ADDRESS_KEY in line: - self.peer_address = line.strip().split( - PEER_ADDRESS_KEY)[1].strip().split(';')[0] - elif PEER_PORT_KEY in line: - self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( - ';')[0] - elif MAX_RESPONSE_DELAY_KEY in line: - self.max_response_delay = line.strip().split( - MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] - elif MAX_UNACKED_UPDATES_KEY in line: - self.max_unacked_updates = line.strip().split( - MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] - elif MCLT_KEY in line: - self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] - elif SPLIT_KEY in line: - self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] - elif LOAD_BALANCE_MAX_SECONDS_KEY in line: - self.load_balance_max_seconds = line.strip().split( - LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] - if line.endswith('}') and len(peer) > 0: - break - self.peer = peer - - -NTP_OPTION_KEY = 'option ntp-servers' -SUBNET_MASK_OPTION_KEY = 'option subnet-mask' -BROADCAST_OPTION_KEY = 'option broadcast-address' -ROUTER_OPTION_KEY = 'option routers' -DNS_OPTION_KEY = 'option domain-name-servers' - - -class DHCPSubnet: - """Represents the DHCP Servers subnet configuration""" - - def __init__(self, subnet): - self._ntp_servers = None - self._subnet_mask = None - self._broadcast = None - self._routers = None - self._dns_servers = None - self.pools = [] - - self.resolve_subnet(subnet) - self.resolve_pools(subnet) - - def __str__(self): - config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ - \r\t{NTP_OPTION_KEY} {NTP_OPTION}; - \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; - \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; - \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; - \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" - - config = config.format(length='multi-line', - NTP_OPTION_KEY=NTP_OPTION_KEY, - NTP_OPTION=self._ntp_servers, - SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, - SUBNET_MASK_OPTION=self._subnet_mask, - BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, - BROADCAST_OPTION=self._broadcast, - ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, - ROUTER_OPTION=self._routers, - DNS_OPTION_KEY=DNS_OPTION_KEY, - DNS_OPTION=self._dns_servers) - for pool in self.pools: - config += '\n\t' + str(pool) - - config += '\n\r}' - return config - - def resolve_subnet(self, subnet): - subnet_parts = subnet.split('\n') - for part in subnet_parts: - if NTP_OPTION_KEY in part: - self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( - ';')[0] - elif SUBNET_MASK_OPTION_KEY in part: - self._subnet_mask = part.strip().split( - SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] - elif BROADCAST_OPTION_KEY in part: - self._broadcast = part.strip().split( - BROADCAST_OPTION_KEY)[1].strip().split(';')[0] - elif ROUTER_OPTION_KEY in part: - self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( - ';')[0] - elif DNS_OPTION_KEY in part: - self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( - ';')[0] - - def resolve_pools(self, subnet): - regex = r'(pool.*)\}' - pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) - for pool in pools: - dhcp_pool = DHCPPool(pool) - self.pools.append(dhcp_pool) - - -FAILOVER_KEY = 'failover peer' -RANGE_KEY = 'range' - - -class DHCPPool: - """Represents a DHCP Servers subnet pool configuration""" - - def __init__(self, pool): - self.failover_peer = None - self.range_start = None - self.range_end = None - self.resolve_pool(pool) - - def __str__(self): - - config = """pool {{ - \r\t\t{FAILOVER_KEY} "{FAILOVER}"; - \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; - \r\t}}""" - - return config.format( - length='multi-line', - FAILOVER_KEY=FAILOVER_KEY, - FAILOVER=self.failover_peer, - RANGE_KEY=RANGE_KEY, - RANGE_START=self.range_start, - RANGE_END=self.range_end, - ) - - def resolve_pool(self, pool): - pool_parts = pool.split('\n') - # pool_parts = pool.split("\n") - for part in pool_parts: - if FAILOVER_KEY in part: - self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( - ';')[0].replace('\"', '') - if RANGE_KEY in part: - pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] - self.range_start = pool_range.split(' ')[0].strip() - self.range_end = pool_range.split(' ')[1].strip() diff --git a/modules/network/dhcp-1/python/src/grpc/network_service.py b/modules/network/dhcp-1/python/src/grpc/network_service.py deleted file mode 100644 index 64aab8a07..000000000 --- a/modules/network/dhcp-1/python/src/grpc/network_service.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""gRPC Network Service for the DHCP Server network module""" -import proto.grpc_pb2_grpc as pb2_grpc -import proto.grpc_pb2 as pb2 - -from dhcp_config import DHCPConfig - - -class NetworkService(pb2_grpc.NetworkModule): - """gRPC endpoints for the DHCP Server""" - - def __init__(self): - self._dhcp_config = DHCPConfig() - - def GetDHCPRange(self, request, context): # pylint: disable=W0613 - """ - Resolve the current DHCP configuration and return - the first range from the first subnet in the file - """ - self._dhcp_config.resolve_config() - pool = self._dhcp_config.subnets[0].pools[0] - return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) - - def SetDHCPRange(self, request, context): # pylint: disable=W0613 - """ - Change DHCP configuration and set the - the first range from the first subnet in the configuration - """ - - print('Setting DHCPRange') - print('Start: ' + request.start) - print('End: ' + request.end) - self._dhcp_config.resolve_config() - self._dhcp_config.set_range(request.start, request.end, 0, 0) - self._dhcp_config.write_config() - return pb2.Response(code=200, message='DHCP Range Set') - - def GetStatus(self, request, context): # pylint: disable=W0613 - """ - Return the current status of the network module - """ - # ToDo: Figure out how to resolve the current DHCP status - dhcp_status = True - message = str({'dhcpStatus': dhcp_status}) - return pb2.Response(code=200, message=message) diff --git a/modules/network/dhcp-1/python/src/grpc/proto/grpc.proto b/modules/network/dhcp-1/python/src/grpc/proto/grpc.proto deleted file mode 100644 index 8e2732620..000000000 --- a/modules/network/dhcp-1/python/src/grpc/proto/grpc.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -service NetworkModule { - - rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; - - rpc SetDHCPRange(DHCPRange) returns (Response) {}; - - rpc GetStatus(GetStatusRequest) returns (Response) {}; - - rpc GetIPAddress(GetIPAddressRequest) returns (Response) {}; - - rpc SetLeaseAddress(SetLeaseAddressRequest) returns (Response) {}; - -} - -message Response { - int32 code = 1; - string message = 2; -} - -message DHCPRange { - int32 code = 1; - string start = 2; - string end = 3; -} - -message GetDHCPRangeRequest {} - -message GetIPAddressRequest {} - -message GetStatusRequest {} - -message SetLeaseAddressRequest { - string ipAddress = 1; -} \ No newline at end of file diff --git a/modules/network/dhcp-1/python/src/grpc/__init__.py b/modules/network/dhcp-1/python/src/grpc_server/__init__.py similarity index 100% rename from modules/network/dhcp-1/python/src/grpc/__init__.py rename to modules/network/dhcp-1/python/src/grpc_server/__init__.py diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py new file mode 100644 index 000000000..444faa87c --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py @@ -0,0 +1,493 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary classes to maintain the +DHCP server's configuration""" +import re +from common import logger + +LOG_NAME = 'dhcp_config' +LOGGER = None + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' + +DEFAULT_LEASE_TIME_KEY = 'default-lease-time' + + +class DHCPConfig: + """Represents the DHCP Servers configuration and gives access to modify it""" + + def __init__(self): + self._default_lease_time = 300 + self._subnets = [] + self._peer = None + self._reserved_hosts = [] + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + + def add_reserved_host(self, hostname, hw_addr, ip_addr): + host = DHCPReservedHost(hostname=hostname, + hw_addr=hw_addr, + fixed_addr=ip_addr) + self._reserved_hosts.append(host) + + def delete_reserved_host(self, hw_addr): + for host in self._reserved_hosts: + if hw_addr == host.hw_addr: + self._reserved_hosts.remove(host) + + def disable_failover(self): + self._peer.disable() + for subnet in self._subnets: + subnet.disable_peer() + + def enable_failover(self): + self._peer.enable() + for subnet in self._subnets: + subnet.enable_peer() + + def get_reserved_host(self, hw_addr): + for host in self._reserved_hosts: + if hw_addr == host.hw_addr: + return host + + def write_config(self, config=None): + if config is None: + conf = str(self) + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(conf) + else: + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(config) + + def _get_config(self, config_file=CONFIG_FILE): + content = None + with open(config_file, 'r', encoding='UTF-8') as f: + content = f.read() + return content + + def make(self, conf): + try: + self._subnets = self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + self._reserved_hosts = self.resolve_reserved_hosts(conf) + except Exception as e: # pylint: disable=W0718 + print('Failed to make DHCPConfig: ' + str(e)) + + def resolve_config(self, config_file=CONFIG_FILE): + try: + conf = self._get_config(config_file) + self._subnets = self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + self._reserved_hosts = self.resolve_reserved_hosts(conf) + except Exception as e: # pylint: disable=W0718 + print('Failed to resolve config: ' + str(e)) + + def resolve_subnets(self, conf): + subnets = [] + regex = r'(subnet.*)' + subnets_conf = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets_conf: + dhcp_subnet = DHCPSubnet(subnet) + subnets.append(dhcp_subnet) + return subnets + + def resolve_reserved_hosts(self, conf): + hosts = [] + host_start = 0 + while True: + host_start = conf.find('host', host_start) + if host_start < 0: + break + else: + host_end = conf.find('}', host_start) + host = DHCPReservedHost(config=conf[host_start:host_end + 1]) + hosts.append(host) + host_start = host_end + 1 + return hosts + + def set_range(self, start, end, subnet=0, pool=0): + # Calculate the subnet from the range + octets = start.split('.') + octets[-1] = '0' + dhcp_subnet = '.'.join(octets) + + #Update the subnet and range + self._subnets[subnet].set_subnet(dhcp_subnet) + self._subnets[subnet].pools[pool].set_range(start, end) + + def __str__(self): + + # Encode the top level config options + config = """{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, + DEFAULT_LEASE_TIME=self._default_lease_time) + + # Encode the failover peer + config += '\n\n' + str(self._peer) + + # Encode the subnets + for subnet in self._subnets: + config += '\n\n' + str(subnet) + + # Encode the reserved hosts + for host in self._reserved_hosts: + config += '\n' + str(host) + + return str(config) + + +FAILOVER_PEER_KEY = 'failover peer' +PRIMARY_KEY = 'primary' +ADDRESS_KEY = 'address' +PORT_KEY = 'port' +PEER_ADDRESS_KEY = 'peer address' +PEER_PORT_KEY = 'peer port' +MAX_RESPONSE_DELAY_KEY = 'max-response-delay' +MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' +MCLT_KEY = 'mclt' +SPLIT_KEY = 'split' +LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' + + +class DHCPFailoverPeer: + """Contains all information to define the DHCP failover peer""" + + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + self.enabled = True + + self.resolve_peer(config) + + def __str__(self): + config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' + config += '\tprimary;' if self.primary else 'secondary;' + config += '\n\t{ADDRESS_KEY} {ADDRESS};' if self.address is not None else '' + config += '\n\t{PORT_KEY} {PORT};' if self.port is not None else '' + config += '\n\t{PEER_ADDRESS_KEY} {PEER_ADDRESS};' if self.peer_address is not None else '' + config += '\n\t{PEER_PORT_KEY} {PEER_PORT};' if self.peer_port is not None else '' + config += '\n\t{MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY};' if self.max_response_delay is not None else '' + config += '\n\t{MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES};' if self.max_unacked_updates is not None else '' + config += '\n\t{MCLT_KEY} {MCLT};' if self.mclt is not None else '' + config += '\n\t{SPLIT_KEY} {SPLIT};' if self.split is not None else '' + config += '\n\t{LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS};' if self.load_balance_max_seconds is not None else '' + config += '\n\r}}' + + config = config.format( + length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, + FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, + ADDRESS=self.address, + PORT_KEY=PORT_KEY, + PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, + PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, + PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, + MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, + MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, + MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, + SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, + LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) + + if not self.enabled: + lines = config.strip().split('\n') + for i in range(len(lines)-1): + lines[i] = '#' + lines[i] + lines[-1] = '#' + lines[-1].strip() # Handle the last line separately + config = '\n'.join(lines) + + return config + + def disable(self): + self.enabled = False + + def enable(self): + self.enabled = True + + def resolve_peer(self, conf): + peer = '' + lines = conf.split('\n') + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if len(peer) <= 0: + self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( + '{')[0].split('\"')[1] + peer += line + '\n' + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( + ';')[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split( + PEER_ADDRESS_KEY)[1].strip().split(';')[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( + ';')[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = line.strip().split( + MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split( + MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split( + LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] + if line.endswith('}') and len(peer) > 0: + break + self.peer = peer + + +SUBNET_KEY = 'subnet' +NTP_OPTION_KEY = 'option ntp-servers' +SUBNET_MASK_OPTION_KEY = 'option subnet-mask' +BROADCAST_OPTION_KEY = 'option broadcast-address' +ROUTER_OPTION_KEY = 'option routers' +DNS_OPTION_KEY = 'option domain-name-servers' +INTERFACE_KEY = 'interface' +AUTHORITATIVE_KEY = 'authoritative' + + +class DHCPSubnet: + """Represents the DHCP Servers subnet configuration""" + + def __init__(self, subnet): + self._authoritative = False + self._subnet = None + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self._interface = None + self.pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = 'subnet {SUBNET_OPTION} netmask {SUBNET_MASK_OPTION} {{' + config += '\n\t{NTP_OPTION_KEY} {NTP_OPTION};' if self._ntp_servers is not None else '' + config += '\n\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION};' if self._subnet_mask is not None else '' + config += '\n\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION};' if self._broadcast is not None else '' + config += '\n\t{ROUTER_OPTION_KEY} {ROUTER_OPTION};' if self._routers is not None else '' + config += '\n\t{DNS_OPTION_KEY} {DNS_OPTION};' if self._dns_servers is not None else '' + config += '\n\t{INTERFACE_KEY} {INTERFACE_OPTION};' if self._interface is not None else '' + config += '\n\t{AUTHORITATIVE_KEY};' if self._authoritative else '' + + + config = config.format(length='multi-line', + SUBNET_OPTION=self._subnet, + NTP_OPTION_KEY=NTP_OPTION_KEY, + NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, + SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, + BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, + ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, + DNS_OPTION=self._dns_servers, + INTERFACE_KEY=INTERFACE_KEY, + INTERFACE_OPTION=self._interface, + AUTHORITATIVE_KEY=AUTHORITATIVE_KEY) + + # if not self._authoritative: + # config = config.replace(AUTHORITATIVE_KEY, '#' + AUTHORITATIVE_KEY) + + for pool in self.pools: + config += '\n\t' + str(pool) + + config += '\n}' + return config + + def disable_peer(self): + for pool in self.pools: + pool.disable_peer() + + def enable_peer(self): + for pool in self.pools: + pool.enable_peer() + + def set_subnet(self, subnet, netmask=None): + if netmask is None: + netmask = '255.255.255.0' + self._subnet = subnet + self._subnet_mask = netmask + + # Calculate the broadcast from the subnet + octets = subnet.split('.') + octets[-1] = '255' + dhcp_broadcast = '.'.join(octets) + + self._broadcast = dhcp_broadcast + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split('\n') + for part in subnet_parts: + if part.strip().startswith(SUBNET_KEY): + self._subnet = part.strip().split()[1] + elif NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( + ';')[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split( + SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split( + BROADCAST_OPTION_KEY)[1].strip().split(';')[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( + ';')[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( + ';')[0] + elif INTERFACE_KEY in part: + self._interface = part.strip().split(INTERFACE_KEY)[1].strip().split( + ';')[0] + elif AUTHORITATIVE_KEY in part: + self._authoritative = True + + def resolve_pools(self, subnet): + regex = r'(pool.*)\}' + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self.pools.append(dhcp_pool) + + +FAILOVER_KEY = 'failover peer' +RANGE_KEY = 'range' + + +class DHCPPool: + """Represents a DHCP Servers subnet pool configuration""" + + def __init__(self, pool): + self.failover_peer = None + self.range_start = None + self.range_end = None + self.resolve_pool(pool) + self._peer_enabled = True + + def __str__(self): + config = 'pool {{' + config += '\n\t\t{FAILOVER_KEY} "{FAILOVER}";' if self.failover_peer is not None else '' + config += '\n\t\t{RANGE_KEY} {RANGE_START} {RANGE_END};' if self.range_start is not None and self.range_end is not None else '' + config += '\n\t}}' + + config = config.format( + length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, + FAILOVER=self.failover_peer, + RANGE_KEY=RANGE_KEY, + RANGE_START=self.range_start, + RANGE_END=self.range_end, + ) + + if not self._peer_enabled: + config = config.replace(FAILOVER_KEY, '#' + FAILOVER_KEY) + + return config + + def disable_peer(self): + self._peer_enabled = False + + def enable_peer(self): + self._peer_enabled = True + + def set_range(self, start, end): + self.range_start = start + self.range_end = end + + def resolve_pool(self, pool): + pool_parts = pool.split('\n') + for part in pool_parts: + if FAILOVER_KEY in part: + self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( + ';')[0].replace('\"', '') + if RANGE_KEY in part: + pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] + self.range_start = pool_range.split(' ')[0].strip() + self.range_end = pool_range.split(' ')[1].strip() + + +HOST_KEY = 'host' +HARDWARE_KEY = 'hardware ethernet' +FIXED_ADDRESS_KEY = 'fixed-address' + + +class DHCPReservedHost: + """Represents a DHCP Servers subnet pool configuration""" + + def __init__(self, hostname=None, hw_addr=None, fixed_addr=None, config=None): + if config is None: + self.host = hostname + self.hw_addr = hw_addr + self.fixed_addr = fixed_addr + else: + self.resolve_host(config) + + def __str__(self): + + config = """{HOST_KEY} {HOSTNAME} {{ + \r\t{HARDWARE_KEY} {HW_ADDR}; + \r\t{FIXED_ADDRESS_KEY} {RESERVED_IP}; + \r}}""" + + config = config.format( + length='multi-line', + HOST_KEY=HOST_KEY, + HOSTNAME=self.host, + HARDWARE_KEY=HARDWARE_KEY, + HW_ADDR=self.hw_addr, + FIXED_ADDRESS_KEY=FIXED_ADDRESS_KEY, + RESERVED_IP=self.fixed_addr, + ) + return config + + def resolve_host(self, reserved_host): + host_parts = reserved_host.split('\n') + for part in host_parts: + if HOST_KEY in part: + self.host = part.strip().split(HOST_KEY)[1].strip().split('{')[0] + elif HARDWARE_KEY in part: + self.hw_addr = part.strip().split(HARDWARE_KEY)[1].strip().split(';')[0] + elif FIXED_ADDRESS_KEY in part: + self.fixed_addr = part.strip().split( + FIXED_ADDRESS_KEY)[1].strip().split(';')[0] diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py new file mode 100644 index 000000000..2cc78403a --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py @@ -0,0 +1,103 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit Testing for the DHCP Server config""" +import unittest +from dhcp_config import DHCPConfig +import os + +CONFIG_FILE = 'conf/dhcpd.conf' + +DHCP_CONFIG = None + +def get_config_file_path(): + dhcp_config = DHCPConfig() + current_dir = os.path.dirname(os.path.abspath(__file__)) + module_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(current_dir)))) + conf_file = os.path.join(module_dir,CONFIG_FILE) + return conf_file + +def get_config(): + dhcp_config = DHCPConfig() + dhcp_config.resolve_config(get_config_file_path()) + return dhcp_config + +class DHCPConfigTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + # Resolve the config + global DHCP_CONFIG + DHCP_CONFIG = get_config() + + def test_resolve_config(self): + print('Test Resolve Config:\n' + str(DHCP_CONFIG)) + + # Resolve the raw config file + with open(get_config_file_path(),'r') as f: + lines = f.readlines() + + # Get the resolved config as a + conf_parts = str(DHCP_CONFIG).split('\n') + + # dhcpd conf is not picky about spacing so we just + # need to check contents of each line for matching + # to make sure evertying matches + for i in range(len(lines)): + self.assertEqual(lines[i].strip(),conf_parts[i].strip()) + + def test_disable_failover(self): + DHCP_CONFIG.disable_failover() + print('Test Disable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertTrue(line.startswith('#')) + + def test_enable_failover(self): + DHCP_CONFIG.enable_failover() + print('Test Enable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertFalse(line.startswith('#')) + + def test_add_reserved_host(self): + DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print('AddHostConfig:\n' + str(DHCP_CONFIG)) + + def test_delete_reserved_host(self): + DHCP_CONFIG.delete_reserved_host('00:11:22:33:44:55') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNone(host) + print('DeleteHostConfig:\n' + str(DHCP_CONFIG)) + + def test_resolve_config_with_hosts(self): + DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') + config_with_hosts = DHCPConfig() + config_with_hosts.make(str(DHCP_CONFIG)) + host = config_with_hosts.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print("ResolveConfigWithHosts:\n" + str(config_with_hosts)) + +if __name__ == '__main__': + suite = unittest.TestSuite() + suite.addTest(DHCPConfigTest('test_resolve_config')) + suite.addTest(DHCPConfigTest('test_disable_failover')) + suite.addTest(DHCPConfigTest('test_enable_failover')) + suite.addTest(DHCPConfigTest('test_add_reserved_host')) + suite.addTest(DHCPConfigTest('test_delete_reserved_host')) + suite.addTest(DHCPConfigTest('test_resolve_config_with_hosts')) + + runner = unittest.TextTestRunner() + runner.run(suite) \ No newline at end of file diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_lease.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_lease.py new file mode 100644 index 000000000..0d2f43e3b --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_lease.py @@ -0,0 +1,75 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary methods to create and monitor DHCP +leases on the server""" +from datetime import datetime +import time + +time_format = '%Y-%m-%d %H:%M:%S' + + +class DHCPLease(object): + """Represents a DHCP Server lease""" + hw_addr = None + ip = None + hostname = None + expires = None + + def __init__(self, lease): + self._make_lease(lease) + + def _make_lease(self, lease): + if lease is not None: + sections_raw = lease.split(' ') + sections = [] + for section in sections_raw: + if section.strip(): + sections.append(section) + self.hw_addr = sections[0] + self.ip = sections[1] + self.hostname = sections[2] + self.expires = sections[3] + '' '' + sections[4] + self.manufacturer = ' '.join(sections[5:]) + + def get_millis(self, timestamp): + dt_obj = datetime.strptime(timestamp, time_format) + millis = dt_obj.timestamp() * 1000 + return millis + + def get_expires_millis(self): + return self.get_millis(self.expires) + + def is_expired(self): + expires_millis = self.get_expires_millis() + cur_time = int(round(time.time()) * 1000) + return cur_time >= expires_millis + + def __str__(self): + lease = {} + if self.hw_addr is not None: + lease['hw_addr'] = self.hw_addr + + if self.ip is not None: + lease['ip'] = self.ip + + if self.hostname is not None: + lease['hostname'] = self.hostname + + if self.expires is not None: + lease['expires'] = self.expires + + if self.manufacturer is not None: + lease['manufacturer'] = self.manufacturer + + return str(lease) diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py new file mode 100644 index 000000000..698277a02 --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py @@ -0,0 +1,107 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Used to resolve the DHCP servers lease information""" +import os +from dhcp_lease import DHCPLease +import logger +from common import util + +LOG_NAME = 'dhcp_lease' +LOGGER = None + +DHCP_LEASE_FILES = [ + '/var/lib/dhcp/dhcpd.leases', '/var/lib/dhcp/dhcpd.leases~', + '/var/lib/dhcp/dhcpd6.leases', '/var/lib/dhcp/dhcpd6.leases~' +] +DHCP_CONFIG_FILE = '/etc/dhcp/dhcpd.conf' + + +class DHCPLeases: + """Leases for the DHCP server""" + + def __init__(self): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + + def delete_all_hosts(self): + LOGGER.info('Deleting hosts') + for lease in DHCP_LEASE_FILES: + LOGGER.info('Checking file: ' + lease) + if os.path.exists(lease): + LOGGER.info('File Exists: ' + lease) + try: + # Delete existing lease file + os.remove(lease) + except OSError as e: + LOGGER.info(f'Error occurred while deleting the file: {e}') + # Create an empty lease file + with open(lease, 'w', encoding='UTF-8'): + pass + + def get_lease(self, hw_addr): + for lease in self.get_leases(): + if lease.hw_addr == hw_addr: + return lease + + def get_leases(self): + leases = [] + lease_list_raw = self._get_lease_list() + LOGGER.info('Raw Leases:\n' + str(lease_list_raw) + '\n') + lease_list_start = lease_list_raw.find('=========',0) + lease_list_start = lease_list_raw.find('\n',lease_list_start) + lease_list = lease_list_raw[lease_list_start+1:] + lines = lease_list.split('\n') + for line in lines: + try: + lease = DHCPLease(line) + leases.append(lease) + except Exception as e: # pylint: disable=W0718 + # Let non lease lines file without extra checks + LOGGER.error('Making Lease Error: ' + str(e)) + LOGGER.error('Not a valid lease line: ' + line) + return leases + + def delete_lease(self, ip_addr): + LOGGER.info('Deleting lease') + for lease in DHCP_LEASE_FILES: + LOGGER.info('Checking file: ' + lease) + if os.path.exists(lease): + LOGGER.info('File Exists: ' + lease) + try: + # Delete existing lease file + with (open(lease, 'r', encoding='UTF-8')) as f: + contents = f.read() + + while ip_addr in contents: + ix_ip = contents.find(ip_addr) + lease_start = contents.rindex('lease', 0, ix_ip) + lease_end = contents.find('}', lease_start) + LOGGER.info('Lease Location: ' + str(lease_start) + ':' + + str(lease_end)) + contents = contents[0:lease_start] + contents[lease_end + 1:] + + except OSError as e: + LOGGER.info(f'Error occurred while deleting the lease: {e}') + + def _get_lease_list(self): + LOGGER.info('Running lease list command') + try: + result = util.run_command('dhcp-lease-list') + return result[0] + except Exception as e: # pylint: disable=W0718 + LOGGER.error('Error lease list: ' + str(e)) + + def _write_config(self, config): + with open(DHCP_CONFIG_FILE, 'w', encoding='UTF-8') as f: + f.write(config) diff --git a/modules/network/dhcp-1/python/src/grpc_server/network_service.py b/modules/network/dhcp-1/python/src/grpc_server/network_service.py new file mode 100644 index 000000000..bf2b98803 --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/network_service.py @@ -0,0 +1,157 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC Network Service for the DHCP Server network module""" +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 + +from dhcp_config import DHCPConfig +from dhcp_leases import DHCPLeases + +import traceback +from common import logger + +LOG_NAME = 'network_service' +LOGGER = None + +class NetworkService(pb2_grpc.NetworkModule): + """gRPC endpoints for the DHCP Server""" + + def __init__(self): + self._dhcp_config = None + self.dhcp_leases = DHCPLeases() + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + + def _get_dhcp_config(self): + if self._dhcp_config is None: + self._dhcp_config = DHCPConfig() + self._dhcp_config.resolve_config() + return self._dhcp_config + + def AddReservedLease(self, request, context): # pylint: disable=W0613 + LOGGER.info('Add reserved lease called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.add_reserved_host(request.hostname, request.hw_addr, + request.ip_addr) + dhcp_config.write_config() + LOGGER.info('Reserved lease added') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to add reserved lease: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def DeleteReservedLease(self, request, context): # pylint: disable=W0613 + LOGGER.info('Delete reserved lease called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.delete_reserved_host(request.hw_addr) + dhcp_config.write_config() + LOGGER.info('Reserved lease deleted') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to delete reserved lease: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def DisableFailover(self, request, contest): # pylint: disable=W0613 + LOGGER.info('Disable failover called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.disable_failover() + dhcp_config.write_config() + LOGGER.info('Failover disabled') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to disable failover: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def EnableFailover(self, request, contest): # pylint: disable=W0613 + LOGGER.info('Enable failover called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.enable_failover() + dhcp_config.write_config() + LOGGER.info('Failover enabled') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to enable failover: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def GetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + LOGGER.info('Get DHCP range called') + try: + pool = self._get_dhcp_config()._subnets[0].pools[0] + return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to get DHCP range: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def GetLease(self, request, context): # pylint: disable=W0613 + """ + Resolve the current DHCP leased address for the + provided MAC address + """ + LOGGER.info('Get lease called') + try: + lease = self.dhcp_leases.get_lease(request.hw_addr) + if lease is not None: + return pb2.Response(code=200, message=str(lease)) + else: + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to get lease: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def SetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration + """ + LOGGER.info('Set DHCP range called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.set_range(request.start, request.end, 0, 0) + dhcp_config.write_config() + LOGGER.info('DHCP range set') + return pb2.Response(code=200, message='DHCP Range Set') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to set DHCP range: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def GetStatus(self, request, context): # pylint: disable=W0613 + """ + Return the current status of the network module + """ + # ToDo: Figure out how to resolve the current DHCP status + dhcp_status = True + message = str({'dhcpStatus': dhcp_status}) + return pb2.Response(code=200, message=message) diff --git a/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto b/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto new file mode 100644 index 000000000..d9f56213e --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +service NetworkModule { + + rpc AddReservedLease(AddReservedLeaseRequest) returns (Response) {}; + + rpc DeleteReservedLease(DeleteReservedLeaseRequest) returns (Response) {}; + + rpc DisableFailover(DisableFailoverRequest) returns (Response) {}; + + rpc EnableFailover(EnableFailoverRequest) returns (Response) {}; + + rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; + + rpc GetLease(GetLeaseRequest) returns (Response) {}; + + rpc GetStatus(GetStatusRequest) returns (Response) {}; + + rpc SetDHCPRange(SetDHCPRangeRequest) returns (Response) {}; +} + +message AddReservedLeaseRequest { + string hostname = 1; + string hw_addr = 2; + string ip_addr = 3; +} + +message DeleteReservedLeaseRequest { + string hw_addr = 1; +} + +message DisableFailoverRequest {} + +message EnableFailoverRequest {} + +message GetDHCPRangeRequest {} + +message GetLeaseRequest { + string hw_addr = 1; +} + +message GetStatusRequest {} + +message SetDHCPRangeRequest { + int32 code = 1; + string start = 2; + string end = 3; +} + +message Response { + int32 code = 1; + string message = 2; +} + +message DHCPRange { + int32 code = 1; + string start = 2; + string end = 3; +} diff --git a/modules/network/dhcp-2/bin/start_network_service b/modules/network/dhcp-2/bin/start_network_service index 550854d49..723689278 100644 --- a/modules/network/dhcp-2/bin/start_network_service +++ b/modules/network/dhcp-2/bin/start_network_service @@ -20,7 +20,7 @@ DHCP_LOG_FILE=/runtime/network/dhcp2-dhcpd.log RA_PID_FILE=/var/run/radvd/radvd.pid RA_LOG_FILE=/runtime/network/dhcp2-radvd.log -echo "Starrting Network Service..." +echo "Starting Network Service..." #Enable IPv6 Forwarding sysctl net.ipv6.conf.all.forwarding=1 diff --git a/modules/network/dhcp-2/conf/dhcpd.conf b/modules/network/dhcp-2/conf/dhcpd.conf index e73a81441..dcc47a4fe 100644 --- a/modules/network/dhcp-2/conf/dhcpd.conf +++ b/modules/network/dhcp-2/conf/dhcpd.conf @@ -1,24 +1,25 @@ default-lease-time 300; failover peer "failover-peer" { - secondary; - address 10.10.10.3; - port 647; - peer address 10.10.10.2; - peer port 847; - max-response-delay 60; - max-unacked-updates 10; - load balance max seconds 3; + secondary; + address 10.10.10.3; + port 647; + peer address 10.10.10.2; + peer port 847; + max-response-delay 60; + max-unacked-updates 10; + load balance max seconds 3; } subnet 10.10.10.0 netmask 255.255.255.0 { - option ntp-servers 10.10.10.5; - option subnet-mask 255.255.255.0; - option broadcast-address 10.10.10.255; - option routers 10.10.10.1; - option domain-name-servers 10.10.10.4; - pool { - failover peer "failover-peer"; - range 10.10.10.10 10.10.10.20; - } + option ntp-servers 10.10.10.5; + option subnet-mask 255.255.255.0; + option broadcast-address 10.10.10.255; + option routers 10.10.10.1; + option domain-name-servers 10.10.10.4; + interface veth0; + pool { + failover peer "failover-peer"; + range 10.10.10.10 10.10.10.20; + } } diff --git a/modules/network/dhcp-2/python/src/grpc/dhcp_config.py b/modules/network/dhcp-2/python/src/grpc/dhcp_config.py deleted file mode 100644 index f6e79a2ec..000000000 --- a/modules/network/dhcp-2/python/src/grpc/dhcp_config.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Contains all the necessary classes to maintain the -DHCP server's configuration""" -import re - -CONFIG_FILE = '/etc/dhcp/dhcpd.conf' -CONFIG_FILE_TEST = 'network/modules/dhcp-2/conf/dhcpd.conf' - -DEFAULT_LEASE_TIME_KEY = 'default-lease-time' - - -class DHCPConfig: - """Represents the DHCP Servers configuration and gives access to modify it""" - - def __init__(self): - self._default_lease_time = 300 - self.subnets = [] - self._peer = None - - def write_config(self): - conf = str(self) - print('Writing config: \n' + conf) - with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: - conf_file.write(conf) - - def resolve_config(self): - with open(CONFIG_FILE, 'r', encoding='UTF-8') as f: - conf = f.read() - self.resolve_subnets(conf) - self._peer = DHCPFailoverPeer(conf) - - def resolve_subnets(self, conf): - self.subnets = [] - regex = r'(subnet.*)' - subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) - for subnet in subnets: - dhcp_subnet = DHCPSubnet(subnet) - self.subnets.append(dhcp_subnet) - - def set_range(self, start, end, subnet=0, pool=0): - print('Setting Range for pool ') - print(self.subnets[subnet].pools[pool]) - self.subnets[subnet].pools[pool].range_start = start - self.subnets[subnet].pools[pool].range_end = end - - # def resolve_settings(self, conf): - # lines = conf.split('\n') - # for line in lines: - # if DEFAULT_LEASE_TIME_KEY in line: - # self._default_lease_time = line.strip().split( - # DEFAULT_LEASE_TIME_KEY)[1].strip().split(';')[0] - - # self.peer = peer - - def __str__(self): - - config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" - - config = config.format(length='multi-line', - DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, - DEFAULT_LEASE_TIME=self._default_lease_time) - - config += '\n\n' + str(self.peer) - for subnet in self._subnets: - config += '\n\n' + str(subnet) - return str(config) - - -FAILOVER_PEER_KEY = 'failover peer' -PRIMARY_KEY = 'primary' -ADDRESS_KEY = 'address' -PORT_KEY = 'port' -PEER_ADDRESS_KEY = 'peer address' -PEER_PORT_KEY = 'peer port' -MAX_RESPONSE_DELAY_KEY = 'max-response-delay' -MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' -MCLT_KEY = 'mclt' -SPLIT_KEY = 'split' -LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' - - -class DHCPFailoverPeer: - """Contains all information to define the DHCP failover peer""" - - def __init__(self, config): - self.name = None - self.primary = False - self.address = None - self.port = None - self.peer_address = None - self.peer_port = None - self.max_response_delay = None - self.max_unacked_updates = None - self.mclt = None - self.split = None - self.load_balance_max_seconds = None - self.peer = None - - self.resolve_peer(config) - - def __str__(self): - config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' - config += '\tprimary;' if self.primary else 'secondary;' - config += """\n\t{ADDRESS_KEY} {ADDRESS}; - {PORT_KEY} {PORT}; - {PEER_ADDRESS_KEY} {PEER_ADDRESS}; - {PEER_PORT_KEY} {PEER_PORT}; - {MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY}; - {MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES}; - {MCLT_KEY} {MCLT}; - {SPLIT_KEY} {SPLIT}; - {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; - \r}}""" - - return config.format( - length='multi-line', - FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, - FAILOVER_PEER=self.name, - ADDRESS_KEY=ADDRESS_KEY, - ADDRESS=self.address, - PORT_KEY=PORT_KEY, - PORT=self.port, - PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, - PEER_ADDRESS=self.peer_address, - PEER_PORT_KEY=PEER_PORT_KEY, - PEER_PORT=self.peer_port, - MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, - MAX_RESPONSE_DELAY=self.max_response_delay, - MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, - MAX_UNACKED_UPDATES=self.max_unacked_updates, - MCLT_KEY=MCLT_KEY, - MCLT=self.mclt, - SPLIT_KEY=SPLIT_KEY, - SPLIT=self.split, - LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, - LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) - - def resolve_peer(self, conf): - peer = '' - lines = conf.split('\n') - for line in lines: - if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: - if len(peer) <= 0: - self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( - '{')[0].split('\"')[1] - peer += line + '\n' - if PRIMARY_KEY in line: - self.primary = True - elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: - self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( - ';')[0] - elif PORT_KEY in line and PEER_PORT_KEY not in line: - self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] - elif PEER_ADDRESS_KEY in line: - self.peer_address = line.strip().split( - PEER_ADDRESS_KEY)[1].strip().split(';')[0] - elif PEER_PORT_KEY in line: - self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( - ';')[0] - elif MAX_RESPONSE_DELAY_KEY in line: - self.max_response_delay = line.strip().split( - MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] - elif MAX_UNACKED_UPDATES_KEY in line: - self.max_unacked_updates = line.strip().split( - MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] - elif MCLT_KEY in line: - self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] - elif SPLIT_KEY in line: - self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] - elif LOAD_BALANCE_MAX_SECONDS_KEY in line: - self.load_balance_max_seconds = line.strip().split( - LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] - if line.endswith('}') and len(peer) > 0: - break - self.peer = peer - - -NTP_OPTION_KEY = 'option ntp-servers' -SUBNET_MASK_OPTION_KEY = 'option subnet-mask' -BROADCAST_OPTION_KEY = 'option broadcast-address' -ROUTER_OPTION_KEY = 'option routers' -DNS_OPTION_KEY = 'option domain-name-servers' - - -class DHCPSubnet: - """Represents the DHCP Servers subnet configuration""" - - def __init__(self, subnet): - self._ntp_servers = None - self._subnet_mask = None - self._broadcast = None - self._routers = None - self._dns_servers = None - self.pools = [] - - self.resolve_subnet(subnet) - self.resolve_pools(subnet) - - def __str__(self): - config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ - \r\t{NTP_OPTION_KEY} {NTP_OPTION}; - \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; - \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; - \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; - \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" - - config = config.format(length='multi-line', - NTP_OPTION_KEY=NTP_OPTION_KEY, - NTP_OPTION=self._ntp_servers, - SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, - SUBNET_MASK_OPTION=self._subnet_mask, - BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, - BROADCAST_OPTION=self._broadcast, - ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, - ROUTER_OPTION=self._routers, - DNS_OPTION_KEY=DNS_OPTION_KEY, - DNS_OPTION=self._dns_servers) - for pool in self.pools: - config += '\n\t' + str(pool) - - config += '\n\r}' - return config - - def resolve_subnet(self, subnet): - subnet_parts = subnet.split('\n') - for part in subnet_parts: - if NTP_OPTION_KEY in part: - self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( - ';')[0] - elif SUBNET_MASK_OPTION_KEY in part: - self._subnet_mask = part.strip().split( - SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] - elif BROADCAST_OPTION_KEY in part: - self._broadcast = part.strip().split( - BROADCAST_OPTION_KEY)[1].strip().split(';')[0] - elif ROUTER_OPTION_KEY in part: - self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( - ';')[0] - elif DNS_OPTION_KEY in part: - self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( - ';')[0] - - def resolve_pools(self, subnet): - regex = r'(pool.*)\}' - pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) - for pool in pools: - dhcp_pool = DHCPPool(pool) - self.pools.append(dhcp_pool) - - -FAILOVER_KEY = 'failover peer' -RANGE_KEY = 'range' - - -class DHCPPool: - """Represents a DHCP Servers subnet pool configuration""" - - def __init__(self, pool): - self.failover_peer = None - self.range_start = None - self.range_end = None - self.resolve_pool(pool) - - def __str__(self): - - config = """pool {{ - \r\t\t{FAILOVER_KEY} "{FAILOVER}"; - \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; - \r\t}}""" - - return config.format( - length='multi-line', - FAILOVER_KEY=FAILOVER_KEY, - FAILOVER=self.failover_peer, - RANGE_KEY=RANGE_KEY, - RANGE_START=self.range_start, - RANGE_END=self.range_end, - ) - - def resolve_pool(self, pool): - pool_parts = pool.split('\n') - # pool_parts = pool.split("\n") - for part in pool_parts: - if FAILOVER_KEY in part: - self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( - ';')[0].replace('\"', '') - if RANGE_KEY in part: - pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] - self.range_start = pool_range.split(' ')[0].strip() - self.range_end = pool_range.split(' ')[1].strip() diff --git a/modules/network/dhcp-2/python/src/grpc/network_service.py b/modules/network/dhcp-2/python/src/grpc/network_service.py deleted file mode 100644 index 64aab8a07..000000000 --- a/modules/network/dhcp-2/python/src/grpc/network_service.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""gRPC Network Service for the DHCP Server network module""" -import proto.grpc_pb2_grpc as pb2_grpc -import proto.grpc_pb2 as pb2 - -from dhcp_config import DHCPConfig - - -class NetworkService(pb2_grpc.NetworkModule): - """gRPC endpoints for the DHCP Server""" - - def __init__(self): - self._dhcp_config = DHCPConfig() - - def GetDHCPRange(self, request, context): # pylint: disable=W0613 - """ - Resolve the current DHCP configuration and return - the first range from the first subnet in the file - """ - self._dhcp_config.resolve_config() - pool = self._dhcp_config.subnets[0].pools[0] - return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) - - def SetDHCPRange(self, request, context): # pylint: disable=W0613 - """ - Change DHCP configuration and set the - the first range from the first subnet in the configuration - """ - - print('Setting DHCPRange') - print('Start: ' + request.start) - print('End: ' + request.end) - self._dhcp_config.resolve_config() - self._dhcp_config.set_range(request.start, request.end, 0, 0) - self._dhcp_config.write_config() - return pb2.Response(code=200, message='DHCP Range Set') - - def GetStatus(self, request, context): # pylint: disable=W0613 - """ - Return the current status of the network module - """ - # ToDo: Figure out how to resolve the current DHCP status - dhcp_status = True - message = str({'dhcpStatus': dhcp_status}) - return pb2.Response(code=200, message=message) diff --git a/modules/network/dhcp-2/python/src/grpc/proto/grpc.proto b/modules/network/dhcp-2/python/src/grpc/proto/grpc.proto deleted file mode 100644 index 8e2732620..000000000 --- a/modules/network/dhcp-2/python/src/grpc/proto/grpc.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; - -service NetworkModule { - - rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; - - rpc SetDHCPRange(DHCPRange) returns (Response) {}; - - rpc GetStatus(GetStatusRequest) returns (Response) {}; - - rpc GetIPAddress(GetIPAddressRequest) returns (Response) {}; - - rpc SetLeaseAddress(SetLeaseAddressRequest) returns (Response) {}; - -} - -message Response { - int32 code = 1; - string message = 2; -} - -message DHCPRange { - int32 code = 1; - string start = 2; - string end = 3; -} - -message GetDHCPRangeRequest {} - -message GetIPAddressRequest {} - -message GetStatusRequest {} - -message SetLeaseAddressRequest { - string ipAddress = 1; -} \ No newline at end of file diff --git a/modules/network/dhcp-2/python/src/grpc/__init__.py b/modules/network/dhcp-2/python/src/grpc_server/__init__.py similarity index 100% rename from modules/network/dhcp-2/python/src/grpc/__init__.py rename to modules/network/dhcp-2/python/src/grpc_server/__init__.py diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py new file mode 100644 index 000000000..444faa87c --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py @@ -0,0 +1,493 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary classes to maintain the +DHCP server's configuration""" +import re +from common import logger + +LOG_NAME = 'dhcp_config' +LOGGER = None + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' + +DEFAULT_LEASE_TIME_KEY = 'default-lease-time' + + +class DHCPConfig: + """Represents the DHCP Servers configuration and gives access to modify it""" + + def __init__(self): + self._default_lease_time = 300 + self._subnets = [] + self._peer = None + self._reserved_hosts = [] + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + + def add_reserved_host(self, hostname, hw_addr, ip_addr): + host = DHCPReservedHost(hostname=hostname, + hw_addr=hw_addr, + fixed_addr=ip_addr) + self._reserved_hosts.append(host) + + def delete_reserved_host(self, hw_addr): + for host in self._reserved_hosts: + if hw_addr == host.hw_addr: + self._reserved_hosts.remove(host) + + def disable_failover(self): + self._peer.disable() + for subnet in self._subnets: + subnet.disable_peer() + + def enable_failover(self): + self._peer.enable() + for subnet in self._subnets: + subnet.enable_peer() + + def get_reserved_host(self, hw_addr): + for host in self._reserved_hosts: + if hw_addr == host.hw_addr: + return host + + def write_config(self, config=None): + if config is None: + conf = str(self) + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(conf) + else: + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(config) + + def _get_config(self, config_file=CONFIG_FILE): + content = None + with open(config_file, 'r', encoding='UTF-8') as f: + content = f.read() + return content + + def make(self, conf): + try: + self._subnets = self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + self._reserved_hosts = self.resolve_reserved_hosts(conf) + except Exception as e: # pylint: disable=W0718 + print('Failed to make DHCPConfig: ' + str(e)) + + def resolve_config(self, config_file=CONFIG_FILE): + try: + conf = self._get_config(config_file) + self._subnets = self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + self._reserved_hosts = self.resolve_reserved_hosts(conf) + except Exception as e: # pylint: disable=W0718 + print('Failed to resolve config: ' + str(e)) + + def resolve_subnets(self, conf): + subnets = [] + regex = r'(subnet.*)' + subnets_conf = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets_conf: + dhcp_subnet = DHCPSubnet(subnet) + subnets.append(dhcp_subnet) + return subnets + + def resolve_reserved_hosts(self, conf): + hosts = [] + host_start = 0 + while True: + host_start = conf.find('host', host_start) + if host_start < 0: + break + else: + host_end = conf.find('}', host_start) + host = DHCPReservedHost(config=conf[host_start:host_end + 1]) + hosts.append(host) + host_start = host_end + 1 + return hosts + + def set_range(self, start, end, subnet=0, pool=0): + # Calculate the subnet from the range + octets = start.split('.') + octets[-1] = '0' + dhcp_subnet = '.'.join(octets) + + #Update the subnet and range + self._subnets[subnet].set_subnet(dhcp_subnet) + self._subnets[subnet].pools[pool].set_range(start, end) + + def __str__(self): + + # Encode the top level config options + config = """{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, + DEFAULT_LEASE_TIME=self._default_lease_time) + + # Encode the failover peer + config += '\n\n' + str(self._peer) + + # Encode the subnets + for subnet in self._subnets: + config += '\n\n' + str(subnet) + + # Encode the reserved hosts + for host in self._reserved_hosts: + config += '\n' + str(host) + + return str(config) + + +FAILOVER_PEER_KEY = 'failover peer' +PRIMARY_KEY = 'primary' +ADDRESS_KEY = 'address' +PORT_KEY = 'port' +PEER_ADDRESS_KEY = 'peer address' +PEER_PORT_KEY = 'peer port' +MAX_RESPONSE_DELAY_KEY = 'max-response-delay' +MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' +MCLT_KEY = 'mclt' +SPLIT_KEY = 'split' +LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' + + +class DHCPFailoverPeer: + """Contains all information to define the DHCP failover peer""" + + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + self.enabled = True + + self.resolve_peer(config) + + def __str__(self): + config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' + config += '\tprimary;' if self.primary else 'secondary;' + config += '\n\t{ADDRESS_KEY} {ADDRESS};' if self.address is not None else '' + config += '\n\t{PORT_KEY} {PORT};' if self.port is not None else '' + config += '\n\t{PEER_ADDRESS_KEY} {PEER_ADDRESS};' if self.peer_address is not None else '' + config += '\n\t{PEER_PORT_KEY} {PEER_PORT};' if self.peer_port is not None else '' + config += '\n\t{MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY};' if self.max_response_delay is not None else '' + config += '\n\t{MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES};' if self.max_unacked_updates is not None else '' + config += '\n\t{MCLT_KEY} {MCLT};' if self.mclt is not None else '' + config += '\n\t{SPLIT_KEY} {SPLIT};' if self.split is not None else '' + config += '\n\t{LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS};' if self.load_balance_max_seconds is not None else '' + config += '\n\r}}' + + config = config.format( + length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, + FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, + ADDRESS=self.address, + PORT_KEY=PORT_KEY, + PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, + PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, + PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, + MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, + MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, + MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, + SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, + LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) + + if not self.enabled: + lines = config.strip().split('\n') + for i in range(len(lines)-1): + lines[i] = '#' + lines[i] + lines[-1] = '#' + lines[-1].strip() # Handle the last line separately + config = '\n'.join(lines) + + return config + + def disable(self): + self.enabled = False + + def enable(self): + self.enabled = True + + def resolve_peer(self, conf): + peer = '' + lines = conf.split('\n') + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if len(peer) <= 0: + self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( + '{')[0].split('\"')[1] + peer += line + '\n' + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( + ';')[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split( + PEER_ADDRESS_KEY)[1].strip().split(';')[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( + ';')[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = line.strip().split( + MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split( + MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split( + LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] + if line.endswith('}') and len(peer) > 0: + break + self.peer = peer + + +SUBNET_KEY = 'subnet' +NTP_OPTION_KEY = 'option ntp-servers' +SUBNET_MASK_OPTION_KEY = 'option subnet-mask' +BROADCAST_OPTION_KEY = 'option broadcast-address' +ROUTER_OPTION_KEY = 'option routers' +DNS_OPTION_KEY = 'option domain-name-servers' +INTERFACE_KEY = 'interface' +AUTHORITATIVE_KEY = 'authoritative' + + +class DHCPSubnet: + """Represents the DHCP Servers subnet configuration""" + + def __init__(self, subnet): + self._authoritative = False + self._subnet = None + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self._interface = None + self.pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = 'subnet {SUBNET_OPTION} netmask {SUBNET_MASK_OPTION} {{' + config += '\n\t{NTP_OPTION_KEY} {NTP_OPTION};' if self._ntp_servers is not None else '' + config += '\n\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION};' if self._subnet_mask is not None else '' + config += '\n\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION};' if self._broadcast is not None else '' + config += '\n\t{ROUTER_OPTION_KEY} {ROUTER_OPTION};' if self._routers is not None else '' + config += '\n\t{DNS_OPTION_KEY} {DNS_OPTION};' if self._dns_servers is not None else '' + config += '\n\t{INTERFACE_KEY} {INTERFACE_OPTION};' if self._interface is not None else '' + config += '\n\t{AUTHORITATIVE_KEY};' if self._authoritative else '' + + + config = config.format(length='multi-line', + SUBNET_OPTION=self._subnet, + NTP_OPTION_KEY=NTP_OPTION_KEY, + NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, + SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, + BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, + ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, + DNS_OPTION=self._dns_servers, + INTERFACE_KEY=INTERFACE_KEY, + INTERFACE_OPTION=self._interface, + AUTHORITATIVE_KEY=AUTHORITATIVE_KEY) + + # if not self._authoritative: + # config = config.replace(AUTHORITATIVE_KEY, '#' + AUTHORITATIVE_KEY) + + for pool in self.pools: + config += '\n\t' + str(pool) + + config += '\n}' + return config + + def disable_peer(self): + for pool in self.pools: + pool.disable_peer() + + def enable_peer(self): + for pool in self.pools: + pool.enable_peer() + + def set_subnet(self, subnet, netmask=None): + if netmask is None: + netmask = '255.255.255.0' + self._subnet = subnet + self._subnet_mask = netmask + + # Calculate the broadcast from the subnet + octets = subnet.split('.') + octets[-1] = '255' + dhcp_broadcast = '.'.join(octets) + + self._broadcast = dhcp_broadcast + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split('\n') + for part in subnet_parts: + if part.strip().startswith(SUBNET_KEY): + self._subnet = part.strip().split()[1] + elif NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( + ';')[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split( + SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split( + BROADCAST_OPTION_KEY)[1].strip().split(';')[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( + ';')[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( + ';')[0] + elif INTERFACE_KEY in part: + self._interface = part.strip().split(INTERFACE_KEY)[1].strip().split( + ';')[0] + elif AUTHORITATIVE_KEY in part: + self._authoritative = True + + def resolve_pools(self, subnet): + regex = r'(pool.*)\}' + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self.pools.append(dhcp_pool) + + +FAILOVER_KEY = 'failover peer' +RANGE_KEY = 'range' + + +class DHCPPool: + """Represents a DHCP Servers subnet pool configuration""" + + def __init__(self, pool): + self.failover_peer = None + self.range_start = None + self.range_end = None + self.resolve_pool(pool) + self._peer_enabled = True + + def __str__(self): + config = 'pool {{' + config += '\n\t\t{FAILOVER_KEY} "{FAILOVER}";' if self.failover_peer is not None else '' + config += '\n\t\t{RANGE_KEY} {RANGE_START} {RANGE_END};' if self.range_start is not None and self.range_end is not None else '' + config += '\n\t}}' + + config = config.format( + length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, + FAILOVER=self.failover_peer, + RANGE_KEY=RANGE_KEY, + RANGE_START=self.range_start, + RANGE_END=self.range_end, + ) + + if not self._peer_enabled: + config = config.replace(FAILOVER_KEY, '#' + FAILOVER_KEY) + + return config + + def disable_peer(self): + self._peer_enabled = False + + def enable_peer(self): + self._peer_enabled = True + + def set_range(self, start, end): + self.range_start = start + self.range_end = end + + def resolve_pool(self, pool): + pool_parts = pool.split('\n') + for part in pool_parts: + if FAILOVER_KEY in part: + self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( + ';')[0].replace('\"', '') + if RANGE_KEY in part: + pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] + self.range_start = pool_range.split(' ')[0].strip() + self.range_end = pool_range.split(' ')[1].strip() + + +HOST_KEY = 'host' +HARDWARE_KEY = 'hardware ethernet' +FIXED_ADDRESS_KEY = 'fixed-address' + + +class DHCPReservedHost: + """Represents a DHCP Servers subnet pool configuration""" + + def __init__(self, hostname=None, hw_addr=None, fixed_addr=None, config=None): + if config is None: + self.host = hostname + self.hw_addr = hw_addr + self.fixed_addr = fixed_addr + else: + self.resolve_host(config) + + def __str__(self): + + config = """{HOST_KEY} {HOSTNAME} {{ + \r\t{HARDWARE_KEY} {HW_ADDR}; + \r\t{FIXED_ADDRESS_KEY} {RESERVED_IP}; + \r}}""" + + config = config.format( + length='multi-line', + HOST_KEY=HOST_KEY, + HOSTNAME=self.host, + HARDWARE_KEY=HARDWARE_KEY, + HW_ADDR=self.hw_addr, + FIXED_ADDRESS_KEY=FIXED_ADDRESS_KEY, + RESERVED_IP=self.fixed_addr, + ) + return config + + def resolve_host(self, reserved_host): + host_parts = reserved_host.split('\n') + for part in host_parts: + if HOST_KEY in part: + self.host = part.strip().split(HOST_KEY)[1].strip().split('{')[0] + elif HARDWARE_KEY in part: + self.hw_addr = part.strip().split(HARDWARE_KEY)[1].strip().split(';')[0] + elif FIXED_ADDRESS_KEY in part: + self.fixed_addr = part.strip().split( + FIXED_ADDRESS_KEY)[1].strip().split(';')[0] diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py new file mode 100644 index 000000000..2cc78403a --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py @@ -0,0 +1,103 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Unit Testing for the DHCP Server config""" +import unittest +from dhcp_config import DHCPConfig +import os + +CONFIG_FILE = 'conf/dhcpd.conf' + +DHCP_CONFIG = None + +def get_config_file_path(): + dhcp_config = DHCPConfig() + current_dir = os.path.dirname(os.path.abspath(__file__)) + module_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(current_dir)))) + conf_file = os.path.join(module_dir,CONFIG_FILE) + return conf_file + +def get_config(): + dhcp_config = DHCPConfig() + dhcp_config.resolve_config(get_config_file_path()) + return dhcp_config + +class DHCPConfigTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + # Resolve the config + global DHCP_CONFIG + DHCP_CONFIG = get_config() + + def test_resolve_config(self): + print('Test Resolve Config:\n' + str(DHCP_CONFIG)) + + # Resolve the raw config file + with open(get_config_file_path(),'r') as f: + lines = f.readlines() + + # Get the resolved config as a + conf_parts = str(DHCP_CONFIG).split('\n') + + # dhcpd conf is not picky about spacing so we just + # need to check contents of each line for matching + # to make sure evertying matches + for i in range(len(lines)): + self.assertEqual(lines[i].strip(),conf_parts[i].strip()) + + def test_disable_failover(self): + DHCP_CONFIG.disable_failover() + print('Test Disable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertTrue(line.startswith('#')) + + def test_enable_failover(self): + DHCP_CONFIG.enable_failover() + print('Test Enable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertFalse(line.startswith('#')) + + def test_add_reserved_host(self): + DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print('AddHostConfig:\n' + str(DHCP_CONFIG)) + + def test_delete_reserved_host(self): + DHCP_CONFIG.delete_reserved_host('00:11:22:33:44:55') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNone(host) + print('DeleteHostConfig:\n' + str(DHCP_CONFIG)) + + def test_resolve_config_with_hosts(self): + DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') + config_with_hosts = DHCPConfig() + config_with_hosts.make(str(DHCP_CONFIG)) + host = config_with_hosts.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print("ResolveConfigWithHosts:\n" + str(config_with_hosts)) + +if __name__ == '__main__': + suite = unittest.TestSuite() + suite.addTest(DHCPConfigTest('test_resolve_config')) + suite.addTest(DHCPConfigTest('test_disable_failover')) + suite.addTest(DHCPConfigTest('test_enable_failover')) + suite.addTest(DHCPConfigTest('test_add_reserved_host')) + suite.addTest(DHCPConfigTest('test_delete_reserved_host')) + suite.addTest(DHCPConfigTest('test_resolve_config_with_hosts')) + + runner = unittest.TextTestRunner() + runner.run(suite) \ No newline at end of file diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_lease.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_lease.py new file mode 100644 index 000000000..0d2f43e3b --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_lease.py @@ -0,0 +1,75 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary methods to create and monitor DHCP +leases on the server""" +from datetime import datetime +import time + +time_format = '%Y-%m-%d %H:%M:%S' + + +class DHCPLease(object): + """Represents a DHCP Server lease""" + hw_addr = None + ip = None + hostname = None + expires = None + + def __init__(self, lease): + self._make_lease(lease) + + def _make_lease(self, lease): + if lease is not None: + sections_raw = lease.split(' ') + sections = [] + for section in sections_raw: + if section.strip(): + sections.append(section) + self.hw_addr = sections[0] + self.ip = sections[1] + self.hostname = sections[2] + self.expires = sections[3] + '' '' + sections[4] + self.manufacturer = ' '.join(sections[5:]) + + def get_millis(self, timestamp): + dt_obj = datetime.strptime(timestamp, time_format) + millis = dt_obj.timestamp() * 1000 + return millis + + def get_expires_millis(self): + return self.get_millis(self.expires) + + def is_expired(self): + expires_millis = self.get_expires_millis() + cur_time = int(round(time.time()) * 1000) + return cur_time >= expires_millis + + def __str__(self): + lease = {} + if self.hw_addr is not None: + lease['hw_addr'] = self.hw_addr + + if self.ip is not None: + lease['ip'] = self.ip + + if self.hostname is not None: + lease['hostname'] = self.hostname + + if self.expires is not None: + lease['expires'] = self.expires + + if self.manufacturer is not None: + lease['manufacturer'] = self.manufacturer + + return str(lease) diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py new file mode 100644 index 000000000..08e6feabe --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py @@ -0,0 +1,107 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Used to resolve the DHCP servers lease information""" +import os +from dhcp_lease import DHCPLease +import logger +from common import util + +LOG_NAME = 'dhcp_lease' +LOGGER = None + +DHCP_LEASE_FILES = [ + '/var/lib/dhcp/dhcpd.leases', '/var/lib/dhcp/dhcpd.leases~', + '/var/lib/dhcp/dhcpd6.leases', '/var/lib/dhcp/dhcpd6.leases~' +] +DHCP_CONFIG_FILE = '/etc/dhcp/dhcpd.conf' + + +class DHCPLeases: + """Leases for the DHCP server""" + + def __init__(self): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-2') + + def delete_all_hosts(self): + LOGGER.info('Deleting hosts') + for lease in DHCP_LEASE_FILES: + LOGGER.info('Checking file: ' + lease) + if os.path.exists(lease): + LOGGER.info('File Exists: ' + lease) + try: + # Delete existing lease file + os.remove(lease) + except OSError as e: + LOGGER.info(f'Error occurred while deleting the file: {e}') + # Create an empty lease file + with open(lease, 'w', encoding='UTF-8'): + pass + + def get_lease(self, hw_addr): + for lease in self.get_leases(): + if lease.hw_addr == hw_addr: + return lease + + def get_leases(self): + leases = [] + lease_list_raw = self._get_lease_list() + LOGGER.info('Raw Leases:\n' + str(lease_list_raw) + '\n') + lease_list_start = lease_list_raw.find('=========',0) + lease_list_start = lease_list_raw.find('\n',lease_list_start) + lease_list = lease_list_raw[lease_list_start+1:] + lines = lease_list.split('\n') + for line in lines: + try: + lease = DHCPLease(line) + leases.append(lease) + except Exception as e: # pylint: disable=W0718 + # Let non lease lines file without extra checks + LOGGER.error('Making Lease Error: ' + str(e)) + LOGGER.error('Not a valid lease line: ' + line) + return leases + + def delete_lease(self, ip_addr): + LOGGER.info('Deleting lease') + for lease in DHCP_LEASE_FILES: + LOGGER.info('Checking file: ' + lease) + if os.path.exists(lease): + LOGGER.info('File Exists: ' + lease) + try: + # Delete existing lease file + with (open(lease, 'r', encoding='UTF-8')) as f: + contents = f.read() + + while ip_addr in contents: + ix_ip = contents.find(ip_addr) + lease_start = contents.rindex('lease', 0, ix_ip) + lease_end = contents.find('}', lease_start) + LOGGER.info('Lease Location: ' + str(lease_start) + ':' + + str(lease_end)) + contents = contents[0:lease_start] + contents[lease_end + 1:] + + except OSError as e: + LOGGER.info(f'Error occurred while deleting the lease: {e}') + + def _get_lease_list(self): + LOGGER.info('Running lease list command') + try: + result = util.run_command('dhcp-lease-list') + return result[0] + except Exception as e: # pylint: disable=W0718 + LOGGER.error('Error lease list: ' + str(e)) + + def _write_config(self, config): + with open(DHCP_CONFIG_FILE, 'w', encoding='UTF-8') as f: + f.write(config) diff --git a/modules/network/dhcp-2/python/src/grpc_server/network_service.py b/modules/network/dhcp-2/python/src/grpc_server/network_service.py new file mode 100644 index 000000000..053d26d6b --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/network_service.py @@ -0,0 +1,157 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""gRPC Network Service for the DHCP Server network module""" +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 + +from dhcp_config import DHCPConfig +from dhcp_leases import DHCPLeases + +import traceback +from common import logger + +LOG_NAME = 'network_service' +LOGGER = None + +class NetworkService(pb2_grpc.NetworkModule): + """gRPC endpoints for the DHCP Server""" + + def __init__(self): + self._dhcp_config = None + self.dhcp_leases = DHCPLeases() + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-2') + + def _get_dhcp_config(self): + if self._dhcp_config is None: + self._dhcp_config = DHCPConfig() + self._dhcp_config.resolve_config() + return self._dhcp_config + + def AddReservedLease(self, request, context): # pylint: disable=W0613 + LOGGER.info('Add reserved lease called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.add_reserved_host(request.hostname, request.hw_addr, + request.ip_addr) + dhcp_config.write_config() + LOGGER.info('Reserved lease added') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to add reserved lease: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def DeleteReservedLease(self, request, context): # pylint: disable=W0613 + LOGGER.info('Delete reserved lease called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.delete_reserved_host(request.hw_addr) + dhcp_config.write_config() + LOGGER.info('Reserved lease deleted') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to delete reserved lease: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def DisableFailover(self, request, contest): # pylint: disable=W0613 + LOGGER.info('Disable failover called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.disable_failover() + dhcp_config.write_config() + LOGGER.info('Failover disabled') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to disable failover: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def EnableFailover(self, request, contest): # pylint: disable=W0613 + LOGGER.info('Enable failover called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.enable_failover() + dhcp_config.write_config() + LOGGER.info('Failover enabled') + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to enable failover: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def GetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + LOGGER.info('Get DHCP range called') + try: + pool = self._get_dhcp_config()._subnets[0].pools[0] + return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to get DHCP range: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def GetLease(self, request, context): # pylint: disable=W0613 + """ + Resolve the current DHCP leased address for the + provided MAC address + """ + LOGGER.info('Get lease called') + try: + lease = self.dhcp_leases.get_lease(request.hw_addr) + if lease is not None: + return pb2.Response(code=200, message=str(lease)) + else: + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to get lease: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def SetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration + """ + LOGGER.info('Set DHCP range called') + try: + dhcp_config = self._get_dhcp_config() + dhcp_config.set_range(request.start, request.end, 0, 0) + dhcp_config.write_config() + LOGGER.info('DHCP range set') + return pb2.Response(code=200, message='DHCP Range Set') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to set DHCP range: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def GetStatus(self, request, context): # pylint: disable=W0613 + """ + Return the current status of the network module + """ + # ToDo: Figure out how to resolve the current DHCP status + dhcp_status = True + message = str({'dhcpStatus': dhcp_status}) + return pb2.Response(code=200, message=message) diff --git a/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto b/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto new file mode 100644 index 000000000..b6a11a75b --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +service NetworkModule { + + rpc AddReservedLease(AddReservedLeaseRequest) returns (Response) {}; + + rpc DeleteReservedLease(DeleteReservedLeaseRequest) returns (Response) {}; + + rpc DisableFailover(DisableFailoverRequest) returns (Response) {}; + + rpc EnableFailover(EnableFailoverRequest) returns (Response) {}; + + rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; + + rpc GetLease(GetLeaseRequest) returns (Response) {}; + + rpc GetStatus(GetStatusRequest) returns (Response) {}; + + rpc SetDHCPRange(SetDHCPRangeRequest) returns (Response) {}; +} + +message AddReservedLeaseRequest { + string hostname = 1; + string hw_addr = 2; + string ip_addr = 3; +} + +message DeleteReservedLeaseRequest { + string hw_addr = 1; +} + +message DisableFailoverRequest {} + +message EnableFailoverRequest {} + +message GetDHCPRangeRequest {} + +message GetLeaseRequest { + string hw_addr = 1; +} + +message GetStatusRequest {} + +message SetDHCPRangeRequest { + int32 code = 1; + string start = 2; + string end = 3; +} + +message Response { + int32 code = 1; + string message = 2; +} + +message DHCPRange { + int32 code = 1; + string start = 2; + string end = 3; +} diff --git a/modules/test/base/base.Dockerfile b/modules/test/base/base.Dockerfile index 9c7f2bac2..10344cbc7 100644 --- a/modules/test/base/base.Dockerfile +++ b/modules/test/base/base.Dockerfile @@ -36,5 +36,13 @@ RUN dos2unix /testrun/bin/* # Make sure all the bin files are executable RUN chmod u+x /testrun/bin/* +# Copy over all network module gRPC proto files +ARG NET_MODULE_DIR=modules/network +ARG NET_MODULE_PROTO_DIR=python/src/grpc_server/proto/grpc.proto +ARG CONTAINER_PROTO_DIR=testrun/python/src/grpc_server/proto + +COPY $NET_MODULE_DIR/dhcp-1/$NET_MODULE_PROTO_DIR $CONTAINER_PROTO_DIR/dhcp1/ +COPY $NET_MODULE_DIR/dhcp-2/$NET_MODULE_PROTO_DIR $CONTAINER_PROTO_DIR/dhcp2/ + # Start the test module ENTRYPOINT [ "/testrun/bin/start_module" ] \ No newline at end of file diff --git a/modules/test/base/bin/setup_grpc_clients b/modules/test/base/bin/setup_grpc_clients new file mode 100644 index 000000000..30efe5002 --- /dev/null +++ b/modules/test/base/bin/setup_grpc_clients @@ -0,0 +1,34 @@ +#!/bin/bash -e + +GRPC_DIR="/testrun/python/src/grpc_server" +GRPC_PROTO_DIR="proto" +GRPC_PROTO_FILE="grpc.proto" + +# Build the grpc proto file +build_grpc_client(){ + MODULE=$1 + echo "Building gRPC proto: $MODULE" + python3 -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$MODULE/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. +} + +# Build the grpc proto files for every module that has a proto defined +build_grpc_clients(){ + + for dir in "$GRPC_DIR/$GRPC_PROTO_DIR"/*/;do + if [ -f $dir/$GRPC_PROTO_FILE ];then + # Extract the last folder name + last_folder="${dir%%/}" + last_folder="${last_folder##*/}" + build_grpc_client "$last_folder" + fi + done +} + +# Move into the grpc directory. +# This is necessary to build the proto files +# with the correct import paths +pushd $GRPC_DIR >/dev/null 2>&1 + +build_grpc_clients + +popd >/dev/null 2>&1 \ No newline at end of file diff --git a/modules/test/base/bin/setup_python_path b/modules/test/base/bin/setup_python_path new file mode 100644 index 000000000..8201bbb36 --- /dev/null +++ b/modules/test/base/bin/setup_python_path @@ -0,0 +1,25 @@ +#!/bin/bash + +ROOT_DIRECTORY="/testrun/python/src" + +# Function to recursively add subdirectories to PYTHONPATH +add_subdirectories_to_pythonpath() { + local directory=$1 + local subdirectories=( "$directory"/* ) + local subdirectory + + for subdirectory in "${subdirectories[@]}"; do + if [ -d "$subdirectory" ]; then + export PYTHONPATH="$PYTHONPATH:$subdirectory" + add_subdirectories_to_pythonpath "$subdirectory" + fi + done +} + +# Set PYTHONPATH initially to an empty string +export PYTHONPATH="" + +# Add all subdirectories to PYTHONPATH +add_subdirectories_to_pythonpath "$ROOT_DIRECTORY" + +echo "$PYTHONPATH" \ No newline at end of file diff --git a/modules/test/base/bin/start_module b/modules/test/base/bin/start_module index 5f6e1ee35..82c9d26bf 100644 --- a/modules/test/base/bin/start_module +++ b/modules/test/base/bin/start_module @@ -57,10 +57,21 @@ then exit 1 fi -echo "Starting module $MODULE_NAME..." +# Setup the PYTHONPATH so all imports work as expected +echo "Setting up PYTHONPATH..." +export PYTHONPATH=$($BIN_DIR/setup_python_path) +echo "PYTHONPATH: $PYTHONPATH" + +# Build all gRPC files from the proto for use in +# gRPC clients for communications to network modules +echo "Building gRPC files from available proto files..." +$BIN_DIR/setup_grpc_clients +echo "Configuring binary files..." $BIN_DIR/setup_binaries $BIN_DIR +echo "Starting module $MODULE_NAME..." + # Only start network services if the test container needs # a network connection to run its tests if [ $NETWORK_REQUIRED == "true" ];then @@ -78,9 +89,9 @@ then if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]] then echo "gRPC port resolved from config: $GRPC_PORT" - $BIN_DIR/start_grpc "-p $GRPC_PORT" & + $BIN_DIR/start_grpc "-p $GRPC_PORT" else - $BIN_DIR/start_grpc & + $BIN_DIR/start_grpc fi fi diff --git a/modules/test/base/python/src/grpc/proto/dhcp1/client.py b/modules/test/base/python/src/grpc/proto/dhcp1/client.py new file mode 100644 index 000000000..921929edb --- /dev/null +++ b/modules/test/base/python/src/grpc/proto/dhcp1/client.py @@ -0,0 +1,98 @@ +import grpc +import grpc_pb2_grpc as pb2_grpc +import grpc_pb2 as pb2 + +DEFAULT_PORT = '5001' +DEFAULT_HOST = '10.10.10.2' # Default DHCP1 server + + +class Client(): + + def __init__(self, port=DEFAULT_PORT, host=DEFAULT_HOST): + self._port = port + self._host = host + + # Create a gRPC channel to connect to the server + self._channel = grpc.insecure_channel(self._host + ':' + self._port) + + # Create a gRPC stub + self._stub = pb2_grpc.NetworkModuleStub(self._channel) + + def add_reserved_lease(self, hostname, hw_addr, ip_addr): + # Create a request message + request = pb2.AddReservedLeaseRequest() + request.hostname = hostname + request.hw_addr = hw_addr + request.ip_addr = ip_addr + + # Make the RPC call + response = self._stub.AddReservedLease(request) + + return response + + def delete_reserved_lease(self, hw_addr): + # Create a request message + request = pb2.DeleteReservedLeaseRequest() + request.hw_addr = hw_addr + + # Make the RPC call + response = self._stub.DeleteReservedLease(request) + + return response + + def disable_failover(self): + # Create a request message + request = pb2.DisableFailoverRequest() + + # Make the RPC call + response = self._stub.DisableFailover(request) + + return response + + def enable_failover(self): + # Create a request message + request = pb2.EnableFailoverRequest() + + # Make the RPC call + response = self._stub.EnableFailover(request) + + return response + + def get_dhcp_range(self): + # Create a request message + request = pb2.GetDHCPRangeRequest() + + # Make the RPC call + response = self._stub.GetDHCPRange(request) + + return response + + def get_lease(self,hw_addr): + # Create a request message + request = pb2.GetLeaseRequest() + request.hw_addr=hw_addr + + # Make the RPC call + response = self._stub.GetLease(request) + + return response + + def get_status(self): + # Create a request message + request = pb2.GetStatusRequest() + + # Make the RPC call + response = self._stub.GetStatus(request) + + return response + + def set_dhcp_range(self,start,end): + # Create a request message + request = pb2.SetDHCPRangeRequest() + request.start=start + request.end=end + + # Make the RPC call + response = self._stub.SetDHCPRange(request) + + return response diff --git a/modules/test/conn/conn.Dockerfile b/modules/test/conn/conn.Dockerfile index 1714f49f2..5d8148335 100644 --- a/modules/test/conn/conn.Dockerfile +++ b/modules/test/conn/conn.Dockerfile @@ -17,6 +17,8 @@ FROM test-run/base-test:latest ARG MODULE_NAME=conn ARG MODULE_DIR=modules/test/$MODULE_NAME +ARG GRPC_PROTO_DIR=/testrun/python/src/grpc/proto/dhcp +ARG GRPC_PROTO_FILE="grpc.proto" # Install all necessary packages RUN apt-get install -y wget @@ -37,4 +39,4 @@ COPY $MODULE_DIR/conf /testrun/conf COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY $MODULE_DIR/python /testrun/python +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/modules/test/conn/python/src/connection_module.py b/modules/test/conn/python/src/connection_module.py index 196c335d8..a1727df23 100644 --- a/modules/test/conn/python/src/connection_module.py +++ b/modules/test/conn/python/src/connection_module.py @@ -17,6 +17,7 @@ import sys from scapy.all import * from test_module import TestModule +from dhcp1.client import Client as DHCPClient1 LOG_NAME = "test_connection" LOGGER = None @@ -33,6 +34,34 @@ def __init__(self, module): super().__init__(module_name=module, log_name=LOG_NAME) global LOGGER LOGGER = self._get_logger() + self.dhcp1_client = DHCPClient1() + + # ToDo: Move this into some level of testing, leave for + # reference until tests are implemented with these calls + # response = self.dhcp1_client.add_reserved_lease('test','00:11:22:33:44:55','10.10.10.21') + # print("AddLeaseResp: " + str(response)) + + # response = self.dhcp1_client.delete_reserved_lease('00:11:22:33:44:55') + # print("DelLeaseResp: " + str(response)) + + # response = self.dhcp1_client.disable_failover() + # print("FailoverDisabled: " + str(response)) + + # response = self.dhcp1_client.enable_failover() + # print("FailoverEnabled: " + str(response)) + + # response = self.dhcp1_client.get_dhcp_range() + # print("DHCP Range: " + str(response)) + + # response = self.dhcp1_client.get_lease(self._device_mac) + # print("Lease: " + str(response)) + + # response = self.dhcp1_client.get_status() + # print("Status: " + str(response)) + + # response = self.dhcp1_client.set_dhcp_range('10.10.10.20','10.10.10.30') + # print("Set Range: " + str(response)) + def _connection_mac_address(self): LOGGER.info("Running connection.mac_address") diff --git a/testing/test_baseline b/testing/test_baseline index ac47a5cfa..f12d124de 100755 --- a/testing/test_baseline +++ b/testing/test_baseline @@ -82,4 +82,4 @@ more $TESTRUN_OUT pytest testing/ -exit $? +exit $? \ No newline at end of file diff --git a/testing/unit_test/run_tests.sh b/testing/unit_test/run_tests.sh new file mode 100644 index 000000000..5b1ed6257 --- /dev/null +++ b/testing/unit_test/run_tests.sh @@ -0,0 +1,18 @@ +#!/bin/bash -e + +# This script should be run from within the unit_test directory. If +# it is run outside this directory, paths will not be resolved correctly. + +# Move into the root directory of test-run +pushd ../../ >/dev/null 2>&1 + +echo "Root Dir: $PWD" + +# Setup the python path +export PYTHONPATH="$PWD/framework/python/src" + +# Run the DHCP Unit tests +python3 -u $PWD/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py +python3 -u $PWD/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py + +popd >/dev/null 2>&1 \ No newline at end of file From af8367c704ca02acd4e2b1937f667610986618d6 Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 6 Jul 2023 12:31:31 -0700 Subject: [PATCH 45/48] Dhcp (#67) * Add initial work for ip control module * Implement ip control module with additional cleanup methods * Update link check to not use error stream * Add error checking around container network configurations * Add network cleanup for namespaces and links * formatting * initial work on adding grpc functions for dhcp tests * rework code to allow for better usage and unit testing * working poc for test containers and grpc client to dhcp-1 * Move grpc client code into base image * Move grpc proto builds outside of dockerfile into module startup script * Setup pythonpath var in test module base startup process misc cleanup * pylinting and logging updates * Add python path resolving to network modules Update grpc path to prevent conflicts misc pylinting * Change lease resolving method to fix pylint issue * cleanup unit tests * cleanup unit tests * Add grpc updates to dhcp2 module Update dhcp_config to deal with missing optional variables * Add grpc updates to dhcp2 module Update dhcp_config to deal with missing optional variables * fix line endings * misc cleanup * Move isc-dhcp-server and radvd to services Move DHCP server monitoring and booting to python script * Add grpc methods to interact with dhcp_server module Update dhcp_server to control radvd server directly from calls Fix radvd service status method * Add updates to dhcp2 module Update radvd service * Add license headers --- modules/network/dhcp-1/bin/radvd-service | 55 ++++++++ .../network/dhcp-1/bin/start_network_service | 56 ++------ modules/network/dhcp-1/conf/isc-dhcp-server | 4 + modules/network/dhcp-1/dhcp-1.Dockerfile | 2 +- .../python/src/grpc_server/dhcp_server.py | 130 ++++++++++++++++++ .../python/src/grpc_server/network_service.py | 43 +++++- .../python/src/grpc_server/proto/grpc.proto | 16 ++- .../python/src/grpc_server/radvd_server.py | 55 ++++++++ modules/network/dhcp-2/bin/radvd-service | 55 ++++++++ .../network/dhcp-2/bin/start_network_service | 56 ++------ modules/network/dhcp-2/conf/isc-dhcp-server | 4 + modules/network/dhcp-2/dhcp-2.Dockerfile | 11 +- .../python/src/grpc_server/dhcp_config.py | 4 +- .../python/src/grpc_server/dhcp_server.py | 130 ++++++++++++++++++ .../python/src/grpc_server/network_service.py | 43 +++++- .../python/src/grpc_server/proto/grpc.proto | 14 +- .../python/src/grpc_server/radvd_server.py | 55 ++++++++ 17 files changed, 622 insertions(+), 111 deletions(-) create mode 100644 modules/network/dhcp-1/bin/radvd-service create mode 100644 modules/network/dhcp-1/conf/isc-dhcp-server create mode 100644 modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py create mode 100644 modules/network/dhcp-1/python/src/grpc_server/radvd_server.py create mode 100644 modules/network/dhcp-2/bin/radvd-service create mode 100644 modules/network/dhcp-2/conf/isc-dhcp-server create mode 100644 modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py create mode 100644 modules/network/dhcp-2/python/src/grpc_server/radvd_server.py diff --git a/modules/network/dhcp-1/bin/radvd-service b/modules/network/dhcp-1/bin/radvd-service new file mode 100644 index 000000000..1cfe499cb --- /dev/null +++ b/modules/network/dhcp-1/bin/radvd-service @@ -0,0 +1,55 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +RA_PID_FILE=/var/run/radvd/radvd.pid +RA_LOG_FILE=/runtime/network/dhcp1-radvd.log + +stop_radvd(){ + # Directly kill by PID file reference + if [ -f "$RA_PID_FILE" ]; then + kill -9 $(cat $RA_PID_FILE) || true + rm -f $RA_PID_FILE + fi +} + +start_radvd(){ + /usr/sbin/radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE +} + +case "$1" in + start) + start_radvd + ;; + stop) + stop_radvd + ;; + restart) + stop_radvd + sleep 1 + start_radvd + ;; + status) + if [ -f "$RA_PID_FILE" ]; then + echo "radvd service is running." + else + echo "radvd service is not running." + fi + ;; + *) + echo "Usage: $0 {start|stop|status|restart}" + exit 1 + ;; +esac \ No newline at end of file diff --git a/modules/network/dhcp-1/bin/start_network_service b/modules/network/dhcp-1/bin/start_network_service index 9f4a3dc51..82b4c6e33 100644 --- a/modules/network/dhcp-1/bin/start_network_service +++ b/modules/network/dhcp-1/bin/start_network_service @@ -29,63 +29,23 @@ sysctl -p # Create leases file if needed touch /var/lib/dhcp/dhcpd.leases -#Create directory for radvd +# Create directory for radvd mkdir /var/run/radvd -#Create and set permissions on the log files +# Create and set permissions on the log files touch $DHCP_LOG_FILE touch $RA_LOG_FILE chown $HOST_USER $DHCP_LOG_FILE chown $HOST_USER $RA_LOG_FILE -#Move the config files to the correct location +# Move the config files to the correct location +cp /testrun/conf/isc-dhcp-server /etc/default/ cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf cp /testrun/conf/radvd.conf /etc/radvd.conf -# Restart dhcp server when config changes -while true; do +# Move the radvd-sevice file to the correct location +cp /testrun/bin/radvd-service /usr/local/bin/ - new_checksum=$(md5sum $CONFIG_FILE) - - if [ "$checksum" == "$new_checksum" ]; then - sleep 2 - continue - fi - - echo Config changed. Restarting dhcp server at $(date).. - - if [ -f $DHCP_PID_FILE ]; then - kill -9 $(cat $DHCP_PID_FILE) || true - rm -f $DHCP_PID_FILE - fi - - if [ -f $RA_PID_FILE ]; then - kill -9 $(cat $RA_PID_FILE) || true - rm -f $RA_PID_FILE - fi - - checksum=$new_checksum - - echo Starting isc-dhcp-server at $(date) - - radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE - dhcpd -d &> $DHCP_LOG_FILE & - - while [ ! -f $DHCP_PID_FILE ]; do - echo Waiting for $DHCP_PID_FILE... - sleep 2 - done - - echo $DHCP_PID_FILE now available - - while [ ! -f $RA_PID_FILE ]; do - echo Waiting for $RA_PID_FILE... - sleep 2 - done - - echo $RA_PID_FILE now available - - echo Server now stable - -done \ No newline at end of file +# Start the DHCP Server +python3 -u /testrun/python/src/grpc_server/dhcp_server.py \ No newline at end of file diff --git a/modules/network/dhcp-1/conf/isc-dhcp-server b/modules/network/dhcp-1/conf/isc-dhcp-server new file mode 100644 index 000000000..44db95cd9 --- /dev/null +++ b/modules/network/dhcp-1/conf/isc-dhcp-server @@ -0,0 +1,4 @@ +# On what interfaces should the DHCP server (dhcpd) serve DHCP requests? +# Separate multiple interfaces with spaces, e.g. "eth0 eth1". +INTERFACESv4="veth0" +#INTERFACESv6="veth0" diff --git a/modules/network/dhcp-1/dhcp-1.Dockerfile b/modules/network/dhcp-1/dhcp-1.Dockerfile index b47378045..6b941d878 100644 --- a/modules/network/dhcp-1/dhcp-1.Dockerfile +++ b/modules/network/dhcp-1/dhcp-1.Dockerfile @@ -25,7 +25,7 @@ RUN apt-get install -y wget RUN wget http://standards-oui.ieee.org/oui.txt -P /usr/local/etc/ # Install dhcp server -RUN apt-get install -y isc-dhcp-server radvd +RUN apt-get install -y isc-dhcp-server radvd systemd # Copy over all configuration files COPY $MODULE_DIR/conf /testrun/conf diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py new file mode 100644 index 000000000..2f67b0c2d --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py @@ -0,0 +1,130 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary classes to maintain the +DHCP server""" +import time +from common import logger +from common import util +from dhcp_config import DHCPConfig +from radvd_server import RADVDServer + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +LOG_NAME = 'dhcp_server' +LOGGER = None + + +class DHCPServer: + """Represents the DHCP Server""" + + def __init__(self): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + self.dhcp_config = DHCPConfig() + self.radvd = RADVDServer() + self.dhcp_config.resolve_config() + + def restart(self): + LOGGER.info("Restarting DHCP Server") + isc_started = util.run_command("service isc-dhcp-server restart", False) + radvd_started = self.radvd.restart() + started = isc_started and radvd_started + LOGGER.info("DHCP Restarted: " + str(started)) + return started + + def start(self): + LOGGER.info("Starting DHCP Server") + isc_started = util.run_command("service isc-dhcp-server start", False) + radvd_started = self.radvd.start() + started = isc_started and radvd_started + LOGGER.info("DHCP Started: " + str(started)) + return started + + def stop(self): + LOGGER.info("Stopping DHCP Server") + isc_stopped = util.run_command("service isc-dhcp-server stop", False) + radvd_stopped = self.radvd.stop() + stopped = isc_stopped and radvd_stopped + LOGGER.info("DHCP Stopped: " + str(stopped)) + return stopped + + def is_running(self): + LOGGER.info("Checking DHCP Status") + response = util.run_command("service isc-dhcp-server status") + isc_running = response[0] == 'Status of ISC DHCPv4 server: dhcpd is running.' + radvd_running = self.radvd.is_running() + running = isc_running and radvd_running + LOGGER.info("DHCP Status: " + str(running)) + return running + + def boot(self): + LOGGER.info("Booting DHCP Server") + isc_booted = False + radvd_booted = False + if self.is_running(): + LOGGER.info("Stopping isc-dhcp-server") + stopped = self.stop() + LOGGER.info("isc-dhcp-server stopped: " + str(stopped)) + + if self.radvd.is_running(): + LOGGER.info("Stopping RADVD") + stopped = self.radvd.stop() + LOGGER.info("radvd stopped: " + str(stopped)) + + LOGGER.info("Starting isc-dhcp-server") + if self.start(): + isc_booted = False + # Scan for 5 seconds if not yet ready + for i in range(5): + time.sleep(1) + isc_booted = self.is_running() + if isc_booted: + break; + LOGGER.info("isc-dhcp-server started: " + str(isc_booted)) + + LOGGER.info("Starting RADVD") + if self.radvd.start(): + radvd_booted = False + # Scan for 5 seconds if not yet ready + for i in range(5): + time.sleep(1) + radvd_booted = self.radvd.is_running() + if radvd_booted: + break; + LOGGER.info("RADVD started: " + str(radvd_booted)) + + + + return isc_booted and radvd_booted + +def run(): + dhcp_server = DHCPServer() + booted = dhcp_server.boot() + + if not booted: + LOGGER.error('DHCP Server Failed to boot. Exiting') + sys.exit(1) + + config = str(dhcp_server.dhcp_config) + while True: + dhcp_server.dhcp_config.resolve_config() + new_config = str(dhcp_server.dhcp_config) + if config != new_config: + LOGGER.info("DHCP Config Changed") + config = new_config + success = dhcp_server.restart() + success = dhcp_server.radvd.restart() + time.sleep(1) + +if __name__ == '__main__': + run() diff --git a/modules/network/dhcp-1/python/src/grpc_server/network_service.py b/modules/network/dhcp-1/python/src/grpc_server/network_service.py index bf2b98803..a693ac3a1 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/network_service.py +++ b/modules/network/dhcp-1/python/src/grpc_server/network_service.py @@ -15,6 +15,7 @@ import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 +from dhcp_server import DHCPServer from dhcp_config import DHCPConfig from dhcp_leases import DHCPLeases @@ -28,6 +29,7 @@ class NetworkService(pb2_grpc.NetworkModule): """gRPC endpoints for the DHCP Server""" def __init__(self): + self._dhcp_server = DHCPServer() self._dhcp_config = None self.dhcp_leases = DHCPLeases() global LOGGER @@ -39,6 +41,42 @@ def _get_dhcp_config(self): self._dhcp_config.resolve_config() return self._dhcp_config + def RestartDHCPServer(self, request, context): # pylint: disable=W0613 + LOGGER.info('Restarting DHCP server') + try: + started = self._dhcp_server.restart() + LOGGER.info('DHCP server restarted: ' + (str(started))) + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to restart DHCP server: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def StartDHCPServer(self, request, context): # pylint: disable=W0613 + LOGGER.info('Starting DHCP server') + try: + started = self._dhcp_server.start() + LOGGER.info('DHCP server started: ' + (str(started))) + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to start DHCP server: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def StopDHCPServer(self, request, context): # pylint: disable=W0613 + LOGGER.info('Stopping DHCP server') + try: + stopped = self._dhcp_server.stop() + LOGGER.info('DHCP server stopped: ' + (str(stopped))) + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to stop DHCP server: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + def AddReservedLease(self, request, context): # pylint: disable=W0613 LOGGER.info('Add reserved lease called') try: @@ -151,7 +189,6 @@ def GetStatus(self, request, context): # pylint: disable=W0613 """ Return the current status of the network module """ - # ToDo: Figure out how to resolve the current DHCP status - dhcp_status = True + dhcp_status = self._dhcp_server.is_running() message = str({'dhcpStatus': dhcp_status}) - return pb2.Response(code=200, message=message) + return pb2.Response(code=200, message=message) \ No newline at end of file diff --git a/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto b/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto index d9f56213e..e6abda674 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto +++ b/modules/network/dhcp-1/python/src/grpc_server/proto/grpc.proto @@ -2,6 +2,12 @@ syntax = "proto3"; service NetworkModule { + rpc RestartDHCPServer(RestartDHCPServerRequest) returns (Response) {}; + + rpc StartDHCPServer(StartDHCPServerRequest) returns (Response) {}; + + rpc StopDHCPServer(StopDHCPServerRequest) returns (Response) {}; + rpc AddReservedLease(AddReservedLeaseRequest) returns (Response) {}; rpc DeleteReservedLease(DeleteReservedLeaseRequest) returns (Response) {}; @@ -29,6 +35,12 @@ message DeleteReservedLeaseRequest { string hw_addr = 1; } +message RestartDHCPServerRequest {} + +message StartDHCPServerRequest {} + +message StopDHCPServerRequest {} + message DisableFailoverRequest {} message EnableFailoverRequest {} @@ -53,7 +65,7 @@ message Response { } message DHCPRange { - int32 code = 1; + int32 code = 1; string start = 2; string end = 3; -} +} \ No newline at end of file diff --git a/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py b/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py new file mode 100644 index 000000000..48e063e61 --- /dev/null +++ b/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py @@ -0,0 +1,55 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary classes to maintain the +DHCP server""" +import time +from common import logger +from common import util +from dhcp_config import DHCPConfig + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +LOG_NAME = 'radvd' +LOGGER = None + + +class RADVDServer: + """Represents the RADVD Server""" + + def __init__(self): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + + def restart(self): + LOGGER.info("Restarting RADVD Server") + response = util.run_command("radvd-service restart", False) + LOGGER.info("RADVD Restarted: " + str(response)) + return response + + def start(self): + LOGGER.info("Starting RADVD Server") + response = util.run_command("radvd-service start", False) + LOGGER.info("RADVD Started: " + str(response)) + return response + + def stop(self): + LOGGER.info("Stopping RADVD Server") + response = util.run_command("radvd-service stop", False) + LOGGER.info("RADVD Stopped: " + str(response)) + return response + + def is_running(self): + LOGGER.info("Checking RADVD Status") + response = util.run_command("radvd-service status") + LOGGER.info("RADVD Status: " + str(response)) + return response[0] == 'radvd service is running.' diff --git a/modules/network/dhcp-2/bin/radvd-service b/modules/network/dhcp-2/bin/radvd-service new file mode 100644 index 000000000..912c64ee3 --- /dev/null +++ b/modules/network/dhcp-2/bin/radvd-service @@ -0,0 +1,55 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +RA_PID_FILE=/var/run/radvd/radvd.pid +RA_LOG_FILE=/runtime/network/dhcp2-radvd.log + +stop_radvd(){ + # Directly kill by PID file reference + if [ -f "$RA_PID_FILE" ]; then + kill -9 $(cat $RA_PID_FILE) || true + rm -f $RA_PID_FILE + fi +} + +start_radvd(){ + /usr/sbin/radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE +} + +case "$1" in + start) + start_radvd + ;; + stop) + stop_radvd + ;; + restart) + stop_radvd + sleep 1 + start_radvd + ;; + status) + if [ -f "$RA_PID_FILE" ]; then + echo "radvd service is running." + else + echo "radvd service is not running." + fi + ;; + *) + echo "Usage: $0 {start|stop|status|restart}" + exit 1 + ;; +esac \ No newline at end of file diff --git a/modules/network/dhcp-2/bin/start_network_service b/modules/network/dhcp-2/bin/start_network_service index 723689278..ed7d3125e 100644 --- a/modules/network/dhcp-2/bin/start_network_service +++ b/modules/network/dhcp-2/bin/start_network_service @@ -29,63 +29,23 @@ sysctl -p # Create leases file if needed touch /var/lib/dhcp/dhcpd.leases -#Create directory for radvd +# Create directory for radvd mkdir /var/run/radvd -#Create and set permissions on the log files +# Create and set permissions on the log files touch $DHCP_LOG_FILE touch $RA_LOG_FILE chown $HOST_USER $DHCP_LOG_FILE chown $HOST_USER $RA_LOG_FILE -#Move the config files to the correct location +# Move the config files to the correct location +cp /testrun/conf/isc-dhcp-server /etc/default/ cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf cp /testrun/conf/radvd.conf /etc/radvd.conf -# Restart dhcp server when config changes -while true; do +# Move the radvd-sevice file to the correct location +cp /testrun/bin/radvd-service /usr/local/bin/ - new_checksum=$(md5sum $CONFIG_FILE) - - if [ "$checksum" == "$new_checksum" ]; then - sleep 2 - continue - fi - - echo Config changed. Restarting dhcp server at $(date).. - - if [ -f $DHCP_PID_FILE ]; then - kill -9 $(cat $DHCP_PID_FILE) || true - rm -f $DHCP_PID_FILE - fi - - if [ -f $RA_PID_FILE ]; then - kill -9 $(cat $RA_PID_FILE) || true - rm -f $RA_PID_FILE - fi - - checksum=$new_checksum - - echo Starting isc-dhcp-server at $(date) - - radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE - dhcpd -d &> $DHCP_LOG_FILE & - - while [ ! -f $DHCP_PID_FILE ]; do - echo Waiting for $DHCP_PID_FILE... - sleep 2 - done - - echo $DHCP_PID_FILE now available - - while [ ! -f $RA_PID_FILE ]; do - echo Waiting for $RA_PID_FILE... - sleep 2 - done - - echo $RA_PID_FILE now available - - echo Server now stable - -done \ No newline at end of file +# Start the DHCP Server +python3 -u /testrun/python/src/grpc_server/dhcp_server.py \ No newline at end of file diff --git a/modules/network/dhcp-2/conf/isc-dhcp-server b/modules/network/dhcp-2/conf/isc-dhcp-server new file mode 100644 index 000000000..44db95cd9 --- /dev/null +++ b/modules/network/dhcp-2/conf/isc-dhcp-server @@ -0,0 +1,4 @@ +# On what interfaces should the DHCP server (dhcpd) serve DHCP requests? +# Separate multiple interfaces with spaces, e.g. "eth0 eth1". +INTERFACESv4="veth0" +#INTERFACESv6="veth0" diff --git a/modules/network/dhcp-2/dhcp-2.Dockerfile b/modules/network/dhcp-2/dhcp-2.Dockerfile index df77cb811..153aa50e7 100644 --- a/modules/network/dhcp-2/dhcp-2.Dockerfile +++ b/modules/network/dhcp-2/dhcp-2.Dockerfile @@ -18,8 +18,14 @@ FROM test-run/base:latest ARG MODULE_NAME=dhcp-2 ARG MODULE_DIR=modules/network/$MODULE_NAME +# Install all necessary packages +RUN apt-get install -y wget + +#Update the oui.txt file from ieee +RUN wget http://standards-oui.ieee.org/oui.txt -P /usr/local/etc/ + # Install dhcp server -RUN apt-get install -y isc-dhcp-server radvd +RUN apt-get install -y isc-dhcp-server radvd systemd # Copy over all configuration files COPY $MODULE_DIR/conf /testrun/conf @@ -28,5 +34,4 @@ COPY $MODULE_DIR/conf /testrun/conf COPY $MODULE_DIR/bin /testrun/bin # Copy over all python files -COPY $MODULE_DIR/python /testrun/python - +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py index 444faa87c..33cb5938c 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py @@ -33,7 +33,7 @@ def __init__(self): self._peer = None self._reserved_hosts = [] global LOGGER - LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-2') def add_reserved_host(self, hostname, hw_addr, ip_addr): host = DHCPReservedHost(hostname=hostname, @@ -490,4 +490,4 @@ def resolve_host(self, reserved_host): self.hw_addr = part.strip().split(HARDWARE_KEY)[1].strip().split(';')[0] elif FIXED_ADDRESS_KEY in part: self.fixed_addr = part.strip().split( - FIXED_ADDRESS_KEY)[1].strip().split(';')[0] + FIXED_ADDRESS_KEY)[1].strip().split(';')[0] \ No newline at end of file diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py new file mode 100644 index 000000000..1431d6ddd --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py @@ -0,0 +1,130 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary classes to maintain the +DHCP server""" +import time +from common import logger +from common import util +from dhcp_config import DHCPConfig +from radvd_server import RADVDServer + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +LOG_NAME = 'dhcp_server' +LOGGER = None + + +class DHCPServer: + """Represents the DHCP Server""" + + def __init__(self): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-2') + self.dhcp_config = DHCPConfig() + self.radvd = RADVDServer() + self.dhcp_config.resolve_config() + + def restart(self): + LOGGER.info("Restarting DHCP Server") + isc_started = util.run_command("service isc-dhcp-server restart", False) + radvd_started = self.radvd.restart() + started = isc_started and radvd_started + LOGGER.info("DHCP Restarted: " + str(started)) + return started + + def start(self): + LOGGER.info("Starting DHCP Server") + isc_started = util.run_command("service isc-dhcp-server start", False) + radvd_started = self.radvd.start() + started = isc_started and radvd_started + LOGGER.info("DHCP Started: " + str(started)) + return started + + def stop(self): + LOGGER.info("Stopping DHCP Server") + isc_stopped = util.run_command("service isc-dhcp-server stop", False) + radvd_stopped = self.radvd.stop() + stopped = isc_stopped and radvd_stopped + LOGGER.info("DHCP Stopped: " + str(stopped)) + return stopped + + def is_running(self): + LOGGER.info("Checking DHCP Status") + response = util.run_command("service isc-dhcp-server status") + isc_running = response[0] == 'Status of ISC DHCPv4 server: dhcpd is running.' + radvd_running = self.radvd.is_running() + running = isc_running and radvd_running + LOGGER.info("DHCP Status: " + str(running)) + return running + + def boot(self): + LOGGER.info("Booting DHCP Server") + isc_booted = False + radvd_booted = False + if self.is_running(): + LOGGER.info("Stopping isc-dhcp-server") + stopped = self.stop() + LOGGER.info("isc-dhcp-server stopped: " + str(stopped)) + + if self.radvd.is_running(): + LOGGER.info("Stopping RADVD") + stopped = self.radvd.stop() + LOGGER.info("radvd stopped: " + str(stopped)) + + LOGGER.info("Starting isc-dhcp-server") + if self.start(): + isc_booted = False + # Scan for 5 seconds if not yet ready + for i in range(5): + time.sleep(1) + isc_booted = self.is_running() + if isc_booted: + break; + LOGGER.info("isc-dhcp-server started: " + str(isc_booted)) + + LOGGER.info("Starting RADVD") + if self.radvd.start(): + radvd_booted = False + # Scan for 5 seconds if not yet ready + for i in range(5): + time.sleep(1) + radvd_booted = self.radvd.is_running() + if radvd_booted: + break; + LOGGER.info("RADVD started: " + str(radvd_booted)) + + + + return isc_booted and radvd_booted + +def run(): + dhcp_server = DHCPServer() + booted = dhcp_server.boot() + + if not booted: + LOGGER.error('DHCP Server Failed to boot. Exiting') + sys.exit(1) + + config = str(dhcp_server.dhcp_config) + while True: + dhcp_server.dhcp_config.resolve_config() + new_config = str(dhcp_server.dhcp_config) + if config != new_config: + LOGGER.info("DHCP Config Changed") + config = new_config + success = dhcp_server.restart() + success = dhcp_server.radvd.restart() + time.sleep(1) + +if __name__ == '__main__': + run() diff --git a/modules/network/dhcp-2/python/src/grpc_server/network_service.py b/modules/network/dhcp-2/python/src/grpc_server/network_service.py index 053d26d6b..5af9e6c44 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/network_service.py +++ b/modules/network/dhcp-2/python/src/grpc_server/network_service.py @@ -15,6 +15,7 @@ import proto.grpc_pb2_grpc as pb2_grpc import proto.grpc_pb2 as pb2 +from dhcp_server import DHCPServer from dhcp_config import DHCPConfig from dhcp_leases import DHCPLeases @@ -28,6 +29,7 @@ class NetworkService(pb2_grpc.NetworkModule): """gRPC endpoints for the DHCP Server""" def __init__(self): + self._dhcp_server = DHCPServer() self._dhcp_config = None self.dhcp_leases = DHCPLeases() global LOGGER @@ -39,6 +41,42 @@ def _get_dhcp_config(self): self._dhcp_config.resolve_config() return self._dhcp_config + def RestartDHCPServer(self, request, context): # pylint: disable=W0613 + LOGGER.info('Restarting DHCP server') + try: + started = self._dhcp_server.restart() + LOGGER.info('DHCP server restarted: ' + (str(started))) + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to restart DHCP server: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def StartDHCPServer(self, request, context): # pylint: disable=W0613 + LOGGER.info('Starting DHCP server') + try: + started = self._dhcp_server.start() + LOGGER.info('DHCP server started: ' + (str(started))) + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to start DHCP server: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + + def StopDHCPServer(self, request, context): # pylint: disable=W0613 + LOGGER.info('Stopping DHCP server') + try: + stopped = self._dhcp_server.stop() + LOGGER.info('DHCP server stopped: ' + (str(stopped))) + return pb2.Response(code=200, message='{}') + except Exception as e: # pylint: disable=W0718 + fail_message = 'Failed to stop DHCP server: ' + str(e) + LOGGER.error(fail_message) + LOGGER.error(traceback.format_exc()) + return pb2.Response(code=500, message=fail_message) + def AddReservedLease(self, request, context): # pylint: disable=W0613 LOGGER.info('Add reserved lease called') try: @@ -151,7 +189,6 @@ def GetStatus(self, request, context): # pylint: disable=W0613 """ Return the current status of the network module """ - # ToDo: Figure out how to resolve the current DHCP status - dhcp_status = True + dhcp_status = self._dhcp_server.is_running() message = str({'dhcpStatus': dhcp_status}) - return pb2.Response(code=200, message=message) + return pb2.Response(code=200, message=message) \ No newline at end of file diff --git a/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto b/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto index b6a11a75b..e6abda674 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto +++ b/modules/network/dhcp-2/python/src/grpc_server/proto/grpc.proto @@ -2,6 +2,12 @@ syntax = "proto3"; service NetworkModule { + rpc RestartDHCPServer(RestartDHCPServerRequest) returns (Response) {}; + + rpc StartDHCPServer(StartDHCPServerRequest) returns (Response) {}; + + rpc StopDHCPServer(StopDHCPServerRequest) returns (Response) {}; + rpc AddReservedLease(AddReservedLeaseRequest) returns (Response) {}; rpc DeleteReservedLease(DeleteReservedLeaseRequest) returns (Response) {}; @@ -29,6 +35,12 @@ message DeleteReservedLeaseRequest { string hw_addr = 1; } +message RestartDHCPServerRequest {} + +message StartDHCPServerRequest {} + +message StopDHCPServerRequest {} + message DisableFailoverRequest {} message EnableFailoverRequest {} @@ -56,4 +68,4 @@ message DHCPRange { int32 code = 1; string start = 2; string end = 3; -} +} \ No newline at end of file diff --git a/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py b/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py new file mode 100644 index 000000000..0c6ef90d6 --- /dev/null +++ b/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py @@ -0,0 +1,55 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains all the necessary classes to maintain the +DHCP server""" +import time +from common import logger +from common import util +from dhcp_config import DHCPConfig + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +LOG_NAME = 'radvd' +LOGGER = None + + +class RADVDServer: + """Represents the RADVD Server""" + + def __init__(self): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, 'dhcp-2') + + def restart(self): + LOGGER.info("Restarting RADVD Server") + response = util.run_command("radvd-service restart", False) + LOGGER.info("RADVD Restarted: " + str(response)) + return response + + def start(self): + LOGGER.info("Starting RADVD Server") + response = util.run_command("radvd-service start", False) + LOGGER.info("RADVD Started: " + str(response)) + return response + + def stop(self): + LOGGER.info("Stopping RADVD Server") + response = util.run_command("radvd-service stop", False) + LOGGER.info("RADVD Stopped: " + str(response)) + return response + + def is_running(self): + LOGGER.info("Checking RADVD Status") + response = util.run_command("radvd-service status") + LOGGER.info("RADVD Status: " + str(response)) + return response[0] == 'radvd service is running.' From 7dd5772a275ac6530b594c7b149da1677e026a6c Mon Sep 17 00:00:00 2001 From: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> Date: Thu, 6 Jul 2023 15:22:52 -0700 Subject: [PATCH 46/48] Add connection.dhcp_address test (#68) --- modules/test/conn/conf/module_config.json | 5 +++++ .../test/conn/python/src/connection_module.py | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/modules/test/conn/conf/module_config.json b/modules/test/conn/conf/module_config.json index 0f599c5d3..4053b4e26 100644 --- a/modules/test/conn/conf/module_config.json +++ b/modules/test/conn/conf/module_config.json @@ -12,6 +12,11 @@ "timeout": 30 }, "tests": [ + { + "name": "connection.dhcp_address", + "description": "The device under test has received an IP address from the DHCP server and responds to an ICMP echo (ping) request", + "expected_behavior": "The device is not setup with a static IP address. The device accepts an IP address from a DHCP server (RFC 2131) and responds succesfully to an ICMP echo (ping) request." + }, { "name": "connection.mac_address", "description": "Check and note device physical address.", diff --git a/modules/test/conn/python/src/connection_module.py b/modules/test/conn/python/src/connection_module.py index a1727df23..5b3bf7038 100644 --- a/modules/test/conn/python/src/connection_module.py +++ b/modules/test/conn/python/src/connection_module.py @@ -62,6 +62,25 @@ def __init__(self, module): # response = self.dhcp1_client.set_dhcp_range('10.10.10.20','10.10.10.30') # print("Set Range: " + str(response)) + def _connection_dhcp_address(self): + LOGGER.info("Running connection.dhcp_address") + response = self.dhcp1_client.get_lease(self._device_mac) + LOGGER.info("DHCP Lease resolved:\n" + str(response)) + if response.code == 200: + lease = eval(response.message) + if 'ip' in lease: + ip_addr = lease['ip'] + LOGGER.info("IP Resolved: " + ip_addr) + LOGGER.info("Attempting to ping device..."); + ping_success = self._ping(self._device_ipv4_addr) + LOGGER.info("Ping Success: " + str(ping_success)) + if ping_success: + return True, "Device responded to leased ip address" + else: + return False, "Device did not respond to leased ip address" + else: + LOGGER.info("No DHCP lease found for: " + self._device_mac) + return False, "No DHCP lease found for: " + self._device_mac def _connection_mac_address(self): LOGGER.info("Running connection.mac_address") From 9ef0d4fad7d4d0fadd0fcb56b4a850757f04cd6b Mon Sep 17 00:00:00 2001 From: J Boddey Date: Wed, 12 Jul 2023 09:37:20 +0100 Subject: [PATCH 47/48] Add NTP tests (#60) * Add ntp support test * Add extra log message * Modify descriptions * Pylint * Pylint (#69) --------- Co-authored-by: jhughesbiot <50999916+jhughesbiot@users.noreply.github.com> --- .../python/src/net_orc/network_validator.py | 12 +- .../python/src/test_orc/test_orchestrator.py | 17 +- .../python/src/grpc_server/dhcp_config.py | 47 +++--- .../src/grpc_server/dhcp_config_test.py | 150 +++++++++--------- .../python/src/grpc_server/dhcp_server.py | 72 ++++----- .../python/src/grpc_server/network_service.py | 24 +-- .../python/src/grpc_server/radvd_server.py | 26 ++- .../python/src/grpc_server/dhcp_config.py | 47 ++++-- .../src/grpc_server/dhcp_config_test.py | 147 +++++++++-------- .../python/src/grpc_server/dhcp_server.py | 72 ++++----- .../python/src/grpc_server/network_service.py | 25 +-- .../python/src/grpc_server/radvd_server.py | 26 ++- modules/test/base/python/src/test_module.py | 9 +- .../test/conn/python/src/connection_module.py | 99 ++++++------ modules/test/nmap/nmap.Dockerfile | 4 +- modules/test/ntp/bin/start_test_module | 42 +++++ modules/test/ntp/conf/module_config.json | 27 ++++ modules/test/ntp/ntp.Dockerfile | 20 +++ modules/test/ntp/python/requirements.txt | 1 + modules/test/ntp/python/src/ntp_module.py | 79 +++++++++ modules/test/ntp/python/src/run.py | 75 +++++++++ resources/devices/template/device_config.json | 29 ++++ testing/test_pylint | 2 +- 23 files changed, 672 insertions(+), 380 deletions(-) create mode 100644 modules/test/ntp/bin/start_test_module create mode 100644 modules/test/ntp/conf/module_config.json create mode 100644 modules/test/ntp/ntp.Dockerfile create mode 100644 modules/test/ntp/python/requirements.txt create mode 100644 modules/test/ntp/python/src/ntp_module.py create mode 100644 modules/test/ntp/python/src/run.py diff --git a/framework/python/src/net_orc/network_validator.py b/framework/python/src/net_orc/network_validator.py index a4c51eb2d..f82787af5 100644 --- a/framework/python/src/net_orc/network_validator.py +++ b/framework/python/src/net_orc/network_validator.py @@ -193,7 +193,7 @@ def _get_os_user(self): LOGGER.error('An OS error occurred while retrieving the login name.') except Exception as error: # Catch any other unexpected exceptions - LOGGER.error('An exception occurred:', error) + LOGGER.error('An exception occurred:', error) return user def _get_user(self): @@ -203,15 +203,15 @@ def _get_user(self): except (KeyError, ImportError, ModuleNotFoundError, OSError) as e: # Handle specific exceptions individually if isinstance(e, KeyError): - LOGGER.error("USER environment variable not set or unavailable.") + LOGGER.error('USER environment variable not set or unavailable.') elif isinstance(e, ImportError): - LOGGER.error("Unable to import the getpass module.") + LOGGER.error('Unable to import the getpass module.') elif isinstance(e, ModuleNotFoundError): - LOGGER.error("The getpass module was not found.") + LOGGER.error('The getpass module was not found.') elif isinstance(e, OSError): - LOGGER.error("An OS error occurred while retrieving the username.") + LOGGER.error('An OS error occurred while retrieving the username.') else: - LOGGER.error("An exception occurred:", e) + LOGGER.error('An exception occurred:', e) return user def _get_device_status(self, module): diff --git a/framework/python/src/test_orc/test_orchestrator.py b/framework/python/src/test_orc/test_orchestrator.py index 4bc9fc003..fef4e5bb5 100644 --- a/framework/python/src/test_orc/test_orchestrator.py +++ b/framework/python/src/test_orc/test_orchestrator.py @@ -13,16 +13,14 @@ # limitations under the License. """Provides high level management of the test orchestrator.""" -import getpass import os import json import time import shutil import docker from docker.types import Mount -from common import logger +from common import logger, util from test_orc.module import TestModule -from common import util LOG_NAME = "test_orc" LOGGER = logger.get_logger("test_orc") @@ -61,7 +59,7 @@ def start(self): # Setup the output directory self._host_user = util.get_host_user() os.makedirs(RUNTIME_DIR, exist_ok=True) - util.run_command(f'chown -R {self._host_user} {RUNTIME_DIR}') + util.run_command(f"chown -R {self._host_user} {RUNTIME_DIR}") self._load_test_modules() self.build_test_modules() @@ -102,7 +100,7 @@ def _generate_results(self, device): results[module.name] = module_results except (FileNotFoundError, PermissionError, json.JSONDecodeError) as results_error: - LOGGER.error("Error occured whilst obbtaining results for module " + module.name) + LOGGER.error(f"Error occured whilst obbtaining results for module {module.name}") LOGGER.debug(results_error) out_file = os.path.join( @@ -110,7 +108,7 @@ def _generate_results(self, device): "runtime/test/" + device.mac_addr.replace(":", "") + "/results.json") with open(out_file, "w", encoding="utf-8") as f: json.dump(results, f, indent=2) - util.run_command(f'chown -R {self._host_user} {out_file}') + util.run_command(f"chown -R {self._host_user} {out_file}") return results def test_in_progress(self): @@ -140,18 +138,19 @@ def _run_test_module(self, module, device): container_runtime_dir = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/" + module.name) - network_runtime_dir = os.path.join(self._root_path, "runtime/network") os.makedirs(container_runtime_dir) + network_runtime_dir = os.path.join(self._root_path, "runtime/network") + device_startup_capture = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/startup.pcap") - util.run_command(f'chown -R {self._host_user} {device_startup_capture}') + util.run_command(f"chown -R {self._host_user} {device_startup_capture}") device_monitor_capture = os.path.join( self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + "/monitor.pcap") - util.run_command(f'chown -R {self._host_user} {device_monitor_capture}') + util.run_command(f"chown -R {self._host_user} {device_monitor_capture}") client = docker.from_env() diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py index 444faa87c..6f003014c 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config.py @@ -18,9 +18,7 @@ LOG_NAME = 'dhcp_config' LOGGER = None - CONFIG_FILE = '/etc/dhcp/dhcpd.conf' - DEFAULT_LEASE_TIME_KEY = 'default-lease-time' @@ -186,13 +184,18 @@ def __str__(self): config += '\tprimary;' if self.primary else 'secondary;' config += '\n\t{ADDRESS_KEY} {ADDRESS};' if self.address is not None else '' config += '\n\t{PORT_KEY} {PORT};' if self.port is not None else '' - config += '\n\t{PEER_ADDRESS_KEY} {PEER_ADDRESS};' if self.peer_address is not None else '' - config += '\n\t{PEER_PORT_KEY} {PEER_PORT};' if self.peer_port is not None else '' - config += '\n\t{MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY};' if self.max_response_delay is not None else '' - config += '\n\t{MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES};' if self.max_unacked_updates is not None else '' + config += ('\n\t{PEER_ADDRESS_KEY} {PEER_ADDRESS};' + if self.peer_address is not None else '') + config += ('\n\t{PEER_PORT_KEY} {PEER_PORT};' + if self.peer_port is not None else '') + config += ('\n\t{MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY};' + if self.max_response_delay is not None else '') + config += ('\n\t{MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES};' + if self.max_unacked_updates is not None else '') config += '\n\t{MCLT_KEY} {MCLT};' if self.mclt is not None else '' config += '\n\t{SPLIT_KEY} {SPLIT};' if self.split is not None else '' - config += '\n\t{LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS};' if self.load_balance_max_seconds is not None else '' + config += ('\n\t{LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS};' + if self.load_balance_max_seconds is not None else '') config += '\n\r}}' config = config.format( @@ -220,9 +223,9 @@ def __str__(self): if not self.enabled: lines = config.strip().split('\n') - for i in range(len(lines)-1): + for i in range(len(lines) - 1): lines[i] = '#' + lines[i] - lines[-1] = '#' + lines[-1].strip() # Handle the last line separately + lines[-1] = '#' + lines[-1].strip() # Handle the last line separately config = '\n'.join(lines) return config @@ -302,15 +305,20 @@ def __init__(self, subnet): def __str__(self): config = 'subnet {SUBNET_OPTION} netmask {SUBNET_MASK_OPTION} {{' - config += '\n\t{NTP_OPTION_KEY} {NTP_OPTION};' if self._ntp_servers is not None else '' - config += '\n\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION};' if self._subnet_mask is not None else '' - config += '\n\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION};' if self._broadcast is not None else '' - config += '\n\t{ROUTER_OPTION_KEY} {ROUTER_OPTION};' if self._routers is not None else '' - config += '\n\t{DNS_OPTION_KEY} {DNS_OPTION};' if self._dns_servers is not None else '' - config += '\n\t{INTERFACE_KEY} {INTERFACE_OPTION};' if self._interface is not None else '' + config += ('\n\t{NTP_OPTION_KEY} {NTP_OPTION};' + if self._ntp_servers is not None else '') + config += ('\n\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION};' + if self._subnet_mask is not None else '') + config += ('\n\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION};' + if self._broadcast is not None else '') + config += ('\n\t{ROUTER_OPTION_KEY} {ROUTER_OPTION};' + if self._routers is not None else '') + config += ('\n\t{DNS_OPTION_KEY} {DNS_OPTION};' + if self._dns_servers is not None else '') + config += ('\n\t{INTERFACE_KEY} {INTERFACE_OPTION};' + if self._interface is not None else '') config += '\n\t{AUTHORITATIVE_KEY};' if self._authoritative else '' - config = config.format(length='multi-line', SUBNET_OPTION=self._subnet, NTP_OPTION_KEY=NTP_OPTION_KEY, @@ -407,8 +415,11 @@ def __init__(self, pool): def __str__(self): config = 'pool {{' - config += '\n\t\t{FAILOVER_KEY} "{FAILOVER}";' if self.failover_peer is not None else '' - config += '\n\t\t{RANGE_KEY} {RANGE_START} {RANGE_END};' if self.range_start is not None and self.range_end is not None else '' + config += ('\n\t\t{FAILOVER_KEY} "{FAILOVER}";' + if self.failover_peer is not None else '') + config += ('\n\t\t{RANGE_KEY} {RANGE_START} {RANGE_END};' + if self.range_start is not None and self.range_end is not None + else '') config += '\n\t}}' config = config.format( diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py index 2cc78403a..a34ff4e31 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py @@ -17,87 +17,89 @@ import os CONFIG_FILE = 'conf/dhcpd.conf' - DHCP_CONFIG = None def get_config_file_path(): - dhcp_config = DHCPConfig() - current_dir = os.path.dirname(os.path.abspath(__file__)) - module_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(current_dir)))) - conf_file = os.path.join(module_dir,CONFIG_FILE) - return conf_file + current_dir = os.path.dirname(os.path.abspath(__file__)) + module_dir = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(current_dir)))) + conf_file = os.path.join(module_dir, CONFIG_FILE) + return conf_file + def get_config(): - dhcp_config = DHCPConfig() - dhcp_config.resolve_config(get_config_file_path()) - return dhcp_config + dhcp_config = DHCPConfig() + dhcp_config.resolve_config(get_config_file_path()) + return dhcp_config + class DHCPConfigTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - # Resolve the config - global DHCP_CONFIG - DHCP_CONFIG = get_config() - - def test_resolve_config(self): - print('Test Resolve Config:\n' + str(DHCP_CONFIG)) - - # Resolve the raw config file - with open(get_config_file_path(),'r') as f: - lines = f.readlines() - - # Get the resolved config as a - conf_parts = str(DHCP_CONFIG).split('\n') - - # dhcpd conf is not picky about spacing so we just - # need to check contents of each line for matching - # to make sure evertying matches - for i in range(len(lines)): - self.assertEqual(lines[i].strip(),conf_parts[i].strip()) - - def test_disable_failover(self): - DHCP_CONFIG.disable_failover() - print('Test Disable Config:\n' + str(DHCP_CONFIG)) - config_lines = str(DHCP_CONFIG._peer).split('\n') - for line in config_lines: - self.assertTrue(line.startswith('#')) - - def test_enable_failover(self): - DHCP_CONFIG.enable_failover() - print('Test Enable Config:\n' + str(DHCP_CONFIG)) - config_lines = str(DHCP_CONFIG._peer).split('\n') - for line in config_lines: - self.assertFalse(line.startswith('#')) - - def test_add_reserved_host(self): - DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') - host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') - self.assertIsNotNone(host) - print('AddHostConfig:\n' + str(DHCP_CONFIG)) - - def test_delete_reserved_host(self): - DHCP_CONFIG.delete_reserved_host('00:11:22:33:44:55') - host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') - self.assertIsNone(host) - print('DeleteHostConfig:\n' + str(DHCP_CONFIG)) - - def test_resolve_config_with_hosts(self): - DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') - config_with_hosts = DHCPConfig() - config_with_hosts.make(str(DHCP_CONFIG)) - host = config_with_hosts.get_reserved_host('00:11:22:33:44:55') - self.assertIsNotNone(host) - print("ResolveConfigWithHosts:\n" + str(config_with_hosts)) + @classmethod + def setUpClass(cls): + # Resolve the config + global DHCP_CONFIG + DHCP_CONFIG = get_config() + + def test_resolve_config(self): + print('Test Resolve Config:\n' + str(DHCP_CONFIG)) + + # Resolve the raw config file + with open(get_config_file_path(), 'r', encoding='UTF-8') as f: + lines = f.readlines() + + # Get the resolved config as a + conf_parts = str(DHCP_CONFIG).split('\n') + + # dhcpd conf is not picky about spacing so we just + # need to check contents of each line for matching + # to make sure evertying matches + for i in range(len(lines)): + self.assertEqual(lines[i].strip(), conf_parts[i].strip()) + + def test_disable_failover(self): + DHCP_CONFIG.disable_failover() + print('Test Disable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertTrue(line.startswith('#')) + + def test_enable_failover(self): + DHCP_CONFIG.enable_failover() + print('Test Enable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertFalse(line.startswith('#')) + + def test_add_reserved_host(self): + DHCP_CONFIG.add_reserved_host('test', '00:11:22:33:44:55', '192.168.10.5') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print('AddHostConfig:\n' + str(DHCP_CONFIG)) + + def test_delete_reserved_host(self): + DHCP_CONFIG.delete_reserved_host('00:11:22:33:44:55') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNone(host) + print('DeleteHostConfig:\n' + str(DHCP_CONFIG)) + + def test_resolve_config_with_hosts(self): + DHCP_CONFIG.add_reserved_host('test', '00:11:22:33:44:55', '192.168.10.5') + config_with_hosts = DHCPConfig() + config_with_hosts.make(str(DHCP_CONFIG)) + host = config_with_hosts.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print('ResolveConfigWithHosts:\n' + str(config_with_hosts)) + if __name__ == '__main__': - suite = unittest.TestSuite() - suite.addTest(DHCPConfigTest('test_resolve_config')) - suite.addTest(DHCPConfigTest('test_disable_failover')) - suite.addTest(DHCPConfigTest('test_enable_failover')) - suite.addTest(DHCPConfigTest('test_add_reserved_host')) - suite.addTest(DHCPConfigTest('test_delete_reserved_host')) - suite.addTest(DHCPConfigTest('test_resolve_config_with_hosts')) - - runner = unittest.TextTestRunner() - runner.run(suite) \ No newline at end of file + suite = unittest.TestSuite() + suite.addTest(DHCPConfigTest('test_resolve_config')) + suite.addTest(DHCPConfigTest('test_disable_failover')) + suite.addTest(DHCPConfigTest('test_enable_failover')) + suite.addTest(DHCPConfigTest('test_add_reserved_host')) + suite.addTest(DHCPConfigTest('test_delete_reserved_host')) + suite.addTest(DHCPConfigTest('test_resolve_config_with_hosts')) + + runner = unittest.TextTestRunner() + runner.run(suite) diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py index 2f67b0c2d..5e88d59fe 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py +++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_server.py @@ -13,6 +13,7 @@ # limitations under the License. """Contains all the necessary classes to maintain the DHCP server""" +import sys import time from common import logger from common import util @@ -35,75 +36,74 @@ def __init__(self): self.dhcp_config.resolve_config() def restart(self): - LOGGER.info("Restarting DHCP Server") - isc_started = util.run_command("service isc-dhcp-server restart", False) + LOGGER.info('Restarting DHCP Server') + isc_started = util.run_command('service isc-dhcp-server restart', False) radvd_started = self.radvd.restart() started = isc_started and radvd_started - LOGGER.info("DHCP Restarted: " + str(started)) + LOGGER.info('DHCP Restarted: ' + str(started)) return started def start(self): - LOGGER.info("Starting DHCP Server") - isc_started = util.run_command("service isc-dhcp-server start", False) + LOGGER.info('Starting DHCP Server') + isc_started = util.run_command('service isc-dhcp-server start', False) radvd_started = self.radvd.start() started = isc_started and radvd_started - LOGGER.info("DHCP Started: " + str(started)) + LOGGER.info('DHCP Started: ' + str(started)) return started def stop(self): - LOGGER.info("Stopping DHCP Server") - isc_stopped = util.run_command("service isc-dhcp-server stop", False) + LOGGER.info('Stopping DHCP Server') + isc_stopped = util.run_command('service isc-dhcp-server stop', False) radvd_stopped = self.radvd.stop() stopped = isc_stopped and radvd_stopped - LOGGER.info("DHCP Stopped: " + str(stopped)) + LOGGER.info('DHCP Stopped: ' + str(stopped)) return stopped def is_running(self): - LOGGER.info("Checking DHCP Status") - response = util.run_command("service isc-dhcp-server status") - isc_running = response[0] == 'Status of ISC DHCPv4 server: dhcpd is running.' + LOGGER.info('Checking DHCP Status') + response = util.run_command('service isc-dhcp-server status') + isc_running = response[ + 0] == 'Status of ISC DHCPv4 server: dhcpd is running.' radvd_running = self.radvd.is_running() running = isc_running and radvd_running - LOGGER.info("DHCP Status: " + str(running)) + LOGGER.info('DHCP Status: ' + str(running)) return running def boot(self): - LOGGER.info("Booting DHCP Server") + LOGGER.info('Booting DHCP Server') isc_booted = False radvd_booted = False if self.is_running(): - LOGGER.info("Stopping isc-dhcp-server") + LOGGER.info('Stopping isc-dhcp-server') stopped = self.stop() - LOGGER.info("isc-dhcp-server stopped: " + str(stopped)) + LOGGER.info('isc-dhcp-server stopped: ' + str(stopped)) if self.radvd.is_running(): - LOGGER.info("Stopping RADVD") + LOGGER.info('Stopping RADVD') stopped = self.radvd.stop() - LOGGER.info("radvd stopped: " + str(stopped)) + LOGGER.info('radvd stopped: ' + str(stopped)) - LOGGER.info("Starting isc-dhcp-server") + LOGGER.info('Starting isc-dhcp-server') if self.start(): isc_booted = False # Scan for 5 seconds if not yet ready - for i in range(5): + for _ in range(5): time.sleep(1) isc_booted = self.is_running() if isc_booted: - break; - LOGGER.info("isc-dhcp-server started: " + str(isc_booted)) + break + LOGGER.info('isc-dhcp-server started: ' + str(isc_booted)) - LOGGER.info("Starting RADVD") + LOGGER.info('Starting RADVD') if self.radvd.start(): radvd_booted = False # Scan for 5 seconds if not yet ready - for i in range(5): + for _ in range(5): time.sleep(1) radvd_booted = self.radvd.is_running() if radvd_booted: - break; - LOGGER.info("RADVD started: " + str(radvd_booted)) - - + break + LOGGER.info('RADVD started: ' + str(radvd_booted)) return isc_booted and radvd_booted @@ -117,14 +117,14 @@ def run(): config = str(dhcp_server.dhcp_config) while True: - dhcp_server.dhcp_config.resolve_config() - new_config = str(dhcp_server.dhcp_config) - if config != new_config: - LOGGER.info("DHCP Config Changed") - config = new_config - success = dhcp_server.restart() - success = dhcp_server.radvd.restart() - time.sleep(1) + dhcp_server.dhcp_config.resolve_config() + new_config = str(dhcp_server.dhcp_config) + if config != new_config: + LOGGER.info('DHCP Config Changed') + config = new_config + dhcp_server.restart() + dhcp_server.radvd.restart() + time.sleep(1) if __name__ == '__main__': run() diff --git a/modules/network/dhcp-1/python/src/grpc_server/network_service.py b/modules/network/dhcp-1/python/src/grpc_server/network_service.py index a693ac3a1..043ca49b3 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/network_service.py +++ b/modules/network/dhcp-1/python/src/grpc_server/network_service.py @@ -47,7 +47,7 @@ def RestartDHCPServer(self, request, context): # pylint: disable=W0613 started = self._dhcp_server.restart() LOGGER.info('DHCP server restarted: ' + (str(started))) return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to restart DHCP server: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -59,7 +59,7 @@ def StartDHCPServer(self, request, context): # pylint: disable=W0613 started = self._dhcp_server.start() LOGGER.info('DHCP server started: ' + (str(started))) return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to start DHCP server: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -71,12 +71,12 @@ def StopDHCPServer(self, request, context): # pylint: disable=W0613 stopped = self._dhcp_server.stop() LOGGER.info('DHCP server stopped: ' + (str(stopped))) return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to stop DHCP server: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) return pb2.Response(code=500, message=fail_message) - + def AddReservedLease(self, request, context): # pylint: disable=W0613 LOGGER.info('Add reserved lease called') try: @@ -86,7 +86,7 @@ def AddReservedLease(self, request, context): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Reserved lease added') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to add reserved lease: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -100,7 +100,7 @@ def DeleteReservedLease(self, request, context): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Reserved lease deleted') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to delete reserved lease: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -114,7 +114,7 @@ def DisableFailover(self, request, contest): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Failover disabled') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to disable failover: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -128,7 +128,7 @@ def EnableFailover(self, request, contest): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Failover enabled') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to enable failover: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -143,7 +143,7 @@ def GetDHCPRange(self, request, context): # pylint: disable=W0613 try: pool = self._get_dhcp_config()._subnets[0].pools[0] return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to get DHCP range: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -161,7 +161,7 @@ def GetLease(self, request, context): # pylint: disable=W0613 return pb2.Response(code=200, message=str(lease)) else: return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to get lease: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -179,7 +179,7 @@ def SetDHCPRange(self, request, context): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('DHCP range set') return pb2.Response(code=200, message='DHCP Range Set') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to set DHCP range: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -191,4 +191,4 @@ def GetStatus(self, request, context): # pylint: disable=W0613 """ dhcp_status = self._dhcp_server.is_running() message = str({'dhcpStatus': dhcp_status}) - return pb2.Response(code=200, message=message) \ No newline at end of file + return pb2.Response(code=200, message=message) diff --git a/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py b/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py index 48e063e61..8bb1d0539 100644 --- a/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py +++ b/modules/network/dhcp-1/python/src/grpc_server/radvd_server.py @@ -13,10 +13,8 @@ # limitations under the License. """Contains all the necessary classes to maintain the DHCP server""" -import time from common import logger from common import util -from dhcp_config import DHCPConfig CONFIG_FILE = '/etc/dhcp/dhcpd.conf' LOG_NAME = 'radvd' @@ -31,25 +29,25 @@ def __init__(self): LOGGER = logger.get_logger(LOG_NAME, 'dhcp-1') def restart(self): - LOGGER.info("Restarting RADVD Server") - response = util.run_command("radvd-service restart", False) - LOGGER.info("RADVD Restarted: " + str(response)) + LOGGER.info('Restarting RADVD Server') + response = util.run_command('radvd-service restart', False) + LOGGER.info('RADVD Restarted: ' + str(response)) return response def start(self): - LOGGER.info("Starting RADVD Server") - response = util.run_command("radvd-service start", False) - LOGGER.info("RADVD Started: " + str(response)) + LOGGER.info('Starting RADVD Server') + response = util.run_command('radvd-service start', False) + LOGGER.info('RADVD Started: ' + str(response)) return response def stop(self): - LOGGER.info("Stopping RADVD Server") - response = util.run_command("radvd-service stop", False) - LOGGER.info("RADVD Stopped: " + str(response)) + LOGGER.info('Stopping RADVD Server') + response = util.run_command('radvd-service stop', False) + LOGGER.info('RADVD Stopped: ' + str(response)) return response def is_running(self): - LOGGER.info("Checking RADVD Status") - response = util.run_command("radvd-service status") - LOGGER.info("RADVD Status: " + str(response)) + LOGGER.info('Checking RADVD Status') + response = util.run_command('radvd-service status') + LOGGER.info('RADVD Status: ' + str(response)) return response[0] == 'radvd service is running.' diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py index 33cb5938c..5da5e4cf2 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config.py @@ -186,13 +186,18 @@ def __str__(self): config += '\tprimary;' if self.primary else 'secondary;' config += '\n\t{ADDRESS_KEY} {ADDRESS};' if self.address is not None else '' config += '\n\t{PORT_KEY} {PORT};' if self.port is not None else '' - config += '\n\t{PEER_ADDRESS_KEY} {PEER_ADDRESS};' if self.peer_address is not None else '' - config += '\n\t{PEER_PORT_KEY} {PEER_PORT};' if self.peer_port is not None else '' - config += '\n\t{MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY};' if self.max_response_delay is not None else '' - config += '\n\t{MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES};' if self.max_unacked_updates is not None else '' + config += ('\n\t{PEER_ADDRESS_KEY} {PEER_ADDRESS};' + if self.peer_address is not None else '') + config += ('\n\t{PEER_PORT_KEY} {PEER_PORT};' + if self.peer_port is not None else '') + config += ('\n\t{MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY};' + if self.max_response_delay is not None else '') + config += ('\n\t{MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES};' + if self.max_unacked_updates is not None else '') config += '\n\t{MCLT_KEY} {MCLT};' if self.mclt is not None else '' config += '\n\t{SPLIT_KEY} {SPLIT};' if self.split is not None else '' - config += '\n\t{LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS};' if self.load_balance_max_seconds is not None else '' + config += ('\n\t{LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS};' + if self.load_balance_max_seconds is not None else '') config += '\n\r}}' config = config.format( @@ -220,9 +225,9 @@ def __str__(self): if not self.enabled: lines = config.strip().split('\n') - for i in range(len(lines)-1): + for i in range(len(lines) - 1): lines[i] = '#' + lines[i] - lines[-1] = '#' + lines[-1].strip() # Handle the last line separately + lines[-1] = '#' + lines[-1].strip() # Handle the last line separately config = '\n'.join(lines) return config @@ -302,15 +307,20 @@ def __init__(self, subnet): def __str__(self): config = 'subnet {SUBNET_OPTION} netmask {SUBNET_MASK_OPTION} {{' - config += '\n\t{NTP_OPTION_KEY} {NTP_OPTION};' if self._ntp_servers is not None else '' - config += '\n\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION};' if self._subnet_mask is not None else '' - config += '\n\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION};' if self._broadcast is not None else '' - config += '\n\t{ROUTER_OPTION_KEY} {ROUTER_OPTION};' if self._routers is not None else '' - config += '\n\t{DNS_OPTION_KEY} {DNS_OPTION};' if self._dns_servers is not None else '' - config += '\n\t{INTERFACE_KEY} {INTERFACE_OPTION};' if self._interface is not None else '' + config += ('\n\t{NTP_OPTION_KEY} {NTP_OPTION};' + if self._ntp_servers is not None else '') + config += ('\n\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION};' + if self._subnet_mask is not None else '') + config += ('\n\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION};' + if self._broadcast is not None else '') + config += ('\n\t{ROUTER_OPTION_KEY} {ROUTER_OPTION};' + if self._routers is not None else '') + config += ('\n\t{DNS_OPTION_KEY} {DNS_OPTION};' + if self._dns_servers is not None else '') + config += ('\n\t{INTERFACE_KEY} {INTERFACE_OPTION};' + if self._interface is not None else '') config += '\n\t{AUTHORITATIVE_KEY};' if self._authoritative else '' - config = config.format(length='multi-line', SUBNET_OPTION=self._subnet, NTP_OPTION_KEY=NTP_OPTION_KEY, @@ -407,8 +417,11 @@ def __init__(self, pool): def __str__(self): config = 'pool {{' - config += '\n\t\t{FAILOVER_KEY} "{FAILOVER}";' if self.failover_peer is not None else '' - config += '\n\t\t{RANGE_KEY} {RANGE_START} {RANGE_END};' if self.range_start is not None and self.range_end is not None else '' + config += ('\n\t\t{FAILOVER_KEY} "{FAILOVER}";' + if self.failover_peer is not None else '') + config += ('\n\t\t{RANGE_KEY} {RANGE_START} {RANGE_END};' + if self.range_start is not None and self.range_end is not None + else '') config += '\n\t}}' config = config.format( @@ -490,4 +503,4 @@ def resolve_host(self, reserved_host): self.hw_addr = part.strip().split(HARDWARE_KEY)[1].strip().split(';')[0] elif FIXED_ADDRESS_KEY in part: self.fixed_addr = part.strip().split( - FIXED_ADDRESS_KEY)[1].strip().split(';')[0] \ No newline at end of file + FIXED_ADDRESS_KEY)[1].strip().split(';')[0] diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py index 2cc78403a..b07f57b27 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py @@ -17,87 +17,86 @@ import os CONFIG_FILE = 'conf/dhcpd.conf' - DHCP_CONFIG = None def get_config_file_path(): - dhcp_config = DHCPConfig() - current_dir = os.path.dirname(os.path.abspath(__file__)) - module_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(current_dir)))) - conf_file = os.path.join(module_dir,CONFIG_FILE) - return conf_file + current_dir = os.path.dirname(os.path.abspath(__file__)) + module_dir = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(current_dir)))) + conf_file = os.path.join(module_dir, CONFIG_FILE) + return conf_file def get_config(): - dhcp_config = DHCPConfig() - dhcp_config.resolve_config(get_config_file_path()) - return dhcp_config + dhcp_config = DHCPConfig() + dhcp_config.resolve_config(get_config_file_path()) + return dhcp_config class DHCPConfigTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - # Resolve the config - global DHCP_CONFIG - DHCP_CONFIG = get_config() - - def test_resolve_config(self): - print('Test Resolve Config:\n' + str(DHCP_CONFIG)) - - # Resolve the raw config file - with open(get_config_file_path(),'r') as f: - lines = f.readlines() - - # Get the resolved config as a - conf_parts = str(DHCP_CONFIG).split('\n') - - # dhcpd conf is not picky about spacing so we just - # need to check contents of each line for matching - # to make sure evertying matches - for i in range(len(lines)): - self.assertEqual(lines[i].strip(),conf_parts[i].strip()) - - def test_disable_failover(self): - DHCP_CONFIG.disable_failover() - print('Test Disable Config:\n' + str(DHCP_CONFIG)) - config_lines = str(DHCP_CONFIG._peer).split('\n') - for line in config_lines: - self.assertTrue(line.startswith('#')) - - def test_enable_failover(self): - DHCP_CONFIG.enable_failover() - print('Test Enable Config:\n' + str(DHCP_CONFIG)) - config_lines = str(DHCP_CONFIG._peer).split('\n') - for line in config_lines: - self.assertFalse(line.startswith('#')) - - def test_add_reserved_host(self): - DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') - host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') - self.assertIsNotNone(host) - print('AddHostConfig:\n' + str(DHCP_CONFIG)) - - def test_delete_reserved_host(self): - DHCP_CONFIG.delete_reserved_host('00:11:22:33:44:55') - host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') - self.assertIsNone(host) - print('DeleteHostConfig:\n' + str(DHCP_CONFIG)) - - def test_resolve_config_with_hosts(self): - DHCP_CONFIG.add_reserved_host('test','00:11:22:33:44:55','192.168.10.5') - config_with_hosts = DHCPConfig() - config_with_hosts.make(str(DHCP_CONFIG)) - host = config_with_hosts.get_reserved_host('00:11:22:33:44:55') - self.assertIsNotNone(host) - print("ResolveConfigWithHosts:\n" + str(config_with_hosts)) + @classmethod + def setUpClass(cls): + # Resolve the config + global DHCP_CONFIG + DHCP_CONFIG = get_config() + + def test_resolve_config(self): + print('Test Resolve Config:\n' + str(DHCP_CONFIG)) + + # Resolve the raw config file + with open(get_config_file_path(), 'r', encoding='UTF-8') as f: + lines = f.readlines() + + # Get the resolved config as a + conf_parts = str(DHCP_CONFIG).split('\n') + + # dhcpd conf is not picky about spacing so we just + # need to check contents of each line for matching + # to make sure evertying matches + for i in range(len(lines)): + self.assertEqual(lines[i].strip(), conf_parts[i].strip()) + + def test_disable_failover(self): + DHCP_CONFIG.disable_failover() + print('Test Disable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertTrue(line.startswith('#')) + + def test_enable_failover(self): + DHCP_CONFIG.enable_failover() + print('Test Enable Config:\n' + str(DHCP_CONFIG)) + config_lines = str(DHCP_CONFIG._peer).split('\n') + for line in config_lines: + self.assertFalse(line.startswith('#')) + + def test_add_reserved_host(self): + DHCP_CONFIG.add_reserved_host('test', '00:11:22:33:44:55', '192.168.10.5') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print('AddHostConfig:\n' + str(DHCP_CONFIG)) + + def test_delete_reserved_host(self): + DHCP_CONFIG.delete_reserved_host('00:11:22:33:44:55') + host = DHCP_CONFIG.get_reserved_host('00:11:22:33:44:55') + self.assertIsNone(host) + print('DeleteHostConfig:\n' + str(DHCP_CONFIG)) + + def test_resolve_config_with_hosts(self): + DHCP_CONFIG.add_reserved_host('test', '00:11:22:33:44:55', '192.168.10.5') + config_with_hosts = DHCPConfig() + config_with_hosts.make(str(DHCP_CONFIG)) + host = config_with_hosts.get_reserved_host('00:11:22:33:44:55') + self.assertIsNotNone(host) + print('ResolveConfigWithHosts:\n' + str(config_with_hosts)) if __name__ == '__main__': - suite = unittest.TestSuite() - suite.addTest(DHCPConfigTest('test_resolve_config')) - suite.addTest(DHCPConfigTest('test_disable_failover')) - suite.addTest(DHCPConfigTest('test_enable_failover')) - suite.addTest(DHCPConfigTest('test_add_reserved_host')) - suite.addTest(DHCPConfigTest('test_delete_reserved_host')) - suite.addTest(DHCPConfigTest('test_resolve_config_with_hosts')) - - runner = unittest.TextTestRunner() - runner.run(suite) \ No newline at end of file + suite = unittest.TestSuite() + suite.addTest(DHCPConfigTest('test_resolve_config')) + suite.addTest(DHCPConfigTest('test_disable_failover')) + suite.addTest(DHCPConfigTest('test_enable_failover')) + suite.addTest(DHCPConfigTest('test_add_reserved_host')) + suite.addTest(DHCPConfigTest('test_delete_reserved_host')) + suite.addTest(DHCPConfigTest('test_resolve_config_with_hosts')) + + runner = unittest.TextTestRunner() + runner.run(suite) diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py index 1431d6ddd..67a31c2cb 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py +++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_server.py @@ -13,6 +13,7 @@ # limitations under the License. """Contains all the necessary classes to maintain the DHCP server""" +import sys import time from common import logger from common import util @@ -35,75 +36,74 @@ def __init__(self): self.dhcp_config.resolve_config() def restart(self): - LOGGER.info("Restarting DHCP Server") - isc_started = util.run_command("service isc-dhcp-server restart", False) + LOGGER.info('Restarting DHCP Server') + isc_started = util.run_command('service isc-dhcp-server restart', False) radvd_started = self.radvd.restart() started = isc_started and radvd_started - LOGGER.info("DHCP Restarted: " + str(started)) + LOGGER.info('DHCP Restarted: ' + str(started)) return started def start(self): - LOGGER.info("Starting DHCP Server") - isc_started = util.run_command("service isc-dhcp-server start", False) + LOGGER.info('Starting DHCP Server') + isc_started = util.run_command('service isc-dhcp-server start', False) radvd_started = self.radvd.start() started = isc_started and radvd_started - LOGGER.info("DHCP Started: " + str(started)) + LOGGER.info('DHCP Started: ' + str(started)) return started def stop(self): - LOGGER.info("Stopping DHCP Server") - isc_stopped = util.run_command("service isc-dhcp-server stop", False) + LOGGER.info('Stopping DHCP Server') + isc_stopped = util.run_command('service isc-dhcp-server stop', False) radvd_stopped = self.radvd.stop() stopped = isc_stopped and radvd_stopped - LOGGER.info("DHCP Stopped: " + str(stopped)) + LOGGER.info('DHCP Stopped: ' + str(stopped)) return stopped def is_running(self): - LOGGER.info("Checking DHCP Status") - response = util.run_command("service isc-dhcp-server status") - isc_running = response[0] == 'Status of ISC DHCPv4 server: dhcpd is running.' + LOGGER.info('Checking DHCP Status') + response = util.run_command('service isc-dhcp-server status') + isc_running = response[ + 0] == 'Status of ISC DHCPv4 server: dhcpd is running.' radvd_running = self.radvd.is_running() running = isc_running and radvd_running - LOGGER.info("DHCP Status: " + str(running)) + LOGGER.info('DHCP Status: ' + str(running)) return running def boot(self): - LOGGER.info("Booting DHCP Server") + LOGGER.info('Booting DHCP Server') isc_booted = False radvd_booted = False if self.is_running(): - LOGGER.info("Stopping isc-dhcp-server") + LOGGER.info('Stopping isc-dhcp-server') stopped = self.stop() - LOGGER.info("isc-dhcp-server stopped: " + str(stopped)) + LOGGER.info('isc-dhcp-server stopped: ' + str(stopped)) if self.radvd.is_running(): - LOGGER.info("Stopping RADVD") + LOGGER.info('Stopping RADVD') stopped = self.radvd.stop() - LOGGER.info("radvd stopped: " + str(stopped)) + LOGGER.info('radvd stopped: ' + str(stopped)) - LOGGER.info("Starting isc-dhcp-server") + LOGGER.info('Starting isc-dhcp-server') if self.start(): isc_booted = False # Scan for 5 seconds if not yet ready - for i in range(5): + for _ in range(5): time.sleep(1) isc_booted = self.is_running() if isc_booted: - break; - LOGGER.info("isc-dhcp-server started: " + str(isc_booted)) + break + LOGGER.info('isc-dhcp-server started: ' + str(isc_booted)) - LOGGER.info("Starting RADVD") + LOGGER.info('Starting RADVD') if self.radvd.start(): radvd_booted = False # Scan for 5 seconds if not yet ready - for i in range(5): + for _ in range(5): time.sleep(1) radvd_booted = self.radvd.is_running() if radvd_booted: - break; - LOGGER.info("RADVD started: " + str(radvd_booted)) - - + break + LOGGER.info('RADVD started: ' + str(radvd_booted)) return isc_booted and radvd_booted @@ -117,14 +117,14 @@ def run(): config = str(dhcp_server.dhcp_config) while True: - dhcp_server.dhcp_config.resolve_config() - new_config = str(dhcp_server.dhcp_config) - if config != new_config: - LOGGER.info("DHCP Config Changed") - config = new_config - success = dhcp_server.restart() - success = dhcp_server.radvd.restart() - time.sleep(1) + dhcp_server.dhcp_config.resolve_config() + new_config = str(dhcp_server.dhcp_config) + if config != new_config: + LOGGER.info('DHCP Config Changed') + config = new_config + dhcp_server.restart() + dhcp_server.radvd.restart() + time.sleep(1) if __name__ == '__main__': run() diff --git a/modules/network/dhcp-2/python/src/grpc_server/network_service.py b/modules/network/dhcp-2/python/src/grpc_server/network_service.py index 5af9e6c44..f9deba965 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/network_service.py +++ b/modules/network/dhcp-2/python/src/grpc_server/network_service.py @@ -25,6 +25,7 @@ LOG_NAME = 'network_service' LOGGER = None + class NetworkService(pb2_grpc.NetworkModule): """gRPC endpoints for the DHCP Server""" @@ -47,7 +48,7 @@ def RestartDHCPServer(self, request, context): # pylint: disable=W0613 started = self._dhcp_server.restart() LOGGER.info('DHCP server restarted: ' + (str(started))) return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to restart DHCP server: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -59,7 +60,7 @@ def StartDHCPServer(self, request, context): # pylint: disable=W0613 started = self._dhcp_server.start() LOGGER.info('DHCP server started: ' + (str(started))) return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to start DHCP server: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -71,12 +72,12 @@ def StopDHCPServer(self, request, context): # pylint: disable=W0613 stopped = self._dhcp_server.stop() LOGGER.info('DHCP server stopped: ' + (str(stopped))) return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to stop DHCP server: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) return pb2.Response(code=500, message=fail_message) - + def AddReservedLease(self, request, context): # pylint: disable=W0613 LOGGER.info('Add reserved lease called') try: @@ -86,7 +87,7 @@ def AddReservedLease(self, request, context): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Reserved lease added') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to add reserved lease: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -100,7 +101,7 @@ def DeleteReservedLease(self, request, context): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Reserved lease deleted') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to delete reserved lease: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -114,7 +115,7 @@ def DisableFailover(self, request, contest): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Failover disabled') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to disable failover: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -128,7 +129,7 @@ def EnableFailover(self, request, contest): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('Failover enabled') return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to enable failover: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -143,7 +144,7 @@ def GetDHCPRange(self, request, context): # pylint: disable=W0613 try: pool = self._get_dhcp_config()._subnets[0].pools[0] return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to get DHCP range: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -161,7 +162,7 @@ def GetLease(self, request, context): # pylint: disable=W0613 return pb2.Response(code=200, message=str(lease)) else: return pb2.Response(code=200, message='{}') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to get lease: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -179,7 +180,7 @@ def SetDHCPRange(self, request, context): # pylint: disable=W0613 dhcp_config.write_config() LOGGER.info('DHCP range set') return pb2.Response(code=200, message='DHCP Range Set') - except Exception as e: # pylint: disable=W0718 + except Exception as e: # pylint: disable=W0718 fail_message = 'Failed to set DHCP range: ' + str(e) LOGGER.error(fail_message) LOGGER.error(traceback.format_exc()) @@ -191,4 +192,4 @@ def GetStatus(self, request, context): # pylint: disable=W0613 """ dhcp_status = self._dhcp_server.is_running() message = str({'dhcpStatus': dhcp_status}) - return pb2.Response(code=200, message=message) \ No newline at end of file + return pb2.Response(code=200, message=message) diff --git a/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py b/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py index 0c6ef90d6..bc5d8b55f 100644 --- a/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py +++ b/modules/network/dhcp-2/python/src/grpc_server/radvd_server.py @@ -13,10 +13,8 @@ # limitations under the License. """Contains all the necessary classes to maintain the DHCP server""" -import time from common import logger from common import util -from dhcp_config import DHCPConfig CONFIG_FILE = '/etc/dhcp/dhcpd.conf' LOG_NAME = 'radvd' @@ -31,25 +29,25 @@ def __init__(self): LOGGER = logger.get_logger(LOG_NAME, 'dhcp-2') def restart(self): - LOGGER.info("Restarting RADVD Server") - response = util.run_command("radvd-service restart", False) - LOGGER.info("RADVD Restarted: " + str(response)) + LOGGER.info('Restarting RADVD Server') + response = util.run_command('radvd-service restart', False) + LOGGER.info('RADVD Restarted: ' + str(response)) return response def start(self): - LOGGER.info("Starting RADVD Server") - response = util.run_command("radvd-service start", False) - LOGGER.info("RADVD Started: " + str(response)) + LOGGER.info('Starting RADVD Server') + response = util.run_command('radvd-service start', False) + LOGGER.info('RADVD Started: ' + str(response)) return response def stop(self): - LOGGER.info("Stopping RADVD Server") - response = util.run_command("radvd-service stop", False) - LOGGER.info("RADVD Stopped: " + str(response)) + LOGGER.info('Stopping RADVD Server') + response = util.run_command('radvd-service stop', False) + LOGGER.info('RADVD Stopped: ' + str(response)) return response def is_running(self): - LOGGER.info("Checking RADVD Status") - response = util.run_command("radvd-service status") - LOGGER.info("RADVD Status: " + str(response)) + LOGGER.info('Checking RADVD Status') + response = util.run_command('radvd-service status') + LOGGER.info('RADVD Status: ' + str(response)) return response[0] == 'radvd service is running.' diff --git a/modules/test/base/python/src/test_module.py b/modules/test/base/python/src/test_module.py index 5342e36f8..2a892b810 100644 --- a/modules/test/base/python/src/test_module.py +++ b/modules/test/base/python/src/test_module.py @@ -89,14 +89,13 @@ def run_tests(self): else: result = getattr(self, test_method_name)() else: - LOGGER.info('Test ' + test['name'] + ' not resolved. Skipping') + LOGGER.info(f'Test {test["name"]} not resolved. Skipping') result = None else: - LOGGER.info('Test ' + test['name'] + ' disabled. Skipping') + LOGGER.info(f'Test {test["name"]} disabled. Skipping') if result is not None: - success = None - if isinstance(result,bool): - test['result'] = 'compliant' if result else 'non-compliant' + if isinstance(result, bool): + test['result'] = 'compliant' if result else 'non-compliant' else: test['result'] = 'compliant' if result[0] else 'non-compliant' test['result_details'] = result[1] diff --git a/modules/test/conn/python/src/connection_module.py b/modules/test/conn/python/src/connection_module.py index 5b3bf7038..b4635ffb8 100644 --- a/modules/test/conn/python/src/connection_module.py +++ b/modules/test/conn/python/src/connection_module.py @@ -11,17 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Connection test module""" import util import sys -from scapy.all import * +import json +from scapy.all import rdpcap, DHCP, Ether from test_module import TestModule from dhcp1.client import Client as DHCPClient1 -LOG_NAME = "test_connection" +LOG_NAME = 'test_connection' LOGGER = None -OUI_FILE="/usr/local/etc/oui.txt" +OUI_FILE = '/usr/local/etc/oui.txt' DHCP_SERVER_CAPTURE_FILE = '/runtime/network/dhcp-1.pcap' STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap' MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap' @@ -35,10 +35,11 @@ def __init__(self, module): global LOGGER LOGGER = self._get_logger() self.dhcp1_client = DHCPClient1() - - # ToDo: Move this into some level of testing, leave for + + # ToDo: Move this into some level of testing, leave for # reference until tests are implemented with these calls - # response = self.dhcp1_client.add_reserved_lease('test','00:11:22:33:44:55','10.10.10.21') + # response = self.dhcp1_client.add_reserved_lease( + # 'test','00:11:22:33:44:55','10.10.10.21') # print("AddLeaseResp: " + str(response)) # response = self.dhcp1_client.delete_reserved_lease('00:11:22:33:44:55') @@ -63,52 +64,52 @@ def __init__(self, module): # print("Set Range: " + str(response)) def _connection_dhcp_address(self): - LOGGER.info("Running connection.dhcp_address") + LOGGER.info('Running connection.dhcp_address') response = self.dhcp1_client.get_lease(self._device_mac) - LOGGER.info("DHCP Lease resolved:\n" + str(response)) + LOGGER.info('DHCP Lease resolved:\n' + str(response)) if response.code == 200: - lease = eval(response.message) + lease = eval(response.message) # pylint: disable=E0203 if 'ip' in lease: ip_addr = lease['ip'] - LOGGER.info("IP Resolved: " + ip_addr) - LOGGER.info("Attempting to ping device..."); + LOGGER.info('IP Resolved: ' + ip_addr) + LOGGER.info('Attempting to ping device...') ping_success = self._ping(self._device_ipv4_addr) - LOGGER.info("Ping Success: " + str(ping_success)) + LOGGER.info('Ping Success: ' + str(ping_success)) if ping_success: - return True, "Device responded to leased ip address" + return True, 'Device responded to leased ip address' else: - return False, "Device did not respond to leased ip address" + return False, 'Device did not respond to leased ip address' else: - LOGGER.info("No DHCP lease found for: " + self._device_mac) - return False, "No DHCP lease found for: " + self._device_mac + LOGGER.info('No DHCP lease found for: ' + self._device_mac) + return False, 'No DHCP lease found for: ' + self._device_mac def _connection_mac_address(self): - LOGGER.info("Running connection.mac_address") + LOGGER.info('Running connection.mac_address') if self._device_mac is not None: - LOGGER.info("MAC address found: " + self._device_mac) - return True, "MAC address found: " + self._device_mac + LOGGER.info('MAC address found: ' + self._device_mac) + return True, 'MAC address found: ' + self._device_mac else: - LOGGER.info("No MAC address found: " + self._device_mac) - return False, "No MAC address found." + LOGGER.info('No MAC address found: ' + self._device_mac) + return False, 'No MAC address found.' def _connection_mac_oui(self): - LOGGER.info("Running connection.mac_oui") + LOGGER.info('Running connection.mac_oui') manufacturer = self._get_oui_manufacturer(self._device_mac) if manufacturer is not None: - LOGGER.info("OUI Manufacturer found: " + manufacturer) - return True, "OUI Manufacturer found: " + manufacturer + LOGGER.info('OUI Manufacturer found: ' + manufacturer) + return True, 'OUI Manufacturer found: ' + manufacturer else: - LOGGER.info("No OUI Manufacturer found for: " + self._device_mac) - return False, "No OUI Manufacturer found for: " + self._device_mac + LOGGER.info('No OUI Manufacturer found for: ' + self._device_mac) + return False, 'No OUI Manufacturer found for: ' + self._device_mac def _connection_single_ip(self): - LOGGER.info("Running connection.single_ip") + LOGGER.info('Running connection.single_ip') result = None if self._device_mac is None: - LOGGER.info("No MAC address found: ") - return result, "No MAC address found." - + LOGGER.info('No MAC address found: ') + return result, 'No MAC address found.' + # Read all the pcap files containing DHCP packet information packets = rdpcap(DHCP_SERVER_CAPTURE_FILE) packets.append(rdpcap(STARTUP_CAPTURE_FILE)) @@ -116,50 +117,48 @@ def _connection_single_ip(self): # Extract MAC addresses from DHCP packets mac_addresses = set() - LOGGER.info("Inspecting: " + str(len(packets)) + " packets") + LOGGER.info('Inspecting: ' + str(len(packets)) + ' packets') for packet in packets: # Option[1] = message-type, option 3 = DHCPREQUEST - if DHCP in packet and packet[DHCP].options[0][1] == 3: - mac_address = packet[Ether].src - mac_addresses.add(mac_address.upper()) + if DHCP in packet and packet[DHCP].options[0][1] == 3: + mac_address = packet[Ether].src + mac_addresses.add(mac_address.upper()) # Check if the device mac address is in the list of DHCPREQUESTs result = self._device_mac.upper() in mac_addresses - LOGGER.info("DHCPREQUEST detected from device: " + str(result)) + LOGGER.info('DHCPREQUEST detected from device: ' + str(result)) # Check the unique MAC addresses to see if they match the device for mac_address in mac_addresses: - LOGGER.info("DHCPREQUEST from MAC address: " + mac_address) - result &= self._device_mac.upper() == mac_address + LOGGER.info('DHCPREQUEST from MAC address: ' + mac_address) + result &= self._device_mac.upper() == mac_address return result - def _connection_target_ping(self): - LOGGER.info("Running connection.target_ping") + LOGGER.info('Running connection.target_ping') # If the ipv4 address wasn't resolved yet, try again if self._device_ipv4_addr is None: - self._device_ipv4_addr = self._get_device_ipv4(self) + self._device_ipv4_addr = self._get_device_ipv4(self) if self._device_ipv4_addr is None: - LOGGER.error("No device IP could be resolved") + LOGGER.error('No device IP could be resolved') sys.exit(1) else: return self._ping(self._device_ipv4_addr) - def _get_oui_manufacturer(self,mac_address): + def _get_oui_manufacturer(self, mac_address): # Do some quick fixes on the format of the mac_address # to match the oui file pattern - mac_address = mac_address.replace(":","-").upper() - with open(OUI_FILE, "r") as file: - for line in file: - if mac_address.startswith(line[:8]): - start = line.index("(hex)") + len("(hex)") - return line[start:].strip() # Extract the company name + mac_address = mac_address.replace(':', '-').upper() + with open(OUI_FILE, 'r', encoding='UTF-8') as file: + for line in file: + if mac_address.startswith(line[:8]): + start = line.index('(hex)') + len('(hex)') + return line[start:].strip() # Extract the company name return None def _ping(self, host): cmd = 'ping -c 1 ' + str(host) success = util.run_command(cmd, output=False) return success - \ No newline at end of file diff --git a/modules/test/nmap/nmap.Dockerfile b/modules/test/nmap/nmap.Dockerfile index 1789da382..ea90ee06f 100644 --- a/modules/test/nmap/nmap.Dockerfile +++ b/modules/test/nmap/nmap.Dockerfile @@ -18,10 +18,10 @@ FROM test-run/base-test:latest ARG MODULE_NAME=nmap ARG MODULE_DIR=modules/test/$MODULE_NAME -#Load the requirements file +# Load the requirements file COPY $MODULE_DIR/python/requirements.txt /testrun/python -#Install all python requirements for the module +# Install all python requirements for the module RUN pip3 install -r /testrun/python/requirements.txt # Copy over all configuration files diff --git a/modules/test/ntp/bin/start_test_module b/modules/test/ntp/bin/start_test_module new file mode 100644 index 000000000..a09349cf9 --- /dev/null +++ b/modules/test/ntp/bin/start_test_module @@ -0,0 +1,42 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a pyhon script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. + +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER $LOG_FILE +chown $HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/modules/test/ntp/conf/module_config.json b/modules/test/ntp/conf/module_config.json new file mode 100644 index 000000000..288474868 --- /dev/null +++ b/modules/test/ntp/conf/module_config.json @@ -0,0 +1,27 @@ +{ + "config": { + "meta": { + "name": "ntp", + "display_name": "NTP", + "description": "NTP test" + }, + "network": false, + "docker": { + "depends_on": "base", + "enable_container": true, + "timeout": 30 + }, + "tests":[ + { + "name": "ntp.network.ntp_support", + "description": "Does the device request network time sync as client as per RFC 5905 - Network Time Protocol Version 4: Protocol and Algorithms Specification", + "expected_behavior": "The device sends an NTPv4 request to the configured NTP server." + }, + { + "name": "ntp.network.ntp_dhcp", + "description": "Accept NTP address over DHCP", + "expected_behavior": "Device can accept NTP server address, provided by the DHCP server (DHCP OFFER PACKET)" + } + ] + } +} diff --git a/modules/test/ntp/ntp.Dockerfile b/modules/test/ntp/ntp.Dockerfile new file mode 100644 index 000000000..33b06287e --- /dev/null +++ b/modules/test/ntp/ntp.Dockerfile @@ -0,0 +1,20 @@ +# Image name: test-run/ntp-test +FROM test-run/base-test:latest + +ARG MODULE_NAME=ntp +ARG MODULE_DIR=modules/test/$MODULE_NAME + +# Load the requirements file +COPY $MODULE_DIR/python/requirements.txt /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + +# Copy over all configuration files +COPY $MODULE_DIR/conf /testrun/conf + +# Copy over all binary files +COPY $MODULE_DIR/bin /testrun/bin + +# Copy over all python files +COPY $MODULE_DIR/python /testrun/python \ No newline at end of file diff --git a/modules/test/ntp/python/requirements.txt b/modules/test/ntp/python/requirements.txt new file mode 100644 index 000000000..93b351f44 --- /dev/null +++ b/modules/test/ntp/python/requirements.txt @@ -0,0 +1 @@ +scapy \ No newline at end of file diff --git a/modules/test/ntp/python/src/ntp_module.py b/modules/test/ntp/python/src/ntp_module.py new file mode 100644 index 000000000..4053ce98a --- /dev/null +++ b/modules/test/ntp/python/src/ntp_module.py @@ -0,0 +1,79 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""NTP test module""" +from test_module import TestModule +from scapy.all import rdpcap, NTP, IP + +LOG_NAME = 'test_ntp' +NTP_SERVER_CAPTURE_FILE = '/runtime/network/ntp.pcap' +STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap' +MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap' +LOGGER = None + +class NTPModule(TestModule): + """NTP Test module""" + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + # TODO: This should be fetched dynamically + self._ntp_server = '10.10.10.5' + + global LOGGER + LOGGER = self._get_logger() + + def _ntp_network_ntp_support(self): + LOGGER.info('Running ntp.network.ntp_support') + + packet_capture = rdpcap(STARTUP_CAPTURE_FILE) + rdpcap(MONITOR_CAPTURE_FILE) + + device_sends_ntp4 = False + device_sends_ntp3 = False + + for packet in packet_capture: + + if NTP in packet and packet.src == self._device_mac: + if packet[NTP].version == 4: + device_sends_ntp4 = True + LOGGER.info(f'Device sent NTPv4 request to {packet[IP].dst}') + elif packet[NTP].version == 3: + device_sends_ntp3 = True + LOGGER.info(f'Device sent NTPv3 request to {packet[IP].dst}') + + if not (device_sends_ntp3 or device_sends_ntp4): + LOGGER.info('Device has not sent any NTP requests') + + return device_sends_ntp4 and not device_sends_ntp3 + + def _ntp_network_ntp_dhcp(self): + LOGGER.info('Running ntp.network.ntp_dhcp') + + packet_capture = rdpcap(STARTUP_CAPTURE_FILE) + rdpcap(MONITOR_CAPTURE_FILE) + + device_sends_ntp = False + + for packet in packet_capture: + + if NTP in packet and packet.src == self._device_mac: + device_sends_ntp = True + if packet[IP].dst == self._ntp_server: + LOGGER.info('Device sent NTP request to DHCP provided NTP server') + return True + + if not device_sends_ntp: + LOGGER.info('Device has not sent any NTP requests') + else: + LOGGER.info('Device has not sent NTP requests to DHCP provided NTP server') + + return False diff --git a/modules/test/ntp/python/src/run.py b/modules/test/ntp/python/src/run.py new file mode 100644 index 000000000..685bb4083 --- /dev/null +++ b/modules/test/ntp/python/src/run.py @@ -0,0 +1,75 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run NTP test module""" +import argparse +import signal +import sys +import logger + +from ntp_module import NTPModule + +LOG_NAME = "ntp_runner" +LOGGER = logger.get_logger(LOG_NAME) + + +class NTPModuleRunner: + """Run the NTP module tests.""" + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + self.add_logger(module) + + LOGGER.info("Starting NTP test module") + + self._test_module = NTPModule(module) + self._test_module.run_tests() + + LOGGER.info("NTP test module finished") + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def _handler(self, signum): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) + + +def run(): + parser = argparse.ArgumentParser( + description="NTP Module Help", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", + "--module", + help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + NTPModuleRunner(args.module.strip()) + + +if __name__ == "__main__": + run() diff --git a/resources/devices/template/device_config.json b/resources/devices/template/device_config.json index 3bb804b22..1e92de25d 100644 --- a/resources/devices/template/device_config.json +++ b/resources/devices/template/device_config.json @@ -14,6 +14,35 @@ } } }, + "connection": { + "enabled": true, + "tests": { + "connection.mac_address": { + "enabled": true + }, + "connection.mac_oui": { + "enabled": true + }, + "connection.target_ping": { + "enabled": true + } + , + "connection.single_ip": { + "enabled": true + } + } + }, + "ntp": { + "enabled": true, + "tests": { + "ntp.network.ntp_support": { + "enabled": true + }, + "ntp.network.ntp_dhcp": { + "enabled": true + } + } + }, "baseline": { "enabled": false, "tests": { diff --git a/testing/test_pylint b/testing/test_pylint index 5cd1dff73..2ba696af5 100755 --- a/testing/test_pylint +++ b/testing/test_pylint @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ERROR_LIMIT=1100 +ERROR_LIMIT=100 sudo cmd/install From 2ae337d8ab458a12eebfb81ef13154b5bae16f51 Mon Sep 17 00:00:00 2001 From: J Boddey Date: Wed, 12 Jul 2023 10:24:22 +0100 Subject: [PATCH 48/48] Add ipv6 tests (#65) * Add ipv6 tests * Check for ND_NS --- modules/network/base/bin/start_module | 6 +-- .../network/dhcp-1/bin/start_network_service | 1 - modules/network/dhcp-1/conf/radvd.conf | 1 + modules/test/conn/conf/module_config.json | 10 +++++ .../test/conn/python/src/connection_module.py | 41 ++++++++++++++++++- 5 files changed, 54 insertions(+), 5 deletions(-) diff --git a/modules/network/base/bin/start_module b/modules/network/base/bin/start_module index 6de62f1a5..8e8cb5e4b 100644 --- a/modules/network/base/bin/start_module +++ b/modules/network/base/bin/start_module @@ -29,7 +29,7 @@ useradd $HOST_USER sysctl net.ipv6.conf.all.disable_ipv6=0 sysctl -p -#Read in the config file +# Read in the config file CONF_FILE="/testrun/conf/module_config.json" CONF=`cat $CONF_FILE` @@ -92,8 +92,8 @@ then fi fi -#Small pause to let all core services stabalize +# Small pause to let all core services stabalize sleep 3 -#Start the networking service +# Start the networking service $BIN_DIR/start_network_service $MODULE_NAME $INTF \ No newline at end of file diff --git a/modules/network/dhcp-1/bin/start_network_service b/modules/network/dhcp-1/bin/start_network_service index 82b4c6e33..413c48ceb 100644 --- a/modules/network/dhcp-1/bin/start_network_service +++ b/modules/network/dhcp-1/bin/start_network_service @@ -38,7 +38,6 @@ touch $RA_LOG_FILE chown $HOST_USER $DHCP_LOG_FILE chown $HOST_USER $RA_LOG_FILE - # Move the config files to the correct location cp /testrun/conf/isc-dhcp-server /etc/default/ cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf diff --git a/modules/network/dhcp-1/conf/radvd.conf b/modules/network/dhcp-1/conf/radvd.conf index f6d6f30d9..89995785f 100644 --- a/modules/network/dhcp-1/conf/radvd.conf +++ b/modules/network/dhcp-1/conf/radvd.conf @@ -8,5 +8,6 @@ interface veth0 AdvOnLink on; AdvAutonomous on; AdvRouterAddr on; + AdvSourceLLAddress off; }; }; \ No newline at end of file diff --git a/modules/test/conn/conf/module_config.json b/modules/test/conn/conf/module_config.json index 4053b4e26..496b6aada 100644 --- a/modules/test/conn/conf/module_config.json +++ b/modules/test/conn/conf/module_config.json @@ -36,6 +36,16 @@ "name": "connection.target_ping", "description": "The device under test responds to an ICMP echo (ping) request.", "expected_behavior": "The device under test responds to an ICMP echo (ping) request." + }, + { + "name": "connection.ipv6_slaac", + "description": "The device forms a valid IPv6 address as a combination of the IPv6 router prefix and the device interface identifier", + "expected_behavior": "The device under test complies with RFC4862 and forms a valid IPv6 SLAAC address" + }, + { + "name": "connection.ipv6_ping", + "description": "The device responds to an IPv6 ping (ICMPv6 Echo) request to the SLAAC address", + "expected_behavior": "The device responds to the ping as per RFC4443" } ] } diff --git a/modules/test/conn/python/src/connection_module.py b/modules/test/conn/python/src/connection_module.py index b4635ffb8..0b11fde24 100644 --- a/modules/test/conn/python/src/connection_module.py +++ b/modules/test/conn/python/src/connection_module.py @@ -25,6 +25,7 @@ DHCP_SERVER_CAPTURE_FILE = '/runtime/network/dhcp-1.pcap' STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap' MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap' +SLAAC_PREFIX = "fd10:77be:4186" class ConnectionModule(TestModule): @@ -83,6 +84,8 @@ def _connection_dhcp_address(self): LOGGER.info('No DHCP lease found for: ' + self._device_mac) return False, 'No DHCP lease found for: ' + self._device_mac + self._ipv6_addr = None + def _connection_mac_address(self): LOGGER.info('Running connection.mac_address') if self._device_mac is not None: @@ -158,7 +161,43 @@ def _get_oui_manufacturer(self, mac_address): return line[start:].strip() # Extract the company name return None + def _connection_ipv6_slaac(self): + LOGGER.info("Running connection.ipv6_slaac") + packet_capture = rdpcap(MONITOR_CAPTURE_FILE) + + sends_ipv6 = False + + for packet in packet_capture: + if IPv6 in packet and packet.src == self._device_mac: + sends_ipv6 = True + if ICMPv6ND_NS in packet: + ipv6_addr = str(packet[ICMPv6ND_NS].tgt) + if ipv6_addr.startswith(SLAAC_PREFIX): + self._ipv6_addr = ipv6_addr + LOGGER.info(f"Device has formed SLAAC address {ipv6_addr}") + return True + + if sends_ipv6: + LOGGER.info("Device does not support IPv6 SLAAC") + else: + LOGGER.info("Device does not support IPv6") + return False + + def _connection_ipv6_ping(self): + LOGGER.info("Running connection.ipv6_ping") + + if self._ipv6_addr is None: + LOGGER.info("No IPv6 SLAAC address found. Cannot ping") + return + + if self._ping(self._ipv6_addr): + LOGGER.info(f"Device responds to IPv6 ping on {self._ipv6_addr}") + return True + else: + LOGGER.info("Device does not respond to IPv6 ping") + return False + def _ping(self, host): - cmd = 'ping -c 1 ' + str(host) + cmd = "ping -c 1 " + str(host) success = util.run_command(cmd, output=False) return success