diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..852476aeb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a report to help us identify and resolve bugs +title: '' +labels: bug +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Error logs** +If applicable, provide a log from https://gist.github.com/ + +**Environment (please provide the following information about your setup):** + - OS: [e.g. Ubuntu] + - Version [e.g. 22.04] + - Additional hardware (network adapters) + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..9fd0ca896 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest a new feature or change request +title: '' +labels: request +assignees: '' + +--- + +**What is the problem your feature is trying to solve?** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you think would solve the problem** +A clear and concise description of what you want to happen. + +**Additional context** +Add any other context or screenshots about the feature request here. 
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml new file mode 100644 index 000000000..fbdbe442c --- /dev/null +++ b/.github/workflows/testing.yml @@ -0,0 +1,30 @@ +name: Testrun test suite + +on: + push: + pull_request: + schedule: + - cron: '0 13 * * *' + +jobs: + testrun: + name: Baseline + runs-on: ubuntu-20.04 + timeout-minutes: 20 + steps: + - name: Checkout source + uses: actions/checkout@v2.3.4 + - name: Run tests + shell: bash {0} + run: testing/test_baseline + + pylint: + name: Pylint + runs-on: ubuntu-20.04 + timeout-minutes: 20 + steps: + - name: Checkout source + uses: actions/checkout@v2.3.4 + - name: Run tests + shell: bash {0} + run: testing/test_pylint diff --git a/.gitignore b/.gitignore index 93fe84e64..5dfc1f6f9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,7 @@ -venv/ -net_orc/ -.vscode/ \ No newline at end of file +runtime/ +venv/ +.vscode/ +error +pylint.out +local/ +__pycache__/ \ No newline at end of file diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 000000000..4e89b0c10 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,429 @@ +# This Pylint rcfile contains a best-effort configuration to uphold the +# best-practices and style described in the Google Python style guide: +# https://google.github.io/styleguide/pyguide.html +# +# Its canonical open-source location is: +# https://google.github.io/styleguide/pylintrc + +[MASTER] + +# Files or directories to be skipped. They should be base names, not paths. +ignore=third_party + +# Files or directories matching the regex patterns are skipped. The regex +# matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. +persistent=no + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=4 + +# Allow loading of arbitrary C extensions. 
Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=abstract-method, + apply-builtin, + arguments-differ, + attribute-defined-outside-init, + backtick, + bad-option-value, + basestring-builtin, + buffer-builtin, + c-extension-no-member, + consider-using-enumerate, + cmp-builtin, + cmp-method, + coerce-builtin, + coerce-method, + delslice-method, + div-method, + duplicate-code, + eq-without-hash, + execfile-builtin, + file-builtin, + filter-builtin-not-iterating, + fixme, + getslice-method, + global-statement, + hex-method, + idiv-method, + implicit-str-concat, + import-error, + import-self, + import-star-module-level, + inconsistent-return-statements, + input-builtin, + intern-builtin, + invalid-str-codec, + locally-disabled, + long-builtin, + long-suffix, + map-builtin-not-iterating, + misplaced-comparison-constant, + missing-function-docstring, + metaclass-assignment, + next-method-called, + next-method-defined, + no-absolute-import, + no-else-break, + no-else-continue, + no-else-raise, + no-else-return, + no-init, # added + no-member, + no-name-in-module, + no-self-use, + nonzero-method, + oct-method, + old-division, + old-ne-operator, + old-octal-literal, + old-raise-syntax, + parameter-unpacking, + print-statement, + raising-string, + range-builtin-not-iterating, + raw_input-builtin, + rdiv-method, + reduce-builtin, + relative-import, + reload-builtin, + round-builtin, + setslice-method, + signature-differs, + standarderror-builtin, + suppressed-message, + sys-max-int, + too-few-public-methods, + too-many-ancestors, + too-many-arguments, + too-many-boolean-expressions, + too-many-branches, + too-many-instance-attributes, + too-many-locals, + too-many-nested-blocks, + too-many-public-methods, + too-many-return-statements, + too-many-statements, + trailing-newlines, + unichr-builtin, + unicode-builtin, + unnecessary-pass, + unpacking-in-except, + 
useless-else-on-loop, + useless-object-inheritance, + useless-suppression, + using-cmp-argument, + wrong-import-order, + xrange-builtin, + zip-builtin-not-iterating, + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=main,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. 
+property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl + +# Regular expression matching correct function names +function-rgx=^(?:(?PsetUp|tearDown|setUpModule|tearDownModule)|(?P_?[A-Z][a-zA-Z0-9]*)|(?P_?[a-z][a-z0-9_]*))$ + +# Regular expression matching correct variable names +variable-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct constant names +const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression matching correct attribute names +attr-rgx=^_{0,2}[a-z][a-z0-9_]*$ + +# Regular expression matching correct argument names +argument-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=^_?[A-Z][a-zA-Z0-9]*$ + +# Regular expression matching correct module names +module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$ + +# Regular expression matching correct method names +method-rgx=(?x)^(?:(?P_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P_{0,2}[a-z][a-z0-9_]*))$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=10 + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. 
+contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# TODO(https://github.com/PyCQA/pylint/issues/3352): Direct pylint to exempt +# lines made too long by directives to pytype. + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=(?x)( + ^\s*(\#\ )??$| + ^\s*(from\s+\S+\s+)?import\s+.+$) + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=yes + +# Maximum number of lines in a module +max-module-lines=99999 + +# String used as indentation unit. The internal Google style guide mandates 2 +# spaces. Google's externaly-published style guide says 4, consistent with +# PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google +# projects (like TensorFlow). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. 
empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=TODO + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=yes + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_) + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging,absl.logging,tensorflow.io.logging + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. 
+spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub, + TERMIOS, + Bastion, + rexec, + sets + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant, absl + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls, + class_ + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. 
Defaults to +# "Exception" +overgeneral-exceptions=StandardError, + Exception, + BaseException \ No newline at end of file diff --git a/README.md b/README.md index b374bdbf5..41c559499 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,9 @@ Test Run cannot automate everything, and so additional manual testing may be req ## Roadmap :chart_with_upwards_trend: Test Run will constantly evolve to further support end-users by automating device network behaviour against industry standards. +## Issue reporting :triangular_flag_on_post: +If the application has come across a problem at any point during setup or use, please raise an issue under the [issues tab](https://github.com/auto-iot/test-run/issues). Issue templates exist for both bug reports and feature requests. If neither of these are appropriate for your issue, raise a blank issue instead. + ## Contributing :keyboard: The contributing requirements can be found in [CONTRIBUTING.md](CONTRIBUTING.md). In short, checkout the [Google CLA](https://cla.developers.google.com/) site to get started. 
diff --git a/cmd/install b/cmd/install index 351eb4129..23e463158 100755 --- a/cmd/install +++ b/cmd/install @@ -1,18 +1,13 @@ #!/bin/bash -e -GIT_URL=https://github.com/auto-iot -NET_ORC_DIR=net_orc - python3 -m venv venv source venv/bin/activate -pip3 install -r etc/requirements.txt +pip3 install -r framework/requirements.txt -rm -rf $NET_ORC_DIR -git clone $GIT_URL/network-orchestrator $NET_ORC_DIR -chown -R $USER $NET_ORC_DIR +pip3 install -r net_orc/python/requirements.txt -pip3 install -r $NET_ORC_DIR/python/requirements.txt +pip3 install -r test_orc/python/requirements.txt -deactivate \ No newline at end of file +deactivate diff --git a/cmd/start b/cmd/start index 43a295338..d146f413d 100755 --- a/cmd/start +++ b/cmd/start @@ -5,6 +5,12 @@ if [[ "$EUID" -ne 0 ]]; then exit 1 fi +# Ensure that /var/run/netns folder exists +mkdir -p /var/run/netns + +# Clear up existing runtime files +rm -rf runtime + # Check if python modules exist. Install if not [ ! -d "venv" ] && cmd/install @@ -12,6 +18,27 @@ fi source venv/bin/activate # TODO: Execute python code -python -u framework/run.py +python -u framework/test_runner.py $@ + +# TODO: Work in progress code for containerization of OVS module +# asyncRun() { +# "$@" & +# pid="$!" 
+# echo "PID Running: " $pid +# trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM + +# sleep 10 + +# # A signal emitted while waiting will make the wait command return code > 128 +# # Let's wrap it in a loop that doesn't end before the process is indeed stopped +# while kill -0 $pid > /dev/null 2>&1; do +# #while $(kill -0 $pid 2>/dev/null); do +# wait +# done +# } + +# # -u flag allows python print statements +# # to be logged by docker by running unbuffered +# asyncRun python3 -u python/src/run.py $@ deactivate \ No newline at end of file diff --git a/conf/system.json.example b/conf/system.json.example index 379545ad6..ecf480104 100644 --- a/conf/system.json.example +++ b/conf/system.json.example @@ -1,7 +1,10 @@ { - "network": { - "device_intf": "enx123456789123", - "internet_intf": "enx123456789124" - }, - "log_level": "INFO" + "network": { + "device_intf": "enx123456789123", + "internet_intf": "enx123456789124" + }, + "log_level": "INFO", + "startup_timeout": 60, + "monitor_period": 300, + "runtime": 1200 } \ No newline at end of file diff --git a/etc/requirements.txt b/etc/requirements.txt deleted file mode 100644 index 56b8f0f66..000000000 --- a/etc/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -netifaces \ No newline at end of file diff --git a/framework/.gitignore b/framework/.gitignore deleted file mode 100644 index ba0430d26..000000000 --- a/framework/.gitignore +++ /dev/null @@ -1 +0,0 @@ -__pycache__/ \ No newline at end of file diff --git a/framework/device.py b/framework/device.py new file mode 100644 index 000000000..eef275d54 --- /dev/null +++ b/framework/device.py @@ -0,0 +1,13 @@ +"""Track device object information.""" + +from network_device import NetworkDevice +from dataclasses import dataclass + + +@dataclass +class Device(NetworkDevice): + """Represents a physical device and it's configuration.""" + + make: str = None + model: str = None + test_modules: str = None diff --git a/framework/logger.py b/framework/logger.py index 
25970bd21..d4702cb38 100644 --- a/framework/logger.py +++ b/framework/logger.py @@ -1,23 +1,49 @@ -"""Manages all things logging.""" +"""Manages stream and file loggers.""" import json import logging import os LOGGERS = {} -_LOG_FORMAT = "%(asctime)s %(name)-8s %(levelname)-7s %(message)s" +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' _DATE_FORMAT = '%b %02d %H:%M:%S' -_CONF_DIR="conf" -_CONF_FILE_NAME="system.json" +_DEFAULT_LOG_LEVEL = logging.INFO +_LOG_LEVEL = logging.INFO +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = 'runtime/testing/' -with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), encoding='utf-8') as config_file: - system_conf_json = json.load(config_file) - log_level_str = system_conf_json['log_level'] - log_level = logging.getLevelName(log_level_str) +# Set log level +with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='utf-8') as system_conf_file: + system_conf_json = json.load(system_conf_file) +log_level_str = system_conf_json['log_level'] -logging.basicConfig(format=_LOG_FORMAT, datefmt=_DATE_FORMAT, level=log_level) +temp_log = logging.getLogger('temp') +try: + temp_log.setLevel(logging.getLevelName(log_level_str)) + _LOG_LEVEL = logging.getLevelName(log_level_str) +except ValueError: + print('Invalid log level set in ' + _CONF_DIR + '/' + _CONF_FILE_NAME + + '. 
Using INFO as log level') + _LOG_LEVEL = _DEFAULT_LOG_LEVEL -def get_logger(name): - """Returns the logger belonging to the class calling the method.""" - if name not in LOGGERS: - LOGGERS[name] = logging.getLogger(name) - return LOGGERS[name] +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + +def get_logger(name, log_file=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(_LOG_LEVEL) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/framework/requirements.txt b/framework/requirements.txt new file mode 100644 index 000000000..ca56948f4 --- /dev/null +++ b/framework/requirements.txt @@ -0,0 +1 @@ +requests<2.29.0 \ No newline at end of file diff --git a/framework/run.py b/framework/run.py deleted file mode 100644 index ad7c038ee..000000000 --- a/framework/run.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Starts Test Run.""" - -from testrun import TestRun - -testrun = TestRun() diff --git a/framework/test_runner.py b/framework/test_runner.py new file mode 100644 index 000000000..0733d4353 --- /dev/null +++ b/framework/test_runner.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +"""Wrapper for the TestRun that simplifies +virtual testing procedure by allowing direct calling +from the command line. + +Run using the provided command scripts in the cmd folder. 
+E.g sudo cmd/start +""" + +import argparse +import sys +from testrun import TestRun +import logger +import signal + +LOGGER = logger.get_logger("runner") + + +class TestRunner: + """Controls and starts the Test Run application.""" + + def __init__(self, + config_file=None, + validate=True, + net_only=False, + single_intf=False): + self._register_exits() + self.test_run = TestRun(config_file=config_file, + validate=validate, + net_only=net_only, + single_intf=single_intf) + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received.") + # Kill all container services quickly + # If we're here, we want everything to stop immediately + # and don't care about a gracefully shutdown + self._stop(True) + sys.exit(1) + + def stop(self, kill=False): + self.test_run.stop(kill) + + def start(self): + self.test_run.start() + LOGGER.info("Test Run has finished") + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Test Run", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + "-f", + "--config-file", + default=None, + help="Define the configuration file for Test Run and Network Orchestrator" + ) + parser.add_argument( + "--no-validate", + action="store_true", + help="Turn off the validation of the network after network boot") + parser.add_argument("-net", + "--net-only", + action="store_true", + help="Run the network only, do not run tests") + parser.add_argument("--single-intf", + action="store_true", + help="Single interface mode (experimental)") + parsed_args = parser.parse_known_args()[0] + return parsed_args + + +if __name__ == "__main__": + args = 
parse_args() + runner = TestRunner(config_file=args.config_file, + validate=not args.no_validate, + net_only=args.net_only, + single_intf=args.single_intf) + runner.start() diff --git a/framework/testrun.py b/framework/testrun.py index 225bed853..94ad2ef9f 100644 --- a/framework/testrun.py +++ b/framework/testrun.py @@ -1,98 +1,194 @@ -"""The overall control of the Test Run application. - -This file provides the integration between all of the -Test Run components, such as net_orc, test_orc and test_ui. - -Run using the provided command scripts in the cmd folder. -E.g sudo cmd/start -""" - -import os -import sys -import json -import signal -import time -import logger - -# Locate parent directory -current_dir = os.path.dirname(os.path.realpath(__file__)) -parent_dir = os.path.dirname(current_dir) - -# Add net_orc to Python path -net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') -sys.path.append(net_orc_dir) - -import network_orchestrator as net_orc # pylint: disable=wrong-import-position - -LOGGER = logger.get_logger('test_run') -CONFIG_FILE = "conf/system.json" -EXAMPLE_CONFIG_FILE = "conf/system.json.example" -RUNTIME = 300 - -class TestRun: # pylint: disable=too-few-public-methods - """Test Run controller. - - Creates an instance of the network orchestrator, test - orchestrator and user interface. 
- """ - - def __init__(self): - LOGGER.info("Starting Test Run") - - # Catch any exit signals - self._register_exits() - - self._start_network() - - # Keep application running - time.sleep(RUNTIME) - - self._stop_network() - - def _register_exits(self): - signal.signal(signal.SIGINT, self._exit_handler) - signal.signal(signal.SIGTERM, self._exit_handler) - signal.signal(signal.SIGABRT, self._exit_handler) - signal.signal(signal.SIGQUIT, self._exit_handler) - - def _exit_handler(self, signum, arg): # pylint: disable=unused-argument - LOGGER.debug("Exit signal received: " + str(signum)) - if signum in (2, signal.SIGTERM): - LOGGER.info("Exit signal received.") - self._stop_network() - - def _load_config(self): - """Loads all settings from the config file into memory.""" - if not os.path.isfile(CONFIG_FILE): - LOGGER.error("Configuration file is not present at " + CONFIG_FILE) - LOGGER.info("An example is present in " + EXAMPLE_CONFIG_FILE) - sys.exit(1) - - with open(CONFIG_FILE, 'r', encoding='UTF-8') as config_file_open: - config_json = json.load(config_file_open) - self._net_orc.import_config(config_json) - - def _start_network(self): - # Create an instance of the network orchestrator - self._net_orc = net_orc.NetworkOrchestrator() - - # Load config file and pass to other components - self._load_config() - - # Load and build any unbuilt network containers - self._net_orc.load_network_modules() - self._net_orc.build_network_modules() - - # Create baseline network - self._net_orc.create_net() - - # Launch network service containers - self._net_orc.start_network_services() - - LOGGER.info("Network is ready.") - - def _stop_network(self): - LOGGER.info("Stopping Test Run") - self._net_orc.stop_networking_services(kill=True) - self._net_orc.restore_net() - sys.exit(0) +"""The overall control of the Test Run application. + +This file provides the integration between all of the +Test Run components, such as net_orc, test_orc and test_ui. 
+ +Run using the provided command scripts in the cmd folder. +E.g sudo cmd/start +""" +import os +import sys +import json +import signal +import time +import logger + +# Locate parent directory +current_dir = os.path.dirname(os.path.realpath(__file__)) +parent_dir = os.path.dirname(current_dir) + +# Add net_orc to Python path +net_orc_dir = os.path.join(parent_dir, 'net_orc', 'python', 'src') +sys.path.append(net_orc_dir) + +# Add test_orc to Python path +test_orc_dir = os.path.join(parent_dir, 'test_orc', 'python', 'src') +sys.path.append(test_orc_dir) + +from listener import NetworkEvent # pylint: disable=wrong-import-position,import-outside-toplevel +import test_orchestrator as test_orc # pylint: disable=wrong-import-position,import-outside-toplevel +import network_orchestrator as net_orc # pylint: disable=wrong-import-position,import-outside-toplevel + +from device import Device # pylint: disable=wrong-import-position,import-outside-toplevel + +LOGGER = logger.get_logger('test_run') +CONFIG_FILE = 'conf/system.json' +EXAMPLE_CONFIG_FILE = 'conf/system.json.example' +RUNTIME = 1500 + +LOCAL_DEVICES_DIR = 'local/devices' +RESOURCE_DEVICES_DIR = 'resources/devices' +DEVICE_CONFIG = 'device_config.json' +DEVICE_MAKE = 'make' +DEVICE_MODEL = 'model' +DEVICE_MAC_ADDR = 'mac_addr' +DEVICE_TEST_MODULES = 'test_modules' + + +class TestRun: # pylint: disable=too-few-public-methods + """Test Run controller. + + Creates an instance of the network orchestrator, test + orchestrator and user interface. 
+ """ + + def __init__(self, + config_file=CONFIG_FILE, + validate=True, + net_only=False, + single_intf=False): + self._devices = [] + self._net_only = net_only + self._single_intf = single_intf + + # Catch any exit signals + self._register_exits() + + # Expand the config file to absolute pathing + config_file_abs = self._get_config_abs(config_file=config_file) + + self._net_orc = net_orc.NetworkOrchestrator( + config_file=config_file_abs, + validate=validate, + async_monitor=not self._net_only, + single_intf = self._single_intf) + + self._test_orc = test_orc.TestOrchestrator(self._net_orc) + + def start(self): + + self._load_all_devices() + + if self._net_only: + LOGGER.info('Network only option configured, no tests will be run') + self._start_network() + else: + self._start_network() + self._test_orc.start() + + self._net_orc.listener.register_callback( + self._device_stable, + [NetworkEvent.DEVICE_STABLE] + ) + self._net_orc.listener.register_callback( + self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED] + ) + + LOGGER.info('Waiting for devices on the network...') + + # Check timeout and whether testing is currently + # in progress before stopping + time.sleep(RUNTIME) + + self.stop() + + def stop(self, kill=False): + self._stop_tests() + self._stop_network(kill=kill) + + def _register_exits(self): + signal.signal(signal.SIGINT, self._exit_handler) + signal.signal(signal.SIGTERM, self._exit_handler) + signal.signal(signal.SIGABRT, self._exit_handler) + signal.signal(signal.SIGQUIT, self._exit_handler) + + def _exit_handler(self, signum, arg): # pylint: disable=unused-argument + LOGGER.debug('Exit signal received: ' + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info('Exit signal received.') + self.stop(kill=True) + sys.exit(1) + + def _get_config_abs(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + config_file = os.path.join(parent_dir, CONFIG_FILE) + + # Expand the config file to 
absolute pathing + return os.path.abspath(config_file) + + def _start_network(self): + # Start the network orchestrator + self._net_orc.start() + + def _run_tests(self, device): + """Iterate through and start all test modules.""" + + # To Do: Make this configurable + time.sleep(60) # Let device bootup + + self._test_orc.run_test_modules(device) + + def _stop_network(self, kill=False): + self._net_orc.stop(kill=kill) + + def _stop_tests(self): + self._test_orc.stop() + + def _load_all_devices(self): + self._load_devices(device_dir=LOCAL_DEVICES_DIR) + self._load_devices(device_dir=RESOURCE_DEVICES_DIR) + + def _load_devices(self, device_dir): + LOGGER.debug('Loading devices from ' + device_dir) + + os.makedirs(device_dir, exist_ok=True) + + for device_folder in os.listdir(device_dir): + with open(os.path.join(device_dir, device_folder, DEVICE_CONFIG), + encoding='utf-8') as device_config_file: + device_config_json = json.load(device_config_file) + + device_make = device_config_json.get(DEVICE_MAKE) + device_model = device_config_json.get(DEVICE_MODEL) + mac_addr = device_config_json.get(DEVICE_MAC_ADDR) + test_modules = device_config_json.get(DEVICE_TEST_MODULES) + + device = Device(make=device_make, + model=device_model, + mac_addr=mac_addr, + test_modules=json.dumps(test_modules)) + self._devices.append(device) + + def get_device(self, mac_addr): + """Returns a loaded device object from the device mac address.""" + for device in self._devices: + if device.mac_addr == mac_addr: + return device + + def _device_discovered(self, mac_addr): + device = self.get_device(mac_addr) + if device is not None: + LOGGER.info( + f'Discovered {device.make} {device.model} on the network') + else: + device = Device(mac_addr=mac_addr) + self._devices.append(device) + LOGGER.info( + f'A new device has been discovered with mac address {mac_addr}') + + def _device_stable(self, mac_addr): + device = self.get_device(mac_addr) + LOGGER.info(f'Device with mac address {mac_addr} is ready for 
testing.') + self._test_orc.run_test_modules(device) diff --git a/net_orc/.gitignore b/net_orc/.gitignore new file mode 100644 index 000000000..2d77147eb --- /dev/null +++ b/net_orc/.gitignore @@ -0,0 +1,133 @@ +# Runtime folder +runtime/ +.vscode/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/net_orc/docker-compose.yml b/net_orc/docker-compose.yml new file mode 100644 index 000000000..8c50d766a --- /dev/null +++ b/net_orc/docker-compose.yml @@ -0,0 +1,64 @@ +version: "3.7" + +services: + + base: + build: + context: network/modules/base + dockerfile: base.Dockerfile + image: test-run/base + container_name: tr-ct-base + + ovs: + depends_on: + - base + build: + context: network/modules/ovs + dockerfile: ovs.Dockerfile + image: test-run/ovs + network_mode: host + container_name: tr-ct-ovs + stdin_open: true + privileged: true + volumes: + - $PWD/network/modules/ovs/python:/ovs/python + # Mount host open vswitch socket to allow container + # access to control open vswitch on the host + - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock + # Mount host network namespace to allow container + # access to assign proper namespaces to containers + - /var/run/netns:/var/run/netns + + netorch: + depends_on: + - base + build: + context: . 
+ dockerfile: orchestrator.Dockerfile + image: test-run/orchestrator + network_mode: host + privileged: true + volumes: + - $PWD/cmd:/orchestrator/cmd + - $PWD/network:/orchestrator/network + - $PWD/python:/orchestrator/python + # Mount host docker socket to allow container access + # control docker containers on the host + - /var/run/docker.sock:/var/run/docker.sock + # Mount host open vswitch socket to allow container + # access to control open vswitch on the host + - /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock + # Mount host network namespace to allow container + # access to assign proper namespaces to containers + - /var/run/netns:/var/run/netns + # Mount the host process information to allow container + # access to configure docker containers and namespaces properly + - /proc:/proc + container_name: network_orchestrator + stdin_open: true + working_dir: /orchestrator + #entrypoint: ["cmd/start"] + # Give more time for stopping so when we stop the container it has + # time to stop all network services gracefuly + stop_grace_period: 60s + entrypoint: ["python3","-u","python/src/run.py"] diff --git a/net_orc/network/devices/faux-dev/bin/get_default_gateway b/net_orc/network/devices/faux-dev/bin/get_default_gateway new file mode 100644 index 000000000..f6f1e2a0d --- /dev/null +++ b/net_orc/network/devices/faux-dev/bin/get_default_gateway @@ -0,0 +1,3 @@ +#!/bin/bash -e + +route | grep default | awk '{print $2}' \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/bin/start_dhcp_client b/net_orc/network/devices/faux-dev/bin/start_dhcp_client new file mode 100644 index 000000000..de9270c82 --- /dev/null +++ b/net_orc/network/devices/faux-dev/bin/start_dhcp_client @@ -0,0 +1,16 @@ +#!/bin/bash -e + +# Fetch the interface +INTF=$1 + +PID_FILE=/var/run/dhclient.pid + +echo "Starting DHCP Client on interface $INTF" + +#Kill any existing running dhclient process +if [ -f $PID_FILE ]; then + kill -9 $(cat $PID_FILE) || true + rm -f 
$PID_FILE +fi + +dhclient $INTF \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/bin/start_network_service b/net_orc/network/devices/faux-dev/bin/start_network_service new file mode 100644 index 000000000..b727d2091 --- /dev/null +++ b/net_orc/network/devices/faux-dev/bin/start_network_service @@ -0,0 +1,39 @@ +#!/bin/bash -e + +# Directory where all binaries will be loaded +BIN_DIR="/testrun/bin" + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +#Create and set permissions on the output files +LOG_FILE=/runtime/validation/$MODULE_NAME.log +RESULT_FILE=/runtime/validation/result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Start dhclient +$BIN_DIR/start_dhcp_client $INTF + +# -u flag allows python print statements +# to be logged by docker by running unbuffered +exec python3 -u /testrun/python/src/run.py "-m $MODULE_NAME" + +echo Network validator complete \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/conf/module_config.json b/net_orc/network/devices/faux-dev/conf/module_config.json new file mode 100644 index 000000000..afde8c629 --- /dev/null +++ b/net_orc/network/devices/faux-dev/conf/module_config.json @@ -0,0 +1,11 @@ +{ + "config": { + "meta": { + "name": "faux-dev", + "description": "Faux device to test network modules are functioning properly" + }, + "docker": { + "timeout": 60 + } + } +} \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/faux-dev.Dockerfile b/net_orc/network/devices/faux-dev/faux-dev.Dockerfile new file mode 100644 index 
000000000..1686341b5 --- /dev/null +++ b/net_orc/network/devices/faux-dev/faux-dev.Dockerfile @@ -0,0 +1,20 @@ +# Image name: test-run/faux-dev +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update --fix-missing + +# NTP requireds interactive installation so we're going to turn that off +ARG DEBIAN_FRONTEND=noninteractive + +# Install dhcp client and ntp client +RUN apt-get install -y isc-dhcp-client ntp ntpdate + +# Copy over all configuration files +COPY network/devices/faux-dev/conf /testrun/conf + +# Load device binary files +COPY network/devices/faux-dev/bin /testrun/bin + +# Copy over all python files +COPY network/devices/faux-dev/python /testrun/python \ No newline at end of file diff --git a/net_orc/network/devices/faux-dev/python/src/dhcp_check.py b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py new file mode 100644 index 000000000..82dd6e31f --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/dhcp_check.py @@ -0,0 +1,87 @@ +"""Used to check if the DHCP server is functioning as expected""" + +import time +import logger + +LOGGER = None +LOG_NAME = 'dhcp_validator' +DHCP_LEASE_FILE = '/var/lib/dhcp/dhclient.leases' +IP_ADDRESS_KEY = 'fixed-address' +DNS_OPTION_KEY = 'option domain-name-servers' +GATEWAY_OPTION_KEY = 'option routers' +NTP_OPTION_KEY = 'option ntp-servers' + + +class DHCPValidator: + """Validates all expected test behaviors around the DHCP server""" + + def __init__(self, module): + self._dhcp_lease = None + self.dhcp_lease_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result('DHCP lease test', self.dhcp_lease_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ': Pass' if result else test_name + ': Fail') + + def get_dhcp_lease(self): + """Returns the current DHCP lease.""" + 
return self._dhcp_lease + + def validate(self): + self._resolve_dhcp_lease() + LOGGER.info('IP Addr: ' + self._dhcp_lease.ip_addr) + LOGGER.info('Gateway: ' + self._dhcp_lease.gateway) + LOGGER.info('DNS Server: ' + self._dhcp_lease.dns_server) + LOGGER.info('NTP Server: ' + self._dhcp_lease.ntp_server) + + def _resolve_dhcp_lease(self): + LOGGER.info('Resolving DHCP lease...') + while self._dhcp_lease is None: + time.sleep(5) + try: + with open(DHCP_LEASE_FILE, 'r', encoding='UTF-8') as lease_file: + lines = lease_file.read() + LOGGER.debug('Lease file:\n' + lines) + leases = lines.split('lease ') + # Last lease is the current lease + cur_lease = leases[-1] + if cur_lease is not None: + LOGGER.debug('Current lease: ' + cur_lease) + self._dhcp_lease = DHCPLease() + self.dhcp_lease_test = True + # Iterate over entire lease and pick the parts we care about + lease_parts = cur_lease.split('\n') + for part in lease_parts: + part_clean = part.strip() + if part_clean.startswith(IP_ADDRESS_KEY): + self._dhcp_lease.ip_addr = part_clean[len(IP_ADDRESS_KEY + ):-1].strip() + elif part_clean.startswith(DNS_OPTION_KEY): + self._dhcp_lease.dns_server = part_clean[len(DNS_OPTION_KEY + ):-1].strip() + elif part_clean.startswith(GATEWAY_OPTION_KEY): + self._dhcp_lease.gateway = part_clean[len(GATEWAY_OPTION_KEY + ):-1].strip() + elif part_clean.startswith(NTP_OPTION_KEY): + self._dhcp_lease.ntp_server = part_clean[len(NTP_OPTION_KEY + ):-1].strip() + except Exception: # pylint: disable=broad-exception-caught + LOGGER.error('DHCP Resolved Error') + LOGGER.info('DHCP lease resolved') + + +class DHCPLease: + """Stores information about a device's DHCP lease.""" + + def __init__(self): + self.ip_addr = None + self.gateway = None + self.dns_server = None + self.ntp_server = None diff --git a/net_orc/network/devices/faux-dev/python/src/dns_check.py b/net_orc/network/devices/faux-dev/python/src/dns_check.py new file mode 100644 index 000000000..73a72e8c8 --- /dev/null +++ 
b/net_orc/network/devices/faux-dev/python/src/dns_check.py @@ -0,0 +1,103 @@ +"""Used to check if the DNS server is functioning as expected""" + +import logger +import time +import util +import subprocess + +LOGGER = None +LOG_NAME = 'dns_validator' +HOST_PING = 'google.com' +CAPTURE_FILE = '/runtime/network/faux-dev.pcap' +DNS_CONFIG_FILE = '/etc/resolv.conf' + + +class DNSValidator: + """Validates all expected test behaviors around the DNS server""" + + def __init__(self, module): + self._dns_server = None + self.dns_resolution_test = False + self.dns_dhcp_server_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result('DNS resolution test', self.dns_resolution_test) + self.print_test_result('DNS DHCP server test', self.dns_dhcp_server_test) + + def print_test_result(self, test_name, result): + LOGGER.info(test_name + ': Pass' if result else test_name + ': Fail') + + def validate(self, dhcp_lease): + self._dns_server = dhcp_lease.dns_server + self._set_dns_server() + self._check_dns_traffic() + + def _check_dns_traffic(self): + LOGGER.info('Checking DNS traffic for DNS server: ' + self._dns_server) + + # Ping a host to generate DNS traffic + if self._ping(HOST_PING)[0]: + LOGGER.info('Ping success') + self.dns_resolution_test = True + else: + LOGGER.info('Ping failed') + + # Some delay between pings and DNS traffic in the capture file + # so give some delay before we try to query again + time.sleep(5) + + # Check if the device has sent any DNS requests + filter_to_dns = f'dst port 53 and dst host {self._dns_server}' + to_dns = self._exec_tcpdump(filter_to_dns) + num_query_dns = len(to_dns) + LOGGER.info('DNS queries found: ' + str(num_query_dns)) + dns_traffic_detected = len(to_dns) > 0 + if dns_traffic_detected: + LOGGER.info('DNS traffic detected to configured DHCP DNS server') + self.dns_dhcp_server_test = True + else: + 
LOGGER.error('No DNS traffic detected') + + # Docker containeres resolve DNS servers from the host + # and do not play nice with normal networking methods + # so we need to set our DNS servers manually + def _set_dns_server(self): + with open(DNS_CONFIG_FILE, 'w', encoding='utf-8') as f: + f.write('nameserver ' + self._dns_server) + + # Generate DNS traffic by doing a simple ping by hostname + def _ping(self, host): + cmd = 'ping -c 5 ' + host + success = util.run_command(cmd, LOGGER) + return success + + def _exec_tcpdump(self, tcpdump_filter): + """ + Args + tcpdump_filter: Filter to pass onto tcpdump file + capture_file: Optional capture file to look + Returns + List of packets matching the filter + """ + command = f'tcpdump -tttt -n -r {CAPTURE_FILE} {tcpdump_filter}' + + LOGGER.debug('tcpdump command: ' + command) + + process = subprocess.Popen(command, + universal_newlines=True, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + text = str(process.stdout.read()).rstrip() + + LOGGER.debug('tcpdump response: ' + text) + + if text: + return text.split('\n') + + return [] diff --git a/net_orc/network/devices/faux-dev/python/src/gateway_check.py b/net_orc/network/devices/faux-dev/python/src/gateway_check.py new file mode 100644 index 000000000..85fe35db0 --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/gateway_check.py @@ -0,0 +1,40 @@ +"""Used to check if the Gateway server is functioning as expected""" + +import logger +import util + +LOGGER = None +LOG_NAME = 'gateway_validator' + + +class GatewayValidator: + """Validates all expected test behaviors around the Gateway server""" + + def __init__(self, module): + self._gateway = None + self.default_gateway_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + self.print_test_result('Default gateway test', self.default_gateway_test) + + def print_test_result(self, 
test_name, result): + LOGGER.info(test_name + ': Pass' if result else test_name + ': Fail') + + def validate(self, dhcp_lease): + self._gateway = dhcp_lease.gateway + self.check_default_gateway() + + def check_default_gateway(self): + LOGGER.info('Checking default gateway matches DHCP gateway: ' + + self._gateway) + cmd = '/testrun/bin/get_default_gateway' + success, default_gateway = util.run_command(cmd, LOGGER) + if success: + LOGGER.info('Default gateway resolved: ' + default_gateway) + if default_gateway == self._gateway: + self.default_gateway_test = True diff --git a/net_orc/network/devices/faux-dev/python/src/logger.py b/net_orc/network/devices/faux-dev/python/src/logger.py new file mode 100644 index 000000000..97d7f935a --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/logger.py @@ -0,0 +1,47 @@ +"""Sets up the logger to be used for the faux-device.""" + +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' +_DATE_FORMAT = '%b %02d %H:%M:%S' +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = '/runtime/validation' + +# Set log level +with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='utf-8') as conf_file: + system_conf_json = json.load(conf_file) + +log_level_str = system_conf_json['log_level'] +log_level = logging.getLevelName(log_level_str) + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + + +def add_file_handler(log, log_file): + """Add file handler to existing log.""" + handler = logging.FileHandler(os.path.join(_LOG_DIR, log_file + '.log')) + handler.setFormatter(log_format) + log.addHandler(handler) + + +def add_stream_handler(log): + """Add stream handler to existing log.""" + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, log_file=None): + """Return logger for requesting class.""" + if name not in LOGGERS: + LOGGERS[name] = 
logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/net_orc/network/devices/faux-dev/python/src/ntp_check.py b/net_orc/network/devices/faux-dev/python/src/ntp_check.py new file mode 100644 index 000000000..ceef164c6 --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/ntp_check.py @@ -0,0 +1,79 @@ +"""Used to check if the NTP server is functioning as expected""" +import time +import logger +import util + +LOGGER = None +LOG_NAME = "ntp_validator" +ATTEMPTS = 3 + + +class NTPValidator: + """Perform testing of the NTP server.""" + + def __init__(self, module): + self._ntp_server = None + self.ntp_sync_test = False + self.add_logger(module) + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def print_test_results(self): + """Print all test results to log.""" + self.print_test_result("NTP sync test", self.ntp_sync_test) + + def print_test_result(self, test_name, result): + """Output test result to log.""" + LOGGER.info(test_name + ": Pass" if result else test_name + ": Fail") + + def validate(self, dhcp_lease): + """Call NTP sync test.""" + self._ntp_server = dhcp_lease.ntp_server + self.check_ntp() + + def check_ntp(self): + """Perform NTP sync test.""" + if self._ntp_server is not None: + attempt = 0 + LOGGER.info(f"Attempting to sync to NTP server: {self._ntp_server}") + LOGGER.info("Attempts allowed: " + str(ATTEMPTS)) + + # If we don't ping before syncing, this will fail. 
+ while attempt < ATTEMPTS and not self.ntp_sync_test: + attempt += 1 + if self.ping_ntp_server(): + self.sync_ntp() + if not self.ntp_sync_test: + LOGGER.info("Waiting 5 seconds before next attempt") + time.sleep(5) + else: + LOGGER.info("No NTP server available from DHCP lease") + + def sync_ntp(self): + """Send NTP request to server.""" + LOGGER.info("Sending NTP Sync Request to: " + self._ntp_server) + cmd = "ntpdate " + self._ntp_server + ntp_response = util.run_command(cmd, LOGGER)[1] + LOGGER.info("NTP sync response: " + ntp_response) + if "adjust time server " + self._ntp_server in ntp_response: + LOGGER.info("NTP sync succesful") + self.ntp_sync_test = True + else: + LOGGER.info("NTP client failed to sync to server") + + def ping_ntp_server(self): + """Ping NTP server before sending a time request.""" + LOGGER.info("Pinging NTP server before syncing...") + if self.ping(self._ntp_server): + LOGGER.info("NTP server successfully pinged") + return True + LOGGER.info("NTP server did not respond to ping") + return False + + def ping(self, host): + """Send ping request to host.""" + cmd = "ping -c 1 " + host + success = util.run_command(cmd, LOGGER) + return success diff --git a/net_orc/network/devices/faux-dev/python/src/run.py b/net_orc/network/devices/faux-dev/python/src/run.py new file mode 100644 index 000000000..062a1a643 --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/run.py @@ -0,0 +1,125 @@ +"""Used to run all the various validator modules for the faux-device""" + +import argparse +import json +import os +import signal +import sys + +import logger +from dns_check import DNSValidator +from dhcp_check import DHCPValidator +from gateway_check import GatewayValidator +from ntp_check import NTPValidator + +RESULTS_DIR = '/runtime/validation/' +LOGGER = logger.get_logger('validator') + + +class FauxDevice: + """Represents a virtual testing device.""" + + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + 
signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + self.dhcp_validator = DHCPValidator(module) + self.dns_validator = DNSValidator(module) + self.gateway_validator = GatewayValidator(module) + self.ntp_validator = NTPValidator(module) + + self._module = module + self.run_tests() + results = self.generate_results() + self.write_results(results) + + def run_tests(self): + """Execute configured network tests.""" + + # Run DHCP tests first since everything hinges + # on basic DHCP compliance first + self.dhcp_validator.validate() + + dhcp_lease = self.dhcp_validator.get_dhcp_lease() + + # Use current lease from dhcp tests to validate DNS behaviors + self.dns_validator.validate(dhcp_lease) + + # Use current lease from dhcp tests to validate default gateway + self.gateway_validator.validate(dhcp_lease) + + # Use current lease from dhcp tests to validate ntp server + self.ntp_validator.validate(dhcp_lease) + + def print_test_results(self): + """Print test results to log.""" + self.dhcp_validator.print_test_results() + self.dns_validator.print_test_results() + self.gateway_validator.print_test_results() + self.ntp_validator.print_test_results() + + def generate_results(self): + """Transform test results into JSON format.""" + + results = [] + results.append( + self.generate_result('dhcp_lease', self.dhcp_validator.dhcp_lease_test)) + results.append( + self.generate_result('dns_from_dhcp', + self.dns_validator.dns_dhcp_server_test)) + results.append( + self.generate_result('dns_resolution', + self.dns_validator.dns_resolution_test)) + results.append( + self.generate_result('gateway_default', + self.gateway_validator.default_gateway_test)) + results.append( + self.generate_result('ntp_sync', self.ntp_validator.ntp_sync_test)) + json_results = json.dumps({'results': results}, indent=2) + + return json_results + + def write_results(self, results): + """Write test results to file.""" + 
results_file = os.path.join(RESULTS_DIR, 'result.json') + LOGGER.info('Writing results to ' + results_file) + with open(results_file, 'w', encoding='utf-8') as f: + f.write(results) + + def generate_result(self, test_name, test_result): + """Return JSON object for test result.""" + if test_result is not None: + result = 'compliant' if test_result else 'non-compliant' + else: + result = 'skipped' + LOGGER.info(test_name + ': ' + result) + res_dict = {'name': test_name, 'result': result} + return res_dict + + def _handler(self, signum, frame): # pylint: disable=unused-argument + if signum in (2, signal.SIGTERM): + sys.exit(1) + + +def run(argv): # pylint: disable=unused-argument + """Run the network validator.""" + parser = argparse.ArgumentParser( + description='Faux Device _validator', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '-m', + '--module', + help='Define the module name to be used to create the log file') + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + FauxDevice(args.module.strip()) + + +if __name__ == '__main__': + run(sys.argv) diff --git a/net_orc/network/devices/faux-dev/python/src/util.py b/net_orc/network/devices/faux-dev/python/src/util.py new file mode 100644 index 000000000..6848206b4 --- /dev/null +++ b/net_orc/network/devices/faux-dev/python/src/util.py @@ -0,0 +1,30 @@ +"""Provides basic utilities for the faux-device.""" +import subprocess +import shlex + +# Runs a process at the os level +# By default, returns the standard output and error output +# If the caller sets optional output parameter to False, +# will only return a boolean result indicating if it was +# succesful in running the command. Failure is indicated +# by any return code from the process other than zero. 
+ + +def run_command(cmd, logger, output=True): + success = False + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + + if process.returncode != 0: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + logger.error('Command Failed: ' + cmd) + logger.error('Error: ' + err_msg) + else: + success = True + + if output: + return success, stdout.strip().decode('utf-8') + else: + return success, None diff --git a/net_orc/network/modules/base/base.Dockerfile b/net_orc/network/modules/base/base.Dockerfile new file mode 100644 index 000000000..2400fd1c6 --- /dev/null +++ b/net_orc/network/modules/base/base.Dockerfile @@ -0,0 +1,23 @@ +# Image name: test-run/base +FROM ubuntu:jammy + +# Install common software +RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix + +#Setup the base python requirements +COPY network/modules/base/python /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + +# Add the bin files +COPY network/modules/base/bin /testrun/bin + +# Remove incorrect line endings +RUN dos2unix /testrun/bin/* + +# Make sure all the bin files are executable +RUN chmod u+x /testrun/bin/* + +#Start the network module +ENTRYPOINT [ "/testrun/bin/start_module" ] \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/capture b/net_orc/network/modules/base/bin/capture new file mode 100644 index 000000000..8a8430feb --- /dev/null +++ b/net_orc/network/modules/base/bin/capture @@ -0,0 +1,30 @@ +#!/bin/bash -e + +# Fetch module name +MODULE_NAME=$1 + +# Define the local file location for the capture to be saved +PCAP_DIR="/runtime/network/" +PCAP_FILE=$MODULE_NAME.pcap + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + 
+# Select which interace to use +if [[ -z $DEFINED_IFACE ]] +then + INTERFACE=$DEFAULT_IFACE +else + INTERFACE=$DEFINED_IFACE +fi + +# Create the output directory and start the capture +mkdir -p $PCAP_DIR +chown $HOST_USER:$HOST_USER $PCAP_DIR +tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & + +#Small pause to let the capture to start +sleep 1 \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/setup_binaries b/net_orc/network/modules/base/bin/setup_binaries new file mode 100644 index 000000000..3535ead3c --- /dev/null +++ b/net_orc/network/modules/base/bin/setup_binaries @@ -0,0 +1,10 @@ +#!/bin/bash -e + +# Directory where all binaries will be loaded +BIN_DIR=$1 + +# Remove incorrect line endings +dos2unix $BIN_DIR/* + +# Make sure all the bin files are executable +chmod u+x $BIN_DIR/* \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/start_grpc b/net_orc/network/modules/base/bin/start_grpc new file mode 100644 index 000000000..9792b4bd4 --- /dev/null +++ b/net_orc/network/modules/base/bin/start_grpc @@ -0,0 +1,17 @@ +#!/bin/bash -e + +GRPC_DIR="/testrun/python/src/grpc" +GRPC_PROTO_DIR="proto" +GRPC_PROTO_FILE="grpc.proto" + +#Move into the grpc directory +pushd $GRPC_DIR >/dev/null 2>&1 + +#Build the grpc proto file every time before starting server +python3 -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. 
+ +popd >/dev/null 2>&1 + +#Start the grpc server +python3 -u $GRPC_DIR/start_server.py $@ + diff --git a/net_orc/network/modules/base/bin/start_module b/net_orc/network/modules/base/bin/start_module new file mode 100644 index 000000000..7fdcbc404 --- /dev/null +++ b/net_orc/network/modules/base/bin/start_module @@ -0,0 +1,79 @@ +#!/bin/bash + +# Directory where all binaries will be loaded +BIN_DIR="/testrun/bin" + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Create a local user that matches the same as the host +# to be used for correct file ownership for various logs +# HOST_USER mapped in via docker container environemnt variables +useradd $HOST_USER + +# Enable IPv6 for all containers +sysctl net.ipv6.conf.all.disable_ipv6=0 +sysctl -p + +#Read in the config file +CONF_FILE="/testrun/conf/module_config.json" +CONF=`cat $CONF_FILE` + +if [[ -z $CONF ]] +then + echo "No config file present at $CONF_FILE. Exiting startup." + exit 1 +fi + +# Extract the necessary config parameters +MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name') +DEFINED_IFACE=$(echo "$CONF" | jq -r '.config.network.interface') +GRPC=$(echo "$CONF" | jq -r '.config.grpc') + +# Validate the module name is present +if [[ -z "$MODULE_NAME" || "$MODULE_NAME" == "null" ]] +then + echo "No module name present in $CONF_FILE. Exiting startup." + exit 1 +fi + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No Interface Defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +echo "Starting module $MODULE_NAME on local interface $INTF..." + +$BIN_DIR/setup_binaries $BIN_DIR + +# Wait for interface to become ready +$BIN_DIR/wait_for_interface $INTF + +# Small pause to let the interface stabalize before starting the capture +#sleep 1 + +# Start network capture +$BIN_DIR/capture $MODULE_NAME $INTF + +# Start the grpc server +if [[ ! -z $GRPC && ! 
$GRPC == "null" ]] +then + GRPC_PORT=$(echo "$GRPC" | jq -r '.port') + if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]] + then + echo "gRPC port resolved from config: $GRPC_PORT" + $BIN_DIR/start_grpc "-p $GRPC_PORT" & + else + $BIN_DIR/start_grpc & + fi +fi + +#Small pause to let all core services stabalize +sleep 3 + +#Start the networking service +$BIN_DIR/start_network_service $MODULE_NAME $INTF \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/start_network_service b/net_orc/network/modules/base/bin/start_network_service new file mode 100644 index 000000000..7d13750b8 --- /dev/null +++ b/net_orc/network/modules/base/bin/start_network_service @@ -0,0 +1,10 @@ +#!/bin/bash + +# Place holder function for testing and validation +# Each network module should include a start_networkig_service +# file that overwrites this one to boot all of the its specific +# requirements to run. + +echo "Starting network service..." +echo "This is not a real network service, just a test" +echo "Network service started" \ No newline at end of file diff --git a/net_orc/network/modules/base/bin/wait_for_interface b/net_orc/network/modules/base/bin/wait_for_interface new file mode 100644 index 000000000..1377705d8 --- /dev/null +++ b/net_orc/network/modules/base/bin/wait_for_interface @@ -0,0 +1,21 @@ +#!/bin/bash + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$1 + +# Select which interace to use +if [[ -z $DEFINED_IFACE ]] +then + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Wait for local interface to be ready +while ! ip link show $INTF; do + echo $INTF is not yet ready. 
Waiting 3 seconds + sleep 3 +done \ No newline at end of file diff --git a/net_orc/network/modules/base/conf/module_config.json b/net_orc/network/modules/base/conf/module_config.json new file mode 100644 index 000000000..1f3a47ba2 --- /dev/null +++ b/net_orc/network/modules/base/conf/module_config.json @@ -0,0 +1,12 @@ +{ + "config": { + "meta": { + "name": "base", + "display_name": "Base", + "description": "Base image" + }, + "docker": { + "enable_container": false + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/base/python/requirements.txt b/net_orc/network/modules/base/python/requirements.txt new file mode 100644 index 000000000..9c4e2b056 --- /dev/null +++ b/net_orc/network/modules/base/python/requirements.txt @@ -0,0 +1,2 @@ +grpcio +grpcio-tools \ No newline at end of file diff --git a/net_orc/network/modules/base/python/src/grpc/start_server.py b/net_orc/network/modules/base/python/src/grpc/start_server.py new file mode 100644 index 000000000..b4016c831 --- /dev/null +++ b/net_orc/network/modules/base/python/src/grpc/start_server.py @@ -0,0 +1,37 @@ +"""Base class for starting the gRPC server for a network module.""" +from concurrent import futures +import grpc +import proto.grpc_pb2_grpc as pb2_grpc +from network_service import NetworkService +import argparse + +DEFAULT_PORT = '5001' + + +def serve(port): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) + server.add_insecure_port('[::]:' + port) + server.start() + server.wait_for_termination() + + +def run(): + parser = argparse.ArgumentParser( + description='GRPC Server for Network Module', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-p', + '--port', + default=DEFAULT_PORT, + help='Define the default port to run the server on.') + + args = parser.parse_args() + + port = args.port + + print('gRPC server starting on port ' + port) + serve(port) + + +if 
__name__ == '__main__': + run() diff --git a/net_orc/network/modules/base/python/src/logger.py b/net_orc/network/modules/base/python/src/logger.py new file mode 100644 index 000000000..abec00f69 --- /dev/null +++ b/net_orc/network/modules/base/python/src/logger.py @@ -0,0 +1,48 @@ +"""Sets up the logger to be used for the network modules.""" +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = '/runtime/network/' + +# Set log level +try: + with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='UTF-8') as config_json_file: + system_conf_json = json.load(config_json_file) + + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except OSError: + # TODO: Print out warning that log level is incorrect or missing + LOG_LEVEL = _DEFAULT_LEVEL + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + + +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) + + +def add_stream_handler(log): + handler = logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, log_file=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/net_orc/network/modules/dhcp-1/bin/start_network_service b/net_orc/network/modules/dhcp-1/bin/start_network_service new file mode 100644 index 000000000..e8e0ad06c --- /dev/null +++ b/net_orc/network/modules/dhcp-1/bin/start_network_service @@ -0,0 +1,77 @@ +#!/bin/bash + +CONFIG_FILE=/etc/dhcp/dhcpd.conf +DHCP_PID_FILE=/var/run/dhcpd.pid 
+DHCP_LOG_FILE=/runtime/network/dhcp1-dhcpd.log +RA_PID_FILE=/var/run/radvd/radvd.pid +RA_LOG_FILE=/runtime/network/dhcp1-radvd.log + +echo "Starrting Network Service..." + +#Enable IPv6 Forwarding +sysctl net.ipv6.conf.all.forwarding=1 +sysctl -p + +# Create leases file if needed +touch /var/lib/dhcp/dhcpd.leases + +#Create directory for radvd +mkdir /var/run/radvd + +#Create and set permissions on the log files +touch $DHCP_LOG_FILE +touch $RA_LOG_FILE +chown $HOST_USER:$HOST_USER $DHCP_LOG_FILE +chown $HOST_USER:$HOST_USER $RA_LOG_FILE + + +#Move the config files to the correct location +cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf +cp /testrun/conf/radvd.conf /etc/radvd.conf + +# Restart dhcp server when config changes +while true; do + + new_checksum=$(md5sum $CONFIG_FILE) + + if [ "$checksum" == "$new_checksum" ]; then + sleep 2 + continue + fi + + echo Config changed. Restarting dhcp server at $(date).. + + if [ -f $DHCP_PID_FILE ]; then + kill -9 $(cat $DHCP_PID_FILE) || true + rm -f $DHCP_PID_FILE + fi + + if [ -f $RA_PID_FILE ]; then + kill -9 $(cat $RA_PID_FILE) || true + rm -f $RA_PID_FILE + fi + + checksum=$new_checksum + + echo Starting isc-dhcp-server at $(date) + + radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE + dhcpd -d &> $DHCP_LOG_FILE & + + while [ ! -f $DHCP_PID_FILE ]; do + echo Waiting for $DHCP_PID_FILE... + sleep 2 + done + + echo $DHCP_PID_FILE now available + + while [ ! -f $RA_PID_FILE ]; do + echo Waiting for $RA_PID_FILE... 
+ sleep 2 + done + + echo $RA_PID_FILE now available + + echo Server now stable + +done \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/conf/dhcpd.conf b/net_orc/network/modules/dhcp-1/conf/dhcpd.conf new file mode 100644 index 000000000..9f4fe1c28 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/conf/dhcpd.conf @@ -0,0 +1,26 @@ +default-lease-time 300; + +failover peer "failover-peer" { + primary; + address 10.10.10.2; + port 847; + peer address 10.10.10.3; + peer port 647; + max-response-delay 60; + max-unacked-updates 10; + mclt 3600; + split 128; + load balance max seconds 3; +} + +subnet 10.10.10.0 netmask 255.255.255.0 { + option ntp-servers 10.10.10.5; + option subnet-mask 255.255.255.0; + option broadcast-address 10.10.10.255; + option routers 10.10.10.1; + option domain-name-servers 10.10.10.4; + pool { + failover peer "failover-peer"; + range 10.10.10.10 10.10.10.20; + } +} diff --git a/net_orc/network/modules/dhcp-1/conf/module_config.json b/net_orc/network/modules/dhcp-1/conf/module_config.json new file mode 100644 index 000000000..4a41eee3f --- /dev/null +++ b/net_orc/network/modules/dhcp-1/conf/module_config.json @@ -0,0 +1,26 @@ +{ + "config": { + "meta": { + "name": "dhcp-1", + "display_name": "DHCP Primary", + "description": "Primary DHCP server with IPv6 SLAAC" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 2 + }, + "grpc":{ + "port": 5001 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/conf/radvd.conf b/net_orc/network/modules/dhcp-1/conf/radvd.conf new file mode 100644 index 000000000..f6d6f30d9 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/conf/radvd.conf @@ -0,0 +1,12 @@ +interface veth0 +{ + AdvSendAdvert on; + AdvManagedFlag off; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 60; + prefix fd10:77be:4186::/64 { + 
AdvOnLink on; + AdvAutonomous on; + AdvRouterAddr on; + }; +}; \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile b/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile new file mode 100644 index 000000000..99804e0e3 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/dhcp-1.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/dhcp-primary +FROM test-run/base:latest + +# Install dhcp server +RUN apt-get install -y isc-dhcp-server radvd + +# Copy over all configuration files +COPY network/modules/dhcp-1/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/dhcp-1/bin /testrun/bin + +# Copy over all python files +COPY network/modules/dhcp-1/python /testrun/python diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py b/net_orc/network/modules/dhcp-1/python/src/grpc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py new file mode 100644 index 000000000..23e1b4047 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/dhcp_config.py @@ -0,0 +1,289 @@ +"""Contains all the necessary classes to maintain the +DHCP server's configuration""" +import re + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +CONFIG_FILE_TEST = 'network/modules/dhcp-1/conf/dhcpd.conf' + +DEFAULT_LEASE_TIME_KEY = 'default-lease-time' + + +class DHCPConfig: + """Represents the DHCP Servers configuration and gives access to modify it""" + + def __init__(self): + self._default_lease_time = 300 + self.subnets = [] + self._peer = None + + def write_config(self): + conf = str(self) + print('Writing config: \n' + conf) + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(conf) + + def resolve_config(self): + with open(CONFIG_FILE, 'r', encoding='UTF-8') as f: + conf = f.read() + self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + + def 
resolve_subnets(self, conf): + self.subnets = [] + regex = r'(subnet.*)' + subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets: + dhcp_subnet = DHCPSubnet(subnet) + self.subnets.append(dhcp_subnet) + + def set_range(self, start, end, subnet=0, pool=0): + print('Setting Range for pool ') + print(self.subnets[subnet].pools[pool]) + self.subnets[subnet].pools[pool].range_start = start + self.subnets[subnet].pools[pool].range_end = end + + # def resolve_settings(self, conf): + # lines = conf.split('\n') + # for line in lines: + # if DEFAULT_LEASE_TIME_KEY in line: + # self._default_lease_time = line.strip().split( + # DEFAULT_LEASE_TIME_KEY)[1].strip().split(';')[0] + + # self.peer = peer + + def __str__(self): + + config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, + DEFAULT_LEASE_TIME=self._default_lease_time) + + config += '\n\n' + str(self.peer) + for subnet in self._subnets: + config += '\n\n' + str(subnet) + return str(config) + + +FAILOVER_PEER_KEY = 'failover peer' +PRIMARY_KEY = 'primary' +ADDRESS_KEY = 'address' +PORT_KEY = 'port' +PEER_ADDRESS_KEY = 'peer address' +PEER_PORT_KEY = 'peer port' +MAX_RESPONSE_DELAY_KEY = 'max-response-delay' +MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' +MCLT_KEY = 'mclt' +SPLIT_KEY = 'split' +LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' + + +class DHCPFailoverPeer: + """Contains all information to define the DHCP failover peer""" + + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + + self.resolve_peer(config) + + def __str__(self): + config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' + config 
+= '\tprimary;' if self.primary else 'secondary;' + config += """\n\t{ADDRESS_KEY} {ADDRESS}; + {PORT_KEY} {PORT}; + {PEER_ADDRESS_KEY} {PEER_ADDRESS}; + {PEER_PORT_KEY} {PEER_PORT}; + {MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY}; + {MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES}; + {MCLT_KEY} {MCLT}; + {SPLIT_KEY} {SPLIT}; + {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; + \r}}""" + + return config.format( + length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, + FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, + ADDRESS=self.address, + PORT_KEY=PORT_KEY, + PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, + PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, + PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, + MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, + MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, + MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, + SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, + LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) + + def resolve_peer(self, conf): + peer = '' + lines = conf.split('\n') + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if len(peer) <= 0: + self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( + '{')[0].split('\"')[1] + peer += line + '\n' + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( + ';')[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split( + PEER_ADDRESS_KEY)[1].strip().split(';')[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( + ';')[0] + elif MAX_RESPONSE_DELAY_KEY in line: + 
self.max_response_delay = line.strip().split( + MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split( + MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split( + LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] + if line.endswith('}') and len(peer) > 0: + break + self.peer = peer + + +NTP_OPTION_KEY = 'option ntp-servers' +SUBNET_MASK_OPTION_KEY = 'option subnet-mask' +BROADCAST_OPTION_KEY = 'option broadcast-address' +ROUTER_OPTION_KEY = 'option routers' +DNS_OPTION_KEY = 'option domain-name-servers' + + +class DHCPSubnet: + """Represents the DHCP Servers subnet configuration""" + + def __init__(self, subnet): + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self.pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ + \r\t{NTP_OPTION_KEY} {NTP_OPTION}; + \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; + \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; + \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; + \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" + + config = config.format(length='multi-line', + NTP_OPTION_KEY=NTP_OPTION_KEY, + NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, + SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, + BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, + ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, + DNS_OPTION=self._dns_servers) + for pool in self.pools: + config += '\n\t' + str(pool) + + config += 
'\n\r}' + return config + + def resolve_subnet(self, subnet): + subnet_parts = subnet.split('\n') + for part in subnet_parts: + if NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( + ';')[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split( + SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split( + BROADCAST_OPTION_KEY)[1].strip().split(';')[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( + ';')[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( + ';')[0] + + def resolve_pools(self, subnet): + regex = r'(pool.*)\}' + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self.pools.append(dhcp_pool) + + +FAILOVER_KEY = 'failover peer' +RANGE_KEY = 'range' + + +class DHCPPool: + """Represents a DHCP Servers subnet pool configuration""" + + def __init__(self, pool): + self.failover_peer = None + self.range_start = None + self.range_end = None + self.resolve_pool(pool) + + def __str__(self): + + config = """pool {{ + \r\t\t{FAILOVER_KEY} "{FAILOVER}"; + \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; + \r\t}}""" + + return config.format( + length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, + FAILOVER=self.failover_peer, + RANGE_KEY=RANGE_KEY, + RANGE_START=self.range_start, + RANGE_END=self.range_end, + ) + + def resolve_pool(self, pool): + pool_parts = pool.split('\n') + # pool_parts = pool.split("\n") + for part in pool_parts: + if FAILOVER_KEY in part: + self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( + ';')[0].replace('\"', '') + if RANGE_KEY in part: + pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] + self.range_start = pool_range.split(' ')[0].strip() + self.range_end = pool_range.split(' 
')[1].strip() diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py new file mode 100644 index 000000000..49732b362 --- /dev/null +++ b/net_orc/network/modules/dhcp-1/python/src/grpc/network_service.py @@ -0,0 +1,44 @@ +"""gRPC Network Service for the DHCP Server network module""" +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 + +from dhcp_config import DHCPConfig + + +class NetworkService(pb2_grpc.NetworkModule): + """gRPC endpoints for the DHCP Server""" + + def __init__(self): + self._dhcp_config = DHCPConfig() + + def GetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + self._dhcp_config.resolve_config() + pool = self._dhcp_config.subnets[0].pools[0] + return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) + + def SetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration + """ + + print('Setting DHCPRange') + print('Start: ' + request.start) + print('End: ' + request.end) + self._dhcp_config.resolve_config() + self._dhcp_config.set_range(request.start, request.end, 0, 0) + self._dhcp_config.write_config() + return pb2.Response(code=200, message='DHCP Range Set') + + def GetStatus(self, request, context): # pylint: disable=W0613 + """ + Return the current status of the network module + """ + # ToDo: Figure out how to resolve the current DHCP status + dhcp_status = True + message = str({'dhcpStatus': dhcp_status}) + return pb2.Response(code=200, message=message) diff --git a/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto b/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto new file mode 100644 index 000000000..8e2732620 --- /dev/null +++ 
b/net_orc/network/modules/dhcp-1/python/src/grpc/proto/grpc.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +service NetworkModule { + + rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; + + rpc SetDHCPRange(DHCPRange) returns (Response) {}; + + rpc GetStatus(GetStatusRequest) returns (Response) {}; + + rpc GetIPAddress(GetIPAddressRequest) returns (Response) {}; + + rpc SetLeaseAddress(SetLeaseAddressRequest) returns (Response) {}; + +} + +message Response { + int32 code = 1; + string message = 2; +} + +message DHCPRange { + int32 code = 1; + string start = 2; + string end = 3; +} + +message GetDHCPRangeRequest {} + +message GetIPAddressRequest {} + +message GetStatusRequest {} + +message SetLeaseAddressRequest { + string ipAddress = 1; +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/bin/start_network_service b/net_orc/network/modules/dhcp-2/bin/start_network_service new file mode 100644 index 000000000..d58174695 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/bin/start_network_service @@ -0,0 +1,77 @@ +#!/bin/bash + +CONFIG_FILE=/etc/dhcp/dhcpd.conf +DHCP_PID_FILE=/var/run/dhcpd.pid +DHCP_LOG_FILE=/runtime/network/dhcp2-dhcpd.log +RA_PID_FILE=/var/run/radvd/radvd.pid +RA_LOG_FILE=/runtime/network/dhcp2-radvd.log + +echo "Starrting Network Service..." 
+ +#Enable IPv6 Forwarding +sysctl net.ipv6.conf.all.forwarding=1 +sysctl -p + +# Create leases file if needed +touch /var/lib/dhcp/dhcpd.leases + +#Create directory for radvd +mkdir /var/run/radvd + +#Create and set permissions on the log files +touch $DHCP_LOG_FILE +touch $RA_LOG_FILE +chown $HOST_USER:$HOST_USER $DHCP_LOG_FILE +chown $HOST_USER:$HOST_USER $RA_LOG_FILE + + +#Move the config files to the correct location +cp /testrun/conf/dhcpd.conf /etc/dhcp/dhcpd.conf +cp /testrun/conf/radvd.conf /etc/radvd.conf + +# Restart dhcp server when config changes +while true; do + + new_checksum=$(md5sum $CONFIG_FILE) + + if [ "$checksum" == "$new_checksum" ]; then + sleep 2 + continue + fi + + echo Config changed. Restarting dhcp server at $(date).. + + if [ -f $DHCP_PID_FILE ]; then + kill -9 $(cat $DHCP_PID_FILE) || true + rm -f $DHCP_PID_FILE + fi + + if [ -f $RA_PID_FILE ]; then + kill -9 $(cat $RA_PID_FILE) || true + rm -f $RA_PID_FILE + fi + + checksum=$new_checksum + + echo Starting isc-dhcp-server at $(date) + + radvd -m logfile -l $RA_LOG_FILE -p $RA_PID_FILE + dhcpd -d &> $DHCP_LOG_FILE & + + while [ ! -f $DHCP_PID_FILE ]; do + echo Waiting for $DHCP_PID_FILE... + sleep 2 + done + + echo $DHCP_PID_FILE now available + + while [ ! -f $RA_PID_FILE ]; do + echo Waiting for $RA_PID_FILE... 
+ sleep 2 + done + + echo $RA_PID_FILE now available + + echo Server now stable + +done \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/conf/dhcpd.conf b/net_orc/network/modules/dhcp-2/conf/dhcpd.conf new file mode 100644 index 000000000..e73a81441 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/conf/dhcpd.conf @@ -0,0 +1,24 @@ +default-lease-time 300; + +failover peer "failover-peer" { + secondary; + address 10.10.10.3; + port 647; + peer address 10.10.10.2; + peer port 847; + max-response-delay 60; + max-unacked-updates 10; + load balance max seconds 3; +} + +subnet 10.10.10.0 netmask 255.255.255.0 { + option ntp-servers 10.10.10.5; + option subnet-mask 255.255.255.0; + option broadcast-address 10.10.10.255; + option routers 10.10.10.1; + option domain-name-servers 10.10.10.4; + pool { + failover peer "failover-peer"; + range 10.10.10.10 10.10.10.20; + } +} diff --git a/net_orc/network/modules/dhcp-2/conf/module_config.json b/net_orc/network/modules/dhcp-2/conf/module_config.json new file mode 100644 index 000000000..bd719604d --- /dev/null +++ b/net_orc/network/modules/dhcp-2/conf/module_config.json @@ -0,0 +1,26 @@ +{ + "config": { + "meta": { + "name": "dhcp-2", + "display_name": "DHCP Secondary", + "description": "Secondary DHCP server with IPv6 SLAAC" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 3 + }, + "grpc":{ + "port": 5001 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/conf/radvd.conf b/net_orc/network/modules/dhcp-2/conf/radvd.conf new file mode 100644 index 000000000..f6d6f30d9 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/conf/radvd.conf @@ -0,0 +1,12 @@ +interface veth0 +{ + AdvSendAdvert on; + AdvManagedFlag off; + MinRtrAdvInterval 30; + MaxRtrAdvInterval 60; + prefix fd10:77be:4186::/64 { + AdvOnLink on; + 
AdvAutonomous on; + AdvRouterAddr on; + }; +}; \ No newline at end of file diff --git a/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile b/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile new file mode 100644 index 000000000..989992570 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/dhcp-2.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/dhcp-primary +FROM test-run/base:latest + +# Install dhcp server +RUN apt-get install -y isc-dhcp-server radvd + +# Copy over all configuration files +COPY network/modules/dhcp-2/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/dhcp-2/bin /testrun/bin + +# Copy over all python files +COPY network/modules/dhcp-2/python /testrun/python diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py b/net_orc/network/modules/dhcp-2/python/src/grpc/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py new file mode 100644 index 000000000..1d93c2d34 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/dhcp_config.py @@ -0,0 +1,289 @@ +"""Contains all the necessary classes to maintain the +DHCP server's configuration""" +import re + +CONFIG_FILE = '/etc/dhcp/dhcpd.conf' +CONFIG_FILE_TEST = 'network/modules/dhcp-2/conf/dhcpd.conf' + +DEFAULT_LEASE_TIME_KEY = 'default-lease-time' + + +class DHCPConfig: + """Represents the DHCP Servers configuration and gives access to modify it""" + + def __init__(self): + self._default_lease_time = 300 + self.subnets = [] + self._peer = None + + def write_config(self): + conf = str(self) + print('Writing config: \n' + conf) + with open(CONFIG_FILE, 'w', encoding='UTF-8') as conf_file: + conf_file.write(conf) + + def resolve_config(self): + with open(CONFIG_FILE, 'r', encoding='UTF-8') as f: + conf = f.read() + self.resolve_subnets(conf) + self._peer = DHCPFailoverPeer(conf) + + def resolve_subnets(self, conf): + 
self.subnets = [] + regex = r'(subnet.*)' + subnets = re.findall(regex, conf, re.MULTILINE | re.DOTALL) + for subnet in subnets: + dhcp_subnet = DHCPSubnet(subnet) + self.subnets.append(dhcp_subnet) + + def set_range(self, start, end, subnet=0, pool=0): + print('Setting Range for pool ') + print(self.subnets[subnet].pools[pool]) + self.subnets[subnet].pools[pool].range_start = start + self.subnets[subnet].pools[pool].range_end = end + + # def resolve_settings(self, conf): + # lines = conf.split('\n') + # for line in lines: + # if DEFAULT_LEASE_TIME_KEY in line: + # self._default_lease_time = line.strip().split( + # DEFAULT_LEASE_TIME_KEY)[1].strip().split(';')[0] + + # self.peer = peer + + def __str__(self): + + config = """\r{DEFAULT_LEASE_TIME_KEY} {DEFAULT_LEASE_TIME};""" + + config = config.format(length='multi-line', + DEFAULT_LEASE_TIME_KEY=DEFAULT_LEASE_TIME_KEY, + DEFAULT_LEASE_TIME=self._default_lease_time) + + config += '\n\n' + str(self.peer) + for subnet in self._subnets: + config += '\n\n' + str(subnet) + return str(config) + + +FAILOVER_PEER_KEY = 'failover peer' +PRIMARY_KEY = 'primary' +ADDRESS_KEY = 'address' +PORT_KEY = 'port' +PEER_ADDRESS_KEY = 'peer address' +PEER_PORT_KEY = 'peer port' +MAX_RESPONSE_DELAY_KEY = 'max-response-delay' +MAX_UNACKED_UPDATES_KEY = 'max-unacked-updates' +MCLT_KEY = 'mclt' +SPLIT_KEY = 'split' +LOAD_BALANCE_MAX_SECONDS_KEY = 'load balance max seconds' + + +class DHCPFailoverPeer: + """Contains all information to define the DHCP failover peer""" + + def __init__(self, config): + self.name = None + self.primary = False + self.address = None + self.port = None + self.peer_address = None + self.peer_port = None + self.max_response_delay = None + self.max_unacked_updates = None + self.mclt = None + self.split = None + self.load_balance_max_seconds = None + self.peer = None + + self.resolve_peer(config) + + def __str__(self): + config = '{FAILOVER_PEER_KEY} \"{FAILOVER_PEER}\" {{\n' + config += '\tprimary;' if self.primary 
else 'secondary;' + config += """\n\t{ADDRESS_KEY} {ADDRESS}; + {PORT_KEY} {PORT}; + {PEER_ADDRESS_KEY} {PEER_ADDRESS}; + {PEER_PORT_KEY} {PEER_PORT}; + {MAX_RESPONSE_DELAY_KEY} {MAX_RESPONSE_DELAY}; + {MAX_UNACKED_UPDATES_KEY} {MAX_UNACKED_UPDATES}; + {MCLT_KEY} {MCLT}; + {SPLIT_KEY} {SPLIT}; + {LOAD_BALANCE_MAX_SECONDS_KEY} {LOAD_BALANCE_MAX_SECONDS}; + \r}}""" + + return config.format( + length='multi-line', + FAILOVER_PEER_KEY=FAILOVER_PEER_KEY, + FAILOVER_PEER=self.name, + ADDRESS_KEY=ADDRESS_KEY, + ADDRESS=self.address, + PORT_KEY=PORT_KEY, + PORT=self.port, + PEER_ADDRESS_KEY=PEER_ADDRESS_KEY, + PEER_ADDRESS=self.peer_address, + PEER_PORT_KEY=PEER_PORT_KEY, + PEER_PORT=self.peer_port, + MAX_RESPONSE_DELAY_KEY=MAX_RESPONSE_DELAY_KEY, + MAX_RESPONSE_DELAY=self.max_response_delay, + MAX_UNACKED_UPDATES_KEY=MAX_UNACKED_UPDATES_KEY, + MAX_UNACKED_UPDATES=self.max_unacked_updates, + MCLT_KEY=MCLT_KEY, + MCLT=self.mclt, + SPLIT_KEY=SPLIT_KEY, + SPLIT=self.split, + LOAD_BALANCE_MAX_SECONDS_KEY=LOAD_BALANCE_MAX_SECONDS_KEY, + LOAD_BALANCE_MAX_SECONDS=self.load_balance_max_seconds) + + def resolve_peer(self, conf): + peer = '' + lines = conf.split('\n') + for line in lines: + if line.startswith(FAILOVER_PEER_KEY) or len(peer) > 0: + if len(peer) <= 0: + self.name = line.strip().split(FAILOVER_PEER_KEY)[1].strip().split( + '{')[0].split('\"')[1] + peer += line + '\n' + if PRIMARY_KEY in line: + self.primary = True + elif ADDRESS_KEY in line and PEER_ADDRESS_KEY not in line: + self.address = line.strip().split(ADDRESS_KEY)[1].strip().split( + ';')[0] + elif PORT_KEY in line and PEER_PORT_KEY not in line: + self.port = line.strip().split(PORT_KEY)[1].strip().split(';')[0] + elif PEER_ADDRESS_KEY in line: + self.peer_address = line.strip().split( + PEER_ADDRESS_KEY)[1].strip().split(';')[0] + elif PEER_PORT_KEY in line: + self.peer_port = line.strip().split(PEER_PORT_KEY)[1].strip().split( + ';')[0] + elif MAX_RESPONSE_DELAY_KEY in line: + self.max_response_delay = 
line.strip().split( + MAX_RESPONSE_DELAY_KEY)[1].strip().split(';')[0] + elif MAX_UNACKED_UPDATES_KEY in line: + self.max_unacked_updates = line.strip().split( + MAX_UNACKED_UPDATES_KEY)[1].strip().split(';')[0] + elif MCLT_KEY in line: + self.mclt = line.strip().split(MCLT_KEY)[1].strip().split(';')[0] + elif SPLIT_KEY in line: + self.split = line.strip().split(SPLIT_KEY)[1].strip().split(';')[0] + elif LOAD_BALANCE_MAX_SECONDS_KEY in line: + self.load_balance_max_seconds = line.strip().split( + LOAD_BALANCE_MAX_SECONDS_KEY)[1].strip().split(';')[0] + if line.endswith('}') and len(peer) > 0: + break + self.peer = peer + + +NTP_OPTION_KEY = 'option ntp-servers' +SUBNET_MASK_OPTION_KEY = 'option subnet-mask' +BROADCAST_OPTION_KEY = 'option broadcast-address' +ROUTER_OPTION_KEY = 'option routers' +DNS_OPTION_KEY = 'option domain-name-servers' + + +class DHCPSubnet: + """Represents the DHCP Servers subnet configuration""" + + def __init__(self, subnet): + self._ntp_servers = None + self._subnet_mask = None + self._broadcast = None + self._routers = None + self._dns_servers = None + self.pools = [] + + self.resolve_subnet(subnet) + self.resolve_pools(subnet) + + def __str__(self): + config = """subnet 10.10.10.0 netmask {SUBNET_MASK_OPTION} {{ + \r\t{NTP_OPTION_KEY} {NTP_OPTION}; + \r\t{SUBNET_MASK_OPTION_KEY} {SUBNET_MASK_OPTION}; + \r\t{BROADCAST_OPTION_KEY} {BROADCAST_OPTION}; + \r\t{ROUTER_OPTION_KEY} {ROUTER_OPTION}; + \r\t{DNS_OPTION_KEY} {DNS_OPTION};""" + + config = config.format(length='multi-line', + NTP_OPTION_KEY=NTP_OPTION_KEY, + NTP_OPTION=self._ntp_servers, + SUBNET_MASK_OPTION_KEY=SUBNET_MASK_OPTION_KEY, + SUBNET_MASK_OPTION=self._subnet_mask, + BROADCAST_OPTION_KEY=BROADCAST_OPTION_KEY, + BROADCAST_OPTION=self._broadcast, + ROUTER_OPTION_KEY=ROUTER_OPTION_KEY, + ROUTER_OPTION=self._routers, + DNS_OPTION_KEY=DNS_OPTION_KEY, + DNS_OPTION=self._dns_servers) + for pool in self.pools: + config += '\n\t' + str(pool) + + config += '\n\r}' + return config + + 
def resolve_subnet(self, subnet): + subnet_parts = subnet.split('\n') + for part in subnet_parts: + if NTP_OPTION_KEY in part: + self._ntp_servers = part.strip().split(NTP_OPTION_KEY)[1].strip().split( + ';')[0] + elif SUBNET_MASK_OPTION_KEY in part: + self._subnet_mask = part.strip().split( + SUBNET_MASK_OPTION_KEY)[1].strip().split(';')[0] + elif BROADCAST_OPTION_KEY in part: + self._broadcast = part.strip().split( + BROADCAST_OPTION_KEY)[1].strip().split(';')[0] + elif ROUTER_OPTION_KEY in part: + self._routers = part.strip().split(ROUTER_OPTION_KEY)[1].strip().split( + ';')[0] + elif DNS_OPTION_KEY in part: + self._dns_servers = part.strip().split(DNS_OPTION_KEY)[1].strip().split( + ';')[0] + + def resolve_pools(self, subnet): + regex = r'(pool.*)\}' + pools = re.findall(regex, subnet, re.MULTILINE | re.DOTALL) + for pool in pools: + dhcp_pool = DHCPPool(pool) + self.pools.append(dhcp_pool) + + +FAILOVER_KEY = 'failover peer' +RANGE_KEY = 'range' + + +class DHCPPool: + """Represents a DHCP Servers subnet pool configuration""" + + def __init__(self, pool): + self.failover_peer = None + self.range_start = None + self.range_end = None + self.resolve_pool(pool) + + def __str__(self): + + config = """pool {{ + \r\t\t{FAILOVER_KEY} "{FAILOVER}"; + \r\t\t{RANGE_KEY} {RANGE_START} {RANGE_END}; + \r\t}}""" + + return config.format( + length='multi-line', + FAILOVER_KEY=FAILOVER_KEY, + FAILOVER=self.failover_peer, + RANGE_KEY=RANGE_KEY, + RANGE_START=self.range_start, + RANGE_END=self.range_end, + ) + + def resolve_pool(self, pool): + pool_parts = pool.split('\n') + # pool_parts = pool.split("\n") + for part in pool_parts: + if FAILOVER_KEY in part: + self.failover_peer = part.strip().split(FAILOVER_KEY)[1].strip().split( + ';')[0].replace('\"', '') + if RANGE_KEY in part: + pool_range = part.strip().split(RANGE_KEY)[1].strip().split(';')[0] + self.range_start = pool_range.split(' ')[0].strip() + self.range_end = pool_range.split(' ')[1].strip() diff --git 
a/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py new file mode 100644 index 000000000..49732b362 --- /dev/null +++ b/net_orc/network/modules/dhcp-2/python/src/grpc/network_service.py @@ -0,0 +1,44 @@ +"""gRPC Network Service for the DHCP Server network module""" +import proto.grpc_pb2_grpc as pb2_grpc +import proto.grpc_pb2 as pb2 + +from dhcp_config import DHCPConfig + + +class NetworkService(pb2_grpc.NetworkModule): + """gRPC endpoints for the DHCP Server""" + + def __init__(self): + self._dhcp_config = DHCPConfig() + + def GetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Resolve the current DHCP configuration and return + the first range from the first subnet in the file + """ + self._dhcp_config.resolve_config() + pool = self._dhcp_config.subnets[0].pools[0] + return pb2.DHCPRange(code=200, start=pool.range_start, end=pool.range_end) + + def SetDHCPRange(self, request, context): # pylint: disable=W0613 + """ + Change DHCP configuration and set the + the first range from the first subnet in the configuration + """ + + print('Setting DHCPRange') + print('Start: ' + request.start) + print('End: ' + request.end) + self._dhcp_config.resolve_config() + self._dhcp_config.set_range(request.start, request.end, 0, 0) + self._dhcp_config.write_config() + return pb2.Response(code=200, message='DHCP Range Set') + + def GetStatus(self, request, context): # pylint: disable=W0613 + """ + Return the current status of the network module + """ + # ToDo: Figure out how to resolve the current DHCP status + dhcp_status = True + message = str({'dhcpStatus': dhcp_status}) + return pb2.Response(code=200, message=message) diff --git a/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto b/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto new file mode 100644 index 000000000..8e2732620 --- /dev/null +++ 
b/net_orc/network/modules/dhcp-2/python/src/grpc/proto/grpc.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +service NetworkModule { + + rpc GetDHCPRange(GetDHCPRangeRequest) returns (DHCPRange) {}; + + rpc SetDHCPRange(DHCPRange) returns (Response) {}; + + rpc GetStatus(GetStatusRequest) returns (Response) {}; + + rpc GetIPAddress(GetIPAddressRequest) returns (Response) {}; + + rpc SetLeaseAddress(SetLeaseAddressRequest) returns (Response) {}; + +} + +message Response { + int32 code = 1; + string message = 2; +} + +message DHCPRange { + int32 code = 1; + string start = 2; + string end = 3; +} + +message GetDHCPRangeRequest {} + +message GetIPAddressRequest {} + +message GetStatusRequest {} + +message SetLeaseAddressRequest { + string ipAddress = 1; +} \ No newline at end of file diff --git a/net_orc/network/modules/dns/bin/start_network_service b/net_orc/network/modules/dns/bin/start_network_service new file mode 100644 index 000000000..4537033c0 --- /dev/null +++ b/net_orc/network/modules/dns/bin/start_network_service @@ -0,0 +1,48 @@ +#!/bin/bash + +CONFIG_FILE=/etc/dnsmasq.conf +PID_FILE=/var/run/dnsmasq.pid +LOG_FILE=/runtime/network/dns.log + +echo Starting dns + +cp /testrun/conf/dnsmasq.conf /etc/dnsmasq.conf + +# Route internet traffic through gateway +ip route add default via 10.10.10.1 dev veth0 + +# Restart dnsmasq when config changes +while true; do + + new_checksum=$(md5sum $CONFIG_FILE) + + if [ "$checksum" == "$new_checksum" ]; then + sleep 2 + continue + fi + + echo Config changed. Restarting dnsmasq at $(date).. + + if [ -f $PID_FILE ]; then + kill -9 $(cat $PID_FILE) || true + rm -f $PID_FILE + fi + + checksum=$new_checksum + + echo Starting dnsmasq at $(date) + + dnsmasq --log-facility=$LOG_FILE -u $HOST_USER & + + while [ ! -f $PID_FILE ]; do + echo Waiting for $PID_FILE... 
+ sleep 2 + done + + # Group flag doesn't work for some reason on dnsmasq + # so we'll manually change the group to the log file + chgrp $HOST_USER $LOG_FILE + + echo $PID_FILE now available + +done \ No newline at end of file diff --git a/net_orc/network/modules/dns/conf/dnsmasq.conf b/net_orc/network/modules/dns/conf/dnsmasq.conf new file mode 100644 index 000000000..5513a9220 --- /dev/null +++ b/net_orc/network/modules/dns/conf/dnsmasq.conf @@ -0,0 +1,5 @@ +server=8.8.8.8 + +interface=veth0 + +log-queries \ No newline at end of file diff --git a/net_orc/network/modules/dns/conf/module_config.json b/net_orc/network/modules/dns/conf/module_config.json new file mode 100644 index 000000000..cad1c02ef --- /dev/null +++ b/net_orc/network/modules/dns/conf/module_config.json @@ -0,0 +1,23 @@ +{ + "config": { + "meta": { + "name": "dns", + "display_name": "DNS", + "description": "A DNS server" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 4 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/dns/dns.Dockerfile b/net_orc/network/modules/dns/dns.Dockerfile new file mode 100644 index 000000000..84c1c7eb1 --- /dev/null +++ b/net_orc/network/modules/dns/dns.Dockerfile @@ -0,0 +1,14 @@ +# Image name: test-run/dns +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update --fix-missing + +#Install dnsmasq +RUN apt-get install -y dnsmasq + +# Copy over all configuration files +COPY network/modules/dns/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/dns/bin /testrun/bin diff --git a/net_orc/network/modules/gateway/bin/start_network_service b/net_orc/network/modules/gateway/bin/start_network_service new file mode 100644 index 000000000..b1b31d335 --- /dev/null +++ 
b/net_orc/network/modules/gateway/bin/start_network_service @@ -0,0 +1,30 @@ +#!/bin/bash + +LOCAL_INTF=veth0 +EXT_INTF=eth1 + +echo Starting gateway + +/testrun/bin/wait_for_interface $EXT_INT + +# Enable IPv6 forwarding +sysctl net.ipv6.conf.eth1.accept_ra=1 +sysctl net.ipv6.conf.default.forwarding=1 +sysctl -p + +# Start dhclient if external interface does not have IP +if ! ip addr show $EXT_INTF | fgrep 'inet '; then + echo No inet address for $EXT_INTF, initiating dhcp client... + dhclient $EXT_INTF +fi + +# Enable NAT to the outside world +echo 1 > /proc/sys/net/ipv4/ip_forward +iptables -t nat -A POSTROUTING -o $EXT_INTF -j MASQUERADE +iptables -A FORWARD -i $EXT_INTF -o $LOCAL_INTF -m state --state RELATED,ESTABLISHED -j ACCEPT +iptables -A FORWARD -i $LOCAL_INTF -o $EXT_INTF -j ACCEPT + +# Keep gateway running until killed by framework +while true; do + sleep 10 +done diff --git a/net_orc/network/modules/gateway/conf/module_config.json b/net_orc/network/modules/gateway/conf/module_config.json new file mode 100644 index 000000000..5b39339ce --- /dev/null +++ b/net_orc/network/modules/gateway/conf/module_config.json @@ -0,0 +1,23 @@ +{ + "config": { + "meta": { + "name": "gateway", + "display_name": "Gateway", + "description": "Enable internet connectivity on device bridge" + }, + "network": { + "interface": "veth0", + "enable_wan": true, + "ip_index": 1 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/gateway/gateway.Dockerfile b/net_orc/network/modules/gateway/gateway.Dockerfile new file mode 100644 index 000000000..b7085ebac --- /dev/null +++ b/net_orc/network/modules/gateway/gateway.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/gateway +FROM test-run/base:latest + +# Install required packages +RUN apt-get install -y iptables isc-dhcp-client + +# Copy over all configuration files +COPY 
network/modules/gateway/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/gateway/bin /testrun/bin diff --git a/net_orc/network/modules/ntp/bin/start_network_service b/net_orc/network/modules/ntp/bin/start_network_service new file mode 100644 index 000000000..4c0c5dc74 --- /dev/null +++ b/net_orc/network/modules/ntp/bin/start_network_service @@ -0,0 +1,13 @@ +#!/bin/bash + +PYTHON_SRC_DIR=/testrun/python/src +LOG_FILE="/runtime/network/ntp.log" + +echo Starting ntp + +#Create and set permissions on the log file +touch $LOG_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE + +#Start the NTP server +python3 -u $PYTHON_SRC_DIR/ntp_server.py > $LOG_FILE diff --git a/net_orc/network/modules/ntp/conf/module_config.json b/net_orc/network/modules/ntp/conf/module_config.json new file mode 100644 index 000000000..e3dbdc8f1 --- /dev/null +++ b/net_orc/network/modules/ntp/conf/module_config.json @@ -0,0 +1,23 @@ +{ + "config": { + "meta": { + "name": "ntp", + "display_name": "NTP", + "description": "An NTP server" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 5 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/ntp/ntp.Dockerfile b/net_orc/network/modules/ntp/ntp.Dockerfile new file mode 100644 index 000000000..3474a504e --- /dev/null +++ b/net_orc/network/modules/ntp/ntp.Dockerfile @@ -0,0 +1,13 @@ +# Image name: test-run/ntp +FROM test-run/base:latest + +# Copy over all configuration files +COPY network/modules/ntp/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/ntp/bin /testrun/bin + +# Copy over all python files +COPY network/modules/ntp/python /testrun/python + +EXPOSE 123/udp diff --git a/net_orc/network/modules/ntp/python/src/ntp_server.py b/net_orc/network/modules/ntp/python/src/ntp_server.py new file mode 100644 index 000000000..602585196 
--- /dev/null +++ b/net_orc/network/modules/ntp/python/src/ntp_server.py @@ -0,0 +1,324 @@ +"""NTP Server""" +import datetime +import socket +import struct +import time +import queue + +import threading +import select + +task_queue = queue.Queue() +stop_flag = False + + +def system_to_ntp_time(timestamp): + """Convert a system time to a NTP time. + + Parameters: + timestamp -- timestamp in system time + + Returns: + corresponding NTP time + """ + return timestamp + NTP.NTP_DELTA + + +def _to_int(timestamp): + """Return the integral part of a timestamp. + + Parameters: + timestamp -- NTP timestamp + + Retuns: + integral part + """ + return int(timestamp) + + +def _to_frac(timestamp, n=32): + """Return the fractional part of a timestamp. + + Parameters: + timestamp -- NTP timestamp + n -- number of bits of the fractional part + + Retuns: + fractional part + """ + return int(abs(timestamp - _to_int(timestamp)) * 2**n) + + +def _to_time(integ, frac, n=32): + """Return a timestamp from an integral and fractional part. 
+ + Parameters: + integ -- integral part + frac -- fractional part + n -- number of bits of the fractional part + + Retuns: + timestamp + """ + return integ + float(frac) / 2**n + + +class NTPException(Exception): + """Exception raised by this module.""" + pass + + +class NTP: + """Helper class defining constants.""" + + _SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3]) + """system epoch""" + _NTP_EPOCH = datetime.date(1900, 1, 1) + """NTP epoch""" + NTP_DELTA = (_SYSTEM_EPOCH - _NTP_EPOCH).days * 24 * 3600 + """delta between system and NTP time""" + + REF_ID_TABLE = { + 'DNC': 'DNC routing protocol', + 'NIST': 'NIST public modem', + 'TSP': 'TSP time protocol', + 'DTS': 'Digital Time Service', + 'ATOM': 'Atomic clock (calibrated)', + 'VLF': 'VLF radio (OMEGA, etc)', + 'callsign': 'Generic radio', + 'LORC': 'LORAN-C radionavidation', + 'GOES': 'GOES UHF environment satellite', + 'GPS': 'GPS UHF satellite positioning', + } + """reference identifier table""" + + STRATUM_TABLE = { + 0: 'unspecified', + 1: 'primary reference', + } + """stratum table""" + + MODE_TABLE = { + 0: 'unspecified', + 1: 'symmetric active', + 2: 'symmetric passive', + 3: 'client', + 4: 'server', + 5: 'broadcast', + 6: 'reserved for NTP control messages', + 7: 'reserved for private use', + } + """mode table""" + + LEAP_TABLE = { + 0: 'no warning', + 1: 'last minute has 61 seconds', + 2: 'last minute has 59 seconds', + 3: 'alarm condition (clock not synchronized)', + } + """leap indicator table""" + + +class NTPPacket: + """NTP packet class. + + This represents an NTP packet. + """ + + _PACKET_FORMAT = '!B B B b 11I' + """packet format to pack/unpack""" + + def __init__(self, version=4, mode=3, tx_timestamp=0): + """Constructor. 
+ + Parameters: + version -- NTP version + mode -- packet mode (client, server) + tx_timestamp -- packet transmit timestamp + """ + self.leap = 0 + """leap second indicator""" + self.version = version + """version""" + self.mode = mode + """mode""" + self.stratum = 0 + """stratum""" + self.poll = 0 + """poll interval""" + self.precision = 0 + """precision""" + self.root_delay = 0 + """root delay""" + self.root_dispersion = 0 + """root dispersion""" + self.ref_id = 0 + """reference clock identifier""" + self.ref_timestamp = 0 + """reference timestamp""" + self.orig_timestamp = 0 + self.orig_timestamp_high = 0 + self.orig_timestamp_low = 0 + """originate timestamp""" + self.recv_timestamp = 0 + """receive timestamp""" + self.tx_timestamp = tx_timestamp + self.tx_timestamp_high = 0 + self.tx_timestamp_low = 0 + """tansmit timestamp""" + + def to_data(self): + """Convert this NTPPacket to a buffer that can be sent over a socket. + + Returns: + buffer representing this packet + + Raises: + NTPException -- in case of invalid field + """ + try: + packed = struct.pack( + NTPPacket._PACKET_FORMAT, + (self.leap << 6 | self.version << 3 | self.mode), + self.stratum, + self.poll, + self.precision, + _to_int(self.root_delay) << 16 | _to_frac(self.root_delay, 16), + _to_int(self.root_dispersion) << 16 + | _to_frac(self.root_dispersion, 16), + self.ref_id, + _to_int(self.ref_timestamp), + _to_frac(self.ref_timestamp), + #Change by lichen, avoid loss of precision + self.orig_timestamp_high, + self.orig_timestamp_low, + _to_int(self.recv_timestamp), + _to_frac(self.recv_timestamp), + _to_int(self.tx_timestamp), + _to_frac(self.tx_timestamp)) + except struct.error as exc: + raise NTPException('Invalid NTP packet fields.') from exc + return packed + + def from_data(self, data): + """Populate this instance from a NTP packet payload received from + the network. 
+ + Parameters: + data -- buffer payload + + Raises: + NTPException -- in case of invalid packet format + """ + try: + unpacked = struct.unpack( + NTPPacket._PACKET_FORMAT, + data[0:struct.calcsize(NTPPacket._PACKET_FORMAT)]) + except struct.error as exc: + raise NTPException('Invalid NTP packet.') from exc + + self.leap = unpacked[0] >> 6 & 0x3 + self.version = unpacked[0] >> 3 & 0x7 + self.mode = unpacked[0] & 0x7 + self.stratum = unpacked[1] + self.poll = unpacked[2] + self.precision = unpacked[3] + self.root_delay = float(unpacked[4]) / 2**16 + self.root_dispersion = float(unpacked[5]) / 2**16 + self.ref_id = unpacked[6] + self.ref_timestamp = _to_time(unpacked[7], unpacked[8]) + self.orig_timestamp = _to_time(unpacked[9], unpacked[10]) + self.orig_timestamp_high = unpacked[9] + self.orig_timestamp_low = unpacked[10] + self.recv_timestamp = _to_time(unpacked[11], unpacked[12]) + self.tx_timestamp = _to_time(unpacked[13], unpacked[14]) + self.tx_timestamp_high = unpacked[13] + self.tx_timestamp_low = unpacked[14] + + def get_tx_timestamp(self): + return (self.tx_timestamp_high, self.tx_timestamp_low) + + def set_origin_timestamp(self, high, low): + self.orig_timestamp_high = high + self.orig_timestamp_low = low + + +class RecvThread(threading.Thread): + """Thread class to recieve all requests""" + def __init__(self): + threading.Thread.__init__(self) + #self.local_socket = local_socket + + def run(self): + while True: + if stop_flag: + print('RecvThread Ended') + break + rlist, wlist, elist = select.select([local_socket], [], [], 1) # pylint: disable=unused-variable + if len(rlist) != 0: + print(f'Received {len(rlist)} packets') + for temp_socket in rlist: + try: + data, addr = temp_socket.recvfrom(1024) + recv_timestamp = system_to_ntp_time(time.time()) + task_queue.put((data, addr, recv_timestamp)) + except socket.error as msg: + print(msg) + + +class WorkThread(threading.Thread): + """Thread class to process all requests and respond""" + def __init__(self): + 
threading.Thread.__init__(self) + #self.local_socket = local_socket + + def run(self): + while True: + if stop_flag: + print('WorkThread Ended') + break + try: + data, addr, recv_timestamp = task_queue.get(timeout=1) + recv_packet = NTPPacket() + recv_packet.from_data(data) + timestamp_high, timestamp_low = recv_packet.get_tx_timestamp() + send_packet = NTPPacket(version=4, mode=4) + send_packet.stratum = 2 + send_packet.poll = 10 + + # send_packet.precision = 0xfa + # send_packet.root_delay = 0x0bfa + # send_packet.root_dispersion = 0x0aa7 + # send_packet.ref_id = 0x808a8c2c + + send_packet.ref_timestamp = recv_timestamp - 5 + send_packet.set_origin_timestamp(timestamp_high, timestamp_low) + send_packet.recv_timestamp = recv_timestamp + send_packet.tx_timestamp = system_to_ntp_time(time.time()) + local_socket.sendto(send_packet.to_data(), addr) + print(f'Sent to {addr[0]}:{addr[1]}') + except queue.Empty: + continue + + +listen_ip = '0.0.0.0' +listen_port = 123 +local_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) +local_socket.bind((listen_ip, listen_port)) +print('local socket: ', local_socket.getsockname()) +recvThread = RecvThread() +recvThread.start() +workThread = WorkThread() +workThread.start() + +while True: + try: + time.sleep(0.5) + except KeyboardInterrupt: + print('Exiting...') + stop_flag = True + recvThread.join() + workThread.join() + #local_socket.close() + print('Exited') + break diff --git a/net_orc/network/modules/ovs/bin/start_network_service b/net_orc/network/modules/ovs/bin/start_network_service new file mode 100644 index 000000000..7c38f484a --- /dev/null +++ b/net_orc/network/modules/ovs/bin/start_network_service @@ -0,0 +1,22 @@ +#!/bin/bash -e + +if [[ "$EUID" -ne 0 ]]; then + echo "Must run as root." + exit 1 +fi + +asyncRun() { + "$@" & + pid="$!" 
+ trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM + + # A signal emitted while waiting will make the wait command return code > 128 + # Let's wrap it in a loop that doesn't end before the process is indeed stopped + while kill -0 $pid > /dev/null 2>&1; do + wait + done +} + +# -u flag allows python print statements +# to be logged by docker by running unbuffered +asyncRun exec python3 -u /ovs/python/src/run.py \ No newline at end of file diff --git a/net_orc/network/modules/ovs/conf/module_config.json b/net_orc/network/modules/ovs/conf/module_config.json new file mode 100644 index 000000000..8a440d0ae --- /dev/null +++ b/net_orc/network/modules/ovs/conf/module_config.json @@ -0,0 +1,24 @@ +{ + "config": { + "meta": { + "name": "ovs", + "display_name": "OVS", + "description": "Setup and configure Open vSwitch" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 6, + "host": true + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/ovs/ovs.Dockerfile b/net_orc/network/modules/ovs/ovs.Dockerfile new file mode 100644 index 000000000..cd4710e66 --- /dev/null +++ b/net_orc/network/modules/ovs/ovs.Dockerfile @@ -0,0 +1,20 @@ +# Image name: test-run/orchestrator +FROM test-run/base:latest + +#Update and get all additional requirements not contained in the base image +RUN apt-get update --fix-missing + +#Install openvswitch +RUN apt-get install -y openvswitch-switch + +# Copy over all configuration files +COPY network/modules/ovs/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/ovs/bin /testrun/bin + +# Copy over all python files +COPY network/modules/ovs/python /testrun/python + +#Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git 
a/net_orc/network/modules/ovs/python/requirements.txt b/net_orc/network/modules/ovs/python/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/net_orc/network/modules/ovs/python/src/logger.py b/net_orc/network/modules/ovs/python/src/logger.py new file mode 100644 index 000000000..23e697e43 --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/logger.py @@ -0,0 +1,16 @@ +"""Sets up the logger to be used for the ovs modules.""" +import logging + +LOGGERS = {} +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' +_DATE_FORMAT = '%b %02d %H:%M:%S' + +# Set level to debug if set as runtime flag +logging.basicConfig(format=_LOG_FORMAT, + datefmt=_DATE_FORMAT, + level=logging.INFO) + +def get_logger(name): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + return LOGGERS[name] diff --git a/net_orc/network/modules/ovs/python/src/ovs_control.py b/net_orc/network/modules/ovs/python/src/ovs_control.py new file mode 100644 index 000000000..765c50f92 --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/ovs_control.py @@ -0,0 +1,105 @@ +"""OVS Control Module""" +import json +import logger +import util + +CONFIG_FILE = '/ovs/conf/system.json' +DEVICE_BRIDGE = 'tr-d' +INTERNET_BRIDGE = 'tr-c' +LOGGER = logger.get_logger('ovs_ctrl') + +class OVSControl: + """OVS Control""" + def __init__(self): + self._int_intf = None + self._dev_intf = None + self._load_config() + + def add_bridge(self, bridge_name): + LOGGER.info('Adding OVS Bridge: ' + bridge_name) + # Create the bridge using ovs-vsctl commands + # Uses the --may-exist option to prevent failures + # if this bridge already exists by this name it won't fail + # and will not modify the existing bridge + success=util.run_command('ovs-vsctl --may-exist add-br ' + bridge_name) + return success + + def add_port(self,port, bridge_name): + LOGGER.info('Adding Port ' + port + ' to OVS Bridge: ' + bridge_name) + # Add a port to the bridge using ovs-vsctl commands + # 
Uses the --may-exist option to prevent failures + # if this port already exists on the bridge and will not + # modify the existing bridge + success=util.run_command(f"""ovs-vsctl --may-exist + add-port {bridge_name} {port}""") + return success + + def create_net(self): + LOGGER.info('Creating baseline network') + + # Create data plane + self.add_bridge(DEVICE_BRIDGE) + + # Create control plane + self.add_bridge(INTERNET_BRIDGE) + + # Remove IP from internet adapter + self.set_interface_ip(self._int_intf,'0.0.0.0') + + # Add external interfaces to data and control plane + self.add_port(self._dev_intf,DEVICE_BRIDGE) + self.add_port(self._int_intf,INTERNET_BRIDGE) + + # # Set ports up + self.set_bridge_up(DEVICE_BRIDGE) + self.set_bridge_up(INTERNET_BRIDGE) + + def delete_bridge(self,bridge_name): + LOGGER.info('Deleting OVS Bridge: ' + bridge_name) + # Delete the bridge using ovs-vsctl commands + # Uses the --if-exists option to prevent failures + # if this bridge does not exists + success=util.run_command('ovs-vsctl --if-exists del-br ' + bridge_name) + return success + + def _load_config(self): + LOGGER.info('Loading Configuration: ' + CONFIG_FILE) + with open(CONFIG_FILE, 'r', encoding='utf-8') as conf_file: + config_json = json.load(conf_file) + self._int_intf = config_json['internet_intf'] + self._dev_intf = config_json['device_intf'] + LOGGER.info('Configuration Loaded') + LOGGER.info('Internet Interface: ' + self._int_intf) + LOGGER.info('Device Interface: ' + self._dev_intf) + + def restore_net(self): + LOGGER.info('Restoring Network...') + # Delete data plane + self.delete_bridge(DEVICE_BRIDGE) + + # Delete control plane + self.delete_bridge(INTERNET_BRIDGE) + + LOGGER.info('Network is restored') + + def show_config(self): + LOGGER.info('Show current config of OVS') + success=util.run_command('ovs-vsctl show') + return success + + def set_bridge_up(self,bridge_name): + LOGGER.info('Setting Bridge device to up state: ' + bridge_name) + 
success=util.run_command('ip link set dev ' + bridge_name + ' up') + return success + + def set_interface_ip(self,interface, ip_addr): + LOGGER.info('Setting interface ' + interface + ' to ' + ip_addr) + # Remove IP from internet adapter + util.run_command('ifconfig ' + interface + ' 0.0.0.0') + +if __name__ == '__main__': + ovs = OVSControl() + ovs.create_net() + ovs.show_config() + ovs.restore_net() + ovs.show_config() diff --git a/net_orc/network/modules/ovs/python/src/run.py b/net_orc/network/modules/ovs/python/src/run.py new file mode 100644 index 000000000..5787a74e6 --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/run.py @@ -0,0 +1,54 @@ +"""Run OVS module""" +import logger +import signal +import sys +import time + +from ovs_control import OVSControl + +LOGGER = logger.get_logger('ovs_control_run') + +class OVSControlRun: + """Run the OVS module.""" + def __init__(self): + + signal.signal(signal.SIGINT, self.handler) + signal.signal(signal.SIGTERM, self.handler) + signal.signal(signal.SIGABRT, self.handler) + signal.signal(signal.SIGQUIT, self.handler) + + LOGGER.info('Starting OVS Control') + + # Get all components ready + self._ovs_control = OVSControl() + + self._ovs_control.restore_net() + + self._ovs_control.create_net() + + self._ovs_control.show_config() + + # Get network ready (via Network orchestrator) + LOGGER.info('Network is ready. Waiting for device information...') + + #Loop forever until process is stopped + while True: + LOGGER.info('OVS Running') + time.sleep(1000) + + # TODO: This time should be configurable (How long to hold before exiting, + # this could be infinite too) + #time.sleep(300) + + # Tear down network + #self._ovs_control.shutdown() + + def handler(self, signum): + LOGGER.info('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.info('Exit signal received: ' + str(signum)) + if (signum == 2 or signal == signal.SIGTERM): + LOGGER.info('Exit signal received. 
Restoring network...') + self._ovs_control.shutdown() + sys.exit(1) + +ovs = OVSControlRun() diff --git a/net_orc/network/modules/ovs/python/src/util.py b/net_orc/network/modules/ovs/python/src/util.py new file mode 100644 index 000000000..a3ebbb10a --- /dev/null +++ b/net_orc/network/modules/ovs/python/src/util.py @@ -0,0 +1,23 @@ +"""Provides basic utilities for a ovs module.""" +import subprocess +import logger + +LOGGER = logger.get_logger('util') + +def run_command(cmd): + success = False + process = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode != 0: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) + else: + msg = stdout.strip().decode('utf-8') + succ_msg = f'{msg}. Code: {process.returncode}' + LOGGER.info('Command Success: ' + cmd) + LOGGER.info('Success: ' + succ_msg) + success = True + return success diff --git a/net_orc/network/modules/radius/bin/start_network_service b/net_orc/network/modules/radius/bin/start_network_service new file mode 100644 index 000000000..e27a828dd --- /dev/null +++ b/net_orc/network/modules/radius/bin/start_network_service @@ -0,0 +1,20 @@ +#!/bin/bash + +PYTHON_SRC_DIR=/testrun/python/src +CONF_DIR="/testrun/conf" +LOG_FILE="/runtime/network/radius.log" + +echo Starting authenticator.py + +cp $CONF_DIR/eap /etc/freeradius/3.0/mods-available/eap + +# Do we want to mount resources/network/{module} to the network module to avoid file copying during build? 
+cp $CONF_DIR/ca.crt /etc/ssl/certs/ca-certificates.crt + +python3 -u $PYTHON_SRC_DIR/authenticator.py & + +#Create and set permissions on the log file +touch $LOG_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE + +freeradius -f -X &> $LOG_FILE \ No newline at end of file diff --git a/net_orc/network/modules/radius/conf/ca.crt b/net_orc/network/modules/radius/conf/ca.crt new file mode 100644 index 000000000..bb8aadf6a --- /dev/null +++ b/net_orc/network/modules/radius/conf/ca.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFDzCCAvegAwIBAgIJAOb7lZzENM1TMA0GCSqGSIb3DQEBCwUAMB0xCzAJBgNV +BAYTAkZSMQ4wDAYDVQQKDAVDQVRUSzAgFw0yMjEwMDcwODIxNTVaGA8yMDcyMDky +NDA4MjE1NVowHTELMAkGA1UEBhMCRlIxDjAMBgNVBAoMBUNBVFRLMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsZ+wd41TfLs5Vh5Wz1ESqIxwzu3iHWjJ +KbOlpnPdI6uPo9DU5xdmhcH0jc/RVis+EVn1ylFyzN3l4uIACah1Dk3frFXN/LWc +EzN7DyyHO56HZ5IpOFazVMQn5xrRwsglRop6et+Azqm+3xDpBSoKg8YhBAUsezuT +N0XlpsN3BMLjVXfwrTV1ECKP0Emg3qP3EaKRm1EdQ0uVNRNe24q5EDWiLnqlD14a +X5w1hHAj0Rr9kmKo+fs9WL7vIzbgy6xccfkKE8Wk7IR/xabTNjC5x+/7Pscqthic +tGYQ+Rm4Z1XTYDKBgoFHdI2ouscmiceqxESu3hW/IBe3iLin84kGywRGrzjLcOFI +adAj+0y3lGGV7Vw2RI3bUA6oOM8V1zbFUsZLq6+ylmvw0HQLAUeBODo6Iwu8ACxT +8/A+LmBUZFk7copLfvqFUmt8vjP7XiDuYsGvVJrTc6MJWWOITqyirhAkcP/vPoNK +l8PXhLGo66xG+hC57gCm3d3IwkXNLW6UhCHIuUa6LTTTaTehy2unDEm7Rt4ghWlw +2JuDr7QcZrWrRj1OwVAiPNkjLCF30aKxnVQxc2JY9W3H+xRC0YlDNmOpdHHvuJfS +1y1tNUq+fZQGybubDsa0l0LHfoKRGfeFXnxT6tyvNnGEaJG9mkLPXPkEBuadrnvA +oZeymb/D440CAwEAAaNQME4wHQYDVR0OBBYEFHKNGWOtO3haPEkZSVfgnxbEbTs3 +MB8GA1UdIwQYMBaAFHKNGWOtO3haPEkZSVfgnxbEbTs3MAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggIBAGzuawd3tYwDCnO84eZ+NT0uqQk37ss1fdwgDY7G +dAaeSb0oW5+tejdiLzWsKgHL7g6ClEOS3f2Vvo65fsH7BA5Ppmhk+4U6ppqbBiGG +v5WqnRz7OpdMTqunFsmYhbgbu+OZX3TTg6hkIYxFHlHDMpcMwMYtlWsPRZSlTM09 +BbaWyhqTZjbUIxemwc3JW+5zRYoA2ii/Om/2/9iUbngVqEilmUrflMcfn81ddate +0XwMcm/qhyKU+CIAPXmmtLkTms66FSSXMfqy1HizzSsCFntozUA7mtPRm53IsGpR +TOdGTe5Y5jJ/dlXwmZ5dmWBR8qlyxLpG0iB7KWNxs+V7B6kCFU3BhiLPiS/BnDap 
+EE1JDKu1jktJhxeAhmSsrvZ10bCKZW+dQbSjqr3wScYok/f05daB97LaAs869jra +93uJ7dYA9gfUtkaqZW9oqPrIO3FNZLL5D1z6eWcGC2+3MLhrtNTov3fthFGJyWf7 +iCBdQYofeR4EA4nfI+QcM2HAHNtChGESZ/8p/eBSU4GQW7zURELIKJ5OeTJZGAgs +bMbNbqbiyzCSuM2CHTN+Nw0rMc9AXkqSV57scCu/2ui1z1GKWeI65hKhwc++IXP7 +lJWv710T4+9DOgoi5sFNNLbRcVmkUeodFje83PTs+U/hgvQHW1+RTJ4ESTPMqVf1 +VTyk +-----END CERTIFICATE----- \ No newline at end of file diff --git a/net_orc/network/modules/radius/conf/eap b/net_orc/network/modules/radius/conf/eap new file mode 100644 index 000000000..a868f16cd --- /dev/null +++ b/net_orc/network/modules/radius/conf/eap @@ -0,0 +1,602 @@ +eap { + + default_eap_type = tls + + # A list is maintained to correlate EAP-Response + # packets with EAP-Request packets. After a + # configurable length of time, entries in the list + # expire, and are deleted. + # + timer_expire = 60 + + # There are many EAP types, but the server has support + # for only a limited subset. If the server receives + # a request for an EAP type it does not support, then + # it normally rejects the request. By setting this + # configuration to "yes", you can tell the server to + # instead keep processing the request. Another module + # MUST then be configured to proxy the request to + # another RADIUS server which supports that EAP type. + # + # If another module is NOT configured to handle the + # request, then the request will still end up being + # rejected. + # + ignore_unknown_eap_types = no + + # Cisco AP1230B firmware 12.2(13)JA1 has a bug. When given + # a User-Name attribute in an Access-Accept, it copies one + # more byte than it should. + # + # We can work around it by configurably adding an extra + # zero byte. + # + cisco_accounting_username_bug = no + + # Help prevent DoS attacks by limiting the number of + # sessions that the server is tracking. For simplicity, + # this is taken from the "max_requests" directive in + # radiusd.conf. 
+ # + max_sessions = ${max_requests} + + # Common TLS configuration for TLS-based EAP types + # ------------------------------------------------ + # + # See raddb/certs/README.md for additional comments + # on certificates. + # + # If OpenSSL was not found at the time the server was + # built, the "tls", "ttls", and "peap" sections will + # be ignored. + # + # If you do not currently have certificates signed by + # a trusted CA you may use the 'snakeoil' certificates. + # Included with the server in raddb/certs. + # + # If these certificates have not been auto-generated: + # cd raddb/certs + # make + # + # These test certificates SHOULD NOT be used in a normal + # deployment. They are created only to make it easier + # to install the server, and to perform some simple + # tests with EAP-TLS, TTLS, or PEAP. + # + # Note that you should NOT use a globally known CA here! + # e.g. using a Verisign cert as a "known CA" means that + # ANYONE who has a certificate signed by them can + # authenticate via EAP-TLS! This is likely not what you want. + # + tls-config tls-common { + private_key_password = whatever + private_key_file = /etc/ssl/private/ssl-cert-snakeoil.key + + # If Private key & Certificate are located in + # the same file, then private_key_file & + # certificate_file must contain the same file + # name. + # + # If ca_file (below) is not used, then the + # certificate_file below SHOULD also include all of + # the intermediate CA certificates used to sign the + # server certificate, but NOT the root CA. + # + # Including the ROOT CA certificate is not useful and + # merely inflates the exchanged data volume during + # the TLS negotiation. + # + # This file should contain the server certificate, + # followed by intermediate certificates, in order. + # i.e. 
If we have a server certificate signed by CA1, + # which is signed by CA2, which is signed by a root + # CA, then the "certificate_file" should contain + # server.pem, followed by CA1.pem, followed by + # CA2.pem. + # + # When using "ca_file" or "ca_dir", the + # "certificate_file" should contain only + # "server.pem". And then you may (or may not) need + # to set "auto_chain", depending on your version of + # OpenSSL. + # + # In short, SSL / TLS certificates are complex. + # There are many versions of software, each of which + # behave slightly differently. It is impossible to + # give advice which will work everywhere. Instead, + # we give general guidelines. + # + certificate_file = /etc/ssl/certs/ssl-cert-snakeoil.pem + + # Trusted Root CA list + # + # This file can contain multiple CA certificates. + # ALL of the CA's in this list will be trusted to + # issue client certificates for authentication. + # + # In general, you should use self-signed + # certificates for 802.1x (EAP) authentication. + # In that case, this CA file should contain + # *one* CA certificate. + # + ca_file = /etc/ssl/certs/ca-certificates.crt + + # Check the Certificate Revocation List + # + # 1) Copy CA certificates and CRLs to same directory. + # 2) Execute 'c_rehash '. + # 'c_rehash' is OpenSSL's command. + # 3) uncomment the lines below. + # 5) Restart radiusd + # check_crl = yes + + # Check if intermediate CAs have been revoked. + # check_all_crl = yes + + ca_path = ${cadir} + + # OpenSSL does not reload contents of ca_path dir over time. + # That means that if check_crl is enabled and CRLs are loaded + # from ca_path dir, at some point CRLs will expire and + # RADIUSd will stop authenticating users. 
+ # If ca_path_reload_interval is non-zero, it will force OpenSSL + # to reload all data from ca_path periodically + # + # Flush ca_path each hour + # ca_path_reload_interval = 3600 + + + # Accept an expired Certificate Revocation List + # + # allow_expired_crl = no + + # If check_cert_issuer is set, the value will + # be checked against the DN of the issuer in + # the client certificate. If the values do not + # match, the certificate verification will fail, + # rejecting the user. + # + # This check can be done more generally by checking + # the value of the TLS-Client-Cert-Issuer attribute. + # This check can be done via any mechanism you + # choose. + # + # check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd" + + # If check_cert_cn is set, the value will + # be xlat'ed and checked against the CN + # in the client certificate. If the values + # do not match, the certificate verification + # will fail rejecting the user. + # + # This check is done only if the previous + # "check_cert_issuer" is not set, or if + # the check succeeds. + # + # This check can be done more generally by writing + # "unlang" statements to examine the value of the + # TLS-Client-Cert-Common-Name attribute. + # + # check_cert_cn = %{User-Name} + + # + # This configuration item only applies when there is + # an intermediate CA between the "root" CA, and the + # client certificate. If we trust the root CA, then + # by definition we also trust ANY intermediate CA + # which is signed by that root. This means ANOTHER + # intermediate CA can issue client certificates, and + # have them accepted by the EAP module. + # + # The solution is to list ONLY the trusted CAs in the + # FreeRADIUS configuration, and then set this + # configuration item to "yes". + # + # Then, when the server receives a client certificate + # from an untrusted CA, that authentication request + # can be rejected. 
+ # + # It is possible to do these checks in "unlang", by + # checking for unknown names in the + # TLS-Cert-Common-Name attribute, but that is + # more complex. So we add a configuration option + # which can be set once, and which works for all + # possible intermediate CAs, no matter what their + # value. + # + # reject_unknown_intermediate_ca = no + + # Set this option to specify the allowed + # TLS cipher suites. The format is listed + # in "man 1 ciphers". + # + cipher_list = "DEFAULT" + + # If enabled, OpenSSL will use server cipher list + # (possibly defined by cipher_list option above) + # for choosing right cipher suite rather than + # using client-specified list which is OpenSSl default + # behavior. Setting this to "yes" means that OpenSSL + # will choose the servers ciphers, even if they do not + # best match what the client sends. + # + # TLS negotiation is usually good, but can be imperfect. + # This setting allows administrators to "fine tune" it + # if necessary. + # + cipher_server_preference = no + + # You can selectively disable TLS versions for + # compatability with old client devices. + # + # If your system has OpenSSL 1.1.0 or greater, do NOT + # use these. Instead, set tls_min_version and + # tls_max_version. + # +# disable_tlsv1_2 = yes +# disable_tlsv1_1 = yes +# disable_tlsv1 = yes + + + # Set min / max TLS version. + # + # Generally speaking you should NOT use TLS 1.0 or + # TLS 1.1. They are old, possibly insecure, and + # deprecated. However, it is sometimes necessary to + # enable it for compatibility with legact systems. + # We recommend replacing those legacy systems, and + # using at least TLS 1.2. + # + # Some Debian versions disable older versions of TLS, + # and requires the application to manually enable + # them. + # + # If you are running such a distribution, you should + # set these options, otherwise older clients will not + # be able to connect. + # + # Allowed values are "1.0", "1.1", "1.2", and "1.3". 
+ # + # As of 2021, it is STRONGLY RECOMMENDED to set + # + # tls_min_version = "1.2" + # + # Older TLS versions are insecure and deprecated. + # + # In order to enable TLS 1.0 and TLS 1.1, you may + # also need to update cipher_list below to: + # + # * OpenSSL >= 3.x + # + # cipher_list = "DEFAULT@SECLEVEL=0" + # + # * OpenSSL < 3.x + # + # cipher_list = "DEFAULT@SECLEVEL=1" + # + # The values must be in quotes. + # + # We also STRONGLY RECOMMEND to set + # + # tls_max_version = "1.2" + # + # While the server will accept "1.3" as a value, + # most EAP supplicants WILL NOT DO TLS 1.3 PROPERLY. + # + # i.e. they WILL NOT WORK, SO DO NOT ASK QUESTIONS ON + # THE LIST ABOUT WHY IT DOES NOT WORK. + # + # The TLS 1.3 support is here for future + # compatibility, as clients get upgraded, and people + # don't upgrade their copies of FreeRADIUS. + # + # Also note that we only support TLS 1.3 for EAP-TLS. + # Other versions of EAP (PEAP, TTLS, FAST) DO NOT + # SUPPORT TLS 1.3. + # + tls_min_version = "1.2" + tls_max_version = "1.2" + + # Elliptical cryptography configuration + # + # This configuration should be one of the following: + # + # * a name of the curve to use, e.g. "prime256v1". + # + # * a colon separated list of curve NIDs or names. + # + # * an empty string, in which case OpenSSL will choose + # the "best" curve for the situation. + # + # For supported curve names, please run + # + # openssl ecparam -list_curves + # + ecdh_curve = "" + + # Session resumption / fast reauthentication + # cache. + # + # The cache contains the following information: + # + # session Id - unique identifier, managed by SSL + # User-Name - from the Access-Accept + # Stripped-User-Name - from the Access-Request + # Cached-Session-Policy - from the Access-Accept + # + # See also the "store" subsection below for + # additional attributes which can be cached. + # + # The "Cached-Session-Policy" is the name of a + # policy which should be applied to the cached + # session. 
This policy can be used to assign + # VLANs, IP addresses, etc. It serves as a useful + # way to re-apply the policy from the original + # Access-Accept to the subsequent Access-Accept + # for the cached session. + # + # On session resumption, these attributes are + # copied from the cache, and placed into the + # reply list. + # + # You probably also want "use_tunneled_reply = yes" + # when using fast session resumption. + # + # You can check if a session has been resumed by + # looking for the existence of the EAP-Session-Resumed + # attribute. Note that this attribute will *only* + # exist in the "post-auth" section. + # + # CAVEATS: The cache is stored and reloaded BEFORE + # the "post-auth" section is run. This limitation + # makes caching more difficult than it should be. In + # practice, it means that the first authentication + # session must set the reply attributes before the + # post-auth section is run. + # + # When the session is resumed, the attributes are + # restored and placed into the session-state list. + # + cache { + # Enable it. The default is "no". Deleting the entire "cache" + # subsection also disables caching. + # + # The session cache requires the use of the + # "name" and "persist_dir" configuration + # items, below. + # + # The internal OpenSSL session cache has been permanently + # disabled. + # + # You can disallow resumption for a particular user by adding the + # following attribute to the control item list: + # + # Allow-Session-Resumption = No + # + # If "enable = no" below, you CANNOT enable resumption for just one + # user by setting the above attribute to "yes". + # + enable = no + + # Lifetime of the cached entries, in hours. The sessions will be + # deleted/invalidated after this time. + # + lifetime = 24 # hours + + # Internal "name" of the session cache. Used to + # distinguish which TLS context sessions belong to. + # + # The server will generate a random value if unset. 
+ # This will change across server restart so you MUST + # set the "name" if you want to persist sessions (see + # below). + # + # name = "EAP module" + + # Simple directory-based storage of sessions. + # Two files per session will be written, the SSL + # state and the cached VPs. This will persist session + # across server restarts. + # + # The default directory is ${logdir}, for historical + # reasons. You should ${db_dir} instead. And check + # the value of db_dir in the main radiusd.conf file. + # It should not point to ${raddb} + # + # The server will need write perms, and the directory + # should be secured from anyone else. You might want + # a script to remove old files from here periodically: + # + # find ${logdir}/tlscache -mtime +2 -exec rm -f {} \; + # + # This feature REQUIRES "name" option be set above. + # + # persist_dir = "${logdir}/tlscache" + + # + # As of 3.0.20, it is possible to partially + # control which attributes exist in the + # session cache. This subsection lists + # attributes which are taken from the reply, + # and saved to the on-disk cache. When the + # session is resumed, these attributes are + # added to the "session-state" list. The + # default configuration will then take care + # of copying them to the reply. + # + store { + Tunnel-Private-Group-Id + } + } + + # Client certificates can be validated via an + # external command. This allows dynamic CRLs or OCSP + # to be used. + # + # This configuration is commented out in the + # default configuration. Uncomment it, and configure + # the correct paths below to enable it. + # + # If OCSP checking is enabled, and the OCSP checks fail, + # the verify section is not run. + # + # If OCSP checking is disabled, the verify section is + # run on successful certificate validation. + # + verify { + # If the OCSP checks succeed, the verify section + # is run to allow additional checks. 
+ # + # If you want to skip verify on OCSP success, + # uncomment this configuration item, and set it + # to "yes". + # + # skip_if_ocsp_ok = no + + # A temporary directory where the client + # certificates are stored. This directory + # MUST be owned by the UID of the server, + # and MUST not be accessible by any other + # users. When the server starts, it will do + # "chmod go-rwx" on the directory, for + # security reasons. The directory MUST + # exist when the server starts. + # + # You should also delete all of the files + # in the directory when the server starts. + # + # tmpdir = /tmp/radiusd + + # The command used to verify the client cert. + # We recommend using the OpenSSL command-line + # tool. + # + # The ${..ca_path} text is a reference to + # the ca_path variable defined above. + # + # The %{TLS-Client-Cert-Filename} is the name + # of the temporary file containing the cert + # in PEM format. This file is automatically + # deleted by the server when the command + # returns. + # + # client = "/path/to/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}" + } + + # OCSP Configuration + # + # Certificates can be verified against an OCSP + # Responder. This makes it possible to immediately + # revoke certificates without the distribution of + # new Certificate Revocation Lists (CRLs). + # + ocsp { + # Enable it. The default is "no". + # Deleting the entire "ocsp" subsection + # also disables ocsp checking + # + enable = no + + # The OCSP Responder URL can be automatically + # extracted from the certificate in question. + # To override the OCSP Responder URL set + # "override_cert_url = yes". + # + override_cert_url = yes + + # If the OCSP Responder address is not extracted from + # the certificate, the URL can be defined here. + # + url = "http://127.0.0.1/ocsp/" + + # If the OCSP Responder can not cope with nonce + # in the request, then it can be disabled here. 
+ # + # For security reasons, disabling this option + # is not recommended as nonce protects against + # replay attacks. + # + # Note that Microsoft AD Certificate Services OCSP + # Responder does not enable nonce by default. It is + # more secure to enable nonce on the responder than + # to disable it in the query here. + # See http://technet.microsoft.com/en-us/library/cc770413%28WS.10%29.aspx + # + # use_nonce = yes + + # Number of seconds before giving up waiting + # for OCSP response. 0 uses system default. + # + # timeout = 0 + + # Normally an error in querying the OCSP + # responder (no response from server, server did + # not understand the request, etc) will result in + # a validation failure. + # + # To treat these errors as 'soft' failures and + # still accept the certificate, enable this + # option. + # + # Warning: this may enable clients with revoked + # certificates to connect if the OCSP responder + # is not available. Use with caution. + # + # softfail = no + } + + # + # The server can present different certificates based + # on the realm presented in EAP. See + # raddb/certs/realms/README.md for examples of how to + # configure this. + # + # Note that the default is to use the same set of + # realm certificates for both EAP and RadSec! If + # this is not what you want, you should use different + # subdirectories or each, e.g. ${certdir}/realms/radsec/, + # and ${certdir}/realms/eap/ + # + # realm_dir = ${certdir}/realms/ + } + + # EAP-TLS + # + # The TLS configuration for TLS-based EAP types is held in + # the "tls-config" section, above. + # + tls { + # Point to the common TLS configuration + # + tls = tls-common + + # As part of checking a client certificate, the EAP-TLS + # sets some attributes such as TLS-Client-Cert-Common-Name. This + # virtual server has access to these attributes, and can + # be used to accept or reject the request. 
+ # + # virtual_server = check-eap-tls + + # You can control whether or not EAP-TLS requires a + # client certificate by setting + # + # configurable_client_cert = yes + # + # Once that setting has been changed, you can then set + # + # EAP-TLS-Require-Client-Cert = No + # + # in the control items for a request, and the EAP-TLS + # module will not require a client certificate from + # the supplicant. + # + # WARNING: This configuration should only be used + # when the users are placed into a "captive portal" + # or "walled garden", where they have limited network + # access. Otherwise the configuraton will allow + # anyone on the network, without authenticating them! + # +# configurable_client_cert = no + } + +} diff --git a/net_orc/network/modules/radius/conf/module_config.json b/net_orc/network/modules/radius/conf/module_config.json new file mode 100644 index 000000000..ce8fbd52f --- /dev/null +++ b/net_orc/network/modules/radius/conf/module_config.json @@ -0,0 +1,23 @@ +{ + "config": { + "meta": { + "name": "radius", + "display_name": "Radius", + "description": "Enable port based authentication" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 7 + }, + "docker": { + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/radius/python/requirements.txt b/net_orc/network/modules/radius/python/requirements.txt new file mode 100644 index 000000000..37d126cb1 --- /dev/null +++ b/net_orc/network/modules/radius/python/requirements.txt @@ -0,0 +1,3 @@ +eventlet +pbr +transitions \ No newline at end of file diff --git a/net_orc/network/modules/radius/python/src/authenticator.py b/net_orc/network/modules/radius/python/src/authenticator.py new file mode 100644 index 000000000..32f4ac221 --- /dev/null +++ b/net_orc/network/modules/radius/python/src/authenticator.py @@ -0,0 +1,45 @@ +"""Authenticator for the RADIUS 
class Authenticator:
  """Authenticator for the RADIUS server.

  Wraps a Chewie 802.1X authenticator bound to INTERFACE_NAME and wires
  its success / failure / logoff callbacks to simple log lines.
  """

  def __init__(self):
    # NOTE: Chewie.run() blocks, so constructing this object starts the
    # authenticator event loop and does not return until it stops.
    self.chewie = Chewie(INTERFACE_NAME,
                         self._get_logger(),
                         self._auth_handler,
                         self._failure_handler,
                         self._logoff_handler,
                         radius_server_ip=RADIUS_SERVER_IP,
                         radius_server_port=RADIUS_SERVER_PORT,
                         radius_server_secret=RADIUS_SERVER_SECRET)
    self.chewie.run()

  def _get_logger(self):
    """Configure basic logging and return the 'chewie' logger."""
    logging.basicConfig(format=_LOG_FORMAT,
                        datefmt=_DATE_FORMAT,
                        level=logging.INFO)
    return logging.getLogger('chewie')

  # All three handlers accept *args/**kwargs so an extra positional or
  # keyword argument supplied by chewie never crashes a callback; the
  # original code only tolerated extras in the success handler.

  def _auth_handler(self, address, group_address, *args, **kwargs):  # pylint: disable=unused-argument
    """Called by Chewie on successful authentication."""
    print(f'Successful auth for {address} on port {group_address}')

  def _failure_handler(self, address, group_address, *args, **kwargs):  # pylint: disable=unused-argument
    """Called by Chewie when authentication fails."""
    print(f'Failed auth for {address} on port {group_address}')

  def _logoff_handler(self, address, group_address, *args, **kwargs):  # pylint: disable=unused-argument
    """Called by Chewie when a supplicant logs off."""
    print(f'Log off reported for {address} on port {group_address}')
+RUN git clone --branch 0.0.25 https://github.com/faucetsdn/chewie + +# Install chewie as Python module +RUN pip3 install chewie/ + +EXPOSE 1812/udp +EXPOSE 1813/udp + +# Copy over all configuration files +COPY network/modules/radius/conf /testrun/conf + +# Copy over all binary files +COPY network/modules/radius/bin /testrun/bin + +# Copy over all python files +COPY network/modules/radius/python /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt \ No newline at end of file diff --git a/net_orc/network/modules/template/bin/start_network_service b/net_orc/network/modules/template/bin/start_network_service new file mode 100644 index 000000000..94ae0def9 --- /dev/null +++ b/net_orc/network/modules/template/bin/start_network_service @@ -0,0 +1,13 @@ +#!/bin/bash + +# Place holder function for testing and validation +# Each network module should include a start_networkig_service +# file that overwrites this one to boot all of the its specific +# requirements to run. + +echo "Starting network service..." 
+echo "This is not a real network service, just a test" +echo "Network service started" + +# Do Nothing, just keep the module alive +while true; do sleep 1; done \ No newline at end of file diff --git a/net_orc/network/modules/template/conf/module_config.json b/net_orc/network/modules/template/conf/module_config.json new file mode 100644 index 000000000..c767c9ad6 --- /dev/null +++ b/net_orc/network/modules/template/conf/module_config.json @@ -0,0 +1,27 @@ +{ + "config": { + "meta": { + "name": "template", + "display_name": "Template", + "description": "Template for building network service modules" + }, + "network": { + "interface": "veth0", + "enable_wan": false, + "ip_index": 9 + }, + "grpc": { + "port": 50001 + }, + "docker": { + "enable_container": false, + "depends_on": "base", + "mounts": [ + { + "source": "runtime/network", + "target": "/runtime/network" + } + ] + } + } +} \ No newline at end of file diff --git a/net_orc/network/modules/template/python/src/template_main.py b/net_orc/network/modules/template/python/src/template_main.py new file mode 100644 index 000000000..df2452550 --- /dev/null +++ b/net_orc/network/modules/template/python/src/template_main.py @@ -0,0 +1,4 @@ +"""Python code for the template module.""" + +if __name__ == "__main__": + print("Template main") diff --git a/net_orc/network/modules/template/template.Dockerfile b/net_orc/network/modules/template/template.Dockerfile new file mode 100644 index 000000000..45f9da6d9 --- /dev/null +++ b/net_orc/network/modules/template/template.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/template +FROM test-run/base:latest + +# Copy over all configuration files +COPY network/modules/template/conf /testrun/conf + +# Load device binary files +COPY network/modules/template/bin /testrun/bin + +# Copy over all python files +COPY network/modules/template/python /testrun/python \ No newline at end of file diff --git a/net_orc/orchestrator.Dockerfile b/net_orc/orchestrator.Dockerfile new file mode 
class Listener:
  """Methods to start and stop the network listener."""

  def __init__(self, device_intf):
    self._device_intf = device_intf
    self._device_intf_mac = get_if_hwaddr(self._device_intf)

    # Sniff asynchronously so start_listener() returns immediately.
    self._sniffer = AsyncSniffer(iface=self._device_intf,
                                 prn=self._packet_callback)

    self._callbacks = []
    self._discovered_devices = []

  def start_listener(self):
    """Start sniffing packets on the device interface."""
    self._sniffer.start()

  def stop_listener(self):
    """Stop sniffing packets on the device interface."""
    self._sniffer.stop()

  def is_running(self):
    """Determine whether the sniffer is running."""
    return self._sniffer.running

  def register_callback(self, callback, events=None):
    """Register a callback for specified events.

    Args:
      callback: Callable invoked (on a new thread) when a matching
        network event is observed.
      events: Iterable of NetworkEvent values the callback handles.
        Defaults to no events. A None sentinel replaces the previous
        mutable default argument, so registrations no longer share
        one list object.
    """
    self._callbacks.append({
        'callback': callback,
        'events': [] if events is None else events
    })

  def call_callback(self, net_event, *args):
    """Invoke every callback registered for net_event, each on its own thread."""
    for registration in self._callbacks:
      if net_event in registration['events']:
        callback_thread = threading.Thread(target=registration['callback'],
                                           name='Callback thread',
                                           args=args)
        callback_thread.start()

  def _packet_callback(self, packet):
    """Sniffer hook: raise events for DHCP ACKs and newly seen source MACs."""

    # DHCP ACK callback
    if DHCP in packet and self._get_dhcp_type(packet) == DHCP_ACK:
      self.call_callback(NetworkEvent.DHCP_LEASE_ACK, packet)

    # New device discovered callback
    if packet.src is not None and packet.src not in self._discovered_devices:
      # Ignore packets originating from our containers or our own interface
      if packet.src.startswith(
          CONTAINER_MAC_PREFIX) or packet.src == self._device_intf_mac:
        return
      self._discovered_devices.append(packet.src)
      self.call_callback(NetworkEvent.DEVICE_DISCOVERED, packet.src)

  def _get_dhcp_type(self, packet):
    """Return the DHCP message type (value of the first DHCP option)."""
    return packet[DHCP].options[0][1]
class NetworkEvent(Enum):
  """All possible network events."""
  # A previously unseen source MAC address appeared on the device interface.
  DEVICE_DISCOVERED = 1
  # The device has been monitored for the full monitor period.
  DEVICE_STABLE = 2
  # A DHCP ACK was observed, granting the device a lease.
  DHCP_LEASE_ACK = 3
  def start(self):
    """Start the network orchestrator.

    Loads the network module definitions, forcibly tears down any
    leftover network state from a previous run, brings the virtual
    network up, and then monitors it — on a daemon thread when
    async_monitor is set, otherwise blocking in this call.
    """

    LOGGER.info('Starting Network Orchestrator')
    # Get all components ready
    self.load_network_modules()

    # Restore the network first if required
    self.stop(kill=True)

    self.start_network()

    if self.async_monitor:
      # Run the monitor method asynchronously to keep this method non-blocking
      self._monitor_thread = threading.Thread(target=self.monitor_network)
      self._monitor_thread.daemon = True
      self._monitor_thread.start()
    else:
      self.monitor_network()
long to hold before exiting, + # this could be infinite too) + time.sleep(RUNTIME) + + self.stop() + + def load_config(self, config_file=None): + if config_file is None: + # If not defined, use relative pathing to local file + self._config_file = os.path.join(self._path, CONFIG_FILE) + else: + # If defined, use as provided + self._config_file = config_file + + if not os.path.isfile(self._config_file): + LOGGER.error('Configuration file is not present at ' + config_file) + LOGGER.info('An example is present in '+ EXAMPLE_CONFIG_FILE) + sys.exit(1) + + LOGGER.info('Loading config file: ' + os.path.abspath(self._config_file)) + with open(self._config_file, encoding='UTF-8') as config_json_file: + config_json = json.load(config_json_file) + self.import_config(config_json) + + def _device_discovered(self, mac_addr): + + LOGGER.debug( + f'Discovered device {mac_addr}. Waiting for device to obtain IP') + device = self._get_device(mac_addr=mac_addr) + os.makedirs( + os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', + ''))) + + timeout = time.time() + self._startup_timeout + + while time.time() < timeout: + if device.ip_addr is None: + time.sleep(3) + else: + break + + if device.ip_addr is None: + LOGGER.info( + f'Timed out whilst waiting for {mac_addr} to obtain an IP address') + return + + LOGGER.info( + f'Device with mac addr {device.mac_addr} has obtained IP address ' + f'{device.ip_addr}') + + self._start_device_monitor(device) + + def _dhcp_lease_ack(self, packet): + mac_addr = packet[BOOTP].chaddr.hex(':')[0:17] + device = self._get_device(mac_addr=mac_addr) + device.ip_addr = packet[BOOTP].yiaddr + + def _start_device_monitor(self, device): + """Start a timer until the steady state has been reached and + callback the steady state method for this device.""" + LOGGER.info( + f'Monitoring device with mac addr {device.mac_addr} ' + f'for {str(self._monitor_period)} seconds') + + packet_capture = sniff(iface=self._dev_intf, timeout=self._monitor_period) 
+ wrpcap( + os.path.join(RUNTIME_DIR, DEVICES_DIR, device.mac_addr.replace(':', ''), + 'monitor.pcap'), packet_capture) + self.listener.call_callback(NetworkEvent.DEVICE_STABLE, device.mac_addr) + + def _get_device(self, mac_addr): + for device in self._devices: + if device.mac_addr == mac_addr: + return device + + device = NetworkDevice(mac_addr=mac_addr) + self._devices.append(device) + return device + + def import_config(self, json_config): + self._int_intf = json_config['network']['internet_intf'] + self._dev_intf = json_config['network']['device_intf'] + + if RUNTIME_KEY in json_config: + self._runtime = json_config[RUNTIME_KEY] + if STARTUP_TIMEOUT_KEY in json_config: + self._startup_timeout = json_config[STARTUP_TIMEOUT_KEY] + if MONITOR_PERIOD_KEY in json_config: + self._monitor_period = json_config[MONITOR_PERIOD_KEY] + + def _check_network_services(self): + LOGGER.debug('Checking network modules...') + for net_module in self._net_modules: + if net_module.enable_container: + LOGGER.debug('Checking network module: ' + net_module.display_name) + success = self._ping(net_module) + if success: + LOGGER.debug(net_module.display_name + ' responded succesfully: ' + + str(success)) + else: + LOGGER.error(net_module.display_name + ' failed to respond to ping') + + def _ping(self, net_module): + host = net_module.net_config.ipv4_address + namespace = 'tr-ctns-' + net_module.dir_name + cmd = 'ip netns exec ' + namespace + ' ping -c 1 ' + str(host) + success = util.run_command(cmd, output=False) + return success + + def _create_private_net(self): + client = docker.from_env() + try: + network = client.networks.get(PRIVATE_DOCKER_NET) + network.remove() + except docker.errors.NotFound: + pass + + # TODO: These should be made into variables + ipam_pool = docker.types.IPAMPool(subnet='100.100.0.0/16', + iprange='100.100.100.0/24') + + ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool]) + + client.networks.create(PRIVATE_DOCKER_NET, + ipam=ipam_config, + 
internal=True, + check_duplicate=True, + driver='macvlan') + + def _ci_pre_network_create(self): + """ Stores network properties to restore network after + network creation and flushes internet interface + """ + + self._ethmac = subprocess.check_output( + f'cat /sys/class/net/{self._int_intf}/address', + shell=True).decode('utf-8').strip() + self._gateway = subprocess.check_output( + 'ip route | head -n 1 | awk \'{print $3}\'', + shell=True).decode('utf-8').strip() + self._ipv4 = subprocess.check_output( + f'ip a show {self._int_intf} | grep \"inet \" | awk \'{{print $2}}\'', + shell=True).decode('utf-8').strip() + self._ipv6 = subprocess.check_output( + f'ip a show {self._int_intf} | grep inet6 | awk \'{{print $2}}\'', + shell=True).decode('utf-8').strip() + self._brd = subprocess.check_output( + f'ip a show {self._int_intf} | grep \"inet \" | awk \'{{print $4}}\'', + shell=True).decode('utf-8').strip() + + def _ci_post_network_create(self): + """ Restore network connection in CI environment """ + LOGGER.info('post cr') + util.run_command(f'ip address del {self._ipv4} dev {self._int_intf}') + util.run_command(f'ip -6 address del {self._ipv6} dev {self._int_intf}') + util.run_command( + f'ip link set dev {self._int_intf} address 00:B0:D0:63:C2:26') + util.run_command(f'ip addr flush dev {self._int_intf}') + util.run_command(f'ip addr add dev {self._int_intf} 0.0.0.0') + util.run_command( + f'ip addr add dev {INTERNET_BRIDGE} {self._ipv4} broadcast {self._brd}') + util.run_command(f'ip -6 addr add {self._ipv6} dev {INTERNET_BRIDGE} ') + util.run_command( + f'systemd-resolve --interface {INTERNET_BRIDGE} --set-dns 8.8.8.8') + util.run_command(f'ip link set dev {INTERNET_BRIDGE} up') + util.run_command(f'dhclient {INTERNET_BRIDGE}') + util.run_command('ip route del default via 10.1.0.1') + util.run_command( + f'ip route add default via {self._gateway} ' + f'src {self._ipv4[:-3]} metric 100 dev {INTERNET_BRIDGE}') + + def create_net(self): + LOGGER.info('Creating 
baseline network') + + if not util.interface_exists(self._int_intf) or not util.interface_exists( + self._dev_intf): + LOGGER.error('Configured interfaces are not ready for use. ' + + 'Ensure both interfaces are connected.') + sys.exit(1) + + if self._single_intf: + self._ci_pre_network_create() + + # Create data plane + util.run_command('ovs-vsctl add-br ' + DEVICE_BRIDGE) + + # Create control plane + util.run_command('ovs-vsctl add-br ' + INTERNET_BRIDGE) + + # Add external interfaces to data and control plane + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + + self._dev_intf) + util.run_command('ovs-vsctl add-port ' + INTERNET_BRIDGE + ' ' + + self._int_intf) + + # Enable forwarding of eapol packets + util.run_command('ovs-ofctl add-flow ' + DEVICE_BRIDGE + + ' \'table=0, dl_dst=01:80:c2:00:00:03, actions=flood\'') + + # Remove IP from internet adapter + util.run_command('ifconfig ' + self._int_intf + ' 0.0.0.0') + + # Set ports up + util.run_command('ip link set dev ' + DEVICE_BRIDGE + ' up') + util.run_command('ip link set dev ' + INTERNET_BRIDGE + ' up') + + if self._single_intf: + self._ci_post_network_create() + + self._create_private_net() + + self.listener = Listener(self._dev_intf) + self.listener.register_callback(self._device_discovered, + [NetworkEvent.DEVICE_DISCOVERED]) + self.listener.register_callback(self._dhcp_lease_ack, + [NetworkEvent.DHCP_LEASE_ACK]) + self.listener.start_listener() + + def load_network_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug('Loading network modules from /' + NETWORK_MODULES_DIR) + + loaded_modules = 'Loaded the following network modules: ' + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + for module_dir in os.listdir(net_modules_dir): + + if self._get_network_module(module_dir) is None: + loaded_module = self._load_network_module(module_dir) + loaded_modules += loaded_module.dir_name + ' ' + + LOGGER.info(loaded_modules) + + def 
_load_network_module(self, module_dir): + + net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR) + + net_module = NetworkModule() + + # Load module information + with open(os.path.join(self._path, net_modules_dir, module_dir, + NETWORK_MODULE_METADATA), 'r', + encoding='UTF-8') as module_file_open: + net_module_json = json.load(module_file_open) + + net_module.name = net_module_json['config']['meta']['name'] + net_module.display_name = net_module_json['config']['meta']['display_name'] + net_module.description = net_module_json['config']['meta']['description'] + net_module.dir = os.path.join(self._path, net_modules_dir, module_dir) + net_module.dir_name = module_dir + net_module.build_file = module_dir + '.Dockerfile' + net_module.container_name = 'tr-ct-' + net_module.dir_name + net_module.image_name = 'test-run/' + net_module.dir_name + + # Attach folder mounts to network module + if 'docker' in net_module_json['config']: + + if 'mounts' in net_module_json['config']['docker']: + for mount_point in net_module_json['config']['docker']['mounts']: + net_module.mounts.append( + Mount(target=mount_point['target'], + source=os.path.join(os.getcwd(), mount_point['source']), + type='bind')) + + if 'depends_on' in net_module_json['config']['docker']: + depends_on_module = net_module_json['config']['docker']['depends_on'] + if self._get_network_module(depends_on_module) is None: + self._load_network_module(depends_on_module) + + # Determine if this is a container or just an image/template + if 'enable_container' in net_module_json['config']['docker']: + net_module.enable_container = net_module_json['config']['docker'][ + 'enable_container'] + + # Load network service networking configuration + if net_module.enable_container: + + net_module.net_config.enable_wan = net_module_json['config']['network'][ + 'enable_wan'] + net_module.net_config.ip_index = net_module_json['config']['network'][ + 'ip_index'] + + net_module.net_config.host = False if not 'host' in 
net_module_json[ + 'config']['network'] else net_module_json['config']['network']['host'] + + net_module.net_config.ipv4_address = self.network_config.ipv4_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv4_network = self.network_config.ipv4_network + + net_module.net_config.ipv6_address = self.network_config.ipv6_network[ + net_module.net_config.ip_index] + net_module.net_config.ipv6_network = self.network_config.ipv6_network + + self._net_modules.append(net_module) + return net_module + + def build_network_modules(self): + LOGGER.info('Building network modules...') + for net_module in self._net_modules: + self._build_module(net_module) + + def _build_module(self, net_module): + LOGGER.debug('Building network module ' + net_module.dir_name) + client = docker.from_env() + client.images.build(dockerfile=os.path.join(net_module.dir, + net_module.build_file), + path=self._path, + forcerm=True, + tag='test-run/' + net_module.dir_name) + + def _get_network_module(self, name): + for net_module in self._net_modules: + if name in (net_module.display_name, net_module.name, + net_module.dir_name): + return net_module + return None + + # Start the OVS network module + # This should always be called before loading all + # other modules to allow for a properly setup base + # network + def _start_ovs_module(self): + self._start_network_service(self._get_network_module('OVS')) + + def _start_network_service(self, net_module): + + LOGGER.debug('Starting net service ' + net_module.display_name) + network = 'host' if net_module.net_config.host else PRIVATE_DOCKER_NET + LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name}, + container name: {net_module.container_name}""") + try: + client = docker.from_env() + net_module.container = client.containers.run( + net_module.image_name, + auto_remove=True, + cap_add=['NET_ADMIN'], + name=net_module.container_name, + hostname=net_module.container_name, + network=PRIVATE_DOCKER_NET, + privileged=True, + 
detach=True, + mounts=net_module.mounts, + environment={'HOST_USER': getpass.getuser()}) + except docker.errors.ContainerError as error: + LOGGER.error('Container run error') + LOGGER.error(error) + + if network != 'host': + self._attach_service_to_network(net_module) + + def _stop_service_module(self, net_module, kill=False): + LOGGER.debug('Stopping Service container ' + net_module.container_name) + try: + container = self._get_service_container(net_module) + if container is not None: + if kill: + LOGGER.debug('Killing container:' + net_module.container_name) + container.kill() + else: + LOGGER.debug('Stopping container:' + net_module.container_name) + container.stop() + LOGGER.debug('Container stopped:' + net_module.container_name) + except Exception as error: # pylint: disable=W0703 + LOGGER.error('Container stop error') + LOGGER.error(error) + + def _get_service_container(self, net_module): + LOGGER.debug('Resolving service container: ' + net_module.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_module.container_name) + except docker.errors.NotFound: + LOGGER.debug('Container ' + net_module.container_name + ' not found') + except Exception as e: # pylint: disable=W0703 + LOGGER.error('Failed to resolve container') + LOGGER.error(e) + return container + + def stop_networking_services(self, kill=False): + LOGGER.info('Stopping network services') + for net_module in self._net_modules: + # Network modules may just be Docker images, + # so we do not want to stop them + if not net_module.enable_container: + continue + self._stop_service_module(net_module, kill) + + def start_network_services(self): + LOGGER.info('Starting network services') + + os.makedirs(os.path.join(os.getcwd(), NET_DIR), exist_ok=True) + + for net_module in self._net_modules: + + # TODO: There should be a better way of doing this + # Do not try starting OVS module again, as it should already be running + if 'OVS' != 
net_module.display_name: + + # Network modules may just be Docker images, + # so we do not want to start them as containers + if not net_module.enable_container: + continue + + self._start_network_service(net_module) + + LOGGER.info('All network services are running') + self._check_network_services() + + def attach_test_module_to_network(self, test_module): + LOGGER.debug('Attaching test module ' + test_module.display_name + + ' to device bridge') + + # Device bridge interface example: + # tr-d-t-baseline (Test Run Device Interface for Test container) + bridge_intf = DEVICE_BRIDGE + '-t-' + test_module.dir_name + + # Container interface example: + # tr-cti-baseline-test (Test Run Container Interface for test container) + container_intf = 'tr-tci-' + test_module.dir_name + + # Container network namespace name + container_net_ns = 'tr-test-' + test_module.dir_name + + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) + + # Add bridge interface to device bridge + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + test_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command('ln -sf /proc/' + container_pid + + '/ns/net /var/run/netns/' + container_net_ns) + + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) + + # Rename container interface name to veth0 + util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + + container_intf + ' name veth0') + + # Set MAC address of container interface + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 address 9a:02:57:1e:8f:' + + str(test_module.ip_index)) + + # Set IP address of 
container interface + ipv4_address = self.network_config.ipv4_network[test_module.ip_index] + ipv6_address = self.network_config.ipv6_network[test_module.ip_index] + + ipv4_address_with_prefix = str(ipv4_address) + '/' + str( + self.network_config.ipv4_network.prefixlen) + ipv6_address_with_prefix = str(ipv6_address) + '/' + str( + self.network_config.ipv6_network.prefixlen) + + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + ipv4_address_with_prefix + ' dev veth0') + + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + ipv6_address_with_prefix + ' dev veth0') + + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 up') + + # TODO: Let's move this into a separate script? It does not look great + def _attach_service_to_network(self, net_module): + LOGGER.debug('Attaching net service ' + net_module.display_name + + ' to device bridge') + + # Device bridge interface example: + # tr-di-dhcp (Test Run Device Interface for DHCP container) + bridge_intf = DEVICE_BRIDGE + 'i-' + net_module.dir_name + + # Container interface example: + # tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = 'tr-cti-' + net_module.dir_name + + # Container network namespace name + container_net_ns = 'tr-ctns-' + net_module.dir_name + + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) + + # Add bridge interface to device bridge + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + net_module.container_name)[0] + + # Create symlink for container network namespace + util.run_command('ln -sf /proc/' + container_pid + + '/ns/net 
/var/run/netns/' + container_net_ns) + + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) + + # Rename container interface name to veth0 + util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + + container_intf + ' name veth0') + + # Set MAC address of container interface + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 address 9a:02:57:1e:8f:' + + str(net_module.net_config.ip_index)) + + # Set IP address of container interface + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + net_module.net_config.get_ipv4_addr_with_prefix() + + ' dev veth0') + + util.run_command('ip netns exec ' + container_net_ns + ' ip addr add ' + + net_module.net_config.get_ipv6_addr_with_prefix() + + ' dev veth0') + + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 up') + + if net_module.net_config.enable_wan: + LOGGER.debug('Attaching net service ' + net_module.display_name + + ' to internet bridge') + + # Internet bridge interface example: + # tr-ci-dhcp (Test Run Control (Internet) Interface for DHCP container) + bridge_intf = INTERNET_BRIDGE + 'i-' + net_module.dir_name + + # Container interface example: + # tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = 'tr-cti-' + net_module.dir_name + + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) + + # Attach bridge interface to internet bridge + util.run_command('ovs-vsctl add-port ' + INTERNET_BRIDGE + ' ' + + bridge_intf) + + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) + + # Rename container interface name to eth1 + util.run_command('ip netns exec ' + 
container_net_ns + + ' ip link set dev ' + container_intf + ' name eth1') + + # Set MAC address of container interface + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev eth1 address 9a:02:57:1e:8f:0' + + str(net_module.net_config.ip_index)) + + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev eth1 up') + + def restore_net(self): + + LOGGER.info('Clearing baseline network') + + if hasattr(self, 'listener' + ) and self.listener is not None and self.listener.is_running(): + self.listener.stop_listener() + + client = docker.from_env() + + # Stop all network containers if still running + for net_module in self._net_modules: + try: + container = client.containers.get('tr-ct-' + net_module.dir_name) + container.kill() + except Exception: # pylint: disable=W0703 + continue + + # Delete data plane + util.run_command('ovs-vsctl --if-exists del-br tr-d') + + # Delete control plane + util.run_command('ovs-vsctl --if-exists del-br tr-c') + + # Restart internet interface + if util.interface_exists(self._int_intf): + util.run_command('ip link set ' + self._int_intf + ' down') + util.run_command('ip link set ' + self._int_intf + ' up') + + LOGGER.info('Network is restored') + + +class NetworkModule: + """Define all the properties of a Network Module""" + + def __init__(self): + self.name = None + self.display_name = None + self.description = None + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + + self.net_config = NetworkModuleNetConfig() + + +# The networking configuration for a network module + + +class NetworkModuleNetConfig: + """Define all the properties of the network config + for a network module""" + + def __init__(self): + + self.enable_wan = False + + self.ip_index = 0 
+ self.ipv4_address = None + self.ipv4_network = None + self.ipv6_address = None + self.ipv6_network = None + + self.host = False + + def get_ipv4_addr_with_prefix(self): + return format(self.ipv4_address) + '/' + str(self.ipv4_network.prefixlen) + + def get_ipv6_addr_with_prefix(self): + return format(self.ipv6_address) + '/' + str(self.ipv6_network.prefixlen) + + +# Represents the current configuration of the network for the device bridge + + +class NetworkConfig: + """Define all the properties of the network configuration""" + + # TODO: Let's get this from a configuration file + def __init__(self): + self.ipv4_network = ipaddress.ip_network('10.10.10.0/24') + self.ipv6_network = ipaddress.ip_network('fd10:77be:4186::/64') diff --git a/net_orc/python/src/network_validator.py b/net_orc/python/src/network_validator.py new file mode 100644 index 000000000..83ca6f671 --- /dev/null +++ b/net_orc/python/src/network_validator.py @@ -0,0 +1,262 @@ +"""Holds logic for validation of network services prior to runtime.""" +import json +import os +import shutil +import time +import docker +from docker.types import Mount +import getpass +import logger +import util + +LOGGER = logger.get_logger('validator') +OUTPUT_DIR = 'runtime/validation' +DEVICES_DIR = 'network/devices' +DEVICE_METADATA = 'conf/module_config.json' +DEVICE_BRIDGE = 'tr-d' +CONF_DIR = 'conf' +CONF_FILE = 'system.json' + + +class NetworkValidator: + """Perform validation of network services.""" + + def __init__(self): + self._net_devices = [] + + self._path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + self._device_dir = os.path.join(self._path, DEVICES_DIR) + + shutil.rmtree(os.path.join(self._path, OUTPUT_DIR), ignore_errors=True) + + def start(self): + """Start the network validator.""" + LOGGER.info('Starting validator') + self._load_devices() + self._build_network_devices() + self._start_network_devices() + + def stop(self, kill=False): + """Stop the network 
validator.""" + LOGGER.info('Stopping validator') + self._stop_network_devices(kill) + LOGGER.info('Validator stopped') + + def _build_network_devices(self): + LOGGER.debug('Building network validators...') + for net_device in self._net_devices: + self._build_device(net_device) + + def _build_device(self, net_device): + LOGGER.debug('Building network validator ' + net_device.dir_name) + try: + client = docker.from_env() + client.images.build(dockerfile=os.path.join(net_device.dir, + net_device.build_file), + path=self._path, + forcerm=True, + tag='test-run/' + net_device.dir_name) + LOGGER.debug('Validator device built: ' + net_device.dir_name) + except docker.errors.BuildError as error: + LOGGER.error('Container build error') + LOGGER.error(error) + + def _load_devices(self): + + LOGGER.info(f'Loading validators from {DEVICES_DIR}') + + loaded_devices = 'Loaded the following validators: ' + + for module_dir in os.listdir(self._device_dir): + + device = FauxDevice() + + # Load basic module information + with open(os.path.join(self._device_dir, module_dir, DEVICE_METADATA), + encoding='utf-8') as device_config_file: + device_json = json.load(device_config_file) + + device.name = device_json['config']['meta']['name'] + device.description = device_json['config']['meta']['description'] + + device.dir = os.path.join(self._path, self._device_dir, module_dir) + device.dir_name = module_dir + device.build_file = module_dir + '.Dockerfile' + device.container_name = 'tr-ct-' + device.dir_name + device.image_name = 'test-run/' + device.dir_name + + runtime_source = os.path.join(os.getcwd(), OUTPUT_DIR, device.name) + conf_source = os.path.join(os.getcwd(), CONF_DIR) + os.makedirs(runtime_source, exist_ok=True) + + device.mounts = [ + Mount(target='/runtime/validation', + source=runtime_source, + type='bind'), + Mount(target='/conf', source=conf_source, type='bind', + read_only=True), + Mount(target='/runtime/network', source=runtime_source, type='bind') + ] + + if 'timeout' 
in device_json['config']['docker']: + device.timeout = device_json['config']['docker']['timeout'] + + # Determine if this is a container or just an image/template + if 'enable_container' in device_json['config']['docker']: + device.enable_container = device_json['config']['docker'][ + 'enable_container'] + + self._net_devices.append(device) + + loaded_devices += device.dir_name + ' ' + + LOGGER.info(loaded_devices) + + def _start_network_devices(self): + LOGGER.debug('Starting network devices') + for net_device in self._net_devices: + self._start_network_device(net_device) + + def _start_network_device(self, device): + LOGGER.info('Starting device ' + device.name) + LOGGER.debug('Image name: ' + device.image_name) + LOGGER.debug('Container name: ' + device.container_name) + + try: + client = docker.from_env() + device.container = client.containers.run( + device.image_name, + auto_remove=True, + cap_add=['NET_ADMIN'], + name=device.container_name, + hostname=device.container_name, + network='none', + privileged=True, + detach=True, + mounts=device.mounts, + environment={'HOST_USER': getpass.getuser()}) + except docker.errors.ContainerError as error: + LOGGER.error('Container run error') + LOGGER.error(error) + + self._attach_device_to_network(device) + + # Determine the module timeout time + test_module_timeout = time.time() + device.timeout + status = self._get_device_status(device) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._get_device_status(device) + + LOGGER.info('Validation device ' + device.name + ' has finished') + + def _get_device_status(self, module): + container = self._get_device_container(module) + if container is not None: + return container.status + return None + + def _attach_device_to_network(self, device): + LOGGER.debug('Attaching device ' + device.name + ' to device bridge') + + # Device bridge interface example: tr-di-dhcp + # (Test Run Device Interface for DHCP container) + bridge_intf 
= DEVICE_BRIDGE + 'i-' + device.dir_name + + # Container interface example: + # tr-cti-dhcp (Test Run Container Interface for DHCP container) + container_intf = 'tr-cti-' + device.dir_name + + # Container network namespace name + container_net_ns = 'tr-ctns-' + device.dir_name + + # Create interface pair + util.run_command('ip link add ' + bridge_intf + ' type veth peer name ' + + container_intf) + + # Add bridge interface to device bridge + util.run_command('ovs-vsctl add-port ' + DEVICE_BRIDGE + ' ' + bridge_intf) + + # Get PID for running container + # TODO: Some error checking around missing PIDs might be required + container_pid = util.run_command('docker inspect -f {{.State.Pid}} ' + + device.container_name)[0] + + # Create symlink for container network namespace + util.run_command('ln -sf /proc/' + container_pid + + '/ns/net /var/run/netns/' + container_net_ns) + + # Attach container interface to container network namespace + util.run_command('ip link set ' + container_intf + ' netns ' + + container_net_ns) + + # Rename container interface name to veth0 + util.run_command('ip netns exec ' + container_net_ns + ' ip link set dev ' + + container_intf + ' name veth0') + + # Set interfaces up + util.run_command('ip link set dev ' + bridge_intf + ' up') + util.run_command('ip netns exec ' + container_net_ns + + ' ip link set dev veth0 up') + + def _stop_network_device(self, net_device, kill=False): + LOGGER.debug('Stopping device container ' + net_device.container_name) + try: + container = self._get_device_container(net_device) + if container is not None: + if kill: + LOGGER.debug('Killing container:' + net_device.container_name) + container.kill() + else: + LOGGER.debug('Stopping container:' + net_device.container_name) + container.stop() + LOGGER.debug('Container stopped:' + net_device.container_name) + except Exception as e: # pylint: disable=W0703 + LOGGER.error('Container stop error') + LOGGER.error(e) + + def _get_device_container(self, net_device): + 
LOGGER.debug('Resolving device container: ' + net_device.container_name) + container = None + try: + client = docker.from_env() + container = client.containers.get(net_device.container_name) + except docker.errors.NotFound: + LOGGER.debug('Container ' + net_device.container_name + ' not found') + except Exception as e: # pylint: disable=W0703 + LOGGER.error('Failed to resolve container') + LOGGER.error(e) + return container + + def _stop_network_devices(self, kill=False): + LOGGER.debug('Stopping devices') + for net_device in self._net_devices: + # Devices may just be Docker images, so we do not want to stop them + if not net_device.enable_container: + continue + self._stop_network_device(net_device, kill) + + +class FauxDevice: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represent a faux device.""" + + def __init__(self): + self.name = 'Unknown device' + self.description = 'Unknown description' + + self.container = None + self.container_name = None + self.image_name = None + + # Absolute path + self.dir = None + + self.dir_name = None + self.build_file = None + self.mounts = [] + + self.enable_container = True + self.timeout = 60 diff --git a/net_orc/python/src/util.py b/net_orc/python/src/util.py new file mode 100644 index 000000000..a7b07ddf9 --- /dev/null +++ b/net_orc/python/src/util.py @@ -0,0 +1,41 @@ +"""Provides basic utilities for the network orchestrator.""" +import subprocess +import shlex +import logger +import netifaces + +LOGGER = logger.get_logger('util') + + +def run_command(cmd, output=True): + """Runs a process at the os level + By default, returns the standard output and error output + If the caller sets optional output parameter to False, + will only return a boolean result indicating if it was + succesful in running the command. 
Failure is indicated + by any return code from the process other than zero.""" + + success = False + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + + if process.returncode != 0 and output: + err_msg = f'{stderr.strip()}. Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) + else: + success = True + if output: + return stdout.strip().decode('utf-8'), stderr + else: + return success + + +def interface_exists(interface): + return interface in netifaces.interfaces() + + +def prettify(mac_string): + return ':'.join([f'{ord(b):02x}' for b in mac_string]) diff --git a/resources/devices/Template/device_config.json b/resources/devices/Template/device_config.json new file mode 100644 index 000000000..7a3d4441c --- /dev/null +++ b/resources/devices/Template/device_config.json @@ -0,0 +1,147 @@ +{ + "make": "Manufacturer X", + "model": "Device X", + "mac_addr": "aa:bb:cc:dd:ee:ff", + "test_modules": { + "dns": { + "enabled": true, + "tests": { + "dns.network.from_device": { + "enabled": true + }, + "dns.network.from_dhcp": { + "enabled": true + } + } + }, + "baseline": { + "enabled": true, + "tests": { + "baseline.passe": { + "enabled": true + }, + "baseline.pass": { + "enabled": true + }, + "baseline.skip": { + "enabled": true + } + } + }, + "nmap": { + "enabled": true, + "tests": { + "security.nmap.ports": { + "enabled": true, + "security.services.ftp": { + "tcp_ports": { + "20": { + "allowed": false + }, + "21": { + "allowed": false + } + } + }, + "security.services.ssh": { + "tcp_ports": { + "22": { + "allowed": true + } + } + }, + "security.services.telnet": { + "tcp_ports": { + "23": { + "allowed": false + } + } + }, + "security.services.smtp": { + "tcp_ports": { + "25": { + "allowed": false + }, + "465": { + "allowed": false + }, + "587": { + "allowed": false + } + } + }, + "security.services.http": { + "tcp_ports": { + "80": 
{ + "allowed": false + } + } + }, + "security.services.pop": { + "tcp_ports": { + "110": { + "allowed": false + } + } + }, + "security.services.imap": { + "tcp_ports": { + "143": { + "allowed": false + } + } + }, + "security.services.snmpv3": { + "tcp_ports": { + "161": { + "allowed": false + }, + "162": { + "allowed": false + } + }, + "udp_ports": { + "161": { + "allowed": false + }, + "162": { + "allowed": false + } + } + }, + "security.services.https": { + "tcp_ports": { + "80": { + "allowed": false + } + } + }, + "security.services.vnc": { + "tcp_ports": { + "5500": { + "allowed": false + }, + "5800": { + "allowed": false + } + } + }, + "security.services.tftp": { + "udp_ports": { + "69": { + "allowed": false + } + } + }, + "security.services.ntp": { + "udp_ports": { + "123": { + "allowed": false + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/test_orc/modules/base/base.Dockerfile b/test_orc/modules/base/base.Dockerfile new file mode 100644 index 000000000..a508caef7 --- /dev/null +++ b/test_orc/modules/base/base.Dockerfile @@ -0,0 +1,23 @@ +# Image name: test-run/base-test +FROM ubuntu:jammy + +# Install common software +RUN apt-get update && apt-get install -y net-tools iputils-ping tcpdump iproute2 jq python3 python3-pip dos2unix nmap --fix-missing + +# Setup the base python requirements +COPY modules/base/python /testrun/python + +# Install all python requirements for the module +RUN pip3 install -r /testrun/python/requirements.txt + +# Add the bin files +COPY modules/base/bin /testrun/bin + +# Remove incorrect line endings +RUN dos2unix /testrun/bin/* + +# Make sure all the bin files are executable +RUN chmod u+x /testrun/bin/* + +# Start the test module +ENTRYPOINT [ "/testrun/bin/start_module" ] \ No newline at end of file diff --git a/test_orc/modules/base/bin/capture b/test_orc/modules/base/bin/capture new file mode 100644 index 000000000..facb6acf7 --- /dev/null +++ b/test_orc/modules/base/bin/capture @@ -0,0 +1,19 @@ 
+#!/bin/bash -e + +# Fetch module name +MODULE_NAME=$1 + +# Define the local file location for the capture to be saved +PCAP_DIR="/runtime/output" +PCAP_FILE=$MODULE_NAME.pcap + +# Allow a user to define an interface by passing it into this script +INTERFACE=$2 + +# Create the output directory and start the capture +mkdir -p $PCAP_DIR +chown $HOST_USER:$HOST_USER $PCAP_DIR +tcpdump -i $INTERFACE -w $PCAP_DIR/$PCAP_FILE -Z $HOST_USER & + +# Small pause to let the capture to start +sleep 1 \ No newline at end of file diff --git a/test_orc/modules/base/bin/get_ipv4_addr b/test_orc/modules/base/bin/get_ipv4_addr new file mode 100644 index 000000000..09a19bc13 --- /dev/null +++ b/test_orc/modules/base/bin/get_ipv4_addr @@ -0,0 +1,8 @@ +#!/bin/bash + +NET=$1 +MAC=$2 + +IP_ADDR=$(nmap -sP $NET | grep -B 2 $MAC | head -n 1 | cut -d " " -f 5) + +echo $IP_ADDR \ No newline at end of file diff --git a/test_orc/modules/base/bin/setup_binaries b/test_orc/modules/base/bin/setup_binaries new file mode 100644 index 000000000..3535ead3c --- /dev/null +++ b/test_orc/modules/base/bin/setup_binaries @@ -0,0 +1,10 @@ +#!/bin/bash -e + +# Directory where all binaries will be loaded +BIN_DIR=$1 + +# Remove incorrect line endings +dos2unix $BIN_DIR/* + +# Make sure all the bin files are executable +chmod u+x $BIN_DIR/* \ No newline at end of file diff --git a/test_orc/modules/base/bin/start_grpc b/test_orc/modules/base/bin/start_grpc new file mode 100644 index 000000000..917381e89 --- /dev/null +++ b/test_orc/modules/base/bin/start_grpc @@ -0,0 +1,17 @@ +#!/bin/bash -e + +GRPC_DIR="/testrun/python/src/grpc" +GRPC_PROTO_DIR="proto" +GRPC_PROTO_FILE="grpc.proto" + +# Move into the grpc directory +pushd $GRPC_DIR >/dev/null 2>&1 + +# Build the grpc proto file every time before starting server +python3 -m grpc_tools.protoc --proto_path=. ./$GRPC_PROTO_DIR/$GRPC_PROTO_FILE --python_out=. --grpc_python_out=. 
+ +popd >/dev/null 2>&1 + +# Start the grpc server +python3 -u $GRPC_DIR/start_server.py $@ + diff --git a/test_orc/modules/base/bin/start_module b/test_orc/modules/base/bin/start_module new file mode 100644 index 000000000..6adc53f58 --- /dev/null +++ b/test_orc/modules/base/bin/start_module @@ -0,0 +1,71 @@ +#!/bin/bash + +# Directory where all binaries will be loaded +BIN_DIR="/testrun/bin" + +# Default interface should be veth0 for all containers +IFACE=veth0 + +# Create a local user that matches the same as the host +# to be used for correct file ownership for various logs +# HOST_USER mapped in via docker container environemnt variables +useradd $HOST_USER + +# Enable IPv6 for all containers +sysctl net.ipv6.conf.all.disable_ipv6=0 +sysctl -p + +# Read in the config file +CONF_FILE="/testrun/conf/module_config.json" +CONF=`cat $CONF_FILE` + +if [[ -z $CONF ]] +then + echo "No config file present at $CONF_FILE. Exiting startup." + exit 1 +fi + +# Extract the necessary config parameters +MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name') +NETWORK_REQUIRED=$(echo "$CONF" | jq -r '.config.network') +GRPC=$(echo "$CONF" | jq -r '.config.grpc') + +# Validate the module name is present +if [[ -z "$MODULE_NAME" || "$MODULE_NAME" == "null" ]] +then + echo "No module name present in $CONF_FILE. Exiting startup." + exit 1 +fi + +echo "Starting module $MODULE_NAME..." + +$BIN_DIR/setup_binaries $BIN_DIR + +# Only start network services if the test container needs +# a network connection to run its tests +if [ $NETWORK_REQUIRED == "true" ];then + # Wait for interface to become ready + $BIN_DIR/wait_for_interface $IFACE + + # Start network capture + $BIN_DIR/capture $MODULE_NAME $IFACE +fi + +# Start the grpc server +if [[ ! -z $GRPC && ! $GRPC == "null" ]] +then + GRPC_PORT=$(echo "$GRPC" | jq -r '.port') + if [[ ! -z $GRPC_PORT && ! 
$GRPC_PORT == "null" ]] + then + echo "gRPC port resolved from config: $GRPC_PORT" + $BIN_DIR/start_grpc "-p $GRPC_PORT" & + else + $BIN_DIR/start_grpc & + fi +fi + +# Small pause to let all core services stabalize +sleep 3 + +# Start the networking service +$BIN_DIR/start_test_module $MODULE_NAME $IFACE \ No newline at end of file diff --git a/test_orc/modules/base/bin/wait_for_interface b/test_orc/modules/base/bin/wait_for_interface new file mode 100644 index 000000000..c9c1682f0 --- /dev/null +++ b/test_orc/modules/base/bin/wait_for_interface @@ -0,0 +1,10 @@ +#!/bin/bash + +# Allow a user to define an interface by passing it into this script +INTF=$1 + +# Wait for local interface to be ready +while ! ip link show $INTF; do + echo $INTF is not yet ready. Waiting 3 seconds + sleep 3 +done \ No newline at end of file diff --git a/test_orc/modules/base/conf/module_config.json b/test_orc/modules/base/conf/module_config.json new file mode 100644 index 000000000..7288dacfd --- /dev/null +++ b/test_orc/modules/base/conf/module_config.json @@ -0,0 +1,13 @@ +{ + "config": { + "meta": { + "name": "base", + "display_name": "Base", + "description": "Base image" + }, + "network": false, + "docker": { + "enable_container": false + } + } +} \ No newline at end of file diff --git a/test_orc/modules/base/python/requirements.txt b/test_orc/modules/base/python/requirements.txt new file mode 100644 index 000000000..9c4e2b056 --- /dev/null +++ b/test_orc/modules/base/python/requirements.txt @@ -0,0 +1,2 @@ +grpcio +grpcio-tools \ No newline at end of file diff --git a/test_orc/modules/base/python/src/grpc/start_server.py b/test_orc/modules/base/python/src/grpc/start_server.py new file mode 100644 index 000000000..b4016c831 --- /dev/null +++ b/test_orc/modules/base/python/src/grpc/start_server.py @@ -0,0 +1,37 @@ +"""Base class for starting the gRPC server for a network module.""" +from concurrent import futures +import grpc +import proto.grpc_pb2_grpc as pb2_grpc +from 
network_service import NetworkService +import argparse + +DEFAULT_PORT = '5001' + + +def serve(port): + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + pb2_grpc.add_NetworkModuleServicer_to_server(NetworkService(), server) + server.add_insecure_port('[::]:' + port) + server.start() + server.wait_for_termination() + + +def run(): + parser = argparse.ArgumentParser( + description='GRPC Server for Network Module', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-p', + '--port', + default=DEFAULT_PORT, + help='Define the default port to run the server on.') + + args = parser.parse_args() + + port = args.port + + print('gRPC server starting on port ' + port) + serve(port) + + +if __name__ == '__main__': + run() diff --git a/test_orc/modules/base/python/src/logger.py b/test_orc/modules/base/python/src/logger.py new file mode 100644 index 000000000..42124beea --- /dev/null +++ b/test_orc/modules/base/python/src/logger.py @@ -0,0 +1,48 @@ +"""Sets up the logger to be used for the test modules.""" +import json +import logging +import os + +LOGGERS = {} +_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s' +_DATE_FORMAT = '%b %02d %H:%M:%S' +_DEFAULT_LEVEL = logging.INFO +_CONF_DIR = 'conf' +_CONF_FILE_NAME = 'system.json' +_LOG_DIR = '/runtime/output/' + +# Set log level +try: + with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME), + encoding='UTF-8') as config_json_file: + system_conf_json = json.load(config_json_file) + + log_level_str = system_conf_json['log_level'] + log_level = logging.getLevelName(log_level_str) +except OSError: + # TODO: Print out warning that log level is incorrect or missing + log_level = _DEFAULT_LEVEL + +log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT) + + +def add_file_handler(log, log_file): + handler = logging.FileHandler(_LOG_DIR + log_file + '.log') + handler.setFormatter(log_format) + log.addHandler(handler) + + +def add_stream_handler(log): + handler = 
logging.StreamHandler() + handler.setFormatter(log_format) + log.addHandler(handler) + + +def get_logger(name, log_file=None): + if name not in LOGGERS: + LOGGERS[name] = logging.getLogger(name) + LOGGERS[name].setLevel(log_level) + add_stream_handler(LOGGERS[name]) + if log_file is not None: + add_file_handler(LOGGERS[name], log_file) + return LOGGERS[name] diff --git a/test_orc/modules/base/python/src/test_module.py b/test_orc/modules/base/python/src/test_module.py new file mode 100644 index 000000000..8e10a3637 --- /dev/null +++ b/test_orc/modules/base/python/src/test_module.py @@ -0,0 +1,111 @@ +"""Base class for all core test module functions""" +import json +import logger +import os +import util +from datetime import datetime + +LOGGER = None +RESULTS_DIR = '/runtime/output/' +CONF_FILE = '/testrun/conf/module_config.json' + + +class TestModule: + """An example test module.""" + + def __init__(self, module_name, log_name): + self._module_name = module_name + self._device_mac = os.environ['DEVICE_MAC'] + self._ipv4_subnet = os.environ['IPV4_SUBNET'] + self._ipv6_subnet = os.environ['IPV6_SUBNET'] + self._add_logger(log_name=log_name, module_name=module_name) + self._config = self._read_config() + self._device_ipv4_addr = None + self._device_ipv6_addr = None + + def _add_logger(self, log_name, module_name): + global LOGGER + LOGGER = logger.get_logger(log_name, module_name) + + def _get_logger(self): + return LOGGER + + def _get_tests(self): + device_test_module = self._get_device_test_module() + return self._get_device_tests(device_test_module) + + def _get_device_tests(self, device_test_module): + module_tests = self._config['config']['tests'] + if device_test_module is None: + return module_tests + elif not device_test_module['enabled']: + return [] + else: + for test in module_tests: + # Resolve device specific configurations for the test if it exists + # and update module test config with device config options + if test['name'] in 
device_test_module['tests']: + dev_test_config = device_test_module['tests'][test['name']] + if 'config' in test: + test['config'].update(dev_test_config) + return module_tests + + def _get_device_test_module(self): + # TODO: Make DEVICE_TEST_MODULES a static string + if 'DEVICE_TEST_MODULES' in os.environ: + test_modules = json.loads(os.environ['DEVICE_TEST_MODULES']) + if self._module_name in test_modules: + return test_modules[self._module_name] + return None + + def run_tests(self): + if self._config['config']['network']: + self._device_ipv4_addr = self._get_device_ipv4() + LOGGER.info('Device IP Resolved: ' + str(self._device_ipv4_addr)) + tests = self._get_tests() + for test in tests: + test_method_name = '_' + test['name'].replace('.', '_') + result = None + if ('enabled' in test and test['enabled']) or 'enabled' not in test: + LOGGER.info('Attempting to run test: ' + test['name']) + test['start'] = datetime.now().isoformat() + # Resolve the correct python method by test name and run test + if hasattr(self, test_method_name): + if 'config' in test: + result = getattr(self, test_method_name)(config=test['config']) + else: + result = getattr(self, test_method_name)() + else: + LOGGER.info('Test ' + test['name'] + ' not resolved. Skipping') + result = None + else: + LOGGER.info('Test ' + test['name'] + ' disabled. 
Skipping') + if result is not None: + test['result'] = 'compliant' if result else 'non-compliant' + else: + test['result'] = 'skipped' + test['end'] = datetime.now().isoformat() + duration = datetime.fromisoformat(test['end']) - datetime.fromisoformat( + test['start']) + test['duration'] = str(duration) + json_results = json.dumps({'results': tests}, indent=2) + self._write_results(json_results) + + def _read_config(self): + with open(CONF_FILE, encoding='utf-8') as f: + config = json.load(f) + return config + + def _write_results(self, results): + results_file = RESULTS_DIR + self._module_name + '-result.json' + LOGGER.info('Writing results to ' + results_file) + with open(results_file, 'w', encoding='utf-8') as f: + f.write(results) + + def _get_device_ipv4(self): + command = f"""/testrun/bin/get_ipv4_addr {self._ipv4_subnet} + {self._device_mac.upper()}""" + text = util.run_command(command)[0] + if text: + return text.split('\n')[0] + return None diff --git a/test_orc/modules/base/python/src/util.py b/test_orc/modules/base/python/src/util.py new file mode 100644 index 000000000..d387db796 --- /dev/null +++ b/test_orc/modules/base/python/src/util.py @@ -0,0 +1,29 @@ +"""Provides basic utilities for a test module.""" +import subprocess +import shlex +import logger + +LOGGER = logger.get_logger('util') + +# Runs a process at the os level +# By default, returns the standard output and error output +# If the caller sets optional output parameter to False, +# will only return a boolean result indicating if it was +# succesful in running the command. Failure is indicated +# by any return code from the process other than zero. +def run_command(cmd, output=True): + success = False + process = subprocess.Popen(shlex.split(cmd), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode != 0 and output: + err_msg = f'{stderr.strip()}. 
Code: {process.returncode}' + LOGGER.error('Command Failed: ' + cmd) + LOGGER.error('Error: ' + err_msg) + else: + success = True + if output: + return stdout.strip().decode('utf-8'), stderr + else: + return success diff --git a/test_orc/modules/baseline/baseline.Dockerfile b/test_orc/modules/baseline/baseline.Dockerfile new file mode 100644 index 000000000..5b634e6ee --- /dev/null +++ b/test_orc/modules/baseline/baseline.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/baseline/conf /testrun/conf + +# Load device binary files +COPY modules/baseline/bin /testrun/bin + +# Copy over all python files +COPY modules/baseline/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/baseline/bin/start_test_module b/test_orc/modules/baseline/bin/start_test_module new file mode 100644 index 000000000..2938eb0f8 --- /dev/null +++ b/test_orc/modules/baseline/bin/start_test_module @@ -0,0 +1,42 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a pyhon script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. 
+ +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/baseline/conf/module_config.json b/test_orc/modules/baseline/conf/module_config.json new file mode 100644 index 000000000..4c0cd08d8 --- /dev/null +++ b/test_orc/modules/baseline/conf/module_config.json @@ -0,0 +1,32 @@ +{ + "config": { + "meta": { + "name": "baseline", + "display_name": "Baseline", + "description": "Baseline test" + }, + "network": false, + "docker": { + "depends_on": "base", + "enable_container": true, + "timeout": 30 + }, + "tests":[ + { + "name": "baseline.pass", + "description": "Simulate a compliant test", + "expected_behavior": "A compliant test result is generated" + }, + { + "name": "baseline.fail", + "description": "Simulate a non-compliant test", + "expected_behavior": "A non-compliant test result is generated" + }, + { + "name": "baseline.skip", + "description": "Simulate a skipped test", + "expected_behavior": "A skipped test result is generated" + } + ] + } +} \ No newline at end of file 
diff --git a/test_orc/modules/baseline/python/src/baseline_module.py b/test_orc/modules/baseline/python/src/baseline_module.py new file mode 100644 index 000000000..083123436 --- /dev/null +++ b/test_orc/modules/baseline/python/src/baseline_module.py @@ -0,0 +1,28 @@ +"""Baseline test module""" +from test_module import TestModule + +LOG_NAME = "test_baseline" +LOGGER = None + + +class BaselineModule(TestModule): + """An example testing module.""" + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + global LOGGER + LOGGER = self._get_logger() + + def _baseline_pass(self): + LOGGER.info("Running baseline pass test") + LOGGER.info("Baseline pass test finished") + return True + + def _baseline_fail(self): + LOGGER.info("Running baseline fail test") + LOGGER.info("Baseline fail test finished") + return False + + def _baseline_skip(self): + LOGGER.info("Running baseline skip test") + LOGGER.info("Baseline skip test finished") diff --git a/test_orc/modules/baseline/python/src/run.py b/test_orc/modules/baseline/python/src/run.py new file mode 100644 index 000000000..1892ed8ae --- /dev/null +++ b/test_orc/modules/baseline/python/src/run.py @@ -0,0 +1,55 @@ +"""Run Baseline module""" +import argparse +import signal +import sys +import logger + +from baseline_module import BaselineModule + +LOGGER = logger.get_logger('test_module') +RUNTIME = 1500 + + +class BaselineModuleRunner: + """An example runner class for test modules.""" + + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + LOGGER.info('Starting Baseline Module') + + self._test_module = BaselineModule(module) + self._test_module.run_tests() + + def _handler(self, signum, frame=None): + LOGGER.debug('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.debug('Exit signal received: ' + str(signum)) + if signum in (2, 
signal.SIGTERM): + LOGGER.info('Exit signal received. Stopping test module...') + LOGGER.info('Test module stopped') + sys.exit(1) + + +def run(): + parser = argparse.ArgumentParser( + description='Baseline Module Help', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + '-m', + '--module', + help='Define the module name to be used to create the log file') + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + BaselineModuleRunner(args.module.strip()) + + +if __name__ == '__main__': + run() diff --git a/test_orc/modules/dns/bin/start_test_module b/test_orc/modules/dns/bin/start_test_module new file mode 100644 index 000000000..2938eb0f8 --- /dev/null +++ b/test_orc/modules/dns/bin/start_test_module @@ -0,0 +1,42 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a pyhon script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. 
+ +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/dns/conf/module_config.json b/test_orc/modules/dns/conf/module_config.json new file mode 100644 index 000000000..b8ff36c97 --- /dev/null +++ b/test_orc/modules/dns/conf/module_config.json @@ -0,0 +1,27 @@ +{ + "config": { + "meta": { + "name": "dns", + "display_name": "DNS", + "description": "DNS test" + }, + "network": false, + "docker": { + "depends_on": "base", + "enable_container": true, + "timeout": 30 + }, + "tests":[ + { + "name": "dns.network.from_device", + "description": "Verify the device sends DNS requests", + "expected_behavior": "The device sends DNS requests." 
+ }, + { + "name": "dns.network.from_dhcp", + "description": "Verify the device allows for a DNS server to be entered automatically", + "expected_behavior": "The device sends DNS requests to the DNS server provided by the DHCP server" + } + ] + } +} \ No newline at end of file diff --git a/test_orc/modules/dns/dns.Dockerfile b/test_orc/modules/dns/dns.Dockerfile new file mode 100644 index 000000000..7c3497bc3 --- /dev/null +++ b/test_orc/modules/dns/dns.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/dns/conf /testrun/conf + +# Load device binary files +COPY modules/dns/bin /testrun/bin + +# Copy over all python files +COPY modules/dns/python /testrun/python \ No newline at end of file diff --git a/test_orc/modules/dns/python/src/dns_module.py b/test_orc/modules/dns/python/src/dns_module.py new file mode 100644 index 000000000..58ce48123 --- /dev/null +++ b/test_orc/modules/dns/python/src/dns_module.py @@ -0,0 +1,76 @@ +"""DNS test module""" +import subprocess +from test_module import TestModule + +LOG_NAME = 'test_dns' +CAPTURE_FILE = '/runtime/network/dns.pcap' +LOGGER = None + + +class DNSModule(TestModule): + """DNS Test module""" + + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + self._dns_server = '10.10.10.4' + global LOGGER + LOGGER = self._get_logger() + + def _check_dns_traffic(self, tcpdump_filter): + to_dns = self._exec_tcpdump(tcpdump_filter) + num_query_dns = len(to_dns) + LOGGER.info('DNS queries found: ' + str(num_query_dns)) + dns_traffic_detected = len(to_dns) > 0 + LOGGER.info('DNS traffic detected: ' + str(dns_traffic_detected)) + return dns_traffic_detected + + def _dns_network_from_dhcp(self): + LOGGER.info('Checking DNS traffic for configured DHCP DNS server: ' + + self._dns_server) + + # Check if the device DNS traffic is to appropriate server + tcpdump_filter = (f'dst port 53 and dst host 
{self._dns_server}', + f' and ether src {self._device_mac}') + + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + + LOGGER.info('DNS traffic detected to configured DHCP DNS server: ' + + str(result)) + return result + + def _dns_network_from_device(self): + LOGGER.info('Checking DNS traffic from device: ' + self._device_mac) + + # Check if the device DNS traffic is to appropriate server + tcpdump_filter = f'dst port 53 and ether src {self._device_mac}' + + result = self._check_dns_traffic(tcpdump_filter=tcpdump_filter) + + LOGGER.info('DNS traffic detected from device: ' + str(result)) + return result + + def _exec_tcpdump(self, tcpdump_filter): + """ + Args + tcpdump_filter: Filter to pass onto tcpdump file + capture_file: Optional capture file to look + Returns + List of packets matching the filter + """ + command = f'tcpdump -tttt -n -r {CAPTURE_FILE} {tcpdump_filter}' + + LOGGER.debug('tcpdump command: ' + command) + + process = subprocess.Popen(command, + universal_newlines=True, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + text = str(process.stdout.read()).rstrip() + + LOGGER.debug('tcpdump response: ' + text) + + if text: + return text.split('\n') + + return [] diff --git a/test_orc/modules/dns/python/src/run.py b/test_orc/modules/dns/python/src/run.py new file mode 100644 index 000000000..4cd991804 --- /dev/null +++ b/test_orc/modules/dns/python/src/run.py @@ -0,0 +1,62 @@ +"""Run DNS test module""" +import argparse +import signal +import sys +import logger + +from dns_module import DNSModule + +LOG_NAME = "dns_module" +LOGGER = logger.get_logger(LOG_NAME) +RUNTIME = 1500 + + +class DNSModuleRunner: + """Run the DNS module tests.""" + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + self.add_logger(module) + + LOGGER.info("Starting DNS Test Module") + 
+ self._test_module = DNSModule(module) + self._test_module.run_tests() + + LOGGER.info("DNS Test Module Finished") + + def add_logger(self, module): + global LOGGER + LOGGER = logger.get_logger(LOG_NAME, module) + + def _handler(self, signum, frame=None): + LOGGER.debug("SigtermEnum: " + str(signal.SIGTERM)) + LOGGER.debug("Exit signal received: " + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info("Exit signal received. Stopping test module...") + LOGGER.info("Test module stopped") + sys.exit(1) + + +def run(): + parser = argparse.ArgumentParser( + description="Test Module DNS", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + "-m", + "--module", + help="Define the module name to be used to create the log file") + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + DNSModuleRunner(args.module.strip()) + + +if __name__ == "__main__": + run() diff --git a/test_orc/modules/nmap/bin/start_test_module b/test_orc/modules/nmap/bin/start_test_module new file mode 100644 index 000000000..4bb7e9f96 --- /dev/null +++ b/test_orc/modules/nmap/bin/start_test_module @@ -0,0 +1,42 @@ +#!/bin/bash + +# An example startup script that does the bare minimum to start +# a test module via a python script. Each test module should include a +# start_test_module file that overwrites this one to boot all of its +# specific requirements to run. 
+ +# Define where the python source files are located +PYTHON_SRC_DIR=/testrun/python/src + +# Fetch module name +MODULE_NAME=$1 + +# Default interface should be veth0 for all containers +DEFAULT_IFACE=veth0 + +# Allow a user to define an interface by passing it into this script +DEFINED_IFACE=$2 + +# Select which interace to use +if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]] +then + echo "No interface defined, defaulting to veth0" + INTF=$DEFAULT_IFACE +else + INTF=$DEFINED_IFACE +fi + +# Create and set permissions on the log files +LOG_FILE=/runtime/output/$MODULE_NAME.log +RESULT_FILE=/runtime/output/$MODULE_NAME-result.json +touch $LOG_FILE +touch $RESULT_FILE +chown $HOST_USER:$HOST_USER $LOG_FILE +chown $HOST_USER:$HOST_USER $RESULT_FILE + +# Run the python scrip that will execute the tests for this module +# -u flag allows python print statements +# to be logged by docker by running unbuffered +python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME" + +echo Module has finished \ No newline at end of file diff --git a/test_orc/modules/nmap/conf/module_config.json b/test_orc/modules/nmap/conf/module_config.json new file mode 100644 index 000000000..5449327a1 --- /dev/null +++ b/test_orc/modules/nmap/conf/module_config.json @@ -0,0 +1,176 @@ +{ + "config": { + "meta": { + "name": "nmap", + "display_name": "nmap", + "description": "Scan for open ports using nmap" + }, + "network": true, + "docker": { + "enable_container": true, + "timeout": 600 + }, + "tests": [ + { + "name": "security.nmap.ports", + "description": "Run an nmap scan of open ports", + "expected_behavior": "Report all open ports", + "config": { + "security.services.ftp": { + "tcp_ports": { + "20": { + "allowed": false, + "description": "File Transfer Protocol (FTP) Server Data Transfer" + }, + "21": { + "allowed": false, + "description": "File Transfer Protocol (FTP) Server Data Transfer" + } + }, + "description": "Check FTP port 20/21 is disabled and FTP is not running on any port", + 
"expected_behavior": "There is no FTP service running on any port" + }, + "security.services.ssh": { + "tcp_ports": { + "22": { + "allowed": true, + "description": "Secure Shell (SSH) server" + } + }, + "description": "Check TELNET port 23 is disabled and TELNET is not running on any port", + "expected_behavior": "There is no FTP service running on any port" + }, + "security.services.telnet": { + "tcp_ports": { + "23": { + "allowed": false, + "description": "Telnet Server" + } + }, + "description": "Check TELNET port 23 is disabled and TELNET is not running on any port", + "expected_behavior": "There is no FTP service running on any port" + }, + "security.services.smtp": { + "tcp_ports": { + "25": { + "allowed": false, + "description": "Simple Mail Transfer Protocol (SMTP) Server" + }, + "465": { + "allowed": false, + "description": "Simple Mail Transfer Protocol over SSL (SMTPS) Server" + }, + "587": { + "allowed": false, + "description": "Simple Mail Transfer Protocol via TLS (SMTPS) Server" + } + }, + "description": "Check SMTP port 25 is disabled and ports 465 or 587 with SSL encryption are (not?) 
enabled and SMTP is not running on any port.", + "expected_behavior": "There is no smtp service running on any port" + }, + "security.services.http": { + "tcp_ports": { + "80": { + "service_scan": { + "script": "http-methods" + }, + "allowed": false, + "description": "Administrative Insecure Web-Server" + } + }, + "description": "Check that there is no HTTP server running on any port", + "expected_behavior": "Device is unreachable on port 80 (or any other port) and only responds to HTTPS requests on port 443 (or any other port if HTTP is used at all)" + }, + "security.services.pop": { + "tcp_ports": { + "110": { + "allowed": false, + "description": "Post Office Protocol v3 (POP3) Server" + } + }, + "description": "Check POP port 110 is disalbed and POP is not running on any port", + "expected_behavior": "There is no pop service running on any port" + }, + "security.services.imap": { + "tcp_ports": { + "143": { + "allowed": false, + "description": "Internet Message Access Protocol (IMAP) Server" + } + }, + "description": "Check IMAP port 143 is disabled and IMAP is not running on any port", + "expected_behavior": "There is no imap service running on any port" + }, + "security.services.snmpv3": { + "tcp_ports": { + "161": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP)" + }, + "162": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP) Trap" + } + }, + "udp_ports": { + "161": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP)" + }, + "162": { + "allowed": false, + "description": "Simple Network Management Protocol (SNMP) Trap" + } + }, + "description": "Check SNMP port 161/162 is disabled. If SNMP is an essential service, check it supports version 3", + "expected_behavior": "Device is unreachable on port 161 (or any other port) and device is unreachable on port 162 (or any other port) unless SNMP is essential in which case it is SNMPv3 is used." 
+ }, + "security.services.https": { + "tcp_ports": { + "80": { + "allowed": false, + "description": "Administrative Secure Web-Server" + } + }, + "description": "Check that if there is a web server running it is running on a secure port.", + "expected_behavior": "Device only responds to HTTPS requests on port 443 (or any other port if HTTP is used at all)" + }, + "security.services.vnc": { + "tcp_ports": { + "5800": { + "allowed": false, + "description": "Virtual Network Computing (VNC) Remote Frame Buffer Protocol Over HTTP" + }, + "5500": { + "allowed": false, + "description": "Virtual Network Computing (VNC) Remote Frame Buffer Protocol" + } + }, + "description": "Check VNC is disabled on any port", + "expected_behavior": "Device cannot be accessed /connected to via VNc on any port" + }, + "security.services.tftp": { + "udp_ports": { + "69": { + "allowed": false, + "description": "Trivial File Transfer Protocol (TFTP) Server" + } + }, + "description": "Check TFTP port 69 is disabled (UDP)", + "expected_behavior": "There is no tftp service running on any port" + }, + "security.services.ntp": { + "udp_ports": { + "123": { + "allowed": false, + "description": "Network Time Protocol (NTP) Server" + } + }, + "description": "Check NTP port 123 is disabled and the device is not operating as an NTP server", + "expected_behavior": "The device dos not respond to NTP requests when it's IP is set as the NTP server on another device" + } + } + } + ] + } +} \ No newline at end of file diff --git a/test_orc/modules/nmap/nmap.Dockerfile b/test_orc/modules/nmap/nmap.Dockerfile new file mode 100644 index 000000000..12f23dde7 --- /dev/null +++ b/test_orc/modules/nmap/nmap.Dockerfile @@ -0,0 +1,11 @@ +# Image name: test-run/baseline-test +FROM test-run/base-test:latest + +# Copy over all configuration files +COPY modules/nmap/conf /testrun/conf + +# Load device binary files +COPY modules/nmap/bin /testrun/bin + +# Copy over all python files +COPY modules/nmap/python /testrun/python 
\ No newline at end of file diff --git a/test_orc/modules/nmap/python/src/nmap_module.py b/test_orc/modules/nmap/python/src/nmap_module.py new file mode 100644 index 000000000..876343a0f --- /dev/null +++ b/test_orc/modules/nmap/python/src/nmap_module.py @@ -0,0 +1,227 @@ +"""NMAP test module""" +import time +import util +import json +import threading +from test_module import TestModule + +LOG_NAME = "test_nmap" +LOGGER = None + + +class NmapModule(TestModule): + """NMAP Test module""" + def __init__(self, module): + super().__init__(module_name=module, log_name=LOG_NAME) + self._unallowed_ports = [] + self._scan_tcp_results = None + self._udp_tcp_results = None + self._script_scan_results = None + global LOGGER + LOGGER = self._get_logger() + + def _security_nmap_ports(self, config): + LOGGER.info("Running security.nmap.ports test") + + # Delete the enabled key from the config if it exists + # to prevent it being treated as a test key + if "enabled" in config: + del config["enabled"] + + if self._device_ipv4_addr is not None: + # Run the monitor method asynchronously to keep this method non-blocking + self._tcp_scan_thread = threading.Thread(target=self._scan_tcp_ports, + args=(config, )) + self._udp_scan_thread = threading.Thread(target=self._scan_udp_ports, + args=(config, )) + self._script_scan_thread = threading.Thread(target=self._scan_scripts, + args=(config, )) + + self._tcp_scan_thread.daemon = True + self._udp_scan_thread.daemon = True + self._script_scan_thread.daemon = True + + self._tcp_scan_thread.start() + self._udp_scan_thread.start() + self._script_scan_thread.start() + + while self._tcp_scan_thread.is_alive() or self._udp_scan_thread.is_alive( + ) or self._script_scan_thread.is_alive(): + time.sleep(1) + + LOGGER.debug("TCP scan results: " + str(self._scan_tcp_results)) + LOGGER.debug("UDP scan results: " + str(self._scan_udp_results)) + LOGGER.debug("Service scan results: " + str(self._script_scan_results)) + 
self._process_port_results(tests=config) + LOGGER.info("Unallowed Ports: " + str(self._unallowed_ports)) + LOGGER.info("Script scan results:\n" + + json.dumps(self._script_scan_results)) + return len(self._unallowed_ports) == 0 + else: + LOGGER.info("Device ip address not resolved, skipping") + return None + + def _process_port_results(self, tests): + for test in tests: + LOGGER.info("Checking results for test: " + str(test)) + self._check_scan_results(test_config=tests[test]) + + def _check_scan_results(self, test_config): + port_config = {} + if "tcp_ports" in test_config: + port_config.update(test_config["tcp_ports"]) + elif "udp_ports" in test_config: + port_config.update(test_config["udp_ports"]) + + scan_results = {} + if self._scan_tcp_results is not None: + scan_results.update(self._scan_tcp_results) + if self._scan_udp_results is not None: + scan_results.update(self._scan_udp_results) + if self._script_scan_results is not None: + scan_results.update(self._script_scan_results) + if port_config is not None: + for port, config in port_config.items(): + result = None + LOGGER.info("Checking port: " + str(port)) + LOGGER.debug("Port config: " + str(config)) + if port in scan_results: + if scan_results[port]["state"] == "open": + if not config["allowed"]: + LOGGER.info("Unallowed port open") + self._unallowed_ports.append(str(port)) + result = False + else: + LOGGER.info("Allowed port open") + result = True + else: + LOGGER.info("Port is closed") + result = True + else: + LOGGER.info("Port not detected, closed") + result = True + + if result is not None: + config["result"] = "compliant" if result else "non-compliant" + else: + config["result"] = "skipped" + + def _scan_scripts(self, tests): + scan_results = {} + LOGGER.info("Checing for scan scripts") + for test in tests: + test_config = tests[test] + if "tcp_ports" in test_config: + for port in test_config["tcp_ports"]: + port_config = test_config["tcp_ports"][port] + if "service_scan" in port_config: + 
LOGGER.info("Service Scan Detected for: " + str(port)) + svc = port_config["service_scan"] + scan_results.update(self._scan_tcp_with_script(svc["script"])) + if "udp_ports" in test_config: + for port in test_config["udp_ports"]: + if "service_scan" in port: + LOGGER.info("Service Scan Detected for: " + str(port)) + svc = port["service_scan"] + self._scan_udp_with_script(svc["script"], port) + scan_results.update(self._scan_tcp_with_script(svc["script"])) + self._script_scan_results = scan_results + + def _scan_tcp_with_script(self, script_name, ports=None): + LOGGER.info("Running TCP nmap scan with script " + script_name) + scan_options = " -v -n T3 --host-timeout=6m -A --script " + script_name + port_options = " --open " + if ports is None: + port_options += " -p- " + else: + port_options += " -p" + ports + " " + results_file = f"/runtime/output/{self._module_name}-script_name.log" + nmap_options = scan_options + port_options + " -oG " + results_file + nmap_results = util.run_command("nmap " + nmap_options + " " + + self._device_ipv4_addr)[0] + LOGGER.info("Nmap TCP script scan complete") + LOGGER.info("nmap script results\n" + str(nmap_results)) + return self._process_nmap_results(nmap_results=nmap_results) + + def _scan_udp_with_script(self, script_name, ports=None): + LOGGER.info("Running UDP nmap scan with script " + script_name) + scan_options = " --sU -Pn -n --script " + script_name + port_options = " --open " + if ports is None: + port_options += " -p- " + else: + port_options += " -p" + ports + " " + nmap_options = scan_options + port_options + nmap_results = util.run_command("nmap " + nmap_options + + self._device_ipv4_addr)[0] + LOGGER.info("Nmap UDP script scan complete") + LOGGER.info("nmap script results\n" + str(nmap_results)) + return self._process_nmap_results(nmap_results=nmap_results) + + def _scan_tcp_ports(self, tests): + max_port = 1000 + ports = [] + for test in tests: + test_config = tests[test] + if "tcp_ports" in test_config: + for port in 
test_config["tcp_ports"]: + if int(port) > max_port: + ports.append(port) + ports_to_scan = "1-" + str(max_port) + if len(ports) > 0: + ports_to_scan += "," + ",".join(ports) + LOGGER.info("Running nmap TCP port scan") + LOGGER.info("TCP ports: " + str(ports_to_scan)) + nmap_results = util.run_command(f"""nmap -sT -sV -Pn -v -p {ports_to_scan} + --version-intensity 7 -T4 {self._device_ipv4_addr}""")[0] + LOGGER.info("TCP port scan complete") + self._scan_tcp_results = self._process_nmap_results( + nmap_results=nmap_results) + + def _scan_udp_ports(self, tests): + ports = [] + for test in tests: + test_config = tests[test] + if "udp_ports" in test_config: + for port in test_config["udp_ports"]: + ports.append(port) + if len(ports) > 0: + port_list = ",".join(ports) + LOGGER.info("Running nmap UDP port scan") + LOGGER.info("UDP ports: " + str(port_list)) + nmap_results = util.run_command( + f"nmap -sU -sV -p {port_list} {self._device_ipv4_addr}")[0] + LOGGER.info("UDP port scan complete") + self._scan_udp_results = self._process_nmap_results( + nmap_results=nmap_results) + + def _process_nmap_results(self, nmap_results): + results = {} + LOGGER.info("nmap results\n" + str(nmap_results)) + if nmap_results: + if "Service Info" in nmap_results: + rows = nmap_results.split("PORT")[1].split("Service Info")[0].split( + "\n") + elif "PORT" in nmap_results: + rows = nmap_results.split("PORT")[1].split("MAC Address")[0].split("\n") + if rows: + for result in rows[1:-1]: # Iterate skipping the header and tail rows + cols = result.split() + port = cols[0].split("/")[0] + # If results do not start with a port number, + # it is likely a bleed over from previous result so + # we need to ignore it + if port.isdigit(): + version = "" + if len(cols) > 3: + # recombine full version information that may contain spaces + version = " ".join(cols[3:]) + port_result = { + cols[0].split("/")[0]: { + "state": cols[1], + "service": cols[2], + "version": version + } + } + 
results.update(port_result) + return results diff --git a/test_orc/modules/nmap/python/src/run.py b/test_orc/modules/nmap/python/src/run.py new file mode 100644 index 000000000..959e30f87 --- /dev/null +++ b/test_orc/modules/nmap/python/src/run.py @@ -0,0 +1,54 @@ +"""Run NMAP test module""" +import argparse +import signal +import sys +import logger + +from nmap_module import NmapModule + +LOGGER = logger.get_logger('test_module') + + +class NmapModuleRunner: + """Run the NMAP module tests.""" + + def __init__(self, module): + + signal.signal(signal.SIGINT, self._handler) + signal.signal(signal.SIGTERM, self._handler) + signal.signal(signal.SIGABRT, self._handler) + signal.signal(signal.SIGQUIT, self._handler) + + LOGGER.info('Starting nmap Module') + + self._test_module = NmapModule(module) + self._test_module.run_tests() + + def _handler(self, signum): + LOGGER.debug('SigtermEnum: ' + str(signal.SIGTERM)) + LOGGER.debug('Exit signal received: ' + str(signum)) + if signum in (2, signal.SIGTERM): + LOGGER.info('Exit signal received. 
Stopping test module...') + LOGGER.info('Test module stopped') + sys.exit(1) + + +def run(): + parser = argparse.ArgumentParser( + description='Nmap Module Help', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + '-m', + '--module', + help='Define the module name to be used to create the log file') + + args = parser.parse_args() + + # For some reason passing in the args from bash adds an extra + # space before the argument so we'll just strip out extra space + NmapModuleRunner(args.module.strip()) + + +if __name__ == '__main__': + run() diff --git a/test_orc/python/requirements.txt b/test_orc/python/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/test_orc/python/src/module.py b/test_orc/python/src/module.py new file mode 100644 index 000000000..72791f86e --- /dev/null +++ b/test_orc/python/src/module.py @@ -0,0 +1,28 @@ +"""Represents a test module.""" +from dataclasses import dataclass +from docker.models.containers import Container + + +@dataclass +class TestModule: # pylint: disable=too-few-public-methods,too-many-instance-attributes + """Represents a test module.""" + + name: str = None + display_name: str = None + description: str = None + + build_file: str = None + container: Container = None + container_name: str = None + image_name: str = None + enable_container: bool = True + network: bool = True + + timeout: int = 60 + + # Absolute path + dir: str = None + dir_name: str = None + + #Set IP Index for all test modules + ip_index: str = 9 diff --git a/test_orc/python/src/runner.py b/test_orc/python/src/runner.py new file mode 100644 index 000000000..d82935057 --- /dev/null +++ b/test_orc/python/src/runner.py @@ -0,0 +1,41 @@ +"""Provides high level management of the test orchestrator.""" +import time +import logger + +LOGGER = logger.get_logger('runner') + + +class Runner: + """Holds the state of the testing for one device.""" + + def __init__(self, test_orc, device): + self._test_orc = test_orc 
+ self._device = device + + def run(self): + self._run_test_modules() + + def _run_test_modules(self): + """Iterates through each test module and starts the container.""" + LOGGER.info('Running test modules...') + for module in self._test_modules: + self.run_test_module(module) + LOGGER.info('All tests complete') + + def run_test_module(self, module): + """Start the test container and extract the results.""" + + if module is None or not module.enable_container: + return + + self._test_orc.start_test_module(module) + + # Determine the module timeout time + test_module_timeout = time.time() + module.timeout + status = self._test_orc.get_module_status(module) + + while time.time() < test_module_timeout and status == 'running': + time.sleep(1) + status = self._test_orc.get_module_status(module) + + LOGGER.info(f'Test module {module.display_name} has finished') diff --git a/test_orc/python/src/test_orchestrator.py b/test_orc/python/src/test_orchestrator.py new file mode 100644 index 000000000..4b65bae12 --- /dev/null +++ b/test_orc/python/src/test_orchestrator.py @@ -0,0 +1,278 @@ +"""Provides high level management of the test orchestrator.""" +import getpass +import os +import json +import time +import shutil +import docker +from docker.types import Mount +import logger +from module import TestModule + +LOG_NAME = "test_orc" +LOGGER = logger.get_logger("test_orc") +RUNTIME_DIR = "runtime" +TEST_MODULES_DIR = "modules" +MODULE_CONFIG = "conf/module_config.json" + + +class TestOrchestrator: + """Manages and controls the test modules.""" + + def __init__(self, net_orc): + self._test_modules = [] + self._module_config = None + self._net_orc = net_orc + + self._path = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + # Resolve the path to the test-run folder + self._root_path = os.path.abspath(os.path.join(self._path, os.pardir)) + + shutil.rmtree(os.path.join(self._root_path, RUNTIME_DIR), + ignore_errors=True) + 
os.makedirs(os.path.join(self._root_path, RUNTIME_DIR), exist_ok=True) + + def start(self): + LOGGER.info("Starting Test Orchestrator") + self._load_test_modules() + self.build_test_modules() + + def stop(self): + """Stop any running tests""" + self._stop_modules() + + def run_test_modules(self, device): + """Iterates through each test module and starts the container.""" + LOGGER.info( + f"Running test modules on device with mac addr {device.mac_addr}") + for module in self._test_modules: + self._run_test_module(module, device) + LOGGER.info("All tests complete") + LOGGER.info(f"""Completed running test modules on device + with mac addr {device.mac_addr}""") + self._generate_results(device) + + def _generate_results(self, device): + results = {} + results["device"] = {} + if device.make is not None: + results["device"]["make"] = device.make + if device.make is not None: + results["device"]["model"] = device.model + results["device"]["mac_addr"] = device.mac_addr + for module in self._test_modules: + if module.enable_container and self._is_module_enabled(module, device): + container_runtime_dir = os.path.join( + self._root_path, "runtime/test/" + + device.mac_addr.replace(":", "") + "/" + module.name) + results_file = f"{container_runtime_dir}/{module.name}-result.json" + try: + with open(results_file, "r", encoding="UTF-8") as f: + module_results = json.load(f) + results[module.name] = module_results + except (FileNotFoundError, PermissionError, + json.JSONDecodeError) as results_error: + LOGGER.error("Module Results Errror " + module.name) + LOGGER.debug(results_error) + + out_file = os.path.join( + self._root_path, + "runtime/test/" + device.mac_addr.replace(":", "") + "/results.json") + with open(out_file, "w", encoding="utf-8") as f: + json.dump(results, f, indent=2) + return results + + def _is_module_enabled(self, module, device): + enabled = True + if device.test_modules is not None: + test_modules = json.loads(device.test_modules) + if module.name in 
test_modules: + if "enabled" in test_modules[module.name]: + enabled = test_modules[module.name]["enabled"] + return enabled + + def _run_test_module(self, module, device): + """Start the test container and extract the results.""" + + if module is None or not module.enable_container: + return + + if not self._is_module_enabled(module, device): + return + + LOGGER.info("Running test module " + module.name) + + try: + container_runtime_dir = os.path.join( + self._root_path, "runtime/test/" + device.mac_addr.replace(":", "") + + "/" + module.name) + network_runtime_dir = os.path.join(self._root_path, "runtime/network") + os.makedirs(container_runtime_dir) + + client = docker.from_env() + + module.container = client.containers.run( + module.image_name, + auto_remove=True, + cap_add=["NET_ADMIN"], + name=module.container_name, + hostname=module.container_name, + privileged=True, + detach=True, + mounts=[ + Mount(target="/runtime/output", + source=container_runtime_dir, + type="bind"), + Mount(target="/runtime/network", + source=network_runtime_dir, + type="bind", + read_only=True), + ], + environment={ + "HOST_USER": getpass.getuser(), + "DEVICE_MAC": device.mac_addr, + "DEVICE_TEST_MODULES": device.test_modules, + "IPV4_SUBNET": self._net_orc.network_config.ipv4_network, + "IPV6_SUBNET": self._net_orc.network_config.ipv6_network + }) + except (docker.errors.APIError, + docker.errors.ContainerError) as container_error: + LOGGER.error("Test module " + module.name + " has failed to start") + LOGGER.debug(container_error) + return + + # Mount the test container to the virtual network if required + if module.network: + LOGGER.debug("Attaching test module to the network") + self._net_orc.attach_test_module_to_network(module) + + # Determine the module timeout time + test_module_timeout = time.time() + module.timeout + status = self._get_module_status(module) + + while time.time() < test_module_timeout and status == "running": + time.sleep(1) + status = 
self._get_module_status(module) + + LOGGER.info("Test module " + module.name + " has finished") + + def _get_module_status(self, module): + container = self._get_module_container(module) + if container is not None: + return container.status + return None + + def _get_test_module(self, name): + for test_module in self._test_modules: + if name in [ + test_module.display_name, test_module.name, test_module.dir_name + ]: + return test_module + return None + + def _get_module_container(self, module): + container = None + try: + client = docker.from_env() + container = client.containers.get(module.container_name) + except docker.errors.NotFound: + LOGGER.debug("Container " + module.container_name + " not found") + except docker.errors.APIError as error: + LOGGER.error("Failed to resolve container") + LOGGER.error(error) + return container + + def _load_test_modules(self): + """Load network modules from module_config.json.""" + LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR) + + loaded_modules = "Loaded the following test modules: " + test_modules_dir = os.path.join(self._path, TEST_MODULES_DIR) + + for module_dir in os.listdir(test_modules_dir): + + if self._get_test_module(module_dir) is None: + loaded_module = self._load_test_module(module_dir) + loaded_modules += loaded_module.dir_name + " " + + LOGGER.info(loaded_modules) + + def _load_test_module(self, module_dir): + """Import module configuration from module_config.json.""" + + modules_dir = os.path.join(self._path, TEST_MODULES_DIR) + + # Load basic module information + module = TestModule() + with open(os.path.join(self._path, modules_dir, module_dir, MODULE_CONFIG), + encoding="UTF-8") as module_config_file: + module_json = json.load(module_config_file) + + module.name = module_json["config"]["meta"]["name"] + module.display_name = module_json["config"]["meta"]["display_name"] + module.description = module_json["config"]["meta"]["description"] + module.dir = os.path.join(self._path, modules_dir, 
module_dir) + module.dir_name = module_dir + module.build_file = module_dir + ".Dockerfile" + module.container_name = "tr-ct-" + module.dir_name + "-test" + module.image_name = "test-run/" + module.dir_name + "-test" + + if "timeout" in module_json["config"]["docker"]: + module.timeout = module_json["config"]["docker"]["timeout"] + + # Determine if this is a container or just an image/template + if "enable_container" in module_json["config"]["docker"]: + module.enable_container = module_json["config"]["docker"][ + "enable_container"] + + if "depends_on" in module_json["config"]["docker"]: + depends_on_module = module_json["config"]["docker"]["depends_on"] + if self._get_test_module(depends_on_module) is None: + self._load_test_module(depends_on_module) + + self._test_modules.append(module) + return module + + def build_test_modules(self): + """Build all test modules.""" + LOGGER.info("Building test modules...") + for module in self._test_modules: + self._build_test_module(module) + + def _build_test_module(self, module): + LOGGER.debug("Building docker image for module " + module.dir_name) + client = docker.from_env() + try: + client.images.build( + dockerfile=os.path.join(module.dir, module.build_file), + path=self._path, + forcerm=True, # Cleans up intermediate containers during build + tag=module.image_name) + except docker.errors.BuildError as error: + LOGGER.error(error) + + def _stop_modules(self, kill=False): + LOGGER.info("Stopping test modules") + for module in self._test_modules: + # Test modules may just be Docker images, so we do not want to stop them + if not module.enable_container: + continue + self._stop_module(module, kill) + LOGGER.info("All test modules have been stopped") + + def _stop_module(self, module, kill=False): + LOGGER.debug("Stopping test module " + module.container_name) + try: + container = module.container + if container is not None: + if kill: + LOGGER.debug("Killing container:" + module.container_name) + container.kill() + else: + 
LOGGER.debug("Stopping container:" + module.container_name) + container.stop() + LOGGER.debug("Container stopped:" + module.container_name) + except docker.errors.NotFound: + pass diff --git a/testing/docker/ci_baseline/Dockerfile b/testing/docker/ci_baseline/Dockerfile new file mode 100644 index 000000000..7c3c1eebd --- /dev/null +++ b/testing/docker/ci_baseline/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:jammy + +#Update and get all additional requirements not contained in the base image +RUN apt-get update && apt-get -y upgrade + +RUN apt-get install -y isc-dhcp-client ntpdate coreutils moreutils inetutils-ping curl jq dnsutils + +COPY entrypoint.sh /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] \ No newline at end of file diff --git a/testing/docker/ci_baseline/entrypoint.sh b/testing/docker/ci_baseline/entrypoint.sh new file mode 100755 index 000000000..bc2da3ec2 --- /dev/null +++ b/testing/docker/ci_baseline/entrypoint.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +OUT=/out/testrun_ci.json + +NTP_SERVER=10.10.10.5 +DNS_SERVER=10.10.10.4 + +function wout(){ + temp=${1//./\".\"} + key=${temp:1}\" + echo $key + value=$2 + jq "$key+=\"$value\"" $OUT | sponge $OUT +} + + +dig @8.8.8.8 +short www.google.com + +# DHCP +ip addr flush dev eth0 +PID_FILE=/var/run/dhclient.pid +if [ -f $PID_FILE ]; then + kill -9 $(cat $PID_FILE) || true + rm -f $PID_FILE +fi +dhclient -v eth0 + +echo "{}" > $OUT + +# Gen network +main_intf=$(ip route | grep '^default' | awk '{print $NF}') + +wout .network.main_intf $main_intf +wout .network.gateway $(ip route | head -n 1 | awk '{print $3}') +wout .network.ipv4 $(ip a show $main_intf | grep "inet " | awk '{print $2}') +wout .network.ipv6 $(ip a show $main_intf | grep inet6 | awk '{print $2}') +wout .network.ethmac $(cat /sys/class/net/$main_intf/address) + +wout .dns_response $(dig @$DNS_SERVER +short www.google.com | tail -1) +wout .ntp_offset $(ntpdate -q $NTP_SERVER | tail -1 | sed -E 's/.*offset ([-=0-9\.]*) sec/\1/') + +# INTERNET CONNECTION 
+google_com_response=$(curl -LI http://www.google.com -o /dev/null -w '%{http_code}\n' -s) +wout .network.internet $google_com_response + +# DHCP LEASE +while read pre name value; do + if [[ $pre != option ]]; then + continue; + fi + + wout .dhcp.$name $(echo "${value%;}" | tr -d '\"\\') + +done < <(grep -B 99 -m 1 "}" /var/lib/dhcp/dhclient.leases) + +cat $OUT \ No newline at end of file diff --git a/testing/test_baseline b/testing/test_baseline new file mode 100755 index 000000000..d7fc1e5c5 --- /dev/null +++ b/testing/test_baseline @@ -0,0 +1,73 @@ + +#!/bin/bash -e + +TESTRUN_OUT=/tmp/testrun.log + +# Setup requirements +sudo apt-get update +sudo apt-get install openvswitch-common openvswitch-switch tcpdump jq moreutils coreutils + +pip3 install pytest + +# Setup device network +sudo ip link add dev endev0a type veth peer name endev0b +sudo ip link set dev endev0a up +sudo ip link set dev endev0b up +sudo docker network create -d macvlan -o parent=endev0b endev0 + +# Start OVS +sudo /usr/share/openvswitch/scripts/ovs-ctl start + +# Fix due to ordering +sudo docker build ./net_orc/ -t test-run/base -f net_orc/network/modules/base/base.Dockerfile + +# Build Test Container +sudo docker build ./testing/docker/ci_baseline -t ci1 -f ./testing/docker/ci_baseline/Dockerfile + +cat <conf/system.json +{ + "network": { + "device_intf": "endev0a", + "internet_intf": "eth0" + }, + "log_level": "DEBUG" +} +EOF + +sudo cmd/install + +sudo cmd/start --single-intf > $TESTRUN_OUT 2>&1 & +TPID=$! + +# Time to wait for testrun to be ready +WAITING=600 +for i in `seq 1 $WAITING`; do + if [[ -n $(fgrep "Waiting for devices on the network" $TESTRUN_OUT) ]]; then + break + fi + + if [[ ! 
-d /proc/$TPID ]]; then + cat $TESTRUN_OUT + echo "error encountered starting test run" + exit 1 + fi + + sleep 1 +done + +if [[ $i -eq $WAITING ]]; then + cat $TESTRUN_OUT + echo "failed after waiting $WAITING seconds for test-run start" + exit 1 +fi + +# Load Test Container +sudo docker run --network=endev0 --cap-add=NET_ADMIN -v /tmp:/out --privileged ci1 + +echo "Done baseline test" + +more $TESTRUN_OUT + +pytest testing/ + +exit $? diff --git a/testing/test_baseline.py b/testing/test_baseline.py new file mode 100644 index 000000000..b356983dd --- /dev/null +++ b/testing/test_baseline.py @@ -0,0 +1,54 @@ +import json +import pytest +import re +import os + +NTP_SERVER = '10.10.10.5' +DNS_SERVER = '10.10.10.4' + +CI_BASELINE_OUT = '/tmp/testrun_ci.json' + +@pytest.fixture +def container_data(): + dir = os.path.dirname(os.path.abspath(__file__)) + with open(CI_BASELINE_OUT, encoding='utf-8') as f: + return json.load(f) + +@pytest.fixture +def validator_results(): + dir = os.path.dirname(os.path.abspath(__file__)) + with open(os.path.join(dir, + '../', + 'runtime/validation/faux-dev/result.json'), + encoding='utf-8') as f: + return json.load(f) + +@pytest.mark.skip(reason='requires internet') +def test_internet_connectivity(container_data): + assert container_data['network']['internet'] == 200 + +def test_dhcp_ntp_option(container_data): + """ Check DHCP gives NTP server as option """ + assert container_data['dhcp']['ntp-servers'] == NTP_SERVER + +def test_dhcp_dns_option(container_data): + assert container_data['dhcp']['domain-name-servers'] == DNS_SERVER + +def test_assigned_ipv4_address(container_data): + assert int(container_data['network']['ipv4'].split('.')[-1][:-3]) > 10 + +def test_ntp_server_reachable(container_data): + assert not 'no servers' in container_data['ntp_offset'] + +def test_dns_server_reachable(container_data): + assert not 'no servers' in container_data['dns_response'] + +def test_dns_server_resolves(container_data): + assert 
re.match(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', + container_data['dns_response']) + +@pytest.mark.skip(reason='requires internet') +def test_validator_results_compliant(validator_results): + results = [True if x['result'] == 'compliant' else False + for x in validator_results['results']] + assert all(results) diff --git a/testing/test_pylint b/testing/test_pylint new file mode 100755 index 000000000..e3ade62b5 --- /dev/null +++ b/testing/test_pylint @@ -0,0 +1,27 @@ +#!/bin/bash + +ERROR_LIMIT=1100 + +sudo cmd/install + +source venv/bin/activate +sudo pip3 install pylint + +files=$(find . -path ./venv -prune -o -name '*.py' -print) + +OUT=pylint.out + +rm -f $OUT && touch $OUT + +pylint $files -ry --extension-pkg-allow-list=docker --evaluation="error + warning + refactor + convention" 2>/dev/null | tee -a $OUT + +new_errors=$(cat $OUT | grep -oP "(?!=^Your code has been rated at)([0-9]+)(?=\.00/10[ \(]?)" ) + +echo "$new_errors > $ERROR_LIMIT?" +if (( $new_errors > $ERROR_LIMIT)); then + echo new errors $new_errors > error limit $ERROR_LIMIT + echo failing .. + exit 1 +fi + +exit 0