Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 38 additions & 4 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,25 @@ on:
# run every night at midnight
- cron: '0 0 * * *'

# TODO: Our workflow is far from ideal. We need to refactor it into multiple
# ones and only run commands which are needed for some steps for those steps and
# not for all
jobs:
pre_job:
name: Skip Duplicate Jobs Pre Job
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@4c656bbdb6906310fa6213604828008bc28fe55d # v3.3.0
with:
cancel_others: 'true'
github_token: ${{ github.token }}

ci:
needs: pre_job
if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
name: '${{ matrix.name }} - python (${{ matrix.python-version }})'
runs-on: ubuntu-latest
strategy:
Expand All @@ -40,13 +57,13 @@ jobs:
- name: 'Unit Tests'
task: 'ci-unit'
python-version: '3.6'
- name: 'Integration Tests'
task: 'ci-integration'
python-version: '3.6'
# This job is slow so we only run it on a daily basis
# - name: 'Micro Benchmarks'
# task: 'micro-benchmarks'
# python-version: '3.6'
- name: 'Integration Tests'
task: 'ci-integration'
python-version: '3.6'
services:
mongo:
image: mongo:4.0
Expand Down Expand Up @@ -82,6 +99,23 @@ jobs:
#- 15671:15671/tcp # Management: SSL port
#- 25672:25672/tcp # inter-node or CLI
#- 4369:4369/tcp # epmd
#

# Used for the coordination backend for integration tests
# TODO: Only start this for integration tests via job step
# https://github.bokerqi.topmunity/t/conditional-services-in-a-job/135301/3
redis:
# Docker Hub image
image: redis
# Set health checks to wait until redis has started
options: >-
--name "redis"
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would add --name redis so that it is easier to target in docker commands like:
docker exec redis echo command to run

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, for now I just added it as a service container, but once I have some more time and once I tackle GHA workflow refactoring into multiple jobs, I will re-organize it so we only spin it up for integration tests.

At this point I don't think we gain much if we move it to a task and conditionally spin it up there, since we need to refactor the GHA workflow sooner or later anyway — it's a bit messy, and instead of those various ifs we should utilize multiple jobs...

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And done - added the container name (will push it once the build is done).

ports:
- 6379:6379/tcp

env:
TASK: '${{ matrix.task }}'
Expand Down Expand Up @@ -191,7 +225,7 @@ jobs:
- name: Install requirements
run: |
./scripts/ci/install-requirements.sh
- name: Setup integration tests
- name: Setup Integration Tests
run: |
# prep a ci-specific dev conf file that uses runner instead of stanley
# this user is the username of the user in GitHub actions, used for SSH, etc during
Expand Down
19 changes: 19 additions & 0 deletions scripts/github/prepare-integration.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,25 @@ fi
# shellcheck disable=SC1091
source ./virtualenv/bin/activate

# Enable coordination backend to avoid race conditions with orquesta tests due
# to the lack of the coordination backend
sed -i "s#\#url = redis://localhost#url = redis://127.0.0.1#g" ./conf/st2.dev.conf
sed -i "s#\#url = redis://localhost#url = redis://127.0.0.1#g" ./conf/st2.ci.conf || true

echo "Used config for the tests"
echo ""
echo "st2.dev.conf"
echo ""
cat conf/st2.dev.conf
echo ""
echo "st2.ci.conf"
echo ""
cat conf/st2.ci.conf || true
echo ""

# Needed by the coordination backend
pip install "redis==3.5.3"

# install st2 client
python ./st2client/setup.py develop
st2 --version
Expand Down
4 changes: 4 additions & 0 deletions st2common/st2common/service_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
from st2common.services import coordination
from st2common.logging.misc import add_global_filters_for_all_loggers
from st2common.constants.error_messages import PYTHON2_DEPRECATION
from st2common.services.coordination import get_driver_name

# Note: This is here for backward compatibility.
# Function has been moved in a standalone module to avoid expensive in-direct
Expand Down Expand Up @@ -147,6 +148,9 @@ def setup(

LOG.info("Using logging config: %s", logging_config_path)

LOG.info("Using coordination driver: %s", get_driver_name())
LOG.info("Using metrics driver: %s", cfg.CONF.metrics.driver)

is_debug_enabled = cfg.CONF.debug or cfg.CONF.system.debug

try:
Expand Down
13 changes: 13 additions & 0 deletions st2common/st2common/services/coordination.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,19 @@ def configured():
return backend_configured and not mock_backend


def get_driver_name():
    """
    Return the coordination driver name (aka the protocol / scheme portion of
    the configured coordination URL), e.g. "redis" for "redis://127.0.0.1".

    :return: Driver name, or ``None`` when no coordination URL is configured.
             (The previous ``-> str`` annotation was incorrect for the
             unconfigured case.)
    :rtype: ``str`` or ``None``
    """
    url = cfg.CONF.coordination.url

    if not url:
        # No coordination backend has been configured
        return None

    # The driver name is everything before the "://" separator. partition()
    # also behaves sanely if the separator is missing (returns the whole URL).
    driver_name = url.partition("://")[0]
    return driver_name


def coordinator_setup(start_heart=True):
"""
Sets up the client for the coordination service.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,11 +74,13 @@ def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self):
process.send_signal(signal.SIGKILL)

# Verify first 4 environment related log messages
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[:4])
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[:6])
self.assertIn("INFO [-] Using Python:", stdout)
self.assertIn("INFO [-] Using fs encoding:", stdout)
self.assertIn("INFO [-] Using config files:", stdout)
self.assertIn("INFO [-] Using logging config:", stdout)
self.assertIn("INFO [-] Using coordination driver:", stdout)
self.assertIn("INFO [-] Using metrics driver:", stdout)

# 1. INFO log level - audit messages should not be included
process = self._start_process(config_path=ST2_CONFIG_INFO_LL_PATH)
Expand All @@ -88,8 +90,8 @@ def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self):
eventlet.sleep(3)
process.send_signal(signal.SIGKILL)

# First 4 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[4:])
# First 6 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[6:])

self.assertIn("INFO [-]", stdout)
self.assertNotIn("DEBUG [-]", stdout)
Expand All @@ -103,8 +105,8 @@ def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self):
eventlet.sleep(5)
process.send_signal(signal.SIGKILL)

# First 4 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[4:])
# First 6 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[6:])

self.assertIn("INFO [-]", stdout)
self.assertIn("DEBUG [-]", stdout)
Expand All @@ -118,8 +120,8 @@ def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self):
eventlet.sleep(5)
process.send_signal(signal.SIGKILL)

# First 4 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[4:])
# First 6 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[6:])

self.assertNotIn("INFO [-]", stdout)
self.assertNotIn("DEBUG [-]", stdout)
Expand All @@ -133,8 +135,8 @@ def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self):
eventlet.sleep(5)
process.send_signal(signal.SIGKILL)

# First 4 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[4:])
# First 6 log lines are debug messages about the environment which are always logged
stdout = "\n".join(process.stdout.read().decode("utf-8").split("\n")[6:])

self.assertIn("INFO [-]", stdout)
self.assertIn("DEBUG [-]", stdout)
Expand Down
12 changes: 12 additions & 0 deletions st2common/tests/unit/services/test_synchronization.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,16 +39,28 @@ def tearDownClass(cls):
super(SynchronizationTest, cls).tearDownClass()

def test_service_configured(self):
    # Each scenario: (coordination url, expected configured() result or
    # None to skip that check, expected get_driver_name() result).
    scenarios = [
        (None, None, None),
        ("kazoo://127.0.0.1:2181", True, "kazoo"),
        ("file:///tmp", False, "file"),
        ("zake://", False, "zake"),
        ("redis://foo:bar@127.0.0.1", True, "redis"),
    ]

    for url, expect_configured, expect_driver in scenarios:
        cfg.CONF.set_override(name="url", override=url, group="coordination")

        if expect_configured is True:
            self.assertTrue(coordination.configured())
        elif expect_configured is False:
            self.assertFalse(coordination.configured())

        self.assertEqual(coordination.get_driver_name(), expect_driver)

def test_lock(self):
name = uuid.uuid4().hex
Expand Down