diff --git a/.travis.yml b/.travis.yml index e4912a9e2a..5d28bf5f6a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,7 @@ # Used old infrastructure, needed for integration tests: # http://docs.travis-ci.com/user/workers/standard-infrastructure/ sudo: required +# NOTE: We use precise because tests finish faster than on Xenial dist: precise language: python @@ -55,13 +56,30 @@ matrix: name: "Lint Checks, Packs Tests (Python 2.7)" - env: TASK="compilepy3 ci-py3-unit" CACHE_NAME=py3 COMMAND_THRESHOLD=680 python: 3.6 - name: "Unit Tests (Python 3.6)" + name: "Unit Tests, Pack Tests (Python 3.6)" - env: TASK="ci-py3-integration" CACHE_NAME=py3 COMMAND_THRESHOLD=310 python: 3.6 name: "Integration Tests (Python 3.6)" -services: - - rabbitmq +addons: + apt: + sources: + - mongodb-upstart + - sourceline: 'deb [arch=amd64] http://repo.mongodb.org/apt/ubuntu precise/mongodb-org/3.4 multiverse' + key_url: 'https://www.mongodb.org/static/pgp/server-3.4.asc' + # NOTE: Precise repo doesn't contain Erlang 20.x, latest version is 19.x so we need to use RabbitMQ 3.7.6 + #- sourceline: 'deb [arch=amd64] http://packages.erlang-solutions.com/ubuntu precise contrib' + # key_url: 'https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc' + #- sourceline: 'deb [arch=amd64] https://dl.bintray.com/rabbitmq/debian precise rabbitmq-server-v3.6.x' + # key_url: 'https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc' + - sourceline: 'ppa:git-core/ppa' + packages: + - mongodb-org-server + - mongodb-org-shell + - erlang + - rabbitmq-server + - git + - libffi-dev cache: pip: true @@ -76,39 +94,39 @@ cache: #- .tox/ before_install: - # 1. Install MongoDB 3.4 and latest version of git - - sudo add-apt-repository -y ppa:git-core/ppa - - curl https://www.mongodb.org/static/pgp/server-3.4.asc | sudo apt-key add - - - echo "deb [arch=amd64] http://repo.mongodb.org/apt/ubuntu precise/mongodb-org/3.4 multiverse" | sudo tee -a /etc/apt/sources.list - # Work around for Travis timeout issues, see https://github.com/travis-ci/travis-ci/issues/9112 - - sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60" - - sudo apt-get install mongodb-org-server mongodb-org-shell git libffi-dev -y + # Work around for apt Travis timeout issues, see https://github.com/travis-ci/travis-ci/issues/9112 + #- sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60" - pip install --upgrade "pip>=9.0,<9.1" - sudo pip install --upgrade "virtualenv==15.1.0" install: - ./scripts/travis/install-requirements.sh - - if [ "${TASK}" = 'ci-unit' ] || [ "${TASK}" = 'ci-integration' ] || [ "${TASK}" = 'compilepy3 ci-py3-unit' ] || [ "${TASK}" = 'ci-py3-integration' ]; then sudo .circle/add-itest-user.sh; fi + - if [ "${TASK}" = 'ci-unit' ] || [ "${TASK}" = 'ci-integration' ] || [ "${TASK}" = 'ci-checks ci-packs-tests' ] || [ "${TASK}" = 'compilepy3 ci-py3-unit' ] || [ "${TASK}" = 'ci-py3-integration' ]; then sudo .circle/add-itest-user.sh; fi # Let's enable rabbitmqadmin # See https://github.com/messagebus/lapine/wiki/Testing-on-Travis. 
before_script: - # key_url no longer works for APT addon # Use a custom mongod.conf which uses various speed optimizations - sudo cp scripts/travis/mongod.conf /etc/mongod.conf # Clean up any old MongoDB 3.4 data files laying around and make sure mongodb user can write to it - sudo rm -rf /var/lib/mongodb ; sudo mkdir /var/lib/mongodb ; sudo chown -R mongodb:mongodb /var/lib/mongodb - sudo service mongod restart ; sleep 5 - sudo service mongod status - - tail -30 /var/log/mongodb/mongod.log - - mongod --version - - git --version - - pip --version - - virtualenv --version + - sudo tail -n 30 /var/log/mongodb/mongod.log + # Use custom RabbitMQ config which enables SSL / TLS listener on port 5671 with test certs + - sudo cp scripts/travis/rabbitmq.config /etc/rabbitmq/rabbitmq.config + # Install rabbitmq_management RabbitMQ plugin + - sudo service rabbitmq-server restart ; sleep 5 - sudo rabbitmq-plugins enable rabbitmq_management - sudo wget http://guest:guest@localhost:15672/cli/rabbitmqadmin -O /usr/local/bin/rabbitmqadmin - sudo chmod +x /usr/local/bin/rabbitmqadmin - sudo service rabbitmq-server restart + - sudo tail -n 30 /var/log/rabbitmq/* + # Print various binary versions + - mongod --version + - git --version + - pip --version + - virtualenv --version # Print out various environment variables info - make play diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1a8b431a8b..cb0f832f54 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,78 @@ Changelog ========= +in development +-------------- + +Added +~~~~~ + +* Add support for various new SSL / TLS related config options (``ssl_keyfile``, ``ssl_certfile``, + ``ssl_ca_certs``, ``ssl_cert_reqs``, ``login_method``) to the ``messaging`` section in + the ``st2.conf`` config file. + + With those config options, users can configure things such as client-based certificate + authentication, client-side verification of a server certificate against a specific CA bundle, etc. + + NOTE: Those options are only supported when using the default and officially supported AMQP backend + with a RabbitMQ server. (new feature) #4541 +* Add metrics instrumentation to the ``st2notifier`` service. For the available / exposed metrics, + please refer to https://docs.stackstorm.com/reference/metrics.html. (improvement) #4536 + +Changed +~~~~~~~ + +* Update logging code so we exclude log messages with log level ``AUDIT`` from the default service + log file (e.g. ``st2api.log``). Log messages with level ``AUDIT`` are already logged in a + dedicated service audit log file (e.g. ``st2api.audit.log``), so there is no need for them to also + be duplicated and included in the regular service log file. + + NOTE: To aid with debugging, audit log messages are also included in the regular log file when the log + level is set to ``DEBUG`` or the ``system.debug`` config option is set to ``True``. + + Reported by Nick Maludy. (improvement) #4538 #4502 +* Update the ``pyyaml`` dependency to the latest version. This latest version fixes an issue which + could result in a code execution vulnerability if code uses ``yaml.load`` in an unsafe manner + on untrusted input. + + NOTE: The StackStorm platform itself is not affected, because we already used ``yaml.safe_load`` + everywhere. + + Only custom packs which use ``yaml.load`` with untrusted user input could potentially be + affected. (improvement) #4510 #4552 #4554 +* Update Orquesta to ``v0.4``. #4551 + +Fixed +~~~~~ + +* Fixed the ``packs.pack_install`` / ``!pack install {{ packs }}`` action-alias to not have + redundant patterns.
Previously this prevented it from being executed via + ``st2 action-alias execute 'pack install xxx'``. #4511 + + Contributed by Nick Maludy (Encore Technologies). +* Fix datastore value encryption and make sure it also works correctly for unicode (non-ascii) + values. + + Reported by @dswebbthg, @nickbaum. (bug fix) #4513 #4527 #4528 +* Fix a bug with action positional parameter serialization used in the local and remote script runners + not working correctly with non-ascii (unicode) values. + + This would prevent actions such as ``core.sendmail`` which utilize positional parameters from + working correctly when a unicode value was provided. + + Reported by @johandahlberg. (bug fix) #4533 +* Fix the ``core.sendmail`` action so it specifies ``charset=UTF-8`` in the ``Content-Type`` email + header. This way it works correctly when an email subject and / or body contains unicode data. + + Reported by @johandahlberg. (bug fix) #4533 #4534 +* Fix CLI ``st2 apikey load`` not being idempotent and API endpoint ``/api/v1/apikeys`` not + honoring the desired ``ID`` for new record creation. #4542 +* Moved the lock from concurrency policies into the scheduler to fix a race condition when there + are multiple scheduler instances scheduling executions for actions with concurrency policies. + #4481 (bug fix) +* Add retries to the scheduler to handle temporary hiccups in the DB connection. Refactor the scheduler + service to return a proper exit code when there is a failure. #4539 (bug fix) + 2.10.1 - December 19, 2018 -------------------------- diff --git a/Makefile b/Makefile index 9a769aca53..0d324e03b7 100644 --- a/Makefile +++ b/Makefile @@ -132,8 +132,11 @@ play: @echo .PHONY: check -check: requirements flake8 checklogs +check: check-requirements flake8 checklogs +# NOTE: We pass --no-deps to the script so we don't install all the +# package dependencies which are already installed as part of the "requirements" +# make targets. This speeds up the build. .PHONY: install-runners install-runners: @echo "" @@ -143,9 +146,18 @@ install-runners: echo "==========================================================="; \ echo "Installing runner:" $$component; \ echo "==========================================================="; \ - (. $(VIRTUALENV_DIR)/bin/activate; cd $$component; python setup.py develop); \ + (. $(VIRTUALENV_DIR)/bin/activate; cd $$component; python setup.py develop --no-deps); \ done +.PHONY: check-requirements +check-requirements: requirements + @echo + @echo "============== CHECKING REQUIREMENTS ==============" + @echo + # Update requirements and then make sure no files were changed + git status -- *requirements.txt */*requirements.txt | grep -q "nothing to commit" + @echo "All requirements files up-to-date!" + .PHONY: checklogs checklogs: @echo @@ -175,6 +187,19 @@ configgen: requirements .configgen @echo "================== pylint ====================" @echo # Lint st2 components + @for component in $(COMPONENTS); do\ + echo "==========================================================="; \ + echo "Running pylint on" $$component; \ + echo "==========================================================="; \ + . 
$(VIRTUALENV_DIR)/bin/activate ; pylint -j $(PYLINT_CONCURRENCY) -E --rcfile=./lint-configs/python/.pylintrc --load-plugins=pylint_plugins.api_models --load-plugins=pylint_plugins.db_models $$component/$$component || exit 1; \ + done + # Lint runner modules and packages + @for component in $(COMPONENTS_RUNNERS); do\ + echo "==========================================================="; \ + echo "Running pylint on" $$component; \ + echo "==========================================================="; \ + . $(VIRTUALENV_DIR)/bin/activate ; pylint -j $(PYLINT_CONCURRENCY) -E --rcfile=./lint-configs/python/.pylintrc --load-plugins=pylint_plugins.api_models --load-plugins=pylint_plugins.db_models $$component/*.py || exit 1; \ + done # Lint Python pack management actions . $(VIRTUALENV_DIR)/bin/activate; pylint -j $(PYLINT_CONCURRENCY) -E --rcfile=./lint-configs/python/.pylintrc --load-plugins=pylint_plugins.api_models contrib/packs/actions/*.py || exit 1; . $(VIRTUALENV_DIR)/bin/activate; pylint -j $(PYLINT_CONCURRENCY) -E --rcfile=./lint-configs/python/.pylintrc --load-plugins=pylint_plugins.api_models contrib/packs/actions/*/*.py || exit 1; @@ -827,7 +852,7 @@ debs: ci: ci-checks ci-unit ci-integration ci-mistral ci-packs-tests .PHONY: ci-checks -ci-checks: compile .generated-files-check .pylint .flake8 .bandit .st2client-dependencies-check .st2common-circular-dependencies-check circle-lint-api-spec .rst-check .st2client-install-check +ci-checks: compile .generated-files-check .pylint .flake8 check-requirements .st2client-dependencies-check .st2common-circular-dependencies-check circle-lint-api-spec .rst-check .st2client-install-check .PHONY: ci-py3-unit ci-py3-unit: diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index c913610639..939f95b25e 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -175,14 +175,26 @@ mask_secrets_blacklist = # comma separated list allowed here. mask_secrets = True [messaging] -# URL of the messaging server. -url = amqp://guest:guest@127.0.0.1:5672// -# How long should we wait between connection retries. -connection_retry_wait = 10000 +# Certificate file used to identify the local connection (client). +ssl_certfile = None # How many times should we retry connection before failing. connection_retries = 10 +# Use SSL / TLS to connect to the messaging server. Same as appending "?ssl=true" at the end of the connection URL string. +ssl = False +# URL of the messaging server. +url = amqp://guest:guest@127.0.0.1:5672// +# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided. +ssl_cert_reqs = None # URL of all the nodes in a messaging service cluster. cluster_urls = # comma separated list allowed here. +# How long should we wait between connection retries. +connection_retry_wait = 10000 +# Private keyfile used to identify the local connection against RabbitMQ. +ssl_keyfile = None +# ca_certs file contains a set of concatenated CA certificates, which are used to validate certificates passed from RabbitMQ. +ssl_ca_certs = None +# Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.). +login_method = None [metrics] # Randomly sample and only send metrics for X% of metric operations to the backend. Default value of 1 means no sampling is done and all the metrics are sent to the backend. E.g. 0.1 would mean 10% of operations are sampled. 
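For reference, here is a minimal sketch of how the new ``[messaging]`` SSL / TLS options above could be mapped onto a ``kombu`` connection. This is an illustration only, not the actual ``st2common.transport.utils.get_connection()`` implementation referenced elsewhere in this diff. The ``ssl`` and ``login_method`` keyword arguments and the ``ssl.CERT_*`` constants are standard ``kombu`` / stdlib API; everything else here is an assumption:

import ssl

from oslo_config import cfg
from kombu import Connection


def get_connection():
    messaging = cfg.CONF.messaging

    # Build the kombu / py-amqp SSL option dict only from the options which are set
    ssl_kwargs = {}
    if messaging.ssl_keyfile:
        ssl_kwargs['keyfile'] = messaging.ssl_keyfile
    if messaging.ssl_certfile:
        ssl_kwargs['certfile'] = messaging.ssl_certfile
    if messaging.ssl_ca_certs:
        ssl_kwargs['ca_certs'] = messaging.ssl_ca_certs
    if messaging.ssl_cert_reqs:
        # 'none' / 'optional' / 'required' map onto the stdlib ssl.CERT_* constants
        ssl_kwargs['cert_reqs'] = getattr(ssl, 'CERT_%s' % (messaging.ssl_cert_reqs.upper()))

    # kombu accepts ssl=True or a dict of SSL options; login_method allows e.g.
    # EXTERNAL (client certificate based) authentication against RabbitMQ
    return Connection(messaging.url,
                      ssl=ssl_kwargs or messaging.ssl,
                      login_method=messaging.login_method)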
@@ -249,14 +261,18 @@ thread_pool_size = 10 logging = /etc/st2/logging.rulesengine.conf [scheduler] -# How long (in seconds) to sleep between each action scheduler main loop run interval. -sleep_interval = 0.1 -# How often (in seconds) to look for zombie execution requests before rescheduling them. -gc_interval = 10 +# The maximum number of attempts that the scheduler retries on error. +retry_max_attempt = 10 # Location of the logging configuration file. logging = /etc/st2/logging.scheduler.conf +# How long (in seconds) to sleep between each action scheduler main loop run interval. +sleep_interval = 0.1 # The size of the pool used by the scheduler for scheduling executions. pool_size = 10 +# The number of milliseconds to wait in between retries. +retry_wait_msec = 3000 +# How often (in seconds) to look for zombie execution requests before rescheduling them. +gc_interval = 10 [schema] # Version of JSON schema to use. diff --git a/conf/st2.dev.conf b/conf/st2.dev.conf index c266534710..52f9ac61dc 100644 --- a/conf/st2.dev.conf +++ b/conf/st2.dev.conf @@ -91,7 +91,14 @@ ssh_key_file = /home/vagrant/.ssh/stanley_rsa [messaging] url = amqp://guest:guest@127.0.0.1:5672/ -#url = redis://localhost:6379/0 +# Uncomment to test SSL options +#url = amqp://guest:guest@127.0.0.1:5671/ +#ssl = True +#ssl_keyfile = /data/stanley/st2tests/st2tests/fixtures/ssl_certs/client/private_key.pem +#ssl_certfile = /data/stanley/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.pem +#ssl_ca_certs = /data/stanley/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.pem +#ssl_cert_reqs = required +#ssl_cert_reqs = required [ssh_runner] remote_dir = /tmp diff --git a/contrib/core/actions/send_mail/send_mail b/contrib/core/actions/send_mail/send_mail index 1d9bdbdc14..6011c43d49 100755 --- a/contrib/core/actions/send_mail/send_mail +++ b/contrib/core/actions/send_mail/send_mail @@ -3,14 +3,27 @@ HOSTNAME=$(hostname -f) LINE_BREAK="" -SENDMAIL=`which sendmail` -if [ $? -ne 0 ]; then - echo "Unable to find sendmail binary in PATH" >&2 - exit 2 +FOOTER="This message was generated by StackStorm action `basename $0` running on `hostname`" + +# Allow user to provide a custom sendmail binary for more flexibility and easier +# testing +SENDMAIL_BINARY=$1 + +if [ "${SENDMAIL_BINARY}" = "None" ]; then + # If path to the sendmail binary is not provided, try to find one in $PATH + SENDMAIL=`which sendmail` + + if [ $? -ne 0 ]; then + echo "Unable to find sendmail binary in PATH" >&2 + exit 2 + fi + + MAIL="$SENDMAIL -t" +else + MAIL="${SENDMAIL_BINARY}" fi +shift -MAIL="$SENDMAIL -t" -FOOTER="This message was generated by StackStorm action `basename $0` running on `hostname`" if [[ $1 =~ '@' ]]; then FROM=$1 else @@ -52,7 +65,7 @@ if [[ -z $trimmed && $SEND_EMPTY_BODY -eq 1 ]] || [[ -n $trimmed ]]; then cat <=3.9.1,<4.0 diff --git a/contrib/core/tests/test_action_sendmail.py b/contrib/core/tests/test_action_sendmail.py new file mode 100644 index 0000000000..4d003aa9be --- /dev/null +++ b/contrib/core/tests/test_action_sendmail.py @@ -0,0 +1,277 @@ +# -*- coding: utf-8 -*- +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid +import base64 +import tempfile +import socket + +import six +import mock +import mailparser + +from st2common.constants import action as action_constants + +from st2tests.fixturesloader import FixturesLoader +from st2tests.base import RunnerTestCase +from st2tests.base import CleanDbTestCase +from st2tests.base import CleanFilesTestCase + +from local_runner.local_shell_script_runner import LocalShellScriptRunner + +__all__ = [ + 'SendmailActionTestCase' +] + +MOCK_EXECUTION = mock.Mock() +MOCK_EXECUTION.id = '598dbf0c0640fd54bffc688b' +HOSTNAME = socket.gethostname() + + +class SendmailActionTestCase(RunnerTestCase, CleanDbTestCase, CleanFilesTestCase): + """ + NOTE: These tests rely on the stanley user being available on the system and having passwordless + sudo access. + """ + fixtures_loader = FixturesLoader() + + def test_sendmail_default_text_html_content_type(self): + action_parameters = { + 'sendmail_binary': 'cat', + + 'from': 'from.user@example.tld1', + 'to': 'to.user@example.tld2', + 'subject': 'this is subject 1', + 'send_empty_body': False, + 'content_type': 'text/html', + 'body': 'Hello there html.', + 'attachments': '' + } + + expected_body = ('Hello there html.\n' + '
<br><br>
\n' + 'This message was generated by StackStorm action ' + 'send_mail running on %s' % (HOSTNAME)) + + status, _, email_data, message = self._run_action(action_parameters=action_parameters) + self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED) + + # Verify subject contains utf-8 charset and is base64 encoded + self.assertTrue('SUBJECT: =?UTF-8?B?' in email_data) + + self.assertEqual(message.to[0][1], action_parameters['to']) + self.assertEqual(message.from_[0][1], action_parameters['from']) + self.assertEqual(message.subject, action_parameters['subject']) + self.assertEqual(message.body, expected_body) + self.assertEqual(message.content_type, 'text/html; charset=UTF-8') + + def test_sendmail_text_plain_content_type(self): + action_parameters = { + 'sendmail_binary': 'cat', + + 'from': 'from.user@example.tld1', + 'to': 'to.user@example.tld2', + 'subject': 'this is subject 2', + 'send_empty_body': False, + 'content_type': 'text/plain', + 'body': 'Hello there plain.', + 'attachments': '' + } + + expected_body = ('Hello there plain.\n\n' + 'This message was generated by StackStorm action ' + 'send_mail running on %s' % (HOSTNAME)) + + status, _, email_data, message = self._run_action(action_parameters=action_parameters) + self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED) + + # Verify subject contains utf-8 charset and is base64 encoded + self.assertTrue('SUBJECT: =?UTF-8?B?' in email_data) + + self.assertEqual(message.to[0][1], action_parameters['to']) + self.assertEqual(message.from_[0][1], action_parameters['from']) + self.assertEqual(message.subject, action_parameters['subject']) + self.assertEqual(message.body, expected_body) + self.assertEqual(message.content_type, 'text/plain; charset=UTF-8') + + def test_sendmail_utf8_subject_and_body(self): + # 1. text/html + action_parameters = { + 'sendmail_binary': 'cat', + + 'from': 'from.user@example.tld1', + 'to': 'to.user@example.tld2', + 'subject': u'Γ… unicode subject πŸ˜ƒπŸ˜ƒ', + 'send_empty_body': False, + 'content_type': 'text/html', + 'body': u'Hello there πŸ˜ƒπŸ˜ƒ.', + 'attachments': '' + } + + if six.PY2: + expected_body = (u'Hello there πŸ˜ƒπŸ˜ƒ.\n' + u'
<br><br>
\n' + u'This message was generated by StackStorm action ' + u'send_mail running on %s' % (HOSTNAME)) + else: + expected_body = (u'Hello there \\U0001f603\\U0001f603.\n' + u'
<br><br>
\n' + u'This message was generated by StackStorm action ' + u'send_mail running on %s' % (HOSTNAME)) + + status, _, email_data, message = self._run_action(action_parameters=action_parameters) + self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED) + + # Verify subject contains utf-8 charset and is base64 encoded + self.assertTrue('SUBJECT: =?UTF-8?B?' in email_data) + + self.assertEqual(message.to[0][1], action_parameters['to']) + self.assertEqual(message.from_[0][1], action_parameters['from']) + self.assertEqual(message.subject, action_parameters['subject']) + self.assertEqual(message.body, expected_body) + self.assertEqual(message.content_type, 'text/html; charset=UTF-8') + + # 2. text/plain + action_parameters = { + 'sendmail_binary': 'cat', + + 'from': 'from.user@example.tld1', + 'to': 'to.user@example.tld2', + 'subject': u'Γ… unicode subject πŸ˜ƒπŸ˜ƒ', + 'send_empty_body': False, + 'content_type': 'text/plain', + 'body': u'Hello there πŸ˜ƒπŸ˜ƒ.', + 'attachments': '' + } + + if six.PY2: + expected_body = (u'Hello there πŸ˜ƒπŸ˜ƒ.\n\n' + u'This message was generated by StackStorm action ' + u'send_mail running on %s' % (HOSTNAME)) + else: + expected_body = (u'Hello there \\U0001f603\\U0001f603.\n\n' + u'This message was generated by StackStorm action ' + u'send_mail running on %s' % (HOSTNAME)) + + status, _, email_data, message = self._run_action(action_parameters=action_parameters) + self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED) + + self.assertEqual(message.to[0][1], action_parameters['to']) + self.assertEqual(message.from_[0][1], action_parameters['from']) + self.assertEqual(message.subject, action_parameters['subject']) + self.assertEqual(message.body, expected_body) + self.assertEqual(message.content_type, 'text/plain; charset=UTF-8') + + def test_sendmail_with_attachments(self): + _, path_1 = tempfile.mkstemp() + _, path_2 = tempfile.mkstemp() + os.chmod(path_1, 0o755) + os.chmod(path_2, 0o755) + + self.to_delete_files.append(path_1) + self.to_delete_files.append(path_2) + + with open(path_1, 'w') as fp: + fp.write('content 1') + + with open(path_2, 'w') as fp: + fp.write('content 2') + + action_parameters = { + 'sendmail_binary': 'cat', + + 'from': 'from.user@example.tld1', + 'to': 'to.user@example.tld2', + 'subject': 'this is email with attachments', + 'send_empty_body': False, + 'content_type': 'text/plain', + 'body': 'Hello there plain.', + 'attachments': '%s,%s' % (path_1, path_2) + } + + expected_body = ('Hello there plain.\n\n' + 'This message was generated by StackStorm action ' + 'send_mail running on %s' % (HOSTNAME)) + + status, _, email_data, message = self._run_action(action_parameters=action_parameters) + self.assertEquals(status, action_constants.LIVEACTION_STATUS_SUCCEEDED) + + # Verify subject contains utf-8 charset and is base64 encoded + self.assertTrue('SUBJECT: =?UTF-8?B?' 
in email_data) + + self.assertEqual(message.to[0][1], action_parameters['to']) + self.assertEqual(message.from_[0][1], action_parameters['from']) + self.assertEqual(message.subject, action_parameters['subject']) + self.assertEqual(message.body, expected_body) + self.assertEqual(message.content_type, + 'multipart/mixed; boundary="ZZ_/afg6432dfgkl.94531q"') + + # There should be 3 message parts - 2 for attachments, one for body + self.assertEqual(email_data.count('--ZZ_/afg6432dfgkl.94531q'), 3) + + # There should be 2 attachments + self.assertEqual(email_data.count('Content-Transfer-Encoding: base64'), 2) + self.assertTrue(base64.b64encode(b'content 1').decode('utf-8') in email_data) + self.assertTrue(base64.b64encode(b'content 2').decode('utf-8') in email_data) + + def _run_action(self, action_parameters): + """ + Run the action with the provided action parameters, return the status and output and + parse the output email data. + """ + models = self.fixtures_loader.load_models( + fixtures_pack='packs/core', fixtures_dict={'actions': ['sendmail.yaml']}) + action_db = models['actions']['sendmail.yaml'] + entry_point = self.fixtures_loader.get_fixture_file_path_abs( + 'packs/core', 'actions', 'send_mail/send_mail') + + runner = self._get_runner(action_db, entry_point=entry_point) + runner.pre_run() + status, result, _ = runner.run(action_parameters) + runner.post_run(status, result) + + # Remove footer added by the action which is not part of raw email data and parse + # the message + if 'stdout' in result: + email_data = result['stdout'] + email_data = email_data.split('\n')[:-2] + email_data = '\n'.join(email_data) + + if six.PY2 and isinstance(email_data, six.text_type): + email_data = email_data.encode('utf-8') + + message = mailparser.parse_from_string(email_data) + else: + email_data = None + message = None + + return (status, result, email_data, message) + + def _get_runner(self, action_db, entry_point): + runner = LocalShellScriptRunner(uuid.uuid4().hex) + runner.execution = MOCK_EXECUTION + runner.action = action_db + runner.action_name = action_db.name + runner.liveaction_id = uuid.uuid4().hex + runner.entry_point = entry_point + runner.runner_parameters = {} + runner.context = dict() + runner.callback = dict() + runner.libs_dir_path = None + runner.auth_token = mock.Mock() + runner.auth_token.token = 'mock-token' + return runner diff --git a/contrib/examples/sensors/fibonacci_sensor.py b/contrib/examples/sensors/fibonacci_sensor.py index 6e41db339e..266e81aba3 100644 --- a/contrib/examples/sensors/fibonacci_sensor.py +++ b/contrib/examples/sensors/fibonacci_sensor.py @@ -5,8 +5,7 @@ class FibonacciSensor(PollingSensor): - def __init__(self, sensor_service, config, - poll_interval=5): + def __init__(self, sensor_service, config, poll_interval=20): super(FibonacciSensor, self).__init__( sensor_service=sensor_service, config=config, @@ -21,11 +20,20 @@ def setup(self): self.a = 0 self.b = 1 self.count = 2 + self.logger = self.sensor_service.get_logger(name=self.__class__.__name__) def poll(self): - fib = self.a + self.b - self.logger.debug('Count: %d, a: %d, b: %d', self.count, self.a, self.b) + # Reset a and b if they are large enough to avoid integer overflow problems + if self.a > 10000 or self.b > 10000: + self.logger.debug('Resetting values to avoid integer overflow issues') + + self.a = 0 + self.b = 1 + self.count = 2 + + fib = (self.a + self.b) + self.logger.debug('Count: %d, a: %d, b: %d, fib: %s', self.count, self.a, self.b, fib) payload = { "count": self.count, diff --git 
a/contrib/examples/sensors/fibonacci_sensor.yaml b/contrib/examples/sensors/fibonacci_sensor.yaml index 6a2a449f56..5485cd8135 100644 --- a/contrib/examples/sensors/fibonacci_sensor.yaml +++ b/contrib/examples/sensors/fibonacci_sensor.yaml @@ -2,7 +2,7 @@ class_name: "FibonacciSensor" entry_point: "fibonacci_sensor.py" description: "Simple polling sensor that emits fibonacci number." - poll_interval: 5 + poll_interval: 20 trigger_types: - name: "fibonacci" diff --git a/contrib/linux/sensors/file_watch_sensor.py b/contrib/linux/sensors/file_watch_sensor.py index d0a74c71a9..3768e7f4a6 100644 --- a/contrib/linux/sensors/file_watch_sensor.py +++ b/contrib/linux/sensors/file_watch_sensor.py @@ -1,5 +1,7 @@ import os +import eventlet + from logshipper.tail import Tail from st2reactor.sensor.base import Sensor @@ -42,6 +44,9 @@ def add_trigger(self, trigger): if not self._trigger: raise Exception('Trigger %s did not contain a ref.' % trigger) + # Wait a bit to avoid initialization race in logshipper library + eventlet.sleep(1.0) + self._tail.add_file(filename=file_path) self._logger.info('Added file "%s"' % (file_path)) diff --git a/contrib/packs/aliases/pack_install.yaml b/contrib/packs/aliases/pack_install.yaml index 2f267c8318..2705b67e6c 100644 --- a/contrib/packs/aliases/pack_install.yaml +++ b/contrib/packs/aliases/pack_install.yaml @@ -4,10 +4,7 @@ action_ref: "packs.install" pack: "packs" description: "Install/upgrade StackStorm packs." formats: - - display: "pack install [,]" - representation: - - "pack install {{ packs }}" - - display: "pack install [,]" + - display: "pack install [,]" representation: - "pack install {{ packs }}" ack: diff --git a/contrib/runners/action_chain_runner/tests/unit/test_actionchain_cancel.py b/contrib/runners/action_chain_runner/tests/unit/test_actionchain_cancel.py index 1e663f3dab..e53a1673ad 100644 --- a/contrib/runners/action_chain_runner/tests/unit/test_actionchain_cancel.py +++ b/contrib/runners/action_chain_runner/tests/unit/test_actionchain_cancel.py @@ -19,6 +19,9 @@ import os import tempfile +from st2tests import config as test_config +test_config.parse_args() + from st2common.bootstrap import actionsregistrar from st2common.bootstrap import runnersregistrar diff --git a/contrib/runners/action_chain_runner/tests/unit/test_actionchain_pause_resume.py b/contrib/runners/action_chain_runner/tests/unit/test_actionchain_pause_resume.py index 189d74ec34..0b25930a2f 100644 --- a/contrib/runners/action_chain_runner/tests/unit/test_actionchain_pause_resume.py +++ b/contrib/runners/action_chain_runner/tests/unit/test_actionchain_pause_resume.py @@ -19,6 +19,9 @@ import os import tempfile +from st2tests import config as test_config +test_config.parse_args() + from st2common.bootstrap import actionsregistrar from st2common.bootstrap import runnersregistrar diff --git a/contrib/runners/orquesta_runner/in-requirements.txt b/contrib/runners/orquesta_runner/in-requirements.txt index 8b7542e527..b5e1f74671 100644 --- a/contrib/runners/orquesta_runner/in-requirements.txt +++ b/contrib/runners/orquesta_runner/in-requirements.txt @@ -1 +1 @@ -git+https://github.com/StackStorm/orquesta.git@v0.3#egg=orquesta +git+https://github.com/StackStorm/orquesta.git@v0.4#egg=orquesta diff --git a/contrib/runners/orquesta_runner/orquesta_runner/orquesta_runner.py b/contrib/runners/orquesta_runner/orquesta_runner/orquesta_runner.py index b241aad578..5152b212bb 100644 --- a/contrib/runners/orquesta_runner/orquesta_runner/orquesta_runner.py +++ 
b/contrib/runners/orquesta_runner/orquesta_runner/orquesta_runner.py @@ -68,7 +68,8 @@ def _construct_st2_context(self): 'st2': { 'action_execution_id': str(self.execution.id), 'api_url': api_util.get_full_public_api_url(), - 'user': self.execution.context.get('user', cfg.CONF.system_user.user) + 'user': self.execution.context.get('user', cfg.CONF.system_user.user), + 'pack': self.execution.context.get('pack', None) } } diff --git a/contrib/runners/orquesta_runner/requirements.txt b/contrib/runners/orquesta_runner/requirements.txt index 7ba0329159..c2971e635a 100644 --- a/contrib/runners/orquesta_runner/requirements.txt +++ b/contrib/runners/orquesta_runner/requirements.txt @@ -1,2 +1,2 @@ # Don't edit this file. It's generated automatically! -git+https://github.com/StackStorm/orquesta.git@v0.3#egg=orquesta +git+https://github.com/StackStorm/orquesta.git@v0.4#egg=orquesta diff --git a/contrib/runners/orquesta_runner/tests/unit/test_basic.py b/contrib/runners/orquesta_runner/tests/unit/test_basic.py index 1814588394..4f1febebfd 100644 --- a/contrib/runners/orquesta_runner/tests/unit/test_basic.py +++ b/contrib/runners/orquesta_runner/tests/unit/test_basic.py @@ -138,7 +138,11 @@ def test_run_workflow(self): 'workflow_execution_id': str(wf_ex_db.id), 'action_execution_id': str(ac_ex_db.id), 'api_url': 'http://127.0.0.1/v1', - 'user': username + 'user': username, + 'pack': 'orquesta_tests' + }, + 'parent': { + 'pack': 'orquesta_tests' } } @@ -294,6 +298,40 @@ def test_run_workflow_with_unicode_input(self): self.assertDictEqual(lv_ac_db.result, expected_result) self.assertDictEqual(ac_ex_db.result, expected_result) + def test_run_workflow_action_config_context(self): + wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'config-context.yaml') + wf_input = {} + lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'], parameters=wf_input) + lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db) + + # Assert action execution is running. + lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id)) + self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result) + wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0] + self.assertEqual(wf_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING) + + # Assert task1 is already completed. + query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'task1'} + tk1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0] + tk1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(tk1_ex_db.id))[0] + tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_db.liveaction['id']) + self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED) + self.assertTrue(wf_svc.is_action_execution_under_workflow_context(tk1_ac_ex_db)) + + # Manually handle action execution completion. + wf_svc.handle_action_execution_completion(tk1_ac_ex_db) + + # Assert workflow is completed. 
+ wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id) + self.assertEqual(wf_ex_db.status, wf_states.SUCCEEDED) + lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id)) + self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED) + ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id)) + self.assertEqual(ac_ex_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED) + + # Verify config_context works + self.assertEqual(wf_ex_db.output, {'msg': 'value of config key a'}) + def test_run_workflow_with_action_less_tasks(self): wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'action-less-tasks.yaml') wf_input = {'name': 'Thanos'} diff --git a/contrib/runners/orquesta_runner/tests/unit/test_context.py b/contrib/runners/orquesta_runner/tests/unit/test_context.py index 9d8c3fa5d1..ed2a75822c 100644 --- a/contrib/runners/orquesta_runner/tests/unit/test_context.py +++ b/contrib/runners/orquesta_runner/tests/unit/test_context.py @@ -115,7 +115,8 @@ def test_runtime_context(self): expected_st2_ctx = { 'action_execution_id': str(ac_ex_db.id), 'api_url': 'http://127.0.0.1/v1', - 'user': 'stanley' + 'user': 'stanley', + 'pack': 'orquesta_tests' } expected_st2_ctx_with_wf_ex_id = copy.deepcopy(expected_st2_ctx) diff --git a/fixed-requirements.txt b/fixed-requirements.txt index 89ee714048..f9b83b1ec2 100644 --- a/fixed-requirements.txt +++ b/fixed-requirements.txt @@ -9,12 +9,11 @@ kombu==4.2.1 # Note: amqp is used by kombu amqp==2.3.2 # NOTE: Recent version substantially affect the performance and add big import time overhead -# See https://github.com/StackStorm/st2/issues/4160#issuecomment-394386433 for -details +# See https://github.com/StackStorm/st2/issues/4160#issuecomment-394386433 for details oslo.config>=1.12.1,<1.13 oslo.utils>=3.36.2,<=3.37.0 six==1.11.0 -pyyaml==3.13 +pyyaml>=4.2b4,<5.2 requests[security]<2.15,>=2.14.1 apscheduler==3.5.3 gitpython==2.1.11 @@ -23,7 +22,7 @@ pymongo==3.7.2 mongoengine==0.16.0 passlib==1.7.1 lockfile==0.12.2 -python-gnupg==0.4.3 +python-gnupg==0.4.4 jsonpath-rw==1.4.0 pyinotify==0.9.6 semver==2.8.1 @@ -42,8 +41,8 @@ prompt-toolkit==1.0.15 tooz==1.63.1 zake==0.2.2 routes==2.4.1 -flex==6.13.2 webob==1.8.2 +flex==6.14.0 prance==0.9.0 pywinrm==0.3.0 # test requirements below diff --git a/requirements.txt b/requirements.txt index ef3c110a5d..9d32bc948c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,9 +5,9 @@ argcomplete bcrypt cryptography==2.4.1 eventlet==0.24.1 -flex==6.13.2 +flex==6.14.0 git+https://github.com/Kami/logshipper.git@stackstorm_patched#egg=logshipper -git+https://github.com/StackStorm/orquesta.git@v0.3#egg=orquesta +git+https://github.com/StackStorm/orquesta.git@v0.4#egg=orquesta git+https://github.com/StackStorm/python-mistralclient.git@st2-2.10.1#egg=python-mistralclient git+https://github.com/StackStorm/st2-auth-backend-flat-file.git@master#egg=st2-auth-backend-flat-file gitpython==2.1.11 @@ -38,12 +38,12 @@ pymongo==3.7.2 pyrabbit python-dateutil==2.7.5 python-editor==1.0.3 -python-gnupg==0.4.3 +python-gnupg==0.4.4 python-json-logger python-statsd==2.1.0 pytz==2018.7 pywinrm==0.3.0 -pyyaml==3.13 +pyyaml<5.2,>=4.2b4 rednose requests[security]<2.15,>=2.14.1 retrying==1.3.3 diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh index 68647002de..81a3e8fdf9 100755 --- a/scripts/travis/build.sh +++ b/scripts/travis/build.sh @@ -15,7 +15,7 @@ fi if [ ${TASK} == 'checks' ]; then # compile .py files, useful as compatibility syntax check make compile - make pylint flake8 bandit 
.st2client-dependencies-check .st2common-circular-dependencies-check + make pylint flake8 bandit check-requirements .st2client-dependencies-check .st2common-circular-dependencies-check elif [ ${TASK} == 'unit' ]; then # compile .py files, useful as compatibility syntax check make compile diff --git a/scripts/travis/rabbitmq.config b/scripts/travis/rabbitmq.config new file mode 100644 index 0000000000..0cf25a732a --- /dev/null +++ b/scripts/travis/rabbitmq.config @@ -0,0 +1,11 @@ +[ + {rabbit, [ + {ssl_listeners, [5671]}, + {ssl_allow_poodle_attack, true}, + {ssl_options, [{cacertfile, "/home/travis/build/StackStorm/st2/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.pem"}, + {certfile, "/home/travis/build/StackStorm/st2/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.pem"}, + {keyfile, "/home/travis/build/StackStorm/st2/st2tests/st2tests/fixtures/ssl_certs/server/private_key.pem"}, + {verify, verify_peer}, + {fail_if_no_peer_cert, false}]} + ]} +]. diff --git a/st2actions/bin/runners.sh b/st2actions/bin/runners.sh old mode 100644 new mode 100755 diff --git a/st2actions/bin/st2scheduler b/st2actions/bin/st2scheduler old mode 100644 new mode 100755 diff --git a/st2actions/st2actions/cmd/scheduler.py b/st2actions/st2actions/cmd/scheduler.py index 1ae0096084..9e548d6ebb 100644 --- a/st2actions/st2actions/cmd/scheduler.py +++ b/st2actions/st2actions/cmd/scheduler.py @@ -1,6 +1,21 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Monkey patching should be done as early as possible. # See http://eventlet.net/doc/patching.html#monkeypatching-the-standard-library from __future__ import absolute_import + from st2common.util.monkey_patch import monkey_patch monkey_patch() @@ -36,7 +51,7 @@ def _setup(): _setup_sigterm_handler() -def _run_queuer(): +def _run_scheduler(): LOG.info('(PID=%s) Scheduler started.', os.getpid()) # Lazy load these so that decorator metrics are in place @@ -51,7 +66,9 @@ def _run_queuer(): try: handler.start() entrypoint.start() - entrypoint.wait() + + # Wait on handler first since entrypoint is more durable. 
+ handler.wait() or entrypoint.wait() except (KeyboardInterrupt, SystemExit): LOG.info('(PID=%s) Scheduler stopped.', os.getpid()) @@ -68,6 +85,13 @@ def _run_queuer(): return 1 except: LOG.exception('(PID=%s) Scheduler unexpectedly stopped.', os.getpid()) + + try: + handler.shutdown() + entrypoint.shutdown() + except: + pass + return 1 return 0 @@ -80,7 +104,7 @@ def _teardown(): def main(): try: _setup() - return _run_queuer() + return _run_scheduler() except SystemExit as exit_code: sys.exit(exit_code) except: diff --git a/st2actions/st2actions/notifier/notifier.py b/st2actions/st2actions/notifier/notifier.py index b41fb96d08..b626bb5d13 100644 --- a/st2actions/st2actions/notifier/notifier.py +++ b/st2actions/st2actions/notifier/notifier.py @@ -14,10 +14,10 @@ # limitations under the License. from __future__ import absolute_import + from datetime import datetime import json -from kombu import Connection from oslo_config import cfg from st2common import log as logging @@ -45,6 +45,8 @@ from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE, SYSTEM_SCOPE, DATASTORE_PARENT_SCOPE from st2common.services.keyvalues import KeyValueLookup from st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE +from st2common.metrics.base import CounterWithTimer +from st2common.metrics.base import Timer __all__ = [ 'Notifier', @@ -73,6 +75,7 @@ def __init__(self, connection, queues, trigger_dispatcher=None): pack=ACTION_TRIGGER_TYPE['pack'], name=ACTION_TRIGGER_TYPE['name']) + @CounterWithTimer(key='notifier.action.executions') def process(self, execution_db): execution_id = str(execution_db.id) extra = {'execution': execution_db} @@ -86,12 +89,18 @@ def process(self, execution_db): # action execution will be applied by the workflow engine. A policy may affect the # final state of the action execution thereby impacting the state of the workflow. 
if not workflow_service.is_action_execution_under_workflow_context(execution_db): - policy_service.apply_post_run_policies(liveaction_db) + with CounterWithTimer(key='notifier.apply_post_run_policies'): + policy_service.apply_post_run_policies(liveaction_db) - if liveaction_db.notify is not None: - self._post_notify_triggers(liveaction_db=liveaction_db, execution_db=execution_db) + if liveaction_db.notify: + with CounterWithTimer(key='notifier.notify_trigger.post'): + self._post_notify_triggers(liveaction_db=liveaction_db, + execution_db=execution_db) - self._post_generic_trigger(liveaction_db=liveaction_db, execution_db=execution_db) + if cfg.CONF.action_sensor.enable: + with CounterWithTimer(key='notifier.generic_trigger.post'): + self._post_generic_trigger(liveaction_db=liveaction_db, + execution_db=execution_db) def _get_execution_for_liveaction(self, liveaction): execution = ActionExecution.get(liveaction__id=str(liveaction.id)) @@ -127,7 +136,7 @@ def _post_notify_subsection_triggers(self, liveaction_db=None, execution_db=None notify_subsection=None, default_message_suffix=None): routes = (getattr(notify_subsection, 'routes') or - getattr(notify_subsection, 'channels', None)) + getattr(notify_subsection, 'channels', [])) or [] execution_id = str(execution_db.id) @@ -142,13 +151,15 @@ def _post_notify_subsection_triggers(self, liveaction_db=None, execution_db=None ) try: - message = self._transform_message(message=message, - context=jinja_context) + with Timer(key='notifier.transform_message'): + message = self._transform_message(message=message, + context=jinja_context) except: LOG.exception('Failed (Jinja) transforming `message`.') try: - data = self._transform_data(data=data, context=jinja_context) + with Timer(key='notifier.transform_data'): + data = self._transform_data(data=data, context=jinja_context) except: LOG.exception('Failed (Jinja) transforming `data`.') @@ -187,8 +198,10 @@ def _post_notify_subsection_triggers(self, liveaction_db=None, execution_db=None payload['channel'] = route LOG.debug('POSTing %s for %s. Payload - %s.', NOTIFY_TRIGGER_TYPE['name'], liveaction_db.id, payload) - self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload, - trace_context=trace_context) + + with CounterWithTimer(key='notifier.notify_trigger.dispatch'): + self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload, + trace_context=trace_context) except: failed_routes.append(route) @@ -254,8 +267,10 @@ def _post_generic_trigger(self, liveaction_db=None, execution_db=None): trace_context = self._get_trace_context(execution_id=execution_id) LOG.debug('POSTing %s for %s. Payload - %s. 
TraceContext - %s', ACTION_TRIGGER_TYPE['name'], liveaction_db.id, payload, trace_context) - self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload, - trace_context=trace_context) + + with CounterWithTimer(key='notifier.generic_trigger.dispatch'): + self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload, + trace_context=trace_context) def _get_runner_ref(self, action_ref): """ @@ -268,6 +283,6 @@ def _get_runner_ref(self, action_ref): def get_notifier(): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: return Notifier(conn, [NOTIFIER_ACTIONUPDATE_WORK_QUEUE], trigger_dispatcher=TriggerDispatcher(LOG)) diff --git a/st2actions/st2actions/policies/concurrency.py b/st2actions/st2actions/policies/concurrency.py index 43dbf287b7..a55c5cd0be 100644 --- a/st2actions/st2actions/policies/concurrency.py +++ b/st2actions/st2actions/policies/concurrency.py @@ -20,7 +20,6 @@ from st2common.persistence import action as action_access from st2common.policies.concurrency import BaseConcurrencyApplicator from st2common.services import action as action_service -from st2common.services import coordination __all__ = [ @@ -89,47 +88,6 @@ def apply_before(self, target): '"%s" cannot be applied. %s', self._policy_ref, target) return target - # Warn users that the coordination service is not configured. - if not coordination.configured(): - LOG.warn('Coordination service is not configured. Policy enforcement is best effort.') - - # Acquire a distributed lock before querying the database to make sure that only one - # scheduler is scheduling execution for this action. Even if the coordination service - # is not configured, the fake driver using zake or the file driver can still acquire - # a lock for the local process or server respectively. - lock_uid = self._get_lock_uid(target) - LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid) - with self.coordinator.get_lock(lock_uid): - target = self._apply_before(target) - - return target - - def _apply_after(self, target): - # Schedule the oldest delayed executions. - requests = action_access.LiveAction.query( - action=target.action, - status=action_constants.LIVEACTION_STATUS_DELAYED, - order_by=['start_timestamp'], - limit=1 - ) - - if requests: - action_service.update_status( - requests[0], - action_constants.LIVEACTION_STATUS_REQUESTED, - publish=True - ) - - def apply_after(self, target): - target = super(ConcurrencyApplicator, self).apply_after(target=target) - - # Acquire a distributed lock before querying the database to make sure that only one - # scheduler is scheduling execution for this action. Even if the coordination service - # is not configured, the fake driver using zake or the file driver can still acquire - # a lock for the local process or server respectively. - lock_uid = self._get_lock_uid(target) - LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid) - with self.coordinator.get_lock(lock_uid): - self._apply_after(target) + target = self._apply_before(target) return target diff --git a/st2actions/st2actions/policies/concurrency_by_attr.py b/st2actions/st2actions/policies/concurrency_by_attr.py index f9d4061147..b4cb160333 100644 --- a/st2actions/st2actions/policies/concurrency_by_attr.py +++ b/st2actions/st2actions/policies/concurrency_by_attr.py @@ -115,47 +115,6 @@ def apply_before(self, target): if not coordination.configured(): LOG.warn('Coordination service is not configured. 
Policy enforcement is best effort.') - # Acquire a distributed lock before querying the database to make sure that only one - # scheduler is scheduling execution for this action. Even if the coordination service - # is not configured, the fake driver using zake or the file driver can still acquire - # a lock for the local process or server respectively. - lock_uid = self._get_lock_uid(target) - LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid) - with self.coordinator.get_lock(lock_uid): - target = self._apply_before(target) - - return target - - def _apply_after(self, target): - # Schedule the oldest delayed executions. - filters = self._get_filters(target) - filters['status'] = action_constants.LIVEACTION_STATUS_DELAYED - - requests = action_access.LiveAction.query( - order_by=['start_timestamp'], - limit=1, - **filters - ) - - if requests: - action_service.update_status( - requests[0], - action_constants.LIVEACTION_STATUS_REQUESTED, - publish=True - ) - - def apply_after(self, target): - # Warn users that the coordination service is not configured. - if not coordination.configured(): - LOG.warn('Coordination service is not configured. Policy enforcement is best effort.') - - # Acquire a distributed lock before querying the database to make sure that only one - # scheduler is scheduling execution for this action. Even if the coordination service - # is not configured, the fake driver using zake or the file driver can still acquire - # a lock for the local process or server respectively. - lock_uid = self._get_lock_uid(target) - LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid) - with self.coordinator.get_lock(lock_uid): - self._apply_after(target) + target = self._apply_before(target) return target diff --git a/st2actions/st2actions/resultstracker/resultstracker.py b/st2actions/st2actions/resultstracker/resultstracker.py index 954b1da42e..62f6e746ff 100644 --- a/st2actions/st2actions/resultstracker/resultstracker.py +++ b/st2actions/st2actions/resultstracker/resultstracker.py @@ -14,11 +14,11 @@ # limitations under the License. 
from __future__ import absolute_import + import eventlet import six from collections import defaultdict -from kombu import Connection from st2common.query.base import QueryContext from st2common import log as logging @@ -111,5 +111,5 @@ def get_querier(self, query_module_name): def get_tracker(): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: return ResultsTracker(conn, [RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE]) diff --git a/st2actions/st2actions/scheduler/config.py b/st2actions/st2actions/scheduler/config.py index ed7d7477be..27edfd6634 100644 --- a/st2actions/st2actions/scheduler/config.py +++ b/st2actions/st2actions/scheduler/config.py @@ -19,6 +19,10 @@ from st2common import config as common_config from st2common.constants import system as sys_constants +from st2common import log as logging + + +LOG = logging.getLogger(__name__) def parse_args(args=None): @@ -56,10 +60,18 @@ def _register_service_opts(): 'gc_interval', default=10, help='How often (in seconds) to look for zombie execution requests before rescheduling ' 'them.'), - + cfg.IntOpt( + 'retry_max_attempt', default=10, + help='The maximum number of attempts that the scheduler retries on error.'), + cfg.IntOpt( + 'retry_wait_msec', default=3000, + help='The number of milliseconds to wait in between retries.') ] cfg.CONF.register_opts(scheduler_opts, group='scheduler') -register_opts() +try: + register_opts() +except cfg.DuplicateOptError: + LOG.exception('The scheduler configuration options are already parsed and loaded.') diff --git a/st2actions/st2actions/scheduler/entrypoint.py b/st2actions/st2actions/scheduler/entrypoint.py index 752d6849ea..811a1f7d80 100644 --- a/st2actions/st2actions/scheduler/entrypoint.py +++ b/st2actions/st2actions/scheduler/entrypoint.py @@ -14,7 +14,6 @@ # limitations under the License. 
from __future__ import absolute_import -from kombu import Connection from st2common import log as logging from st2common.util import date @@ -105,5 +104,5 @@ def _create_execution_queue_item_db_from_liveaction(self, liveaction, delay=None def get_scheduler_entrypoint(): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: return SchedulerEntrypoint(conn, [ACTIONSCHEDULER_REQUEST_QUEUE]) diff --git a/st2actions/st2actions/scheduler/handler.py b/st2actions/st2actions/scheduler/handler.py index fcd50a9292..74027b88ed 100644 --- a/st2actions/st2actions/scheduler/handler.py +++ b/st2actions/st2actions/scheduler/handler.py @@ -16,14 +16,18 @@ from __future__ import absolute_import import eventlet +import retrying from oslo_config import cfg from st2common import log as logging from st2common.util import date +from st2common.util import service as service_utils from st2common.constants import action as action_constants +from st2common.constants import policy as policy_constants from st2common.exceptions.db import StackStormDBObjectNotFoundError from st2common.models.db.liveaction import LiveActionDB from st2common.services import action as action_service +from st2common.services import coordination as coordination_service from st2common.services import policies as policy_service from st2common.persistence.liveaction import LiveAction from st2common.persistence.execution_queue import ActionExecutionSchedulingQueue @@ -57,25 +61,38 @@ def __init__(self): self.message_type = LiveActionDB self._shutdown = False self._pool = eventlet.GreenPool(size=cfg.CONF.scheduler.pool_size) + self._coordinator = coordination_service.get_coordinator() + self._main_thread = None + self._cleanup_thread = None def run(self): - LOG.debug('Entering scheduler loop') + LOG.debug('Starting scheduler handler...') while not self._shutdown: eventlet.greenthread.sleep(cfg.CONF.scheduler.sleep_interval) + self.process() - execution_queue_item_db = self._get_next_execution() + @retrying.retry( + retry_on_exception=service_utils.retry_on_exceptions, + stop_max_attempt_number=cfg.CONF.scheduler.retry_max_attempt, + wait_fixed=cfg.CONF.scheduler.retry_wait_msec) + def process(self): + execution_queue_item_db = self._get_next_execution() - if execution_queue_item_db: - self._pool.spawn(self._handle_execution, execution_queue_item_db) + if execution_queue_item_db: + self._pool.spawn(self._handle_execution, execution_queue_item_db) def cleanup(self): - LOG.debug('Starting scheduler garbage collection') + LOG.debug('Starting scheduler garbage collection...') while not self._shutdown: eventlet.greenthread.sleep(cfg.CONF.scheduler.gc_interval) self._handle_garbage_collection() + @retrying.retry( + retry_on_exception=service_utils.retry_on_exceptions, + stop_max_attempt_number=cfg.CONF.scheduler.retry_max_attempt, + wait_fixed=cfg.CONF.scheduler.retry_wait_msec) def _handle_garbage_collection(self): """ Periodically look for executions which have "handling" set to "True" and haven't been @@ -161,16 +178,41 @@ def _handle_execution(self, execution_queue_item_db): ActionExecutionSchedulingQueue.delete(execution_queue_item_db) raise - liveaction_db = self._apply_pre_run(liveaction_db, execution_queue_item_db) + # Identify if the action has policies that require locking. 
+ action_has_policies_require_lock = policy_service.has_policies( + liveaction_db, + policy_types=policy_constants.POLICY_TYPES_REQUIRING_LOCK + ) - if not liveaction_db: - return + # Acquire a distributed lock if the referenced action has specific policies attached. + if action_has_policies_require_lock: + # Warn users that the coordination service is not configured. + if not coordination_service.configured(): + LOG.warn( + 'Coordination backend is not configured. ' + 'Policy enforcement is best effort.' + ) - if self._is_execution_queue_item_runnable(liveaction_db, execution_queue_item_db): - self._update_to_scheduled(liveaction_db, execution_queue_item_db) + # Acquire a distributed lock before querying the database to make sure that only one + # scheduler is scheduling execution for this action. Even if the coordination service + # is not configured, the fake driver using zake or the file driver can still acquire + # a lock for the local process or server respectively. + lock_uid = liveaction_db.action + LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid) + lock = self._coordinator.get_lock(lock_uid) - @staticmethod - def _apply_pre_run(liveaction_db, execution_queue_item_db): + try: + if lock.acquire(blocking=False): + self._regulate_and_schedule(liveaction_db, execution_queue_item_db) + else: + self._delay(liveaction_db, execution_queue_item_db) + finally: + lock.release() + else: + # Otherwise if there is no policy, then schedule away. + self._schedule(liveaction_db, execution_queue_item_db) + + def _regulate_and_schedule(self, liveaction_db, execution_queue_item_db): # Apply policies defined for the action. liveaction_db = policy_service.apply_pre_run_policies(liveaction_db) @@ -190,10 +232,13 @@ def _apply_pre_run(liveaction_db, execution_queue_item_db): liveaction_db = action_service.update_status( liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False ) + + execution_queue_item_db.handling = False execution_queue_item_db.scheduled_start_timestamp = date.append_milliseconds_to_time( date.get_datetime_utc_now(), POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS ) + try: ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False) except db_exc.StackStormDBObjectWriteConflictError: @@ -202,16 +247,40 @@ def _apply_pre_run(liveaction_db, execution_queue_item_db): execution_queue_item_db.id ) - return None + return if (liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES or liveaction_db.status in action_constants.LIVEACTION_CANCEL_STATES): ActionExecutionSchedulingQueue.delete(execution_queue_item_db) - return None + return + + self._schedule(liveaction_db, execution_queue_item_db) + + def _delay(self, liveaction_db, execution_queue_item_db): + liveaction_db = action_service.update_status( + liveaction_db, action_constants.LIVEACTION_STATUS_DELAYED, publish=False + ) + + execution_queue_item_db.scheduled_start_timestamp = date.append_milliseconds_to_time( + date.get_datetime_utc_now(), + POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS + ) + + try: + execution_queue_item_db.handling = False + ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False) + except db_exc.StackStormDBObjectWriteConflictError: + LOG.warning( + 'Execution queue item update conflict during scheduling: %s', + execution_queue_item_db.id + ) - return liveaction_db + def _schedule(self, liveaction_db, execution_queue_item_db): + if self._is_execution_queue_item_runnable(liveaction_db, 
execution_queue_item_db): + self._update_to_scheduled(liveaction_db, execution_queue_item_db) - def _is_execution_queue_item_runnable(self, liveaction_db, execution_queue_item_db): + @staticmethod + def _is_execution_queue_item_runnable(liveaction_db, execution_queue_item_db): """ Return True if a particular execution request is runnable. @@ -228,13 +297,14 @@ def _is_execution_queue_item_runnable(self, liveaction_db, execution_queue_item_ return True LOG.info( - '%s is ignoring %s (id=%s) with "%s" status after policies are applied.', - self.__class__.__name__, + 'Scheduler is ignoring %s (id=%s) with "%s" status after policies are applied.', type(execution_queue_item_db), execution_queue_item_db.id, liveaction_db.status ) + ActionExecutionSchedulingQueue.delete(execution_queue_item_db) + return False @staticmethod @@ -272,11 +342,24 @@ def _update_to_scheduled(liveaction_db, execution_queue_item_db): def start(self): self._shutdown = False - eventlet.spawn(self.run) - eventlet.spawn(self.cleanup) + # Spawn the worker threads. + self._main_thread = eventlet.spawn(self.run) + self._cleanup_thread = eventlet.spawn(self.cleanup) + + # Link the threads to the shutdown function. If either of the threads exited with error, + # then initiate shutdown which will allow the waits below to throw exception to the + # main process. + self._main_thread.link(self.shutdown) + self._cleanup_thread.link(self.shutdown) + + def shutdown(self, *args, **kwargs): + if not self._shutdown: + self._shutdown = True - def shutdown(self): - self._shutdown = True + def wait(self): + # Wait for the worker threads to complete. If there is an exception thrown in the thread, + # then the exception will be propagated to the main process for a proper return code. + self._main_thread.wait() or self._cleanup_thread.wait() def get_handler(): diff --git a/st2actions/st2actions/worker.py b/st2actions/st2actions/worker.py index fdb39f0ab5..e36eb0489a 100644 --- a/st2actions/st2actions/worker.py +++ b/st2actions/st2actions/worker.py @@ -17,8 +17,6 @@ import sys import traceback -from kombu import Connection - from st2actions.container.base import RunnerContainer from st2common import log as logging from st2common.constants import action as action_constants @@ -250,5 +248,5 @@ def _resume_action(self, liveaction_db): def get_worker(): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: return ActionExecutionDispatcher(conn, ACTIONRUNNER_QUEUES) diff --git a/st2actions/tests/integration/test_action_state_consumer.py b/st2actions/tests/integration/test_action_state_consumer.py index 668ac5c339..3061677ef0 100644 --- a/st2actions/tests/integration/test_action_state_consumer.py +++ b/st2actions/tests/integration/test_action_state_consumer.py @@ -20,8 +20,6 @@ import mock -from kombu import Connection - from st2common.transport.queues import RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE from st2actions.resultstracker.resultstracker import ResultsTracker from st2common.models.db.executionstate import ActionExecutionStateDB @@ -63,7 +61,7 @@ def setUpClass(cls): @mock.patch.object(TestQuerier, 'query', mock.MagicMock(return_value=(False, {}))) def test_process_message(self): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: tracker = ResultsTracker(conn, [RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE]) tracker._bootstrap() state = ActionStateConsumerTests.get_state( diff --git a/st2actions/tests/unit/policies/test_concurrency.py 
b/st2actions/tests/unit/policies/test_concurrency.py index d8285ba69b..301fae2b7d 100644 --- a/st2actions/tests/unit/policies/test_concurrency.py +++ b/st2actions/tests/unit/policies/test_concurrency.py @@ -149,8 +149,9 @@ def test_over_threshold_delay_executions(self): # Execution is expected to be delayed since concurrency threshold is reached. liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo-last'}) liveaction, _ = action_service.request(liveaction) - expected_num_exec += 1 # This request is expected to be executed. + expected_num_pubs += 1 # Tally requested state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -158,6 +159,11 @@ def test_over_threshold_delay_executions(self): # Since states are being processed async, wait for the liveaction to go into delayed state. liveaction = self._wait_on_status(liveaction, action_constants.LIVEACTION_STATUS_DELAYED) + expected_num_exec += 0 # This request will not be scheduled for execution. + expected_num_pubs += 0 # The delayed status change should not be published. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) + self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) + # Mark one of the scheduled/running execution as completed. action_service.update_status( scheduled[0], @@ -165,14 +171,16 @@ def test_over_threshold_delay_executions(self): publish=True ) - expected_num_pubs += 1 # Tally requested state. - - # Once capacity freed up, the delayed execution is published as requested again. - expected_num_pubs += 3 # Tally requested, scheduled, and running state. + expected_num_pubs += 1 # Tally succeeded state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() + # Once capacity freed up, the delayed execution is published as scheduled. + expected_num_exec += 1 # This request is expected to be executed. + expected_num_pubs += 2 # Tally scheduled and running state. + # Since states are being processed async, wait for the liveaction to be scheduled. liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES) self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) @@ -212,8 +220,9 @@ def test_over_threshold_cancel_executions(self): # Execution is expected to be canceled since concurrency threshold is reached. liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'foo'}) liveaction, _ = action_service.request(liveaction) - expected_num_exec += 0 # This request will not be scheduled for execution. + expected_num_pubs += 1 # Tally requested state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -222,6 +231,9 @@ def test_over_threshold_cancel_executions(self): calls = [call(liveaction, action_constants.LIVEACTION_STATUS_CANCELING)] LiveActionPublisher.publish_state.assert_has_calls(calls) expected_num_pubs += 2 # Tally canceling and canceled state changes. + expected_num_exec += 0 # This request will not be scheduled for execution. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) + self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) # Assert the action is canceled. 
liveaction = LiveAction.get_by_id(str(liveaction.id)) @@ -262,8 +274,9 @@ def test_on_cancellation(self): # Execution is expected to be delayed since concurrency threshold is reached. liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'}) liveaction, _ = action_service.request(liveaction) - expected_num_exec += 1 # This request will be scheduled for execution. + expected_num_pubs += 1 # Tally requested state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -271,16 +284,23 @@ def test_on_cancellation(self): # Since states are being processed async, wait for the liveaction to go into delayed state. liveaction = self._wait_on_status(liveaction, action_constants.LIVEACTION_STATUS_DELAYED) + expected_num_exec += 0 # This request will not be scheduled for execution. + expected_num_pubs += 0 # The delayed status change should not be published. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) + self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) + # Cancel execution. action_service.request_cancellation(scheduled[0], 'stanley') expected_num_pubs += 2 # Tally the canceling and canceled states. - - # Once capacity freed up, the delayed execution is published as requested again. - expected_num_pubs += 3 # Tally requested, scheduled, and running state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() + # Once capacity freed up, the delayed execution is published as scheduled. + expected_num_exec += 1 # This request is expected to be executed. + expected_num_pubs += 2 # Tally scheduled and running state. + # Execution is expected to be rescheduled. liveaction = LiveAction.get_by_id(str(liveaction.id)) self.assertIn(liveaction.status, SCHEDULED_STATES) diff --git a/st2actions/tests/unit/policies/test_concurrency_by_attr.py b/st2actions/tests/unit/policies/test_concurrency_by_attr.py index b38f4e7412..0056b33a4f 100644 --- a/st2actions/tests/unit/policies/test_concurrency_by_attr.py +++ b/st2actions/tests/unit/policies/test_concurrency_by_attr.py @@ -147,7 +147,9 @@ def test_over_threshold_delay_executions(self): # Execution is expected to be delayed since concurrency threshold is reached. liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'}) liveaction, _ = action_service.request(liveaction) + expected_num_pubs += 1 # Tally requested state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -156,9 +158,8 @@ def test_over_threshold_delay_executions(self): # Since states are being processed asynchronously, wait for the # liveaction to go into delayed state. liveaction = self._wait_on_status(liveaction, action_constants.LIVEACTION_STATUS_DELAYED) - # Assert the action is delayed. - delayed = liveaction - self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED) + expected_num_exec += 0 # This request will not be scheduled for execution. + expected_num_pubs += 0 # The delayed status change should not be published.
self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) @@ -166,8 +167,6 @@ def test_over_threshold_delay_executions(self): # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not. liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'bar'}) liveaction, _ = action_service.request(liveaction) - expected_num_exec += 1 # This request is expected to be executed. - expected_num_pubs += 3 # Tally requested, scheduled, and running states. # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -175,6 +174,8 @@ def test_over_threshold_delay_executions(self): # Since states are being processed asynchronously, wait for the # liveaction to go into scheduled state. liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES) + expected_num_exec += 1 # This request is expected to be executed. + expected_num_pubs += 3 # Tally requested, scheduled, and running state. self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) @@ -186,17 +187,15 @@ def test_over_threshold_delay_executions(self): ) expected_num_pubs += 1 # Tally succeeded state. - - # Once capacity freed up, the delayed execution is published as requested again. - expected_num_exec += 1 # The delayed request is expected to be executed. - expected_num_pubs += 3 # Tally requested, scheduled, and running state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() - # Since states are being processed asynchronously, wait for the - # liveaction to go into scheduled state. + # Once capacity freed up, the delayed execution is published as scheduled. liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES) + expected_num_exec += 1 # The delayed request is expected to be executed. + expected_num_pubs += 2 # Tally scheduled and running state. self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) @@ -235,8 +234,9 @@ def test_over_threshold_cancel_executions(self): # Execution is expected to be delayed since concurrency threshold is reached. liveaction = LiveActionDB(action='wolfpack.action-2', parameters={'actionstr': 'foo'}) liveaction, _ = action_service.request(liveaction) - expected_num_exec += 0 # This request will not be scheduled for execution. + expected_num_pubs += 1 # Tally requested state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -245,12 +245,13 @@ def test_over_threshold_cancel_executions(self): calls = [call(liveaction, action_constants.LIVEACTION_STATUS_CANCELING)] LiveActionPublisher.publish_state.assert_has_calls(calls) expected_num_pubs += 2 # Tally canceling and canceled state changes. + expected_num_exec += 0 # This request will not be scheduled for execution. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) + self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) # Assert the action is canceled.
canceled = LiveAction.get_by_id(str(liveaction.id)) self.assertEqual(canceled.status, action_constants.LIVEACTION_STATUS_CANCELED) - self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) - self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) @mock.patch.object( runner.MockActionRunner, 'run', @@ -285,7 +286,9 @@ def test_on_cancellation(self): # Execution is expected to be delayed since concurrency threshold is reached. liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'foo'}) liveaction, _ = action_service.request(liveaction) + expected_num_pubs += 1 # Tally requested state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -293,10 +296,10 @@ def test_on_cancellation(self): # Since states are being processed asynchronously, wait for the # liveaction to go into delayed state. liveaction = self._wait_on_status(liveaction, action_constants.LIVEACTION_STATUS_DELAYED) - - # Assert the action is delayed. delayed = liveaction - self.assertEqual(delayed.status, action_constants.LIVEACTION_STATUS_DELAYED) + + expected_num_exec += 0 # This request will not be scheduled for execution. + expected_num_pubs += 0 # The delayed status change should not be published. self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) @@ -304,8 +307,6 @@ def test_on_cancellation(self): # The execution with actionstr "fu" is over the threshold but actionstr "bar" is not. liveaction = LiveActionDB(action='wolfpack.action-1', parameters={'actionstr': 'bar'}) liveaction, _ = action_service.request(liveaction) - expected_num_exec += 1 # This request is expected to be executed. - expected_num_pubs += 3 # Tally requested, scheduled, and running states. # Run the scheduler to schedule action executions. self._process_scheduling_queue() @@ -313,23 +314,26 @@ def test_on_cancellation(self): # Since states are being processed asynchronously, wait for the # liveaction to go into scheduled state. liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES) + expected_num_exec += 1 # This request is expected to be executed. + expected_num_pubs += 3 # Tally requested, scheduled, and running states. self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) # Cancel execution. action_service.request_cancellation(scheduled[0], 'stanley') expected_num_pubs += 2 # Tally the canceling and canceled states. - - # Once capacity freed up, the delayed execution is published as requested again. - expected_num_exec += 1 # The delayed request is expected to be executed. - expected_num_pubs += 3 # Tally requested, scheduled, and running state. + self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) # Run the scheduler to schedule action executions. self._process_scheduling_queue() + # Once capacity freed up, the delayed execution is published as scheduled. + expected_num_exec += 1 # The delayed request is expected to be executed. + expected_num_pubs += 2 # Tally scheduled and running state.
+ self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) + self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) + # Since states are being processed asynchronously, wait for the # liveaction to go into scheduled state. liveaction = LiveAction.get_by_id(str(delayed.id)) liveaction = self._wait_on_statuses(liveaction, SCHEDULED_STATES) - self.assertEqual(expected_num_pubs, LiveActionPublisher.publish_state.call_count) - self.assertEqual(expected_num_exec, runner.MockActionRunner.run.call_count) diff --git a/st2actions/tests/unit/test_scheduler.py b/st2actions/tests/unit/test_scheduler.py index 05c9f13397..64c9cbae16 100644 --- a/st2actions/tests/unit/test_scheduler.py +++ b/st2actions/tests/unit/test_scheduler.py @@ -17,6 +17,9 @@ import mock +from st2tests import config as test_config +test_config.parse_args() + import st2common from st2tests import ExecutionDbTestCase from st2tests.fixturesloader import FixturesLoader @@ -37,9 +40,6 @@ from st2common.services import executions as execution_service from st2common.exceptions import db as db_exc -from st2tests import config as test_config -test_config.parse_args() - LIVE_ACTION = { 'parameters': { diff --git a/st2actions/tests/unit/test_scheduler_entrypoint.py b/st2actions/tests/unit/test_scheduler_entrypoint.py new file mode 100644 index 0000000000..65f6d2d8ed --- /dev/null +++ b/st2actions/tests/unit/test_scheduler_entrypoint.py @@ -0,0 +1,83 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
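The new test module continued below exercises the failure propagation added to the scheduler handler earlier in this patch: `start()` links each worker green thread to `shutdown()`, and `wait()` re-raises any thread exception in the main process so the service can exit with a proper return code. A minimal standalone sketch of that eventlet pattern (the names here are illustrative, not part of the patch):

```python
import eventlet


def worker():
    # Simulate a fatal error inside a worker green thread.
    raise RuntimeError('worker crashed')


def on_exit(gt, *args, **kwargs):
    # link() callbacks receive the finished GreenThread; a service would
    # flip its shutdown flag here.
    print('worker thread exited, initiating shutdown')


thread = eventlet.spawn(worker)
thread.link(on_exit)

try:
    thread.wait()  # Re-raises the worker's RuntimeError in the caller.
except RuntimeError as e:
    print('propagated to main thread: %s' % e)
```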
+ +import eventlet +import mock + +from st2tests import config as test_config +test_config.parse_args() + +from st2actions.cmd.scheduler import _run_scheduler +from st2actions.scheduler.handler import ActionExecutionSchedulingQueueHandler +from st2actions.scheduler.entrypoint import SchedulerEntrypoint + +from st2tests.base import CleanDbTestCase + +__all__ = [ + 'SchedulerServiceEntryPointTestCase' +] + + +def mock_handler_run(self): + # NOTE: We use eventlet.sleep to emulate async nature of this process + eventlet.sleep(0.2) + raise Exception('handler run exception') + + +def mock_handler_cleanup(self): + # NOTE: We use eventlet.sleep to emulate async nature of this process + eventlet.sleep(0.2) + raise Exception('handler clean exception') + + +def mock_entrypoint_start(self): + # NOTE: We use eventlet.sleep to emulate async nature of this process + eventlet.sleep(0.2) + raise Exception('entrypoint start exception') + + +class SchedulerServiceEntryPointTestCase(CleanDbTestCase): + @mock.patch.object(ActionExecutionSchedulingQueueHandler, 'run', mock_handler_run) + @mock.patch('st2actions.cmd.scheduler.LOG') + def test_service_exits_correctly_on_fatal_exception_in_handler_run(self, mock_log): + run_thread = eventlet.spawn(_run_scheduler) + result = run_thread.wait() + + self.assertEqual(result, 1) + + mock_log_exception_call = mock_log.exception.call_args_list[0][0][0] + self.assertTrue('Scheduler unexpectedly stopped' in mock_log_exception_call) + + @mock.patch.object(ActionExecutionSchedulingQueueHandler, 'cleanup', mock_handler_cleanup) + @mock.patch('st2actions.cmd.scheduler.LOG') + def test_service_exits_correctly_on_fatal_exception_in_handler_cleanup(self, mock_log): + run_thread = eventlet.spawn(_run_scheduler) + result = run_thread.wait() + + self.assertEqual(result, 1) + + mock_log_exception_call = mock_log.exception.call_args_list[0][0][0] + self.assertTrue('Scheduler unexpectedly stopped' in mock_log_exception_call) + + @mock.patch.object(SchedulerEntrypoint, 'start', mock_entrypoint_start) + @mock.patch('st2actions.cmd.scheduler.LOG') + def test_service_exits_correctly_on_fatal_exception_in_entrypoint_start(self, mock_log): + run_thread = eventlet.spawn(_run_scheduler) + result = run_thread.wait() + + self.assertEqual(result, 1) + + mock_log_exception_call = mock_log.exception.call_args_list[0][0][0] + self.assertTrue('Scheduler unexpectedly stopped' in mock_log_exception_call) diff --git a/st2actions/tests/unit/test_scheduler_retry.py b/st2actions/tests/unit/test_scheduler_retry.py new file mode 100644 index 0000000000..51756e2ce0 --- /dev/null +++ b/st2actions/tests/unit/test_scheduler_retry.py @@ -0,0 +1,120 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
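The retry tests continued below cover the `retrying.retry` decorators that were added to the handler's `process()` and `_handle_garbage_collection()` methods. In isolation the pattern looks roughly like this (a sketch: the patch's predicate lives in `st2common.util.service`, and its body is assumed here to whitelist transient MongoDB connection errors):

```python
import pymongo
import retrying


def retry_on_exceptions(exc):
    # Assumed predicate: retry transient DB connectivity failures only and
    # let every other exception propagate immediately.
    return isinstance(exc, pymongo.errors.ConnectionFailure)


@retrying.retry(retry_on_exception=retry_on_exceptions,
                stop_max_attempt_number=3,
                wait_fixed=100)
def get_next_execution():
    # Placeholder for a MongoDB query which may raise ConnectionFailure.
    return None
```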
+ +import eventlet +import mock +import pymongo +import uuid + +from st2tests import config as test_config +test_config.parse_args() + +from st2actions.scheduler import handler +from st2common.models.db import execution_queue as ex_q_db +from st2common.persistence import execution_queue as ex_q_db_access +from st2tests.base import CleanDbTestCase + + +__all__ = [ + 'SchedulerHandlerRetryTestCase' +] + + +MOCK_QUEUE_ITEM = ex_q_db.ActionExecutionSchedulingQueueItemDB(liveaction_id=uuid.uuid4().hex) + + +class SchedulerHandlerRetryTestCase(CleanDbTestCase): + + @mock.patch.object( + handler.ActionExecutionSchedulingQueueHandler, '_get_next_execution', + mock.MagicMock(side_effect=[pymongo.errors.ConnectionFailure(), MOCK_QUEUE_ITEM])) + @mock.patch.object( + eventlet.GreenPool, 'spawn', + mock.MagicMock(return_value=None)) + def test_handler_retry_connection_error(self): + scheduling_queue_handler = handler.ActionExecutionSchedulingQueueHandler() + scheduling_queue_handler.process() + + # Make sure retry occurs and that _handle_execution in process is called. + calls = [mock.call(scheduling_queue_handler._handle_execution, MOCK_QUEUE_ITEM)] + eventlet.GreenPool.spawn.assert_has_calls(calls) + + @mock.patch.object( + handler.ActionExecutionSchedulingQueueHandler, '_get_next_execution', + mock.MagicMock(side_effect=[pymongo.errors.ConnectionFailure()] * 3)) + @mock.patch.object( + eventlet.GreenPool, 'spawn', + mock.MagicMock(return_value=None)) + def test_handler_retries_exhausted(self): + scheduling_queue_handler = handler.ActionExecutionSchedulingQueueHandler() + self.assertRaises(pymongo.errors.ConnectionFailure, scheduling_queue_handler.process) + self.assertEqual(eventlet.GreenPool.spawn.call_count, 0) + + @mock.patch.object( + handler.ActionExecutionSchedulingQueueHandler, '_get_next_execution', + mock.MagicMock(side_effect=KeyError())) + @mock.patch.object( + eventlet.GreenPool, 'spawn', + mock.MagicMock(return_value=None)) + def test_handler_retry_unexpected_error(self): + scheduling_queue_handler = handler.ActionExecutionSchedulingQueueHandler() + self.assertRaises(KeyError, scheduling_queue_handler.process) + self.assertEqual(eventlet.GreenPool.spawn.call_count, 0) + + @mock.patch.object( + ex_q_db_access.ActionExecutionSchedulingQueue, 'query', + mock.MagicMock(side_effect=[pymongo.errors.ConnectionFailure(), [MOCK_QUEUE_ITEM]])) + @mock.patch.object( + ex_q_db_access.ActionExecutionSchedulingQueue, 'add_or_update', + mock.MagicMock(return_value=None)) + def test_handler_gc_retry_connection_error(self): + scheduling_queue_handler = handler.ActionExecutionSchedulingQueueHandler() + scheduling_queue_handler._handle_garbage_collection() + + # Make sure retry occurs and that _handle_execution in process is called. 
+ calls = [mock.call(MOCK_QUEUE_ITEM, publish=False)] + ex_q_db_access.ActionExecutionSchedulingQueue.add_or_update.assert_has_calls(calls) + + @mock.patch.object( + ex_q_db_access.ActionExecutionSchedulingQueue, 'query', + mock.MagicMock(side_effect=[pymongo.errors.ConnectionFailure()] * 3)) + @mock.patch.object( + ex_q_db_access.ActionExecutionSchedulingQueue, 'add_or_update', + mock.MagicMock(return_value=None)) + def test_handler_gc_retries_exhausted(self): + scheduling_queue_handler = handler.ActionExecutionSchedulingQueueHandler() + + self.assertRaises( + pymongo.errors.ConnectionFailure, + scheduling_queue_handler._handle_garbage_collection + ) + + self.assertEqual(ex_q_db_access.ActionExecutionSchedulingQueue.add_or_update.call_count, 0) + + @mock.patch.object( + ex_q_db_access.ActionExecutionSchedulingQueue, 'query', + mock.MagicMock(side_effect=KeyError())) + @mock.patch.object( + ex_q_db_access.ActionExecutionSchedulingQueue, 'add_or_update', + mock.MagicMock(return_value=None)) + def test_handler_gc_unexpected_error(self): + scheduling_queue_handler = handler.ActionExecutionSchedulingQueueHandler() + + self.assertRaises( + KeyError, + scheduling_queue_handler._handle_garbage_collection + ) + + self.assertEqual(ex_q_db_access.ActionExecutionSchedulingQueue.add_or_update.call_count, 0) diff --git a/st2api/tests/unit/controllers/v1/test_auth_api_keys.py b/st2api/tests/unit/controllers/v1/test_auth_api_keys.py index 3a32dc03c1..3a6f8f6e02 100644 --- a/st2api/tests/unit/controllers/v1/test_auth_api_keys.py +++ b/st2api/tests/unit/controllers/v1/test_auth_api_keys.py @@ -197,6 +197,7 @@ def test_post_delete_key(self): def test_post_delete_same_key_hash(self): api_key = { + 'id': '5c5dbb576cb8de06a2d79a4d', 'user': 'herge', 'key_hash': 'ABCDE' } @@ -207,8 +208,9 @@ def test_post_delete_same_key_hash(self): # drop into the DB since API will be masking this value. api_key_db = ApiKey.get_by_id(resp1.json['id']) + self.assertEqual(resp1.json['id'], api_key['id'], 'PK ID of the created API key should match.') self.assertEqual(api_key_db.key_hash, api_key['key_hash'], 'Key_hash should match.') - self.assertEqual(api_key_db.user, api_key['user'], 'Key_hash should match.') + self.assertEqual(api_key_db.user, api_key['user'], 'User should match.') resp = self.app.delete('/v1/apikeys/%s' % resp1.json['id']) self.assertEqual(resp.status_int, 204) diff --git a/st2client/requirements.txt b/st2client/requirements.txt index a7841f794c..d08e5fef1c 100644 --- a/st2client/requirements.txt +++ b/st2client/requirements.txt @@ -8,7 +8,7 @@ prompt-toolkit==1.0.15 python-dateutil==2.7.5 python-editor==1.0.3 pytz==2018.7 -pyyaml==3.13 +pyyaml<5.2,>=4.2b4 requests[security]<2.15,>=2.14.1 six==1.11.0 sseclient==0.0.19 diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py index 2a5dcd4594..b5ac76e862 100644 --- a/st2client/st2client/commands/action_alias.py +++ b/st2client/st2client/commands/action_alias.py @@ -63,13 +63,13 @@ class ActionAliasMatchCommand(resource.ResourceCommand): def __init__(self, resource, *args, **kwargs): super(ActionAliasMatchCommand, self).__init__( resource, 'match', - 'Get the list of %s that match the command text.' % - resource.get_plural_display_name().lower(), + 'Get the %s that matches the command text.' % + resource.get_display_name().lower(), *args, **kwargs) self.parser.add_argument('match_text', metavar='command', - help=('Get the list of %s that match the command text.' % + help=('Get the %s that matches the command text.'
% resource.get_display_name().lower())) self.parser.add_argument('-a', '--attr', nargs='+', default=self.display_attributes, diff --git a/st2common/in-requirements.txt b/st2common/in-requirements.txt index 93fd72847e..d114191ce0 100644 --- a/st2common/in-requirements.txt +++ b/st2common/in-requirements.txt @@ -9,7 +9,7 @@ jsonschema kombu mongoengine networkx -git+https://github.com/StackStorm/orquesta.git@v0.3#egg=orquesta +git+https://github.com/StackStorm/orquesta.git@v0.4#egg=orquesta oslo.config paramiko pyyaml diff --git a/st2common/requirements.txt b/st2common/requirements.txt index 91197ab059..c35965acce 100644 --- a/st2common/requirements.txt +++ b/st2common/requirements.txt @@ -2,8 +2,8 @@ apscheduler==3.5.3 cryptography==2.4.1 eventlet==0.24.1 -flex==6.13.2 -git+https://github.com/StackStorm/orquesta.git@v0.3#egg=orquesta +flex==6.14.0 +git+https://github.com/StackStorm/orquesta.git@v0.4#egg=orquesta greenlet==0.4.15 ipaddr jinja2 @@ -18,7 +18,7 @@ prometheus_client==0.1.1 pymongo==3.7.2 python-dateutil==2.7.5 python-statsd==2.1.0 -pyyaml==3.13 +pyyaml<5.2,>=4.2b4 requests[security]<2.15,>=2.14.1 retrying==1.3.3 routes==2.4.1 diff --git a/st2common/st2common/config.py b/st2common/st2common/config.py index 100f49faf4..58da172a88 100644 --- a/st2common/st2common/config.py +++ b/st2common/st2common/config.py @@ -226,7 +226,28 @@ def register_opts(ignore_errors=False): help='How many times should we retry connection before failing.'), cfg.IntOpt( 'connection_retry_wait', default=10000, - help='How long should we wait between connection retries.') + help='How long should we wait between connection retries.'), + cfg.BoolOpt( + 'ssl', default=False, + help='Use SSL / TLS to connect to the messaging server. Same as ' + 'appending "?ssl=true" at the end of the connection URL string.'), + cfg.StrOpt( + 'ssl_keyfile', default=None, + help='Private keyfile used to identify the local connection against RabbitMQ.'), + cfg.StrOpt( + 'ssl_certfile', default=None, + help='Certificate file used to identify the local connection (client).'), + cfg.StrOpt( + 'ssl_cert_reqs', default=None, choices=['none', 'optional', 'required'], + help='Specifies whether a certificate is required from the other side of the ' + 'connection, and whether it will be validated if provided.'), + cfg.StrOpt( + 'ssl_ca_certs', default=None, + help='ca_certs file contains a set of concatenated CA certificates, which are ' + 'used to validate certificates passed from RabbitMQ.'), + cfg.StrOpt( + 'login_method', default=None, + help='Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).') ] do_register_opts(messaging_opts, 'messaging', ignore_errors) diff --git a/st2common/st2common/constants/policy.py b/st2common/st2common/constants/policy.py new file mode 100644 index 0000000000..b1303ac2af --- /dev/null +++ b/st2common/st2common/constants/policy.py @@ -0,0 +1,25 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'POLICY_TYPES_REQUIRING_LOCK' +] + +# Concurrency policies require scheduler to acquire a distributed lock to prevent race +# in scheduling when there are multiple scheduler instances. +POLICY_TYPES_REQUIRING_LOCK = [ + 'action.concurrency', + 'action.concurrency.attr' +] diff --git a/st2common/st2common/log.py b/st2common/st2common/log.py index 1b9186fac3..46efd03bb9 100644 --- a/st2common/st2common/log.py +++ b/st2common/st2common/log.py @@ -46,6 +46,9 @@ 'ignore_statsd_log_messages' ] +# NOTE: We set AUDIT to the highest log level which means AUDIT log messages will always be +# included (e.g. also if log level is set to INFO). To avoid that, we need to explicitly filter +# out AUDIT log level in service setup code. logging.AUDIT = logging.CRITICAL + 10 logging.addLevelName(logging.AUDIT, 'AUDIT') diff --git a/st2common/st2common/middleware/instrumentation.py b/st2common/st2common/middleware/instrumentation.py index ea03b72df1..3906d7c13d 100644 --- a/st2common/st2common/middleware/instrumentation.py +++ b/st2common/st2common/middleware/instrumentation.py @@ -55,6 +55,15 @@ def __call__(self, environ, start_response): # other endpoints because this would result in a lot of unique metrics which is an # anti-pattern and causes unnecessary load on the metrics server. submit_metrics = endpoint.get('x-submit-metrics', True) + operation_id = endpoint.get('operationId', None) + is_get_one_endpoint = bool(operation_id) and (operation_id.endswith('.get') or + operation_id.endswith('.get_one')) + + if is_get_one_endpoint: + # NOTE: We don't submit metrics for any get one API endpoint since this would result + # in potentially too many unique metrics. + submit_metrics = False + if not submit_metrics: LOG.debug('Not submitting request metrics for path: %s' % (request.path)) return self.app(environ, start_response) diff --git a/st2common/st2common/models/api/auth.py b/st2common/st2common/models/api/auth.py index e1f9e39106..f51841e63d 100644 --- a/st2common/st2common/models/api/auth.py +++ b/st2common/st2common/models/api/auth.py @@ -143,11 +143,13 @@ def from_model(cls, model, mask_secrets=False): @classmethod def to_model(cls, instance): + # If a primary key ID is provided, we want to work with the existing st2 API key + id = getattr(instance, 'id', None) user = str(instance.user) if instance.user else None key_hash = getattr(instance, 'key_hash', None) metadata = getattr(instance, 'metadata', {}) enabled = bool(getattr(instance, 'enabled', True)) - model = cls.model(user=user, key_hash=key_hash, metadata=metadata, enabled=enabled) + model = cls.model(id=id, user=user, key_hash=key_hash, metadata=metadata, enabled=enabled) return model diff --git a/st2common/st2common/models/db/__init__.py b/st2common/st2common/models/db/__init__.py index 2daffd4697..04e0b3127f 100644 --- a/st2common/st2common/models/db/__init__.py +++ b/st2common/st2common/models/db/__init__.py @@ -306,11 +306,11 @@ def _get_ssl_kwargs(ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_req ssl_kwargs['ssl'] = True ssl_kwargs['ssl_certfile'] = ssl_certfile if ssl_cert_reqs: - if ssl_cert_reqs is 'none': + if
ssl_cert_reqs == 'none': ssl_cert_reqs = ssl_lib.CERT_NONE - elif ssl_cert_reqs is 'optional': + elif ssl_cert_reqs == 'optional': ssl_cert_reqs = ssl_lib.CERT_OPTIONAL - elif ssl_cert_reqs is 'required': + elif ssl_cert_reqs == 'required': ssl_cert_reqs = ssl_lib.CERT_REQUIRED ssl_kwargs['ssl_cert_reqs'] = ssl_cert_reqs if ssl_ca_certs: diff --git a/st2common/st2common/persistence/execution.py b/st2common/st2common/persistence/execution.py index 4df940566f..83d5a32288 100644 --- a/st2common/st2common/persistence/execution.py +++ b/st2common/st2common/persistence/execution.py @@ -19,7 +19,6 @@ from st2common.models.db.execution import ActionExecutionDB from st2common.models.db.execution import ActionExecutionOutputDB from st2common.persistence.base import Access -from st2common.transport import utils as transport_utils __all__ = [ 'ActionExecution', @@ -38,8 +37,7 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.execution.ActionExecutionPublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.execution.ActionExecutionPublisher() return cls.publisher @classmethod @@ -57,8 +55,7 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.execution.ActionExecutionOutputPublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.execution.ActionExecutionOutputPublisher() return cls.publisher @classmethod diff --git a/st2common/st2common/persistence/executionstate.py b/st2common/st2common/persistence/executionstate.py index 5f087e4647..f636a0008d 100644 --- a/st2common/st2common/persistence/executionstate.py +++ b/st2common/st2common/persistence/executionstate.py @@ -14,10 +14,14 @@ # limitations under the License. from __future__ import absolute_import + from st2common import transport from st2common.models.db.executionstate import actionexecstate_access from st2common.persistence import base as persistence -from st2common.transport import utils as transport_utils + +__all__ = [ + 'ActionExecutionState' +] class ActionExecutionState(persistence.Access): @@ -31,6 +35,5 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.actionexecutionstate.ActionExecutionStatePublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.actionexecutionstate.ActionExecutionStatePublisher() return cls.publisher diff --git a/st2common/st2common/persistence/liveaction.py b/st2common/st2common/persistence/liveaction.py index 88ddcaad59..2eb6015e65 100644 --- a/st2common/st2common/persistence/liveaction.py +++ b/st2common/st2common/persistence/liveaction.py @@ -14,10 +14,14 @@ # limitations under the License. 
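The `is` to `==` changes in `_get_ssl_kwargs()` above fix a latent bug: `is` compares object identity rather than equality, so `ssl_cert_reqs is 'none'` only matched when CPython happened to intern both strings. An equivalent, table-driven form of the corrected mapping (illustrative only; the value names mirror the new `ssl_cert_reqs` config option):

```python
import ssl as ssl_lib

CERT_REQS_MAP = {
    'none': ssl_lib.CERT_NONE,
    'optional': ssl_lib.CERT_OPTIONAL,
    'required': ssl_lib.CERT_REQUIRED,
}


def resolve_cert_reqs(value):
    # Returns the ssl module constant, or None when the option is unset.
    return CERT_REQS_MAP[value] if value else None
```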
from __future__ import absolute_import + from st2common import transport from st2common.models.db.liveaction import liveaction_access from st2common.persistence import base as persistence -from st2common.transport import utils as transport_utils + +__all__ = [ + 'LiveAction' +] class LiveAction(persistence.StatusBasedResource): @@ -31,8 +35,7 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.liveaction.LiveActionPublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.liveaction.LiveActionPublisher() return cls.publisher @classmethod diff --git a/st2common/st2common/persistence/sensor.py b/st2common/st2common/persistence/sensor.py index c7547bcf61..e941c2bca5 100644 --- a/st2common/st2common/persistence/sensor.py +++ b/st2common/st2common/persistence/sensor.py @@ -14,10 +14,14 @@ # limitations under the License. from __future__ import absolute_import + from st2common import transport from st2common.models.db.sensor import sensor_type_access from st2common.persistence.base import ContentPackResource -from st2common.transport import utils as transport_utils + +__all__ = [ + 'SensorType' +] class SensorType(ContentPackResource): @@ -31,6 +35,5 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.reactor.SensorCUDPublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.reactor.SensorCUDPublisher() return cls.publisher diff --git a/st2common/st2common/persistence/trigger.py b/st2common/st2common/persistence/trigger.py index bbe207c263..01787ce38d 100644 --- a/st2common/st2common/persistence/trigger.py +++ b/st2common/st2common/persistence/trigger.py @@ -14,12 +14,18 @@ # limitations under the License. 
from __future__ import absolute_import + from st2common import log as logging from st2common import transport from st2common.exceptions.db import StackStormDBObjectNotFoundError from st2common.models.db.trigger import triggertype_access, trigger_access, triggerinstance_access from st2common.persistence.base import (Access, ContentPackResource) -from st2common.transport import utils as transport_utils + +__all__ = [ + 'TriggerType', + 'Trigger', + 'TriggerInstance' +] LOG = logging.getLogger(__name__) @@ -43,8 +49,7 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.reactor.TriggerCUDPublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.reactor.TriggerCUDPublisher() return cls.publisher @classmethod diff --git a/st2common/st2common/persistence/workflow.py b/st2common/st2common/persistence/workflow.py index 3063dd4a9d..933460b9aa 100644 --- a/st2common/st2common/persistence/workflow.py +++ b/st2common/st2common/persistence/workflow.py @@ -19,7 +19,6 @@ from st2common.models import db from st2common.models.db import workflow as wf_db_models from st2common.persistence import base as persistence -from st2common.transport import utils as transport_utils __all__ = [ @@ -39,8 +38,7 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = transport.workflow.WorkflowExecutionPublisher( - urls=transport_utils.get_messaging_urls()) + cls.publisher = transport.workflow.WorkflowExecutionPublisher() return cls.publisher diff --git a/st2common/st2common/policies/base.py b/st2common/st2common/policies/base.py index 092d847b31..5528397fbe 100644 --- a/st2common/st2common/policies/base.py +++ b/st2common/st2common/policies/base.py @@ -21,7 +21,6 @@ from st2common import log as logging from st2common.persistence import policy as policy_access -from st2common.services import coordination LOG = logging.getLogger(__name__) @@ -48,10 +47,6 @@ def apply_before(self, target): :rtype: ``object`` """ - # Warn users that the coordination service is not configured - if not coordination.configured(): - LOG.warn('Coordination service is not configured. Policy enforcement is best effort.') - return target def apply_after(self, target): @@ -63,10 +58,6 @@ def apply_after(self, target): :rtype: ``object`` """ - # Warn users that the coordination service is not configured - if not coordination.configured(): - LOG.warn('Coordination service is not configured. Policy enforcement is best effort.') - return target def _get_lock_name(self, values): diff --git a/st2common/st2common/service_setup.py b/st2common/st2common/service_setup.py index 61fbea6bce..bf82e00192 100644 --- a/st2common/st2common/service_setup.py +++ b/st2common/st2common/service_setup.py @@ -22,6 +22,7 @@ import os import sys import traceback +import logging as stdlib_logging from oslo_config import cfg @@ -35,6 +36,7 @@ from st2common.models.utils.profiling import enable_profiling from st2common import triggers from st2common.rbac.migrations import run_all as run_all_rbac_migrations +from st2common.logging.filters import LogLevelFilter # Note: This is here for backward compatibility. 
# Function has been moved in a standalone module to avoid expensive in-direct @@ -112,6 +114,20 @@ def setup(service, config, setup_db=True, register_mq_exchanges=True, else: raise e + exclude_log_levels = [stdlib_logging.AUDIT] + handlers = stdlib_logging.getLoggerClass().manager.root.handlers + + for handler in handlers: + # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid + # duplicate "AUDIT" messages in production deployments where default service log level is + # set to "INFO" and we already log messages with level AUDIT to a special dedicated log + # file. + ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO and + handler.level < stdlib_logging.AUDIT) + if not is_debug_enabled and ignore_audit_log_messages: + LOG.debug('Excluding log messages with level "AUDIT" for handler "%s"' % (handler)) + handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels)) + if not is_debug_enabled: # NOTE: statsd logger logs everything by default under INFO so we ignore those log # messages unless verbose / debug mode is used diff --git a/st2common/st2common/services/action.py b/st2common/st2common/services/action.py index c7ffc8508b..19dd95157e 100644 --- a/st2common/st2common/services/action.py +++ b/st2common/st2common/services/action.py @@ -76,11 +76,11 @@ def create_request(liveaction, action_db=None, runnertype_db=None): # Use the user context from the parent action execution. Subtasks in a workflow # action can be invoked by a system user and so we want to use the user context # from the original workflow action. - parent_context = executions.get_parent_context(liveaction) - if parent_context: - parent_user = parent_context.get('user', None) - if parent_user: - liveaction.context['user'] = parent_user + parent_context = executions.get_parent_context(liveaction) or {} + parent_user = parent_context.get('user', None) + + if parent_user: + liveaction.context['user'] = parent_user # Validate action if not action_db: @@ -97,6 +97,9 @@ def create_request(liveaction, action_db=None, runnertype_db=None): if not hasattr(liveaction, 'parameters'): liveaction.parameters = dict() + # For consistency add pack to the context here in addition to RunnerContainer.dispatch() method + liveaction.context['pack'] = action_db.pack + # Validate action parameters. 
schema = util_schema.get_schema_for_action_parameters(action_db, runnertype_db) validator = util_schema.get_validator() diff --git a/st2common/st2common/services/coordination.py b/st2common/st2common/services/coordination.py index 108dc62ca1..e7dcd41937 100644 --- a/st2common/st2common/services/coordination.py +++ b/st2common/st2common/services/coordination.py @@ -180,46 +180,3 @@ def get_coordinator(): COORDINATOR = coordinator_setup() return COORDINATOR - - -# class LockAcquireError(Exception): -# pass - - -# class lock(object): -# def __init__(self, name, timeout=5000): -# self._name = name -# self._lock = None -# self._timeout = timeout - -# def __call__(self, func): -# @wraps(func) -# def with_lock(*args, **kwds): -# with self: -# return func(*args, **kwds) -# return with_lock - -# def _setup(self): -# if COORDINATOR is None: -# get_coordinator() - -# if not self._lock: -# self._lock = COORDINATOR.get_lock(self._name) - -# if self._timeout <= 0: -# LOG.warning("Failed to secure lock for %s.", self._name) -# raise LockAcquireError("Could not acquire lock for %s" % self._name) - -# def __enter__(self): -# self._setup() - -# LOG.debug("Attempting to secure lock for: %s", self._name) -# if not self._lock.acquire(): -# LOG.info("Could not secure lock for %s. Retrying.", self._name) -# self._timeout -= 1 -# eventlet.sleep(.25) -# self.__enter__() - -# def __exit__(self, *_args, **_kwargs): -# LOG.debug("Releasing lock for: %s", self._name) -# self._lock.release() diff --git a/st2common/st2common/services/policies.py b/st2common/st2common/services/policies.py index cd8ce74280..94da12444e 100644 --- a/st2common/st2common/services/policies.py +++ b/st2common/st2common/services/policies.py @@ -24,6 +24,20 @@ LOG = logging.getLogger(__name__) +def has_policies(lv_ac_db, policy_types=None): + query_params = { + 'resource_ref': lv_ac_db.action, + 'enabled': True + } + + if policy_types: + query_params['policy_type__in'] = policy_types + + policy_dbs = pc_db_access.Policy.query(**query_params) + + return policy_dbs.count() > 0 + + def apply_pre_run_policies(lv_ac_db): LOG.debug('Applying pre-run policies for liveaction "%s".' % str(lv_ac_db.id)) diff --git a/st2common/st2common/services/sensor_watcher.py b/st2common/st2common/services/sensor_watcher.py index 41bcecc022..27892f055e 100644 --- a/st2common/st2common/services/sensor_watcher.py +++ b/st2common/st2common/services/sensor_watcher.py @@ -20,7 +20,6 @@ from __future__ import absolute_import import eventlet from kombu.mixins import ConsumerMixin -from kombu import Connection from st2common import log as logging from st2common.transport import reactor, publishers @@ -89,7 +88,7 @@ def process_task(self, body, message): def start(self): try: - self.connection = Connection(transport_utils.get_messaging_urls()) + self.connection = transport_utils.get_connection() self._updates_thread = eventlet.spawn(self.run) except: LOG.exception('Failed to start sensor_watcher.') diff --git a/st2common/st2common/services/trigger_dispatcher.py b/st2common/st2common/services/trigger_dispatcher.py index 14850a595d..92a46825ae 100644 --- a/st2common/st2common/services/trigger_dispatcher.py +++ b/st2common/st2common/services/trigger_dispatcher.py @@ -40,7 +40,7 @@ def dispatch(self, trigger, payload=None, trace_tag=None, throw_on_validation_er """ Method which dispatches the trigger. - :param trigger: Reference to the TriggerType (.). + :param trigger: Reference to the TriggerTypeDB (.) or TriggerDB object. :type trigger: ``str`` :param payload: Trigger payload. 
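Both docstring corrections in this file make the same point: `dispatch()` accepts either a trigger reference string or an already-loaded Trigger/TriggerType DB object. A hedged usage sketch (the dispatcher class name is not visible in these hunks, so `TriggerDispatcherService` is assumed, and the trigger reference is made up):

```python
from st2common.services import trigger_dispatcher

# Hypothetical class name; use whatever this module actually exposes.
dispatcher = trigger_dispatcher.TriggerDispatcherService()

# Dispatch by reference string; trace_tag groups related executions for tracing.
dispatcher.dispatch('examples.event', payload={'k': 'v'}, trace_tag='deploy-1234')
```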
@@ -64,7 +64,7 @@ def dispatch_with_context(self, trigger, payload=None, trace_context=None, """ Method which dispatches the trigger. - :param trigger: Reference to the TriggerType (.). + :param trigger: Reference to the TriggerTypeDB (.) or TriggerDB object. :type trigger: ``str`` :param payload: Trigger payload. diff --git a/st2common/st2common/services/triggerwatcher.py b/st2common/st2common/services/triggerwatcher.py index 787f85c489..2cbe5839fc 100644 --- a/st2common/st2common/services/triggerwatcher.py +++ b/st2common/st2common/services/triggerwatcher.py @@ -15,9 +15,9 @@ # pylint: disable=assignment-from-none from __future__ import absolute_import + import eventlet from kombu.mixins import ConsumerMixin -from kombu import Connection from st2common import log as logging from st2common.persistence.trigger import Trigger @@ -108,7 +108,7 @@ def process_task(self, body, message): def start(self): try: - self.connection = Connection(transport_utils.get_messaging_urls()) + self.connection = transport_utils.get_connection() self._updates_thread = eventlet.spawn(self.run) self._load_thread = eventlet.spawn(self._load_triggers_from_db) except: diff --git a/st2common/st2common/services/workflows.py b/st2common/st2common/services/workflows.py index ed98e9697b..65a2a21040 100644 --- a/st2common/st2common/services/workflows.py +++ b/st2common/st2common/services/workflows.py @@ -537,6 +537,7 @@ def request_action_execution(wf_ex_db, task_ex_db, st2_ctx, ac_ex_req, delay=Non # Set context for the action execution. ac_ex_ctx = { + 'pack': st2_ctx.get('pack'), 'user': st2_ctx.get('user'), 'parent': st2_ctx, 'orquesta': { @@ -887,7 +888,11 @@ def request_next_tasks(wf_ex_db, task_ex_id=None): # Pass down appropriate st2 context to the task and action execution(s). root_st2_ctx = wf_ex_db.context.get('st2', {}) - st2_ctx = {'execution_id': wf_ac_ex_id, 'user': root_st2_ctx.get('user')} + st2_ctx = { + 'execution_id': wf_ac_ex_id, + 'user': root_st2_ctx.get('user'), + 'pack': root_st2_ctx.get('pack') + } if root_st2_ctx.get('api_user'): st2_ctx['api_user'] = root_st2_ctx.get('api_user') diff --git a/st2common/st2common/stream/listener.py b/st2common/st2common/stream/listener.py index 73b4962524..e5b5529f24 100644 --- a/st2common/st2common/stream/listener.py +++ b/st2common/st2common/stream/listener.py @@ -18,7 +18,6 @@ import eventlet -from kombu import Connection from kombu.mixins import ConsumerMixin from oslo_config import cfg @@ -233,13 +232,13 @@ def get_listener(name): if name == 'stream': if not _stream_listener: - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: _stream_listener = StreamListener(conn) eventlet.spawn_n(listen, _stream_listener) return _stream_listener elif name == 'execution_output': if not _execution_output_listener: - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: _execution_output_listener = ExecutionOutputListener(conn) eventlet.spawn_n(listen, _execution_output_listener) return _execution_output_listener diff --git a/st2common/st2common/transport/actionexecutionstate.py b/st2common/st2common/transport/actionexecutionstate.py index a46d202f4f..87523930f0 100644 --- a/st2common/st2common/transport/actionexecutionstate.py +++ b/st2common/st2common/transport/actionexecutionstate.py @@ -16,17 +16,23 @@ # All Exchanges and Queues related to liveaction. 
from __future__ import absolute_import + from kombu import Exchange, Queue + from st2common.transport import publishers +__all__ = [ + 'ActionExecutionStatePublisher' +] + ACTIONEXECUTIONSTATE_XCHG = Exchange('st2.actionexecutionstate', type='topic') class ActionExecutionStatePublisher(publishers.CUDPublisher): - def __init__(self, urls): - super(ActionExecutionStatePublisher, self).__init__(urls, ACTIONEXECUTIONSTATE_XCHG) + def __init__(self): + super(ActionExecutionStatePublisher, self).__init__(exchange=ACTIONEXECUTIONSTATE_XCHG) def get_queue(name, routing_key): diff --git a/st2common/st2common/transport/announcement.py b/st2common/st2common/transport/announcement.py index 72504806af..4f9d69390a 100644 --- a/st2common/st2common/transport/announcement.py +++ b/st2common/st2common/transport/announcement.py @@ -14,13 +14,20 @@ # limitations under the License. from __future__ import absolute_import + from kombu import Exchange, Queue from st2common import log as logging from st2common.constants.trace import TRACE_CONTEXT from st2common.models.api.trace import TraceContext from st2common.transport import publishers -from st2common.transport import utils as transport_utils + +__all__ = [ + 'AnnouncementPublisher', + 'AnnouncementDispatcher', + + 'get_queue' +] LOG = logging.getLogger(__name__) @@ -29,8 +36,8 @@ class AnnouncementPublisher(object): - def __init__(self, urls): - self._publisher = publishers.PoolPublisher(urls=urls) + def __init__(self): + self._publisher = publishers.PoolPublisher() def publish(self, payload, routing_key): self._publisher.publish(payload, ANNOUNCEMENT_XCHG, routing_key) @@ -42,7 +49,7 @@ class AnnouncementDispatcher(object): """ def __init__(self, logger=LOG): - self._publisher = AnnouncementPublisher(urls=transport_utils.get_messaging_urls()) + self._publisher = AnnouncementPublisher() self._logger = logger def dispatch(self, routing_key, payload, trace_context=None): diff --git a/st2common/st2common/transport/bootstrap_utils.py b/st2common/st2common/transport/bootstrap_utils.py index 97acc622d8..eda88f2146 100644 --- a/st2common/st2common/transport/bootstrap_utils.py +++ b/st2common/st2common/transport/bootstrap_utils.py @@ -20,7 +20,6 @@ import six import retrying from oslo_config import cfg -from kombu import Connection from kombu.serialization import register from kombu.serialization import pickle from kombu.serialization import pickle_protocol @@ -141,7 +140,8 @@ def _do_predeclare_queue(channel, queue): def register_exchanges(): LOG.debug('Registering exchanges...') connection_urls = transport_utils.get_messaging_urls() - with Connection(connection_urls) as conn: + + with transport_utils.get_connection() as conn: # Use ConnectionRetryWrapper to deal with rmq clustering etc. retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls), logger=LOG) diff --git a/st2common/st2common/transport/connection_retry_wrapper.py b/st2common/st2common/transport/connection_retry_wrapper.py index a657ed2afd..30c780e63c 100644 --- a/st2common/st2common/transport/connection_retry_wrapper.py +++ b/st2common/st2common/transport/connection_retry_wrapper.py @@ -14,6 +14,7 @@ # limitations under the License. from __future__ import absolute_import + import eventlet __all__ = ['ConnectionRetryWrapper', 'ClusterRetryContext'] @@ -35,7 +36,14 @@ def __init__(self, cluster_size): # No of nodes attempted. 
Starts at 1 since the self._nodes_attempted = 1 - def test_should_stop(self): + def test_should_stop(self, e=None): + # Special workaround for "(504) CHANNEL_ERROR - second 'channel.open' seen" which happens + # during tests on Travis and blocks and slows down the tests. + # NOTE: This error is not fatal during tests and we can simply switch to a next connection + # without sleeping. + if "second 'channel.open' seen" in str(e): + return False, -1 + should_stop = True if self._nodes_attempted > self.cluster_size * self.cluster_retry: return should_stop, -1 @@ -91,9 +99,12 @@ def wrapped_callback(connection, channel): retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback) """ - def __init__(self, cluster_size, logger): + def __init__(self, cluster_size, logger, ensure_max_retries=3): self._retry_context = ClusterRetryContext(cluster_size=cluster_size) self._logger = logger + # How many times to retry establishing a connection in places where we are + # calling connection.ensure_connection + self._ensure_max_retries = ensure_max_retries def errback(self, exc, interval): self._logger.error('Rabbitmq connection error: %s', exc.message) @@ -117,7 +128,7 @@ def run(self, connection, wrapped_callback): wrapped_callback(connection=connection, channel=channel) should_stop = True except connection.connection_errors + connection.channel_errors as e: - should_stop, wait = self._retry_context.test_should_stop() + should_stop, wait = self._retry_context.test_should_stop(e) # reset channel to None to avoid any channel closing errors. At this point # in case of an exception there should be no channel but that is better to # guarantee. @@ -126,7 +137,10 @@ def run(self, connection, wrapped_callback): # be notified so raise. if should_stop: raise + # -1, 0 and 1+ are handled properly by eventlet.sleep + self._logger.debug('Received RabbitMQ server error, sleeping for %s seconds ' + 'before retrying: %s' % (wait, str(e))) eventlet.sleep(wait) connection.close() @@ -135,10 +149,22 @@ def run(self, connection, wrapped_callback): # entire ConnectionPool simultaneously but that would require writing our own # ConnectionPool. If a server recovers it could happen that the same process # ends up talking to separate nodes in a cluster. - connection.ensure_connection() + def log_error_on_conn_failure(exc, interval): + self._logger.debug('Failed to re-establish connection to RabbitMQ server, ' + 'retrying in %s seconds: %s' % (interval, str(e))) + + try: + # NOTE: This function blocks and tries to re-establish a connection + # indefinitely if the "max_retries" argument is not specified + connection.ensure_connection(max_retries=self._ensure_max_retries, + errback=log_error_on_conn_failure) + except Exception: + self._logger.exception('Connections to RabbitMQ cannot be re-established: %s', + str(e)) + raise except Exception as e: - self._logger.exception('Connections to rabbitmq cannot be re-established: %s', + self._logger.exception('Connections to RabbitMQ cannot be re-established: %s', str(e)) # Not being able to publish a message could be a significant issue for an app.
raise diff --git a/st2common/st2common/transport/execution.py b/st2common/st2common/transport/execution.py index 0256573e93..1885a72368 100644 --- a/st2common/st2common/transport/execution.py +++ b/st2common/st2common/transport/execution.py @@ -32,13 +32,13 @@ class ActionExecutionPublisher(publishers.CUDPublisher): - def __init__(self, urls): - super(ActionExecutionPublisher, self).__init__(urls, EXECUTION_XCHG) + def __init__(self): + super(ActionExecutionPublisher, self).__init__(exchange=EXECUTION_XCHG) class ActionExecutionOutputPublisher(publishers.CUDPublisher): - def __init__(self, urls): - super(ActionExecutionOutputPublisher, self).__init__(urls, EXECUTION_OUTPUT_XCHG) + def __init__(self): + super(ActionExecutionOutputPublisher, self).__init__(exchange=EXECUTION_OUTPUT_XCHG) def get_queue(name=None, routing_key=None, exclusive=False, auto_delete=False): diff --git a/st2common/st2common/transport/liveaction.py b/st2common/st2common/transport/liveaction.py index 996d57d5b4..b2b7efe238 100644 --- a/st2common/st2common/transport/liveaction.py +++ b/st2common/st2common/transport/liveaction.py @@ -16,9 +16,18 @@ # All Exchanges and Queues related to liveaction. from __future__ import absolute_import + from kombu import Exchange, Queue + from st2common.transport import publishers +__all__ = [ + 'LiveActionPublisher', + + 'get_queue', + 'get_status_management_queue' +] + LIVEACTION_XCHG = Exchange('st2.liveaction', type='topic') LIVEACTION_STATUS_MGMT_XCHG = Exchange('st2.liveaction.status', type='topic') @@ -26,9 +35,9 @@ class LiveActionPublisher(publishers.CUDPublisher, publishers.StatePublisherMixin): - def __init__(self, urls): - publishers.CUDPublisher.__init__(self, urls, LIVEACTION_XCHG) - publishers.StatePublisherMixin.__init__(self, urls, LIVEACTION_STATUS_MGMT_XCHG) + def __init__(self): + publishers.CUDPublisher.__init__(self, exchange=LIVEACTION_XCHG) + publishers.StatePublisherMixin.__init__(self, exchange=LIVEACTION_STATUS_MGMT_XCHG) def get_queue(name, routing_key): diff --git a/st2common/st2common/transport/publishers.py b/st2common/st2common/transport/publishers.py index 76f309bc55..010c24ed39 100644 --- a/st2common/st2common/transport/publishers.py +++ b/st2common/st2common/transport/publishers.py @@ -14,15 +14,23 @@ # limitations under the License. from __future__ import absolute_import + import copy -from kombu import Connection from kombu.messaging import Producer from st2common import log as logging from st2common.metrics.base import Timer +from st2common.transport import utils as transport_utils from st2common.transport.connection_retry_wrapper import ConnectionRetryWrapper +__all__ = [ + 'PoolPublisher', + 'SharedPoolPublishers', + 'CUDPublisher', + 'StatePublisherMixin' +] + ANY_RK = '*' CREATE_RK = 'create' UPDATE_RK = 'update' @@ -32,8 +40,17 @@ class PoolPublisher(object): - def __init__(self, urls): - self.pool = Connection(urls, failover_strategy='round-robin').Pool(limit=10) + def __init__(self, urls=None): + """ + :param urls: Connection URLs to use. If not provided it uses a default value from the + config.
+ :type urls: ``list`` + """ + urls = urls or transport_utils.get_messaging_urls() + connection = transport_utils.get_connection(urls=urls, + connection_kwargs={'failover_strategy': + 'round-robin'}) + self.pool = connection.Pool(limit=10) self.cluster_size = len(urls) def errback(self, exc, interval): @@ -92,7 +109,8 @@ def get_publisher(self, urls): class CUDPublisher(object): - def __init__(self, urls, exchange): + def __init__(self, exchange): + urls = transport_utils.get_messaging_urls() self._publisher = SharedPoolPublishers().get_publisher(urls=urls) self._exchange = exchange @@ -110,7 +128,8 @@ def publish_delete(self, payload): class StatePublisherMixin(object): - def __init__(self, urls, exchange): + def __init__(self, exchange): + urls = transport_utils.get_messaging_urls() self._state_publisher = SharedPoolPublishers().get_publisher(urls=urls) self._state_exchange = exchange diff --git a/st2common/st2common/transport/reactor.py b/st2common/st2common/transport/reactor.py index 65670c8ffe..944407b413 100644 --- a/st2common/st2common/transport/reactor.py +++ b/st2common/st2common/transport/reactor.py @@ -20,7 +20,6 @@ from st2common.constants.trace import TRACE_CONTEXT from st2common.models.api.trace import TraceContext from st2common.transport import publishers -from st2common.transport import utils as transport_utils __all__ = [ 'TriggerCUDPublisher', @@ -50,8 +49,8 @@ class SensorCUDPublisher(publishers.CUDPublisher): Publisher responsible for publishing Trigger model CUD events. """ - def __init__(self, urls): - super(SensorCUDPublisher, self).__init__(urls, SENSOR_CUD_XCHG) + def __init__(self): + super(SensorCUDPublisher, self).__init__(exchange=SENSOR_CUD_XCHG) class TriggerCUDPublisher(publishers.CUDPublisher): @@ -59,13 +58,13 @@ class TriggerCUDPublisher(publishers.CUDPublisher): Publisher responsible for publishing Trigger model CUD events. """ - def __init__(self, urls): - super(TriggerCUDPublisher, self).__init__(urls, TRIGGER_CUD_XCHG) + def __init__(self): + super(TriggerCUDPublisher, self).__init__(exchange=TRIGGER_CUD_XCHG) class TriggerInstancePublisher(object): - def __init__(self, urls): - self._publisher = publishers.PoolPublisher(urls=urls) + def __init__(self): + self._publisher = publishers.PoolPublisher() def publish_trigger(self, payload=None, routing_key=None): # TODO: We should use trigger reference as a routing key @@ -78,7 +77,7 @@ class TriggerDispatcher(object): """ def __init__(self, logger=LOG): - self._publisher = TriggerInstancePublisher(urls=transport_utils.get_messaging_urls()) + self._publisher = TriggerInstancePublisher() self._logger = logger def dispatch(self, trigger, payload=None, trace_context=None): diff --git a/st2common/st2common/transport/utils.py b/st2common/st2common/transport/utils.py index c416578376..f71e91cc66 100644 --- a/st2common/st2common/transport/utils.py +++ b/st2common/st2common/transport/utils.py @@ -14,13 +14,21 @@ # limitations under the License. 
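Usage sketch (not part of the diff): the hunks above drop the ``urls`` argument from the publisher constructors, and the ``st2common/transport/utils.py`` hunk below adds a ``get_connection()`` helper, so broker URLs and SSL options are now resolved centrally from the ``[messaging]`` section of ``st2.conf``. A minimal sketch of the resulting call sites, assuming the st2 config has already been parsed:

    from st2common.transport import utils as transport_utils
    from st2common.transport.liveaction import LiveActionPublisher

    # No broker URLs at the call site any more; both of these read the
    # [messaging] section of st2.conf internally.
    publisher = LiveActionPublisher()

    with transport_utils.get_connection() as conn:
        conn.connect()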
from __future__ import absolute_import + +import ssl as ssl_lib + from oslo_config import cfg +from kombu import Connection + +from st2common import log as logging __all__ = [ + 'get_connection', + 'get_messaging_urls' ] -CONF = cfg.CONF +LOG = logging.getLogger(__name__) def get_messaging_urls(): @@ -30,6 +38,82 @@ :rtype: ``list`` ''' - if CONF.messaging.cluster_urls: - return CONF.messaging.cluster_urls - return [CONF.messaging.url] + if cfg.CONF.messaging.cluster_urls: + return cfg.CONF.messaging.cluster_urls + return [cfg.CONF.messaging.url] + + +def get_connection(urls=None, connection_kwargs=None): + """ + Retrieve a kombu "Connection" class instance configured with all the correct + options, using values from the config and any provided overrides. + + :param connection_kwargs: Any additional connection keyword arguments passed directly to the + Connection class constructor. + :type connection_kwargs: ``dict`` + """ + urls = urls or get_messaging_urls() + connection_kwargs = connection_kwargs or {} + + kwargs = {} + + ssl_kwargs = _get_ssl_kwargs(ssl=cfg.CONF.messaging.ssl, + ssl_keyfile=cfg.CONF.messaging.ssl_keyfile, + ssl_certfile=cfg.CONF.messaging.ssl_certfile, + ssl_cert_reqs=cfg.CONF.messaging.ssl_cert_reqs, + ssl_ca_certs=cfg.CONF.messaging.ssl_ca_certs, + login_method=cfg.CONF.messaging.login_method) + + # NOTE: "connection_kwargs" argument passed to this function has precedence over config values + if len(ssl_kwargs) == 1 and ssl_kwargs['ssl'] is True: + kwargs.update({'ssl': True}) + elif len(ssl_kwargs) >= 2: + ssl_kwargs.pop('ssl') + kwargs.update({'ssl': ssl_kwargs}) + + kwargs['login_method'] = cfg.CONF.messaging.login_method + + kwargs.update(connection_kwargs) + + # NOTE: This line contains no secret values so it's OK to log it + LOG.debug('Using SSL context for RabbitMQ connection: %s' % (ssl_kwargs)) + + connection = Connection(urls, **kwargs) + return connection + + +def _get_ssl_kwargs(ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None, + ssl_ca_certs=None, login_method=None): + """ + Return SSL keyword arguments to be used with the kombu.Connection class.
+ """ + ssl_kwargs = {} + + # NOTE: If "ssl" is not set to True we don't pass "ssl=False" argument to the constructor + # because the user could still enable SSL by including the "?ssl=true" query param at the + # end of the connection URL string + if ssl is True: + ssl_kwargs['ssl'] = True + + if ssl_keyfile: + ssl_kwargs['ssl'] = True + ssl_kwargs['keyfile'] = ssl_keyfile + + if ssl_certfile: + ssl_kwargs['ssl'] = True + ssl_kwargs['certfile'] = ssl_certfile + + if ssl_cert_reqs: + if ssl_cert_reqs == 'none': + ssl_cert_reqs = ssl_lib.CERT_NONE + elif ssl_cert_reqs == 'optional': + ssl_cert_reqs = ssl_lib.CERT_OPTIONAL + elif ssl_cert_reqs == 'required': + ssl_cert_reqs = ssl_lib.CERT_REQUIRED + ssl_kwargs['cert_reqs'] = ssl_cert_reqs + + if ssl_ca_certs: + ssl_kwargs['ssl'] = True + ssl_kwargs['ca_certs'] = ssl_ca_certs + + return ssl_kwargs diff --git a/st2common/st2common/transport/workflow.py b/st2common/st2common/transport/workflow.py index c9e3e58713..a199f1cc01 100644 --- a/st2common/st2common/transport/workflow.py +++ b/st2common/st2common/transport/workflow.py @@ -23,7 +23,9 @@ __all__ = [ 'WorkflowExecutionPublisher', - 'get_queue' + + 'get_queue', + 'get_status_management_queue' ] WORKFLOW_EXECUTION_XCHG = kombu.Exchange('st2.workflow', type='topic') @@ -32,9 +34,9 @@ class WorkflowExecutionPublisher(publishers.CUDPublisher, publishers.StatePublisherMixin): - def __init__(self, urls): - publishers.CUDPublisher.__init__(self, urls, WORKFLOW_EXECUTION_XCHG) - publishers.StatePublisherMixin.__init__(self, urls, WORKFLOW_EXECUTION_STATUS_MGMT_XCHG) + def __init__(self): + publishers.CUDPublisher.__init__(self, exchange=WORKFLOW_EXECUTION_XCHG) + publishers.StatePublisherMixin.__init__(self, exchange=WORKFLOW_EXECUTION_STATUS_MGMT_XCHG) def get_queue(name, routing_key): diff --git a/st2common/st2common/util/action_db.py b/st2common/st2common/util/action_db.py index 7bd2c33a6c..96969492e6 100644 --- a/st2common/st2common/util/action_db.py +++ b/st2common/st2common/util/action_db.py @@ -268,7 +268,16 @@ def serialize_positional_argument(argument_type, argument_value): serialized). """ if argument_type in ['string', 'number', 'float']: - argument_value = str(argument_value) if argument_value else '' + if argument_value is None: + argument_value = six.text_type('') + return argument_value + + if isinstance(argument_value, (int, float)): + argument_value = str(argument_value) + + if not isinstance(argument_value, six.text_type): + # cast non-unicode string values to unicode + argument_value = argument_value.decode('utf-8') elif argument_type == 'boolean': # Booleans are serialized as string "1" and "0" if argument_value is not None: @@ -285,8 +294,8 @@ # None / null is serialized as an empty string argument_value = '' else: - # Other values are simply cast to strings - argument_value = str(argument_value) if argument_value else '' + # Other values are simply cast to unicode string + argument_value = six.text_type(argument_value) if argument_value else '' return argument_value diff --git a/st2common/st2common/util/config_loader.py b/st2common/st2common/util/config_loader.py index b23f3832e0..71966a05b4 100644 --- a/st2common/st2common/util/config_loader.py +++ b/st2common/st2common/util/config_loader.py @@ -224,7 +224,7 @@ def _get_datastore_value_for_expression(self, key, value, config_schema_item=Non def get_config(pack, user): """Returns config for given pack and user.
""" - LOG.debug('Attempting to get config') + LOG.debug('Attempting to get config for pack "%s" and user "%s"' % (pack, user)) if pack and user: LOG.debug('Pack and user found. Loading config.') config_loader = ContentPackConfigLoader( diff --git a/st2common/st2common/util/crypto.py b/st2common/st2common/util/crypto.py index e6b0ecfefd..834a6e88db 100644 --- a/st2common/st2common/util/crypto.py +++ b/st2common/st2common/util/crypto.py @@ -216,8 +216,14 @@ def cryptography_symmetric_encrypt(encrypt_key, plaintext): assert isinstance(aes_key_bytes, six.binary_type) assert isinstance(hmac_key_bytes, six.binary_type) + if isinstance(plaintext, (six.text_type, six.string_types)): + # Convert data to bytes + data = plaintext.encode('utf-8') + else: + data = plaintext + # Pad data - data = pkcs5_pad(plaintext) + data = pkcs5_pad(data) # Generate IV iv_bytes = os.urandom(KEYCZAR_AES_BLOCK_SIZE) @@ -230,10 +236,6 @@ def cryptography_symmetric_encrypt(encrypt_key, plaintext): # bytes) so we simply add 5 0's header_bytes = b'00000' - if isinstance(data, (six.text_type, six.string_types)): - # Convert data to bytes - data = data.encode('utf-8') - ciphertext_bytes = encryptor.update(data) + encryptor.finalize() msg_bytes = header_bytes + iv_bytes + ciphertext_bytes @@ -368,7 +370,7 @@ def pkcs5_pad(data): Pad data using PKCS5 """ pad = KEYCZAR_AES_BLOCK_SIZE - len(data) % KEYCZAR_AES_BLOCK_SIZE - data = data + pad * chr(pad) + data = data + pad * chr(pad).encode('utf-8') return data diff --git a/st2common/st2common/util/sandboxing.py b/st2common/st2common/util/sandboxing.py index d5ceb109a6..321d4f0250 100644 --- a/st2common/st2common/util/sandboxing.py +++ b/st2common/st2common/util/sandboxing.py @@ -117,7 +117,10 @@ def get_sandbox_python_path(inherit_from_parent=True, inherit_parent_virtualenv= if inherit_parent_virtualenv and hasattr(sys, 'real_prefix'): # We are running inside virtualenv site_packages_dir = get_python_lib() - assert sys.prefix in site_packages_dir + + sys_prefix = os.path.abspath(sys.prefix) + assert sys_prefix in site_packages_dir + sandbox_python_path.append(site_packages_dir) sandbox_python_path = ':'.join(sandbox_python_path) diff --git a/st2common/st2common/util/service.py b/st2common/st2common/util/service.py new file mode 100644 index 0000000000..9e9a7df92d --- /dev/null +++ b/st2common/st2common/util/service.py @@ -0,0 +1,36 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import pymongo + +from st2common import log as logging + + +LOG = logging.getLogger(__name__) + + +def retry_on_exceptions(exc): + LOG.warning('Evaluating retry on exception %s. 
%s', type(exc), str(exc)) + + is_mongo_connection_error = isinstance(exc, pymongo.errors.ConnectionFailure) + + retrying = is_mongo_connection_error + + if retrying: + LOG.warning('Retrying on exception %s.', type(exc)) + + return retrying diff --git a/st2common/st2common/validators/api/reactor.py b/st2common/st2common/validators/api/reactor.py index 0e0f4397bc..35cbcb9d69 100644 --- a/st2common/st2common/validators/api/reactor.py +++ b/st2common/st2common/validators/api/reactor.py @@ -14,8 +14,9 @@ # limitations under the License. from __future__ import absolute_import -import six +import six +import uuid from oslo_config import cfg from apscheduler.triggers.cron import CronTrigger @@ -113,7 +114,7 @@ def validate_trigger_payload(trigger_type_ref, payload, throw_on_inexistent_trig """ This function validates trigger payload parameters for system and user-defined triggers. - :param trigger_type_ref: Reference of a trigger type or a trigger dictionary object. + :param trigger_type_ref: Reference of a trigger type / trigger / trigger dictionary object. :type trigger_type_ref: ``str`` :param payload: Trigger payload. @@ -144,7 +145,23 @@ # System trigger payload_schema = SYSTEM_TRIGGER_TYPES[trigger_type_ref]['payload_schema'] else: + # We assume a Trigger ref (and not a TriggerType ref) is passed in if the second + part (the trigger name) is a valid version 4 UUID + try: + trigger_uuid = uuid.UUID(trigger_type_ref.split('.')[-1]) + except ValueError: + is_trigger_db = False + else: + is_trigger_db = (trigger_uuid.version == 4) + + if is_trigger_db: + trigger_db = triggers.get_trigger_db_by_ref(trigger_type_ref) + + if trigger_db: + trigger_type_ref = trigger_db.type + trigger_type_db = triggers.get_trigger_type_db(trigger_type_ref) + if not trigger_type_db: # Trigger doesn't exist in the database if throw_on_inexistent_trigger: diff --git a/st2common/tests/integration/test_rabbitmq_ssl_listener.py b/st2common/tests/integration/test_rabbitmq_ssl_listener.py new file mode 100644 index 0000000000..eaa000449b --- /dev/null +++ b/st2common/tests/integration/test_rabbitmq_ssl_listener.py @@ -0,0 +1,188 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
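Usage sketch (not part of the diff) for the new ``[messaging]`` SSL options exercised by the integration tests below. It mirrors the ``cfg.CONF.set_override()`` pattern the tests themselves use; the CA bundle path is hypothetical:

    from oslo_config import cfg

    # Equivalent to setting these options under [messaging] in st2.conf
    cfg.CONF.set_override(name='ssl', override=True, group='messaging')
    cfg.CONF.set_override(name='ssl_ca_certs', override='/etc/rabbitmq/ssl/ca_bundle.pem',
                          group='messaging')
    cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging')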
+ +from __future__ import absolute_import + +import os +import ssl +import socket + +import unittest2 +from oslo_config import cfg + +from st2common.transport import utils as transport_utils + +from st2tests.fixturesloader import get_fixtures_base_path + +__all__ = [ + 'RabbitMQTLSListenerTestCase' +] + +CERTS_FIXTURES_PATH = os.path.join(get_fixtures_base_path(), 'ssl_certs/') +ON_TRAVIS = (os.environ.get('TRAVIS', 'false').lower() == 'true') + +NON_SSL_LISTENER_PORT = 5672 +SSL_LISTENER_PORT = 5671 + + +# NOTE: We only run these tests on Travis because at the moment, the local vagrant dev VM doesn't +# expose the RabbitMQ SSL listener by default +# TODO: Re-enable once we upgrade Travis from Precise to Xenial where the latest versions of RabbitMQ +# and OpenSSL are available +@unittest2.skip('Skipping until we upgrade to Xenial on Travis') +# @unittest2.skipIf(not ON_TRAVIS, 'Skipping tests because not running on Travis') +class RabbitMQTLSListenerTestCase(unittest2.TestCase): + + def setUp(self): + # Set default values + cfg.CONF.set_override(name='ssl', override=False, group='messaging') + cfg.CONF.set_override(name='ssl_keyfile', override=None, group='messaging') + cfg.CONF.set_override(name='ssl_certfile', override=None, group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=None, group='messaging') + cfg.CONF.set_override(name='ssl_cert_reqs', override=None, group='messaging') + + def test_non_ssl_connection_on_ssl_listener_port_failure(self): + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + expected_msg_1 = '[Errno 104] Connection reset by peer' + expected_msg_2 = 'Socket closed' + + try: + connection.connect() + except Exception as e: + self.assertFalse(connection.connected) + self.assertTrue(isinstance(e, (IOError, socket.error))) + self.assertTrue(expected_msg_1 in str(e) or expected_msg_2 in str(e)) + else: + self.fail('Exception was not thrown') + + if connection: + connection.release() + + def test_ssl_connection_on_ssl_listener_success(self): + # Using query param notation + urls = 'amqp://guest:guest@127.0.0.1:5671/?ssl=true' + connection = transport_utils.get_connection(urls=urls) + + try: + self.assertTrue(connection.connect()) + self.assertTrue(connection.connected) + finally: + if connection: + connection.release() + + # Using messaging.ssl config option + cfg.CONF.set_override(name='ssl', override=True, group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + try: + self.assertTrue(connection.connect()) + self.assertTrue(connection.connected) + finally: + if connection: + connection.release() + + def test_ssl_connection_ca_certs_provided(self): + ca_cert_path = os.path.join(CERTS_FIXTURES_PATH, 'ca/ca_certificate_bundle.pem') + + cfg.CONF.set_override(name='ssl', override=True, group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging') + + # 1. Validate server cert against a valid CA bundle (success) - cert required + cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + try: + self.assertTrue(connection.connect()) + self.assertTrue(connection.connected) + finally: + if connection: + connection.release() + + # 2.
Validate server cert against other CA bundle (failure) + # CA bundle which was not used to sign the server cert + ca_cert_path = os.path.join('/etc/ssl/certs/thawte_Primary_Root_CA.pem') + + cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + expected_msg = r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed' + self.assertRaisesRegexp(ssl.SSLError, expected_msg, connection.connect) + + # 3. Same as above, but with ssl_cert_reqs set to 'optional' (still fails) + ca_cert_path = os.path.join('/etc/ssl/certs/thawte_Primary_Root_CA.pem') + + cfg.CONF.set_override(name='ssl_cert_reqs', override='optional', group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + expected_msg = r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed' + self.assertRaisesRegexp(ssl.SSLError, expected_msg, connection.connect) + + # 4. Invalid CA bundle, but ssl_cert_reqs is 'none' so the server cert is not + # validated and the connection succeeds + ca_cert_path = os.path.join('/etc/ssl/certs/thawte_Primary_Root_CA.pem') + + cfg.CONF.set_override(name='ssl_cert_reqs', override='none', group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + try: + self.assertTrue(connection.connect()) + self.assertTrue(connection.connected) + finally: + if connection: + connection.release() + + def test_ssl_connect_client_side_cert_authentication(self): + # 1. Success, valid client side cert provided + ssl_keyfile = os.path.join(CERTS_FIXTURES_PATH, 'client/private_key.pem') + ssl_certfile = os.path.join(CERTS_FIXTURES_PATH, 'client/client_certificate.pem') + ca_cert_path = os.path.join(CERTS_FIXTURES_PATH, 'ca/ca_certificate_bundle.pem') + + cfg.CONF.set_override(name='ssl_keyfile', override=ssl_keyfile, group='messaging') + cfg.CONF.set_override(name='ssl_certfile', override=ssl_certfile, group='messaging') + cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + try: + self.assertTrue(connection.connect()) + self.assertTrue(connection.connected) + finally: + if connection: + connection.release() + + # 2.
Invalid client side cert provided - failure + ssl_keyfile = os.path.join(CERTS_FIXTURES_PATH, 'client/private_key.pem') + ssl_certfile = os.path.join(CERTS_FIXTURES_PATH, 'server/server_certificate.pem') + ca_cert_path = os.path.join(CERTS_FIXTURES_PATH, 'ca/ca_certificate_bundle.pem') + + cfg.CONF.set_override(name='ssl_keyfile', override=ssl_keyfile, group='messaging') + cfg.CONF.set_override(name='ssl_certfile', override=ssl_certfile, group='messaging') + cfg.CONF.set_override(name='ssl_cert_reqs', override='required', group='messaging') + cfg.CONF.set_override(name='ssl_ca_certs', override=ca_cert_path, group='messaging') + + connection = transport_utils.get_connection(urls='amqp://guest:guest@127.0.0.1:5671/') + + expected_msg = r'\[X509: KEY_VALUES_MISMATCH\] key values mismatch' + self.assertRaisesRegexp(ssl.SSLError, expected_msg, connection.connect) diff --git a/st2common/tests/integration/test_service_setup_log_level_filtering.py b/st2common/tests/integration/test_service_setup_log_level_filtering.py new file mode 100644 index 0000000000..432834d486 --- /dev/null +++ b/st2common/tests/integration/test_service_setup_log_level_filtering.py @@ -0,0 +1,123 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import os +import sys +import signal + +import eventlet +from eventlet.green import subprocess + +from st2tests.base import IntegrationTestCase +from st2tests.fixturesloader import get_fixtures_base_path + +__all__ = [ + 'ServiceSetupLogLevelFilteringTestCase' +] + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + +FIXTURES_DIR = get_fixtures_base_path() + +ST2_CONFIG_INFO_LL_PATH = os.path.join(FIXTURES_DIR, 'conf/st2.tests.api.info_log_level.conf') +ST2_CONFIG_INFO_LL_PATH = os.path.abspath(ST2_CONFIG_INFO_LL_PATH) + +ST2_CONFIG_DEBUG_LL_PATH = os.path.join(FIXTURES_DIR, 'conf/st2.tests.api.debug_log_level.conf') +ST2_CONFIG_DEBUG_LL_PATH = os.path.abspath(ST2_CONFIG_DEBUG_LL_PATH) + +ST2_CONFIG_AUDIT_LL_PATH = os.path.join(FIXTURES_DIR, 'conf/st2.tests.api.audit_log_level.conf') +ST2_CONFIG_AUDIT_LL_PATH = os.path.abspath(ST2_CONFIG_AUDIT_LL_PATH) + +ST2_CONFIG_SYSTEM_DEBUG_PATH = os.path.join(FIXTURES_DIR, + 'conf/st2.tests.api.system_debug_true.conf') +ST2_CONFIG_SYSTEM_DEBUG_PATH = os.path.abspath(ST2_CONFIG_SYSTEM_DEBUG_PATH) + +PYTHON_BINARY = sys.executable + +ST2API_BINARY = os.path.join(BASE_DIR, '../../../st2api/bin/st2api') +ST2API_BINARY = os.path.abspath(ST2API_BINARY) + +CMD = [PYTHON_BINARY, ST2API_BINARY, '--config-file'] + + +class ServiceSetupLogLevelFilteringTestCase(IntegrationTestCase): + def test_audit_log_level_is_filtered_if_log_level_is_not_debug_or_audit(self): + # 1. 
INFO log level - audit messages should not be included + process = self._start_process(config_path=ST2_CONFIG_INFO_LL_PATH) + self.add_process(process=process) + + # Give it some time to start up + eventlet.sleep(3) + process.send_signal(signal.SIGKILL) + + # First 3 log lines are debug messages about the environment which are always logged + stdout = '\n'.join(process.stdout.read().decode('utf-8').split('\n')[3:]) + + self.assertTrue('INFO [-]' in stdout) + self.assertTrue('DEBUG [-]' not in stdout) + self.assertTrue('AUDIT [-]' not in stdout) + + # 2. DEBUG log level - audit messages should be included + process = self._start_process(config_path=ST2_CONFIG_DEBUG_LL_PATH) + self.add_process(process=process) + + # Give it some time to start up + eventlet.sleep(5) + process.send_signal(signal.SIGKILL) + + # First 3 log lines are debug messages about the environment which are always logged + stdout = '\n'.join(process.stdout.read().decode('utf-8').split('\n')[3:]) + + self.assertTrue('INFO [-]' in stdout) + self.assertTrue('DEBUG [-]' in stdout) + self.assertTrue('AUDIT [-]' in stdout) + + # 3. AUDIT log level - audit messages should be included + process = self._start_process(config_path=ST2_CONFIG_AUDIT_LL_PATH) + self.add_process(process=process) + + # Give it some time to start up + eventlet.sleep(5) + process.send_signal(signal.SIGKILL) + + # First 3 log lines are debug messages about the environment which are always logged + stdout = '\n'.join(process.stdout.read().decode('utf-8').split('\n')[3:]) + + self.assertTrue('INFO [-]' not in stdout) + self.assertTrue('DEBUG [-]' not in stdout) + self.assertTrue('AUDIT [-]' in stdout) + + # 4. INFO log level but system.debug set to True + process = self._start_process(config_path=ST2_CONFIG_SYSTEM_DEBUG_PATH) + self.add_process(process=process) + + # Give it some time to start up + eventlet.sleep(5) + process.send_signal(signal.SIGKILL) + + # First 3 log lines are debug messages about the environment which are always logged + stdout = '\n'.join(process.stdout.read().decode('utf-8').split('\n')[3:]) + + self.assertTrue('INFO [-]' in stdout) + self.assertTrue('DEBUG [-]' in stdout) + self.assertTrue('AUDIT [-]' in stdout) + + def _start_process(self, config_path): + cmd = CMD + [config_path] + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + shell=False, preexec_fn=os.setsid) + return process diff --git a/st2common/tests/unit/services/test_policy.py b/st2common/tests/unit/services/test_policy.py new file mode 100644 index 0000000000..274a4ff6e1 --- /dev/null +++ b/st2common/tests/unit/services/test_policy.py @@ -0,0 +1,106 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
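Usage note (not part of the diff): the ``st2common.util.service.retry_on_exceptions`` helper added earlier is written as a retry predicate for the ``retrying`` library st2 already depends on. A minimal sketch, assuming the decorated function performs a MongoDB write (the function name is hypothetical):

    import retrying

    from st2common.util.service import retry_on_exceptions

    @retrying.retry(retry_on_exception=retry_on_exceptions,
                    stop_max_attempt_number=3, wait_fixed=100)
    def save_execution():
        # Hypothetical DB operation; it is only retried when the raised
        # exception is a pymongo ConnectionFailure
        pass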
+ +from __future__ import absolute_import + +import st2tests.config as tests_config +tests_config.parse_args() + +import st2common + +from st2common.bootstrap import policiesregistrar as policies_registrar +from st2common.bootstrap import runnersregistrar as runners_registrar +from st2common.constants import action as action_constants +from st2common.constants import policy as policy_constants +from st2common.models.db import action as action_db_models +from st2common.services import action as action_service +from st2common.services import policies as policy_service + +import st2tests +from st2tests import fixturesloader as fixtures + + +PACK = 'generic' + +TEST_FIXTURES = { + 'actions': [ + 'action1.yaml', # wolfpack.action-1 + 'action2.yaml', # wolfpack.action-2 + 'local.yaml' # core.local + ], + 'policies': [ + 'policy_2.yaml', # mock policy on wolfpack.action-1 + 'policy_5.yaml' # concurrency policy on wolfpack.action-2 + ] +} + + +class PolicyServiceTestCase(st2tests.DbTestCase): + + @classmethod + def setUpClass(cls): + super(PolicyServiceTestCase, cls).setUpClass() + + # Register runners + runners_registrar.register_runners() + + # Register common policy types + policies_registrar.register_policy_types(st2common) + + loader = fixtures.FixturesLoader() + loader.save_fixtures_to_db(fixtures_pack=PACK, + fixtures_dict=TEST_FIXTURES) + + def setUp(self): + super(PolicyServiceTestCase, self).setUp() + + params = {'action': 'wolfpack.action-1', 'parameters': {'actionstr': 'foo-last'}} + self.lv_ac_db_1 = action_db_models.LiveActionDB(**params) + self.lv_ac_db_1, _ = action_service.request(self.lv_ac_db_1) + + params = {'action': 'wolfpack.action-2', 'parameters': {'actionstr': 'foo-last'}} + self.lv_ac_db_2 = action_db_models.LiveActionDB(**params) + self.lv_ac_db_2, _ = action_service.request(self.lv_ac_db_2) + + params = {'action': 'core.local', 'parameters': {'cmd': 'date'}} + self.lv_ac_db_3 = action_db_models.LiveActionDB(**params) + self.lv_ac_db_3, _ = action_service.request(self.lv_ac_db_3) + + def tearDown(self): + action_service.update_status(self.lv_ac_db_1, action_constants.LIVEACTION_STATUS_CANCELED) + action_service.update_status(self.lv_ac_db_2, action_constants.LIVEACTION_STATUS_CANCELED) + action_service.update_status(self.lv_ac_db_3, action_constants.LIVEACTION_STATUS_CANCELED) + + def test_action_has_policies(self): + self.assertTrue(policy_service.has_policies(self.lv_ac_db_1)) + + def test_action_does_not_have_policies(self): + self.assertFalse(policy_service.has_policies(self.lv_ac_db_3)) + + def test_action_has_specific_policies(self): + self.assertTrue( + policy_service.has_policies( + self.lv_ac_db_2, + policy_types=policy_constants.POLICY_TYPES_REQUIRING_LOCK + ) + ) + + def test_action_does_not_have_specific_policies(self): + self.assertFalse( + policy_service.has_policies( + self.lv_ac_db_1, + policy_types=policy_constants.POLICY_TYPES_REQUIRING_LOCK + ) + ) diff --git a/st2common/tests/unit/test_action_db_utils.py b/st2common/tests/unit/test_action_db_utils.py index 3132a2ebd7..061709d61a 100644 --- a/st2common/tests/unit/test_action_db_utils.py +++ b/st2common/tests/unit/test_action_db_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
@@ -399,6 +400,27 @@ def test_get_args(self): self.assertListEqual(pos_args, expected_pos_args, 'Positional args not parsed / serialized correctly.') + # Test unicode values + params = { + 'actionstr': 'bar č š hello đ č p ž Ž a 💩😁', + 'actionint': 20, + 'runnerint': 555 + } + expected_pos_args = [ + '20', + '', + u'bar č š hello đ č p ž Ž a 💩😁', + '', + '', + '', + '' + ] + pos_args, named_args = action_db_utils.get_args(params, ActionDBUtilsTestCase.action_db) + self.assertListEqual(pos_args, expected_pos_args, 'Positional args not parsed correctly.') + self.assertTrue('actionint' not in named_args) + self.assertTrue('actionstr' not in named_args) + self.assertEqual(named_args.get('runnerint'), 555) + @classmethod def _setup_test_models(cls): ActionDBUtilsTestCase.setup_runner() diff --git a/st2common/tests/unit/test_connection_retry_wrapper.py b/st2common/tests/unit/test_connection_retry_wrapper.py index 80c998eab1..97ad1fc035 100644 --- a/st2common/tests/unit/test_connection_retry_wrapper.py +++ b/st2common/tests/unit/test_connection_retry_wrapper.py @@ -36,6 +36,19 @@ def test_single_node_cluster_retry(self): self.assertTrue(should_stop, 'Done trying.') self.assertEqual(wait, -1) + def test_should_stop_second_channel_open_error_should_be_non_fatal(self): + retry_context = ClusterRetryContext(cluster_size=1) + + e = Exception("(504) CHANNEL_ERROR - second 'channel.open' seen") + should_stop, wait = retry_context.test_should_stop(e=e) + self.assertFalse(should_stop) + self.assertEqual(wait, -1) + + e = Exception("CHANNEL_ERROR - second 'channel.open' seen") + should_stop, wait = retry_context.test_should_stop(e=e) + self.assertFalse(should_stop) + self.assertEqual(wait, -1) + def test_multiple_node_cluster_retry(self): cluster_size = 3 last_index = cluster_size * 2 diff --git a/st2common/tests/unit/test_crypto_utils.py b/st2common/tests/unit/test_crypto_utils.py index 54ed22d9b3..2bc62835d8 100644 --- a/st2common/tests/unit/test_crypto_utils.py +++ b/st2common/tests/unit/test_crypto_utils.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership.
@@ -53,6 +54,31 @@ def setUpClass(cls): super(CryptoUtilsTestCase, cls).setUpClass() CryptoUtilsTestCase.test_crypto_key = AESKey.generate() + def test_symmetric_encrypt_decrypt_short_string_needs_to_be_padded(self): + original = u'a' + crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original) + plain = symmetric_decrypt(CryptoUtilsTestCase.test_crypto_key, crypto) + self.assertEqual(plain, original) + + def test_symmetric_encrypt_decrypt_utf8_character(self): + values = [ + u'£', + u'£££', + u'££££££', + u'č š hello đ č p ž Ž', + u'hello 💩', + u'💩💩💩💩💩', + u'💩💩💩', + u'💩😁' + ] + + for index, original in enumerate(values): + crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original) + plain = symmetric_decrypt(CryptoUtilsTestCase.test_crypto_key, crypto) + self.assertEqual(plain, original) + + self.assertEqual(index, (len(values) - 1)) + def test_symmetric_encrypt_decrypt(self): original = 'secret' crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original) diff --git a/st2common/tests/unit/test_db.py b/st2common/tests/unit/test_db.py index 856d1919d4..bd1cc93e5b 100644 --- a/st2common/tests/unit/test_db.py +++ b/st2common/tests/unit/test_db.py @@ -14,8 +14,10 @@ # limitations under the License. from __future__ import absolute_import -import jsonschema + +import ssl + +import jsonschema import mock import mongoengine.connection from oslo_config import cfg @@ -79,6 +81,55 @@ def test_get_ssl_kwargs(self): 'authentication_mechanism': 'MONGODB-X509' }) + # 3. ssl_keyfile provided + ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ssl_keyfile': '/tmp/keyfile', + 'ssl_match_hostname': True + }) + + # 4. ssl_certfile provided + ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ssl_certfile': '/tmp/certfile', + 'ssl_match_hostname': True + }) + + # 5. ssl_ca_certs provided + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ssl_ca_certs': '/tmp/ca_certs', + 'ssl_match_hostname': True + }) + + # 6. ssl_ca_certs and ssl_cert_reqs combinations + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ssl_ca_certs': '/tmp/ca_certs', + 'ssl_cert_reqs': ssl.CERT_NONE, + 'ssl_match_hostname': True + }) + + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ssl_ca_certs': '/tmp/ca_certs', + 'ssl_cert_reqs': ssl.CERT_OPTIONAL, + 'ssl_match_hostname': True + }) + + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ssl_ca_certs': '/tmp/ca_certs', + 'ssl_cert_reqs': ssl.CERT_REQUIRED, + 'ssl_match_hostname': True + }) + @mock.patch('st2common.models.db.mongoengine') def test_db_setup(self, mock_mongoengine): db_setup(db_name='name', db_host='host', db_port=12345, username='username', diff --git a/st2common/tests/unit/test_state_publisher.py b/st2common/tests/unit/test_state_publisher.py index 180805a54c..bc66df337f 100644 --- a/st2common/tests/unit/test_state_publisher.py +++ b/st2common/tests/unit/test_state_publisher.py @@ -14,6 +14,7 @@ # limitations under the License.
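Round-trip sketch (not part of the diff) for the crypto change above, where unicode plaintext is now encoded to UTF-8 before PKCS#5 padding. It assumes the helpers are importable the same way the new tests use them:

    from st2common.util.crypto import AESKey, symmetric_encrypt, symmetric_decrypt

    key = AESKey.generate()
    # Non-ASCII plaintext is encoded to UTF-8 bytes before padding and encryption
    ciphertext = symmetric_encrypt(key, u'£ č š 💩')
    assert symmetric_decrypt(key, ciphertext) == u'£ č š 💩'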
from __future__ import absolute_import + import kombu import mock import mongoengine as me @@ -22,7 +23,7 @@ from st2common.models.db import stormbase from st2common.persistence import base as persistence from st2common.transport import publishers -from st2common.transport import utils as transport_utils + from st2tests import DbTestCase @@ -30,8 +31,8 @@ class FakeModelPublisher(publishers.StatePublisherMixin): - def __init__(self, url): - super(FakeModelPublisher, self).__init__(url, FAKE_STATE_MGMT_XCHG) + def __init__(self): + super(FakeModelPublisher, self).__init__(exchange=FAKE_STATE_MGMT_XCHG) class FakeModelDB(stormbase.StormBaseDB): @@ -49,7 +50,7 @@ def _get_impl(cls): @classmethod def _get_publisher(cls): if not cls.publisher: - cls.publisher = FakeModelPublisher(transport_utils.get_messaging_urls()) + cls.publisher = FakeModelPublisher() return cls.publisher @classmethod diff --git a/st2common/tests/unit/test_transport.py b/st2common/tests/unit/test_transport.py new file mode 100644 index 0000000000..6c217c3347 --- /dev/null +++ b/st2common/tests/unit/test_transport.py @@ -0,0 +1,80 @@ +# Licensed to the StackStorm, Inc ('StackStorm') under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ssl + +import unittest2 + +from st2common.transport.utils import _get_ssl_kwargs + +__all__ = [ + 'TransportUtilsTestCase' +] + + +class TransportUtilsTestCase(unittest2.TestCase): + def test_get_ssl_kwargs(self): + # 1. No SSL kwargs provided + ssl_kwargs = _get_ssl_kwargs() + self.assertEqual(ssl_kwargs, {}) + + # 2. ssl kwarg provided + ssl_kwargs = _get_ssl_kwargs(ssl=True) + self.assertEqual(ssl_kwargs, { + 'ssl': True + }) + + # 3. ssl_keyfile provided + ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'keyfile': '/tmp/keyfile' + }) + + # 4. ssl_certfile provided + ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'certfile': '/tmp/certfile' + }) + + # 5. ssl_ca_certs provided + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ca_certs': '/tmp/ca_certs' + }) + + # 6. 
ssl_ca_certs and ssl_cert_reqs combinations + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ca_certs': '/tmp/ca_certs', + 'cert_reqs': ssl.CERT_NONE + }) + + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ca_certs': '/tmp/ca_certs', + 'cert_reqs': ssl.CERT_OPTIONAL + }) + + ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required') + self.assertEqual(ssl_kwargs, { + 'ssl': True, + 'ca_certs': '/tmp/ca_certs', + 'cert_reqs': ssl.CERT_REQUIRED + }) diff --git a/st2exporter/st2exporter/worker.py b/st2exporter/st2exporter/worker.py index 49ba9a8ad0..d5a4dcd55b 100644 --- a/st2exporter/st2exporter/worker.py +++ b/st2exporter/st2exporter/worker.py @@ -15,7 +15,6 @@ import eventlet from six.moves import queue -from kombu import Connection from oslo_config import cfg from st2common import log as logging @@ -124,5 +123,5 @@ def _get_all_executions_from_db(self): def get_worker(): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: return ExecutionsExporter(conn, [EXPORTER_WORK_QUEUE]) diff --git a/st2reactor/st2reactor/container/sensor_wrapper.py b/st2reactor/st2reactor/container/sensor_wrapper.py index d409886407..b477368ded 100644 --- a/st2reactor/st2reactor/container/sensor_wrapper.py +++ b/st2reactor/st2reactor/container/sensor_wrapper.py @@ -200,6 +200,10 @@ def __init__(self, pack, file_path, class_name, trigger_types, if '--debug' in parent_args: set_log_level_for_all_loggers() + else: + # NOTE: statsd logger logs everything by default under INFO so we ignore those log + # messages unless verbose / debug mode is used + logging.ignore_statsd_log_messages() self._sensor_instance = self._get_sensor_instance() diff --git a/st2reactor/st2reactor/rules/worker.py b/st2reactor/st2reactor/rules/worker.py index 5d8a049e16..32c30cb14f 100644 --- a/st2reactor/st2reactor/rules/worker.py +++ b/st2reactor/st2reactor/rules/worker.py @@ -15,8 +15,6 @@ from __future__ import absolute_import -from kombu import Connection - from st2common import log as logging from st2common.constants.trace import TRACE_CONTEXT, TRACE_ID from st2common.constants import triggers as trigger_constants @@ -119,5 +117,5 @@ def _decompose_pre_ack_process_response(response): def get_worker(): - with Connection(transport_utils.get_messaging_urls()) as conn: + with transport_utils.get_connection() as conn: return TriggerInstanceDispatcher(conn, [RULESENGINE_WORK_QUEUE]) diff --git a/st2reactor/tests/unit/test_sensor_service.py b/st2reactor/tests/unit/test_sensor_service.py index d3058b77f0..214b28dc69 100644 --- a/st2reactor/tests/unit/test_sensor_service.py +++ b/st2reactor/tests/unit/test_sensor_service.py @@ -28,9 +28,14 @@ } -class TriggerTypeMock(object): - def __init__(self, schema={}): - self.payload_schema = schema +class TriggerTypeDBMock(object): + def __init__(self, schema=None): + self.payload_schema = schema or {} + + +class TriggerDBMock(object): + def __init__(self, type=None): + self.type = type class SensorServiceTestCase(unittest2.TestCase): @@ -54,7 +59,7 @@ def tearDown(self): cfg.CONF.system.validate_trigger_payload = self.validate_trigger_payload @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA))) + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def 
test_dispatch_success_valid_payload_validation_enabled(self): cfg.CONF.system.validate_trigger_payload = True @@ -75,7 +80,33 @@ def test_dispatch_success_valid_payload_validation_enabled(self): self.assertEqual(self._dispatched_count, 1) @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA))) + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) + @mock.patch('st2common.services.triggers.get_trigger_db_by_ref', + mock.MagicMock(return_value=TriggerDBMock(type='trigger-type-ref'))) + def test_dispatch_success_with_validation_enabled_trigger_reference(self): + # Test a scenario where a Trigger ref and not a TriggerType ref is provided + cfg.CONF.system.validate_trigger_payload = True + + # define a valid payload + payload = { + 'name': 'John Doe', + 'age': 25, + 'career': ['foo, Inc.', 'bar, Inc.'], + 'married': True, + 'awards': {'2016': ['hoge prize', 'fuga prize']}, + 'income': 50000 + } + + self.assertEqual(self._dispatched_count, 0) + + # dispatching a trigger + self.sensor_service.dispatch('pack.86582f21-1fbc-44ea-88cb-0cd2b610e93b', payload) + + # This asserts that the target trigger was dispatched + self.assertEqual(self._dispatched_count, 1) + + @mock.patch('st2common.services.triggers.get_trigger_type_db', + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def test_dispatch_success_with_validation_disabled_and_invalid_payload(self): """ Tests that an invalid payload still results in dispatch success with default config @@ -108,7 +139,7 @@ def test_dispatch_success_with_validation_disabled_and_invalid_payload(self): self.assertEqual(self._dispatched_count, 1) @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA))) + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def test_dispatch_failure_caused_by_incorrect_type(self): # define an invalid payload (the type of 'age' is incorrect) payload = { @@ -131,7 +162,7 @@ def test_dispatch_failure_caused_by_incorrect_type(self): self.assertEqual(self._dispatched_count, 1) @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA))) + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def test_dispatch_failure_caused_by_lack_of_required_parameter(self): # define an invalid payload (lack of required property) payload = { @@ -149,7 +180,7 @@ def test_dispatch_failure_caused_by_lack_of_required_parameter(self): self.assertEqual(self._dispatched_count, 1) @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA))) + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def test_dispatch_failure_caused_by_extra_parameter(self): # define an invalid payload ('hobby' is extra) payload = { @@ -162,7 +193,7 @@ def test_dispatch_failure_caused_by_extra_parameter(self): self.assertEqual(self._dispatched_count, 0) @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock(TEST_SCHEMA))) + mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def test_dispatch_success_with_multiple_type_value(self): payload = { 'name': 'John Doe', @@ -180,7 +211,7 @@ def test_dispatch_success_with_multiple_type_value(self): self.assertEqual(self._dispatched_count, 2) @mock.patch('st2common.services.triggers.get_trigger_type_db', -
mock.MagicMock(return_value=TriggerTypeDBMock(TEST_SCHEMA))) def test_dispatch_success_with_null(self): payload = { 'name': 'John Doe', @@ -193,7 +224,7 @@ self.assertEqual(self._dispatched_count, 1) @mock.patch('st2common.services.triggers.get_trigger_type_db', - mock.MagicMock(return_value=TriggerTypeMock())) + mock.MagicMock(return_value=TriggerTypeDBMock())) def test_dispatch_success_without_payload_schema(self): # the case trigger has no property self.sensor_service.dispatch('trigger-name', {}) diff --git a/st2tests/st2tests/config.py b/st2tests/st2tests/config.py index 470f7adb9d..3cd3300110 100644 --- a/st2tests/st2tests/config.py +++ b/st2tests/st2tests/config.py @@ -150,7 +150,34 @@ def _register_api_opts(): help='URL of the messaging server.'), cfg.ListOpt( 'cluster_urls', default=[], - help='URL of all the nodes in a messaging service cluster.') + help='URL of all the nodes in a messaging service cluster.'), + cfg.IntOpt( + 'connection_retries', default=10, + help='How many times should we retry connection before failing.'), + cfg.IntOpt( + 'connection_retry_wait', default=10000, + help='How long should we wait between connection retries.'), + cfg.BoolOpt( + 'ssl', default=False, + help='Use SSL / TLS to connect to the messaging server. Same as ' + 'appending "?ssl=true" at the end of the connection URL string.'), + cfg.StrOpt( + 'ssl_keyfile', default=None, + help='Private keyfile used to identify the local connection against RabbitMQ.'), + cfg.StrOpt( + 'ssl_certfile', default=None, + help='Certificate file used to identify the local connection (client).'), + cfg.StrOpt( + 'ssl_cert_reqs', default=None, choices=['none', 'optional', 'required'], + help='Specifies whether a certificate is required from the other side of the ' + 'connection, and whether it will be validated if provided.'), + cfg.StrOpt( + 'ssl_ca_certs', default=None, + help='ca_certs file contains a set of concatenated CA certificates, which are ' + 'used to validate certificates passed from RabbitMQ.'), + cfg.StrOpt( + 'login_method', default=None, + help='Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.).') ] _register_opts(messaging_opts, group='messaging') @@ -262,6 +289,12 @@ def _register_scheduler_opts(): cfg.FloatOpt( 'gc_interval', default=5, help='How often to look for zombie executions before rescheduling them (in ms).'), + cfg.IntOpt( + 'retry_max_attempt', default=3, + help='The maximum number of attempts that the scheduler retries on error.'), + cfg.IntOpt( + 'retry_wait_msec', default=100, + help='The number of milliseconds to wait in between retries.') ] _register_opts(scheduler_opts, group='scheduler') diff --git a/st2tests/st2tests/fixtures/conf/logging.api.audit.conf b/st2tests/st2tests/fixtures/conf/logging.api.audit.conf new file mode 100644 index 0000000000..3b5f3005f8 --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/logging.api.audit.conf @@ -0,0 +1,44 @@ +[loggers] +keys=root + +[handlers] +keys=consoleHandler, fileHandler, auditHandler + +[formatters] +keys=simpleConsoleFormatter, verboseConsoleFormatter, gelfFormatter + +[logger_root] +level=AUDIT +handlers=consoleHandler, fileHandler, auditHandler + +[handler_consoleHandler] +class=StreamHandler +level=AUDIT +formatter=simpleConsoleFormatter +args=(sys.stdout,) + +[handler_fileHandler] +class=st2common.log.FormatNamedFileHandler +level=AUDIT +formatter=verboseConsoleFormatter +args=("/tmp/st2api.{timestamp}.log",) + +[handler_auditHandler] +class=st2common.log.FormatNamedFileHandler
+level=AUDIT +formatter=gelfFormatter +args=("/tmp/st2api.audit.{timestamp}.log",) + +[formatter_simpleConsoleFormatter] +class=st2common.logging.formatters.ConsoleLogFormatter +format=%(asctime)s %(levelname)s [-] %(message)s +datefmt= + +[formatter_verboseConsoleFormatter] +class=st2common.logging.formatters.ConsoleLogFormatter +format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s +datefmt= + +[formatter_gelfFormatter] +class=st2common.logging.formatters.GelfLogFormatter +format=%(message)s diff --git a/st2tests/st2tests/fixtures/conf/logging.api.debug.conf b/st2tests/st2tests/fixtures/conf/logging.api.debug.conf new file mode 100644 index 0000000000..1d7e8ca7ed --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/logging.api.debug.conf @@ -0,0 +1,44 @@ +[loggers] +keys=root + +[handlers] +keys=consoleHandler, fileHandler, auditHandler + +[formatters] +keys=simpleConsoleFormatter, verboseConsoleFormatter, gelfFormatter + +[logger_root] +level=DEBUG +handlers=consoleHandler, fileHandler, auditHandler + +[handler_consoleHandler] +class=StreamHandler +level=DEBUG +formatter=simpleConsoleFormatter +args=(sys.stdout,) + +[handler_fileHandler] +class=st2common.log.FormatNamedFileHandler +level=DEBUG +formatter=verboseConsoleFormatter +args=("/tmp/st2api.{timestamp}.log",) + +[handler_auditHandler] +class=st2common.log.FormatNamedFileHandler +level=AUDIT +formatter=gelfFormatter +args=("/tmp/st2api.audit.{timestamp}.log",) + +[formatter_simpleConsoleFormatter] +class=st2common.logging.formatters.ConsoleLogFormatter +format=%(asctime)s %(levelname)s [-] %(message)s +datefmt= + +[formatter_verboseConsoleFormatter] +class=st2common.logging.formatters.ConsoleLogFormatter +format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s +datefmt= + +[formatter_gelfFormatter] +class=st2common.logging.formatters.GelfLogFormatter +format=%(message)s diff --git a/st2tests/st2tests/fixtures/conf/logging.api.info.conf b/st2tests/st2tests/fixtures/conf/logging.api.info.conf new file mode 100644 index 0000000000..f035bcdcb6 --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/logging.api.info.conf @@ -0,0 +1,44 @@ +[loggers] +keys=root + +[handlers] +keys=consoleHandler, fileHandler, auditHandler + +[formatters] +keys=simpleConsoleFormatter, verboseConsoleFormatter, gelfFormatter + +[logger_root] +level=INFO +handlers=consoleHandler, fileHandler, auditHandler + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleConsoleFormatter +args=(sys.stdout,) + +[handler_fileHandler] +class=st2common.log.FormatNamedFileHandler +level=INFO +formatter=verboseConsoleFormatter +args=("/tmp/st2api.{timestamp}.log",) + +[handler_auditHandler] +class=st2common.log.FormatNamedFileHandler +level=AUDIT +formatter=gelfFormatter +args=("/tmp/st2api.audit.{timestamp}.log",) + +[formatter_simpleConsoleFormatter] +class=st2common.logging.formatters.ConsoleLogFormatter +format=%(asctime)s %(levelname)s [-] %(message)s +datefmt= + +[formatter_verboseConsoleFormatter] +class=st2common.logging.formatters.ConsoleLogFormatter +format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s +datefmt= + +[formatter_gelfFormatter] +class=st2common.logging.formatters.GelfLogFormatter +format=%(message)s diff --git a/st2tests/st2tests/fixtures/conf/st2.tests.api.audit_log_level.conf b/st2tests/st2tests/fixtures/conf/st2.tests.api.audit_log_level.conf new file mode 100644 index 0000000000..dbe36c36db --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/st2.tests.api.audit_log_level.conf @@ 
-0,0 +1,99 @@ +# Config file used by integration tests + +[database] +db_name = st2-test + +[api] +# Host and port to bind the API server. +host = 127.0.0.1 +port = 9101 +logging = st2tests/st2tests/fixtures/conf/logging.api.audit.conf +mask_secrets = False +# allow_origin is required for handling CORS in st2 web UI. +# allow_origin = http://myhost1.example.com:3000,http://myhost2.example.com:3000 + +[sensorcontainer] +logging = st2tests/conf/logging.sensorcontainer.conf +sensor_node_name = sensornode1 +partition_provider = name:default + +[rulesengine] +logging = st2reactor/conf/logging.rulesengine.conf + +[timersengine] +logging = st2reactor/conf/logging.timersengine.conf + +[actionrunner] +logging = st2actions/conf/logging.conf + +[auth] +host = 127.0.0.1 +port = 9100 +use_ssl = False +debug = False +enable = False +logging = st2tests/conf/logging.auth.conf + +mode = standalone +backend = flat_file +backend_kwargs = {"file_path": "st2auth/conf/htpasswd_dev"} + +# Base URL to the API endpoint excluding the version (e.g. http://myhost.net:9101/) +api_url = http://127.0.0.1:9101/ + +[system] +debug = False +# This way integration tests can write to this directory +base_path = /tmp + +[garbagecollector] +logging = st2reactor/conf/logging.garbagecollector.conf + +action_executions_ttl = 20 +action_executions_output_ttl = 10 +trigger_instances_ttl = 20 +purge_inquiries = True + +collection_interval = 1 +sleep_delay = 0.1 + +[content] +system_packs_base_path = +packs_base_paths = st2tests/st2tests/fixtures/packs/ + +[syslog] +host = 127.0.0.1 +port = 514 +facility = local7 +protocol = udp + +[webui] +# webui_base_url = https://mywebhost.domain + +[log] +excludes = requests,paramiko +redirect_stderr = False +mask_secrets = False + +[system_user] +user = stanley +ssh_key_file = /home/vagrant/.ssh/stanley_rsa + +[messaging] +url = amqp://guest:guest@127.0.0.1:5672/ + +[ssh_runner] +remote_dir = /tmp + +[resultstracker] +logging = st2actions/conf/logging.resultstracker.conf +query_interval = 0.1 + +[notifier] +logging = st2actions/conf/logging.notifier.conf + +[exporter] +logging = st2exporter/conf/logging.exporter.conf + +[mistral] +jitter_interval = 0 diff --git a/st2tests/st2tests/fixtures/conf/st2.tests.api.debug_log_level.conf b/st2tests/st2tests/fixtures/conf/st2.tests.api.debug_log_level.conf new file mode 100644 index 0000000000..caad395240 --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/st2.tests.api.debug_log_level.conf @@ -0,0 +1,99 @@ +# Config file used by integration tests + +[database] +db_name = st2-test + +[api] +# Host and port to bind the API server. +host = 127.0.0.1 +port = 9101 +logging = st2tests/st2tests/fixtures/conf/logging.api.debug.conf +mask_secrets = False +# allow_origin is required for handling CORS in st2 web UI. +# allow_origin = http://myhost1.example.com:3000,http://myhost2.example.com:3000 + +[sensorcontainer] +logging = st2tests/conf/logging.sensorcontainer.conf +sensor_node_name = sensornode1 +partition_provider = name:default + +[rulesengine] +logging = st2reactor/conf/logging.rulesengine.conf + +[timersengine] +logging = st2reactor/conf/logging.timersengine.conf + +[actionrunner] +logging = st2actions/conf/logging.conf + +[auth] +host = 127.0.0.1 +port = 9100 +use_ssl = False +debug = False +enable = False +logging = st2tests/conf/logging.auth.conf + +mode = standalone +backend = flat_file +backend_kwargs = {"file_path": "st2auth/conf/htpasswd_dev"} + +# Base URL to the API endpoint excluding the version (e.g. 
http://myhost.net:9101/) +api_url = http://127.0.0.1:9101/ + +[system] +debug = False +# This way integration tests can write to this directory +base_path = /tmp + +[garbagecollector] +logging = st2reactor/conf/logging.garbagecollector.conf + +action_executions_ttl = 20 +action_executions_output_ttl = 10 +trigger_instances_ttl = 20 +purge_inquiries = True + +collection_interval = 1 +sleep_delay = 0.1 + +[content] +system_packs_base_path = +packs_base_paths = st2tests/st2tests/fixtures/packs/ + +[syslog] +host = 127.0.0.1 +port = 514 +facility = local7 +protocol = udp + +[webui] +# webui_base_url = https://mywebhost.domain + +[log] +excludes = requests,paramiko +redirect_stderr = False +mask_secrets = False + +[system_user] +user = stanley +ssh_key_file = /home/vagrant/.ssh/stanley_rsa + +[messaging] +url = amqp://guest:guest@127.0.0.1:5672/ + +[ssh_runner] +remote_dir = /tmp + +[resultstracker] +logging = st2actions/conf/logging.resultstracker.conf +query_interval = 0.1 + +[notifier] +logging = st2actions/conf/logging.notifier.conf + +[exporter] +logging = st2exporter/conf/logging.exporter.conf + +[mistral] +jitter_interval = 0 diff --git a/st2tests/st2tests/fixtures/conf/st2.tests.api.info_log_level.conf b/st2tests/st2tests/fixtures/conf/st2.tests.api.info_log_level.conf new file mode 100644 index 0000000000..5cd4e6cd33 --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/st2.tests.api.info_log_level.conf @@ -0,0 +1,99 @@ +# Config file used by integration tests + +[database] +db_name = st2-test + +[api] +# Host and port to bind the API server. +host = 127.0.0.1 +port = 9101 +logging = st2tests/st2tests/fixtures/conf/logging.api.info.conf +mask_secrets = False +# allow_origin is required for handling CORS in st2 web UI. +# allow_origin = http://myhost1.example.com:3000,http://myhost2.example.com:3000 + +[sensorcontainer] +logging = st2tests/conf/logging.sensorcontainer.conf +sensor_node_name = sensornode1 +partition_provider = name:default + +[rulesengine] +logging = st2reactor/conf/logging.rulesengine.conf + +[timersengine] +logging = st2reactor/conf/logging.timersengine.conf + +[actionrunner] +logging = st2actions/conf/logging.conf + +[auth] +host = 127.0.0.1 +port = 9100 +use_ssl = False +debug = False +enable = False +logging = st2tests/conf/logging.auth.conf + +mode = standalone +backend = flat_file +backend_kwargs = {"file_path": "st2auth/conf/htpasswd_dev"} + +# Base URL to the API endpoint excluding the version (e.g. 
http://myhost.net:9101/) +api_url = http://127.0.0.1:9101/ + +[system] +debug = False +# This way integration tests can write to this directory +base_path = /tmp + +[garbagecollector] +logging = st2reactor/conf/logging.garbagecollector.conf + +action_executions_ttl = 20 +action_executions_output_ttl = 10 +trigger_instances_ttl = 20 +purge_inquiries = True + +collection_interval = 1 +sleep_delay = 0.1 + +[content] +system_packs_base_path = +packs_base_paths = st2tests/st2tests/fixtures/packs/ + +[syslog] +host = 127.0.0.1 +port = 514 +facility = local7 +protocol = udp + +[webui] +# webui_base_url = https://mywebhost.domain + +[log] +excludes = requests,paramiko +redirect_stderr = False +mask_secrets = False + +[system_user] +user = stanley +ssh_key_file = /home/vagrant/.ssh/stanley_rsa + +[messaging] +url = amqp://guest:guest@127.0.0.1:5672/ + +[ssh_runner] +remote_dir = /tmp + +[resultstracker] +logging = st2actions/conf/logging.resultstracker.conf +query_interval = 0.1 + +[notifier] +logging = st2actions/conf/logging.notifier.conf + +[exporter] +logging = st2exporter/conf/logging.exporter.conf + +[mistral] +jitter_interval = 0 diff --git a/st2tests/st2tests/fixtures/conf/st2.tests.api.system_debug_true.conf b/st2tests/st2tests/fixtures/conf/st2.tests.api.system_debug_true.conf new file mode 100644 index 0000000000..3317c11abf --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/st2.tests.api.system_debug_true.conf @@ -0,0 +1,99 @@ +# Config file used by integration tests + +[database] +db_name = st2-test + +[api] +# Host and port to bind the API server. +host = 127.0.0.1 +port = 9101 +logging = st2tests/st2tests/fixtures/conf/logging.api.info.conf +mask_secrets = False +# allow_origin is required for handling CORS in st2 web UI. +# allow_origin = http://myhost1.example.com:3000,http://myhost2.example.com:3000 + +[sensorcontainer] +logging = st2tests/conf/logging.sensorcontainer.conf +sensor_node_name = sensornode1 +partition_provider = name:default + +[rulesengine] +logging = st2reactor/conf/logging.rulesengine.conf + +[timersengine] +logging = st2reactor/conf/logging.timersengine.conf + +[actionrunner] +logging = st2actions/conf/logging.conf + +[auth] +host = 127.0.0.1 +port = 9100 +use_ssl = False +debug = False +enable = False +logging = st2tests/conf/logging.auth.conf + +mode = standalone +backend = flat_file +backend_kwargs = {"file_path": "st2auth/conf/htpasswd_dev"} + +# Base URL to the API endpoint excluding the version (e.g. 
http://myhost.net:9101/) +api_url = http://127.0.0.1:9101/ + +[system] +debug = True +# This way integration tests can write to this directory +base_path = /tmp + +[garbagecollector] +logging = st2reactor/conf/logging.garbagecollector.conf + +action_executions_ttl = 20 +action_executions_output_ttl = 10 +trigger_instances_ttl = 20 +purge_inquiries = True + +collection_interval = 1 +sleep_delay = 0.1 + +[content] +system_packs_base_path = +packs_base_paths = st2tests/st2tests/fixtures/packs/ + +[syslog] +host = 127.0.0.1 +port = 514 +facility = local7 +protocol = udp + +[webui] +# webui_base_url = https://mywebhost.domain + +[log] +excludes = requests,paramiko +redirect_stderr = False +mask_secrets = False + +[system_user] +user = stanley +ssh_key_file = /home/vagrant/.ssh/stanley_rsa + +[messaging] +url = amqp://guest:guest@127.0.0.1:5672/ + +[ssh_runner] +remote_dir = /tmp + +[resultstracker] +logging = st2actions/conf/logging.resultstracker.conf +query_interval = 0.1 + +[notifier] +logging = st2actions/conf/logging.notifier.conf + +[exporter] +logging = st2exporter/conf/logging.exporter.conf + +[mistral] +jitter_interval = 0 diff --git a/st2tests/st2tests/fixtures/conf/st2.tests.conf b/st2tests/st2tests/fixtures/conf/st2.tests.conf new file mode 100644 index 0000000000..bb97039f03 --- /dev/null +++ b/st2tests/st2tests/fixtures/conf/st2.tests.conf @@ -0,0 +1,100 @@ +# Config file used by integration tests + +[database] +db_name = st2-test + +[api] +# Host and port to bind the API server. +host = 127.0.0.1 +port = 9101 +logging = st2tests/conf/logging.api.conf +mask_secrets = False +# allow_origin is required for handling CORS in st2 web UI. +# allow_origin = http://myhost1.example.com:3000,http://myhost2.example.com:3000 + +[sensorcontainer] +logging = st2tests/conf/logging.sensorcontainer.conf +sensor_node_name = sensornode1 +partition_provider = name:default + +[rulesengine] +logging = st2reactor/conf/logging.rulesengine.conf + +[timersengine] +logging = st2reactor/conf/logging.timersengine.conf + +[actionrunner] +logging = st2actions/conf/logging.conf + +[auth] +host = 127.0.0.1 +port = 9100 +use_ssl = False +debug = False +enable = False +logging = st2tests/conf/logging.auth.conf + +mode = standalone +backend = flat_file +backend_kwargs = {"file_path": "st2auth/conf/htpasswd_dev"} + +# Base URL to the API endpoint excluding the version (e.g. 
http://myhost.net:9101/) +api_url = http://127.0.0.1:9101/ + +[system] +debug = False + +# This way integration tests can write to this directory +base_path = /tmp + +[garbagecollector] +logging = st2reactor/conf/logging.garbagecollector.conf + +action_executions_ttl = 20 +action_executions_output_ttl = 10 +trigger_instances_ttl = 20 +purge_inquiries = True + +collection_interval = 1 +sleep_delay = 0.1 + +[content] +system_packs_base_path = +packs_base_paths = st2tests/st2tests/fixtures/packs/ + +[syslog] +host = 127.0.0.1 +port = 514 +facility = local7 +protocol = udp + +[webui] +# webui_base_url = https://mywebhost.domain + +[log] +excludes = requests,paramiko +redirect_stderr = False +mask_secrets = False + +[system_user] +user = stanley +ssh_key_file = /home/vagrant/.ssh/stanley_rsa + +[messaging] +url = amqp://guest:guest@127.0.0.1:5672/ + +[ssh_runner] +remote_dir = /tmp + +[resultstracker] +logging = st2actions/conf/logging.resultstracker.conf +query_interval = 0.1 + +[notifier] +logging = st2actions/conf/logging.notifier.conf + +[exporter] +logging = st2exporter/conf/logging.exporter.conf + +[mistral] +jitter_interval = 0 diff --git a/st2tests/st2tests/fixtures/generic/apikeys/apikey1.yaml b/st2tests/st2tests/fixtures/generic/apikeys/apikey1.yaml index 49cc94e8a4..ffdaccad18 100644 --- a/st2tests/st2tests/fixtures/generic/apikeys/apikey1.yaml +++ b/st2tests/st2tests/fixtures/generic/apikeys/apikey1.yaml @@ -1,4 +1,5 @@ --- +id: 58e3f3330c0517062a3fda43 user: bill key_hash: "ec81d4a56f5987b0ae1cff6e152459986e873d6604637fc70d85c0a0daf131b0a830ccd5b6454cc0c95c0ba6e6655933c993325eb3a28bc43af6c1d801a7c1e8" # 1234 metadata: diff --git a/st2tests/st2tests/fixtures/generic/apikeys/apikey2.yaml b/st2tests/st2tests/fixtures/generic/apikeys/apikey2.yaml index fa2755052a..1fed0f928c 100644 --- a/st2tests/st2tests/fixtures/generic/apikeys/apikey2.yaml +++ b/st2tests/st2tests/fixtures/generic/apikeys/apikey2.yaml @@ -1,4 +1,5 @@ --- +id: 5c5ddd776cb8de530e0a1391 user: dilbert key_hash: "17f858ea0bb108feaa91b8eee524c7382e0218ff541783d45996a1149d50dfde4bc19f2e6a591028a2ea08de4211893b246d4eda61dd3c9cf294a2405184ac4b" # 5678 metadata: diff --git a/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/config-context-action.yaml b/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/config-context-action.yaml new file mode 100644 index 0000000000..1e1dc53aa1 --- /dev/null +++ b/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/config-context-action.yaml @@ -0,0 +1,9 @@ +--- + name: "config-context-action" + runner_type: "local-shell-cmd" + enabled: true + entry_point: "" + parameters: + cmd: + immutable: true + default: "echo \"{{ config_context.config_key_a }}\"" diff --git a/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/config-context.yaml b/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/config-context.yaml new file mode 100644 index 0000000000..8c1c5dca72 --- /dev/null +++ b/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/config-context.yaml @@ -0,0 +1,7 @@ +--- +name: config-context +description: Workflow which tests that {{ config_context.foo }} notation works in default parameter values for workflow actions.
+pack: orquesta_tests +runner_type: orquesta +entry_point: workflows/config-context.yaml +enabled: true diff --git a/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/workflows/config-context.yaml b/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/workflows/config-context.yaml new file mode 100644 index 0000000000..796b80e3ff --- /dev/null +++ b/st2tests/st2tests/fixtures/packs/orquesta_tests/actions/workflows/config-context.yaml @@ -0,0 +1,13 @@ +version: 1.0 + +description: Workflow which tests {{ config_context }} functionality. + +output: + - msg: <% ctx().message %> + +tasks: + task1: + action: orquesta_tests.config-context-action + next: + - when: <% succeeded() %> + publish: message=<% result().stdout %> diff --git a/st2tests/st2tests/fixtures/packs/orquesta_tests/config.schema.yaml b/st2tests/st2tests/fixtures/packs/orquesta_tests/config.schema.yaml new file mode 100644 index 0000000000..35f7289000 --- /dev/null +++ b/st2tests/st2tests/fixtures/packs/orquesta_tests/config.schema.yaml @@ -0,0 +1,6 @@ +--- +config_key_a: + description: "Sample config key." + type: "string" + default: "value of config key a" + required: true diff --git a/st2tests/st2tests/fixtures/ssl_certs/README.md b/st2tests/st2tests/fixtures/ssl_certs/README.md new file mode 100644 index 0000000000..d54f4f1e6b --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/README.md @@ -0,0 +1,10 @@ +# SSL Certificates Used for Testing + +This directory contains self-signed server and client certificates which are +used by the tests. + +Those certificates are issued and signed by a custom CA which is contained in the ca/ directory. + +Certificate passphrase is ``MySecretPassword``. + +NOTE: Those certificates will expire on ``notAfter=Feb 11 15:58:38 2024 GMT``. diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.cer b/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.cer new file mode 100644 index 0000000000..94557aa645 Binary files /dev/null and b/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.cer differ diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.pem b/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.pem new file mode 100644 index 0000000000..a194ec97df --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICxjCCAa6gAwIBAgIJALjUApUWLemKMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV +BAMMCE15VGVzdENBMB4XDTE5MDIxMjE1NTcwM1oXDTI0MDIxMTE1NTcwM1owEzER +MA8GA1UEAwwITXlUZXN0Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDxVR7nSFKXUMET0WTtVNjsgD1HDdvIZcDyPGFEMNhtftPv4RmkxeFnKNumHbIu +s2eox6MCT7wK9CKG+38szyMMDkCObYkGCKzZG2yejkjs6Kv74hvML8p+NIz3Cxch +WEuD6ubnSoKl35cVt4/LUTM/IFG36H6f7Q47NYYsWIBMaXUvY5Wbg5SqxD4LMKkx +uDFzITyrA38xvwb96mTkXT/OJEyswAAeWjjoKHWdirknhiFvKXi1T9jdmJTwBnGz +lFUS1Aavkj/Og7el9JjoL6S83mclDPbcD68/kWUliHHr8l1wfAP/oObOm7wpXViU +64nFnHP0/WtTM50urnWjFYjVAgMBAAGjHTAbMAwGA1UdEwQFMAMBAf8wCwYDVR0P +BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQDwqchOuO85hfRb25LMeB9T0iEpwQdY +cKCD1ASg42Sp/mzlscPmODxILnvm3BItEKbq6mrG2s9i42FRmXu+6D2Bm7k1jDnh +FW/hI5KG5ULQWfkFqgUAWyeSKTF7oK9FRAfROY3K9E/MXxsO10e+ibgZPZjY8RTC +eUihRw3LvIFj3mY3OQ+sBQ4OTh/nPd66trzAJee15ATC0nK0YJTVhLv576DmxOyb +yuESg2l8qvjXI0C/W+MyLCO4sH1hhg+5pjEwiXH3Z1Sk59l7qag21kp53xhvjL7W ++zisXvuZC08wfCPc3RJ6ThRb8MZZKeFpOffVVHBtgv9Aes7IOyVG15XA +-----END CERTIFICATE----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/certs/01.pem b/st2tests/st2tests/fixtures/ssl_certs/ca/certs/01.pem new file mode 100644
index 0000000000..17c4490f8b --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/certs/01.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhNeVRl +c3RDQTAeFw0xOTAyMTIxNTU4MDdaFw0yNDAyMTExNTU4MDdaMCUxEjAQBgNVBAMM +CWxvY2FsaG9zdDEPMA0GA1UECgwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuLLUdbHqOsUiRnkv2S0fiadrqwfdgaZgVImvMyorVYzoJ5W7 +anJSyWPnV/ly/rjL7toiPhBcVgDuCGkf7CjPN4E5tdxI9ylYk/UHEtMG1ll6kDiF +8hWfHDdktdqnQvuLkUMAA5xgIFfX+UMBuTZk7VowrjnOuljN5eVN89y2fYXXtqC1 +91HilG9VwLewYKQd/Ishb4p2WfxiBIVO+cQpnYB6quvrEYC1XPcRbJuXdrc7KcYn +dWdoj6M7aT1zOnHJrdLtv7F7dkYgV9vqwN7w3ud7uNaEbsHvWz0i+6qjX/uE755N +ZoJ8O8Dx5ug/1lxplnXlfmadIibYPBJatRsSiwIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsF +AAOCAQEAnhmIUhZwweCqdzGNeoNXXkuXyBf2fFvajHlG2a2pZ8r6/fyQbbJgzo04 +ajjWoUoSW+XB+AfJvT6CTZuMWsGkxYvFAxOoXtLpW0OKqEh55q8diMSb/gOxxwND +vHVb1+VjZBhzxxt0TbXeFngMnBSgVhipKQe49pe0H+rDDYptultl81n2zFLzBKUe +h927CnTJ7cpZe4Di2tMJfVsDJB6piuwPu6GnWhT38Q12I+ryL2xbihIw1B4qDtq6 +nq4lYGnpJCNNXg5JR5S1HeYiQtP0sHgU6SvpgMtzDdbCJ0Nu7EpR5J3ChdQWooGf +uTOThX41qx1p47ho4TA9Ac4K/GRcLg== +-----END CERTIFICATE----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/certs/02.pem b/st2tests/st2tests/fixtures/ssl_certs/ca/certs/02.pem new file mode 100644 index 0000000000..a10ae91143 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/certs/02.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBAjANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhNeVRl +c3RDQTAeFw0xOTAyMTIxNTU4MzhaFw0yNDAyMTExNTU4MzhaMCUxEjAQBgNVBAMM +CWxvY2FsaG9zdDEPMA0GA1UECgwGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4HxkZw50MGiWYmlrwJBHAjwsD7lfft9gHrRAeP8iEI0oLIJm +/MmUUIyA2DSDGJCIsP+grkmZawLmu7D0vJIVIUo+OBNUQ/3mACWH9z15AW5s/Ig/ +FZErhBg3RFZS+hXVT639U94uKne+mjh/G4Ej7OYHhBywn+EKakIJuUTs10sF0kW/ +4h1Gx9+Ph3tfYSagNdMDXXft0Knn/X8vMwLF5Eg8ZHKnty30wJRr4r2bqTeSCPS5 +k3bfpcxOAnaSpTDuIoxIp7w9pjwLVAVWvbjqDlU5DrPxpsn29i8STNpJ7My7+12/ +C/QJDrlCJCav1ma04G2QZbyAri3ax/MCeonFsQIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsF +AAOCAQEAI+PgF1gsQckqTh71CxqKimM0h5pIGh6H09bSa+9LFLFa60E1zR8rmygw +AD+u6sI5foFbSdUiIDJBmHizvwMmIptGSRw0Znzi/jjbjBmZSNLnk+Vird5grjF4 +Pf7Vkgi/NKzXTS3Y2TUUhk5OZZ6OmszHZ0eGJlUcz6Qa13hcalVHc3FmikeAu5/h +XQuthOQDXJBabgexQ+1K6ft6DDImdQCFcZhYXSb30cRHS9lqIVZbI7Rtk6UqwkvE +hYU0g8BVeVBpL7xYBqfrpdy+vBb28rrLT6Dvgf0giQ3F07S+RAivDWjM53Wyhb7T +6o3h8l49IkcEW1mns9Mj2bPNFSOhSA== +-----END CERTIFICATE----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt new file mode 100644 index 0000000000..ad058db53d --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt @@ -0,0 +1,2 @@ +V 240211155807Z 01 unknown /CN=localhost/O=server +V 240211155838Z 02 unknown /CN=localhost/O=client diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.attr b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.attr new file mode 100644 index 0000000000..8f7e63a347 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.attr @@ -0,0 +1 @@ +unique_subject = yes diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.attr.old b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.attr.old new file mode 100644 index 0000000000..8f7e63a347 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.attr.old @@ -0,0 +1 @@ +unique_subject = yes diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.old 
b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.old new file mode 100644 index 0000000000..970c83b368 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/index.txt.old @@ -0,0 +1 @@ +V 240211155807Z 01 unknown /CN=localhost/O=server diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/openssl.cnf b/st2tests/st2tests/fixtures/ssl_certs/ca/openssl.cnf new file mode 100644 index 0000000000..a8348fbf15 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/openssl.cnf @@ -0,0 +1,54 @@ +[ ca ] +default_ca = testca + +[ testca ] +dir = . +certificate = $dir/ca_certificate_bundle.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/ca_private_key.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 1825 +default_md = sha256 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional +domainComponent = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/ca_private_key.pem +default_md = sha256 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature,keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature,keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/private/ca_private_key.pem b/st2tests/st2tests/fixtures/ssl_certs/ca/private/ca_private_key.pem new file mode 100644 index 0000000000..e54d4958cd --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/private/ca_private_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDxVR7nSFKXUMET +0WTtVNjsgD1HDdvIZcDyPGFEMNhtftPv4RmkxeFnKNumHbIus2eox6MCT7wK9CKG ++38szyMMDkCObYkGCKzZG2yejkjs6Kv74hvML8p+NIz3CxchWEuD6ubnSoKl35cV +t4/LUTM/IFG36H6f7Q47NYYsWIBMaXUvY5Wbg5SqxD4LMKkxuDFzITyrA38xvwb9 +6mTkXT/OJEyswAAeWjjoKHWdirknhiFvKXi1T9jdmJTwBnGzlFUS1Aavkj/Og7el +9JjoL6S83mclDPbcD68/kWUliHHr8l1wfAP/oObOm7wpXViU64nFnHP0/WtTM50u +rnWjFYjVAgMBAAECggEBAN14Pz8CyQCiFD5KqHOArP4FBbciSbMTZkknDiAVL1j0 +zixSiEUFb8BK55//mphu/c8PPlINuETZHKKBRIlrof8bSTUr4laOOYmYOEsdymDX +eZVTQC1XIl5FfaPtIpHwRITQWoyhfVoZ4b4FUcnFP+FLmJLMov/C/Y9qpDIoGb2E +NbcMEnIz0i573+Ci1k+OLAdthbCigUvwvJ1iLv5m3s1XrRvIu6TDsERXdB/02pFu +XXNgyidR6XVr/MVov898PB5B0eJbX6Iir7avzpS1V/q5kq2pgFFZk8Vfhvw2k07C +l89peWIo+1h8djem/1n1FLD7aRKzFTb6HULS4uoxCDUCgYEA/o23BbC1/LRTq0IW +7I8BqTCe70cnuvWCUtWiTNWzX3INK4Hsxdeztfiu/W5dTiDndZ4CzMAoXt/rxXkw +Dc449FB1wVKCKShZRyeyyboOCpfzW1040JhjmGU4ZBn6T4U2cpaJyLGtcfkFZSeq +2nOiUntVJcPq6vWF2sdJysGSWucCgYEA8rQsf5RaHM6yRFpKrbb+SC8PAEqAZF9N +XZvl64GLHi9PSK/n68iZw1c7j4VjnCC89PH0flpQfkngrffLiy2pi+JdYo7qBKeT +3IFOiQAvylpxCiQMvFqsxz9mhoj3jJdyNGvKXJeQ5PuxRatZOHwpMP+tpQ7uF2zm +DzReoxqZ4uMCgYB1XNFthjPh9yI8a5Q2LRkO8KPWnm/q+xbDKkxSMJUrBGKeFKEd +9n2dALNtlVzfkLwmtluEG3SBiawit+U3+ES6H/6qy2fHohrHe74q0+V1bOl+zlRL +mHcS5FhDjtaho0GfQ1jzdzgIvE+Ie+mCHp5QeRyg9NtyyRCV9hxHp0fbMQKBgQDr +Cqn9c8JBG7twjrC7wvhHF6vDcGMe0VyvRwdHJ9F+jfqOPiywHzkqABTiTR/GV74m +yRsqMnS5mPpKACvSwYnsunANvrHLiC6d4WwZKWEe6q+GTps23eltnGzB5Ws3cINd 
+WPZE7VOZLlbjTam+FiAeH74el3LkpMW3+9OayWw2WQKBgQD0S0L5OoRjVY6SRPe1 +oKqTwSlay2uzqoAhGQqGeb4SaBaImEfLMQzYQpJ5JWAnAzwHhA7x7iDm3QzB93Fg +id1rdsbfzdlZC40T0IslTYLT/mawiOcAHupDuszgnn1ycFV35915zP9Ijzqaojsn +DRI3H6XpQSJyHUNZo1pCZBXyhg== +-----END PRIVATE KEY----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/serial b/st2tests/st2tests/fixtures/ssl_certs/ca/serial new file mode 100644 index 0000000000..75016ea362 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/serial @@ -0,0 +1 @@ +03 diff --git a/st2tests/st2tests/fixtures/ssl_certs/ca/serial.old b/st2tests/st2tests/fixtures/ssl_certs/ca/serial.old new file mode 100644 index 0000000000..9e22bcb8e3 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/ca/serial.old @@ -0,0 +1 @@ +02 diff --git a/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.p12 b/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.p12 new file mode 100644 index 0000000000..7feead70f4 Binary files /dev/null and b/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.p12 differ diff --git a/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.pem b/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.pem new file mode 100644 index 0000000000..a10ae91143 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBAjANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhNeVRl +c3RDQTAeFw0xOTAyMTIxNTU4MzhaFw0yNDAyMTExNTU4MzhaMCUxEjAQBgNVBAMM +CWxvY2FsaG9zdDEPMA0GA1UECgwGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4HxkZw50MGiWYmlrwJBHAjwsD7lfft9gHrRAeP8iEI0oLIJm +/MmUUIyA2DSDGJCIsP+grkmZawLmu7D0vJIVIUo+OBNUQ/3mACWH9z15AW5s/Ig/ +FZErhBg3RFZS+hXVT639U94uKne+mjh/G4Ej7OYHhBywn+EKakIJuUTs10sF0kW/ +4h1Gx9+Ph3tfYSagNdMDXXft0Knn/X8vMwLF5Eg8ZHKnty30wJRr4r2bqTeSCPS5 +k3bfpcxOAnaSpTDuIoxIp7w9pjwLVAVWvbjqDlU5DrPxpsn29i8STNpJ7My7+12/ +C/QJDrlCJCav1ma04G2QZbyAri3ax/MCeonFsQIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsF +AAOCAQEAI+PgF1gsQckqTh71CxqKimM0h5pIGh6H09bSa+9LFLFa60E1zR8rmygw +AD+u6sI5foFbSdUiIDJBmHizvwMmIptGSRw0Znzi/jjbjBmZSNLnk+Vird5grjF4 +Pf7Vkgi/NKzXTS3Y2TUUhk5OZZ6OmszHZ0eGJlUcz6Qa13hcalVHc3FmikeAu5/h +XQuthOQDXJBabgexQ+1K6ft6DDImdQCFcZhYXSb30cRHS9lqIVZbI7Rtk6UqwkvE +hYU0g8BVeVBpL7xYBqfrpdy+vBb28rrLT6Dvgf0giQ3F07S+RAivDWjM53Wyhb7T +6o3h8l49IkcEW1mns9Mj2bPNFSOhSA== +-----END CERTIFICATE----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/client/private_key.pem b/st2tests/st2tests/fixtures/ssl_certs/client/private_key.pem new file mode 100644 index 0000000000..7ddd509e15 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/client/private_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA4HxkZw50MGiWYmlrwJBHAjwsD7lfft9gHrRAeP8iEI0oLIJm +/MmUUIyA2DSDGJCIsP+grkmZawLmu7D0vJIVIUo+OBNUQ/3mACWH9z15AW5s/Ig/ +FZErhBg3RFZS+hXVT639U94uKne+mjh/G4Ej7OYHhBywn+EKakIJuUTs10sF0kW/ +4h1Gx9+Ph3tfYSagNdMDXXft0Knn/X8vMwLF5Eg8ZHKnty30wJRr4r2bqTeSCPS5 +k3bfpcxOAnaSpTDuIoxIp7w9pjwLVAVWvbjqDlU5DrPxpsn29i8STNpJ7My7+12/ +C/QJDrlCJCav1ma04G2QZbyAri3ax/MCeonFsQIDAQABAoIBAFjujqwRGtCOrn0A +PJLF1Yu6IM595qoRfjfLuvr0QB+EfFTduEUO6rXaY7TDYOgbYjuUmahSOfgd5yCW +Iu6NhNdyXSHD7o8dB8ApHitBbC23/G8y3qMBptam7UYiWK8AdUgiqohOLcXfOGBK +X3ia+YuBOZsJ7qL3+TNNRCLkfltvfA4pkCMgfdZUecJcc0jFNMoCBiyk61CnNhLL +uy1oMS7JzqPRM1ySWCdBJFkV1omDHgrgBx7VmympFUJHb6kVUSh/mnPTejTcM1ds +BkNecBbS/w2X9Gb9PSZzLCAEwmJ8J0hRkgDiahN7Q/kNsQ3ca3r03iocJALecBsW +3sujeH0CgYEA+5ewcq9M/sxdZnuZy69v7T2j8Q/FGGF7IQHlT67r80cEtXeAjlrN 
+0D9I3+cOrvz57Eay0n2hGLWzhyex6TTX9pZozTjcMuqRkB2ztPp3HkjRucpVhGz4 +pbADvO+ZgO87AGW13E8BBDN8BsWHPFpWpwpHvEcp05sFeUdeGqJfcHsCgYEA5Gsj +dndnmxX63it2Fa3I05MynAiqnt9MNm6zcNqPMKauK6xaawZv5FvQSd5MUQa9sj3s +VgYKr9e61u7WMaHqNwn6BUOwMKv26lwjkXW/wV3QMNzn5bzS2CyjWJEjdPq0WqoH +RRvR455mAlhTVFSyOJ279WXUWoPxqDbd/Y+1yMMCgYAlDqmxqrpniUh0kN4NT1Do +G70rA4yfU7RkHzhcbUJZuesqo2hvD1bjRn8AY7MY+TACqkMql9CDqDfCP4mH9P2e +V3cmSyq74SsBlC5lCMNE1ar2d6Py9m4FUZCrYos0n4gMPe70fTqEGOU6xhtuO0wq +HGyGgeDaRyoeO/HTcHkoQwKBgQCFqaQw2KKKAAyzIV+SRAV2uXYuFGwzV5uzZoge +i+aqo37cE5k9c6DaUlfKQgkKiRVMTiwUEqkCSQ0OZOh2VrdFydLCbd+WO6rbbVtq +7SpursT7MumIaDxBP62+UAAdne8X9tMWP7dMqQ4sZR8uA/neY37vlMz0wq0QsDqq +/AN2HQKBgQDZQIZuZwS12f2Mt/E/27I8lyDiVEj59zwxeayxFq8SzUtbWnWeepes +vtsdF19dWXzwI8MjTDhGo45YyKwtNXMp+uiMA0QFo4R07D68VrxAUDYGgnhhAxlZ +Wmq8OapkJUp69GeDgnG0F72eMhrQu6fJN1dpvNAkfZiuyT2BGBc6cA== +-----END RSA PRIVATE KEY----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/client/req.pem b/st2tests/st2tests/fixtures/ssl_certs/client/req.pem new file mode 100644 index 0000000000..58e270e22a --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/client/req.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICajCCAVICAQAwJTESMBAGA1UEAwwJbG9jYWxob3N0MQ8wDQYDVQQKDAZjbGll +bnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDgfGRnDnQwaJZiaWvA +kEcCPCwPuV9+32AetEB4/yIQjSgsgmb8yZRQjIDYNIMYkIiw/6CuSZlrAua7sPS8 +khUhSj44E1RD/eYAJYf3PXkBbmz8iD8VkSuEGDdEVlL6FdVPrf1T3i4qd76aOH8b +gSPs5geEHLCf4QpqQgm5ROzXSwXSRb/iHUbH34+He19hJqA10wNdd+3Qqef9fy8z +AsXkSDxkcqe3LfTAlGvivZupN5II9LmTdt+lzE4CdpKlMO4ijEinvD2mPAtUBVa9 +uOoOVTkOs/Gmyfb2LxJM2knszLv7Xb8L9AkOuUIkJq/WZrTgbZBlvICuLdrH8wJ6 +icWxAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEApuP6zTVRGLa69IXIyGIqDzb6 +NjQxyTbB5SzbtgqvdcBs5EuntsFTmS11umKwzoqT0+Kf3JtwO8pu8rQbX3C/EWOP +/eWqFPnGTCRk0AE+m08XxiAgQrgOxiMj483ka6Qr3OdT7zjW6xUyE0ObD+auD+fx +9siygGy8P9X0x0PqpWQoZm17x3bUfERiIl+oI/BltuUmAfPgELtEIBjcz+Xrslgl +5iV8Rn/+srFwMT80QLt9iypt0Me8IkbKTWpDUVQYEaXA3svCvGuthzeukImmmAPZ +rpcXR6WvYVdb2HekgqZtgvDg4FDeLidK164uTeOlCC/CRLPKyJu9VJpTQamC6g== +-----END CERTIFICATE REQUEST----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/server/private_key.pem b/st2tests/st2tests/fixtures/ssl_certs/server/private_key.pem new file mode 100644 index 0000000000..05924ec179 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/server/private_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAuLLUdbHqOsUiRnkv2S0fiadrqwfdgaZgVImvMyorVYzoJ5W7 +anJSyWPnV/ly/rjL7toiPhBcVgDuCGkf7CjPN4E5tdxI9ylYk/UHEtMG1ll6kDiF +8hWfHDdktdqnQvuLkUMAA5xgIFfX+UMBuTZk7VowrjnOuljN5eVN89y2fYXXtqC1 +91HilG9VwLewYKQd/Ishb4p2WfxiBIVO+cQpnYB6quvrEYC1XPcRbJuXdrc7KcYn +dWdoj6M7aT1zOnHJrdLtv7F7dkYgV9vqwN7w3ud7uNaEbsHvWz0i+6qjX/uE755N +ZoJ8O8Dx5ug/1lxplnXlfmadIibYPBJatRsSiwIDAQABAoIBAQC0UxytYCvwfyFs +rsrxfWWqLsQm8oHoH/ky8E4WZRhz6SOL6ltVnRKIvzpSISCN4vxwUZZXBAAyk6vS +mFhraJiPd2JR1SWD8mEh63uhfFjTk/7eqeDUrxluIgL4rebZtd/YzhJIdDdBvKIH +Ic2f96RoO8MFhzj3pNY5mzwVWCrvtsEY4ygrblQrweqNbcaowJ/YQPPkgvXb6dC3 +IXjBL5IzOwTlnIYhFkuZY736Z8GOw9rcyGxITHAKavWOJkE72drh0gv5rBnu2NLz +Lgta6o+p6/DU1tjq2LRllq1HDL7uy5yGxBtB+uXly22Ur/rQzYBKeRHkj2OqZKlV +kNiyKBipAoGBAOMkqqTu9dd8xPCgu8iQWHlKVwL6gp4Ij/0PCpXL5v5cktyoAvd9 +fb22UGeFLbbdUuctO711oMfMXl8nULafT54WbnSCG2f+oiRacupJQ/QLPQ8nV8Gy +K9+H/rYZ+ggLNkNqjvM5xQZ6/AxZxWEv+qNJfPF0fG1iCWmYh0OrmfDdAoGBANAp +vma47lG3dnQfga88//SJCeuluwumjXvN8gQJvwU1ofaGjRdKxtexWBuZG6BPXnCv +yRm5tWYJnxj+zUF+ImMsd7sd/Iy1PW7gdZtMtjIW4Qmys0IKK3zkwGygayFrnyhg +WU0t63OEiKEJ7mQzvOAmnTG+H7fZ6WWm3gxi+WaHAoGAYDda9YynpMUcY1Wi1d2X 
+LKG54/AbvjegTrC9aiC6U4sBRukAgLeuuNruijtW1vw/rt9xS9r05U2DuEjeHs2z +GyMjXMT0OQQayM1rmiS43TqZfb7LpKgFf6WK1raAPEILlVkg/pS9Cfa0p8KrInUB +dYOeomUWg/sgQ5Ox0I9zIR0CgYAYxl8a6reykhtPBtDwgloUSJsdqMPyRwhfy8sa +H+7UN+Xm6WyxcPzpfvn1juty0P90efd9UFT+p/Z/ixPyz4hYNVqqso70UD3XjG9y +5FZq774o4VPkcEFsw+0DALS/bYerzovSW7zCKuv3/q6Yzm+UXgQnf3FW+GCG8K1M +3BrC0QKBgC6srVlHBF9FI1D/9yjjx3JIVmKKS7YleAl36t05zCfR46FDPPa7J4/+ +1UzBkEFkn0/Ven8bbkOKr9v7wBjxszCnvZPxDm9oGU8l8TjrZYiuwi0euF+4r61v +HYueOtTDjtOYSPXbQcypA0FjdeHPE5XY6O4I8ti9URyV+M80vijk +-----END RSA PRIVATE KEY----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/server/req.pem b/st2tests/st2tests/fixtures/ssl_certs/server/req.pem new file mode 100644 index 0000000000..5135c2cc33 --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/server/req.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICajCCAVICAQAwJTESMBAGA1UEAwwJbG9jYWxob3N0MQ8wDQYDVQQKDAZzZXJ2 +ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC4stR1seo6xSJGeS/Z +LR+Jp2urB92BpmBUia8zKitVjOgnlbtqclLJY+dX+XL+uMvu2iI+EFxWAO4IaR/s +KM83gTm13Ej3KViT9QcS0wbWWXqQOIXyFZ8cN2S12qdC+4uRQwADnGAgV9f5QwG5 +NmTtWjCuOc66WM3l5U3z3LZ9hde2oLX3UeKUb1XAt7BgpB38iyFvinZZ/GIEhU75 +xCmdgHqq6+sRgLVc9xFsm5d2tzspxid1Z2iPoztpPXM6ccmt0u2/sXt2RiBX2+rA +3vDe53u41oRuwe9bPSL7qqNf+4Tvnk1mgnw7wPHm6D/WXGmWdeV+Zp0iJtg8Elq1 +GxKLAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAmgj0lyN0I+pik9xQnmt7RhC1 +r+5ivX9ndnMmpeN8jI0RqUOEU3CewSsxKihiVpVHqUGJhHKJmsnEh/aiD2dPorK+ +I0NGWXGexk3TfHq/Ey1lwyZc1O9+vOYo/6k3zDhJZg0BekNkYciTsMFpI4h8cDr2 +yV3gzRdFPug2wwBPuKumiJuI6ZQU3G3FjgbUIOox91ZZctH1X3PRFmHjZKiHauwE +3FEzyoJUXPhP/HFGooZ6M81nm5VotozqUbj+pslLGjPdX2stduFfhZOriwH/mKll +7seOwR7GpqOhMDSCfs1gBAZkkyGX+z1hk+hccFJHSO0PLg+32Wtzu1kepBw4kA== +-----END CERTIFICATE REQUEST----- diff --git a/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.p12 b/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.p12 new file mode 100644 index 0000000000..7a937f220b Binary files /dev/null and b/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.p12 differ diff --git a/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.pem b/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.pem new file mode 100644 index 0000000000..17c4490f8b --- /dev/null +++ b/st2tests/st2tests/fixtures/ssl_certs/server/server_certificate.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC4jCCAcqgAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhNeVRl +c3RDQTAeFw0xOTAyMTIxNTU4MDdaFw0yNDAyMTExNTU4MDdaMCUxEjAQBgNVBAMM +CWxvY2FsaG9zdDEPMA0GA1UECgwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuLLUdbHqOsUiRnkv2S0fiadrqwfdgaZgVImvMyorVYzoJ5W7 +anJSyWPnV/ly/rjL7toiPhBcVgDuCGkf7CjPN4E5tdxI9ylYk/UHEtMG1ll6kDiF +8hWfHDdktdqnQvuLkUMAA5xgIFfX+UMBuTZk7VowrjnOuljN5eVN89y2fYXXtqC1 +91HilG9VwLewYKQd/Ishb4p2WfxiBIVO+cQpnYB6quvrEYC1XPcRbJuXdrc7KcYn +dWdoj6M7aT1zOnHJrdLtv7F7dkYgV9vqwN7w3ud7uNaEbsHvWz0i+6qjX/uE755N +ZoJ8O8Dx5ug/1lxplnXlfmadIibYPBJatRsSiwIDAQABoy8wLTAJBgNVHRMEAjAA +MAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsF +AAOCAQEAnhmIUhZwweCqdzGNeoNXXkuXyBf2fFvajHlG2a2pZ8r6/fyQbbJgzo04 +ajjWoUoSW+XB+AfJvT6CTZuMWsGkxYvFAxOoXtLpW0OKqEh55q8diMSb/gOxxwND +vHVb1+VjZBhzxxt0TbXeFngMnBSgVhipKQe49pe0H+rDDYptultl81n2zFLzBKUe +h927CnTJ7cpZe4Di2tMJfVsDJB6piuwPu6GnWhT38Q12I+ryL2xbihIw1B4qDtq6 +nq4lYGnpJCNNXg5JR5S1HeYiQtP0sHgU6SvpgMtzDdbCJ0Nu7EpR5J3ChdQWooGf +uTOThX41qx1p47ho4TA9Ac4K/GRcLg== +-----END CERTIFICATE----- diff --git a/test-requirements.txt b/test-requirements.txt index a5d5eba385..2619b96004 100644 --- a/test-requirements.txt +++ 
b/test-requirements.txt @@ -1,4 +1,5 @@ -coverage +# NOTE: codecov only supports coverage==4.5.2 +coverage==4.5.2 pep8==1.7.1 flake8==3.6.0 astroid==1.6.5 @@ -18,7 +19,7 @@ nose-timer>=0.7.2,<0.8 # splitting tests run on a separate CI machines nose-parallel==0.3.1 # Required by st2client tests -pyyaml<4.0,>=3.12 +pyyaml>=4.2b4,<5.2 RandomWords gunicorn==19.7.1 psutil==5.4.5 diff --git a/tools/config_gen.py b/tools/config_gen.py index 4f593b2389..c3e2c17b2f 100755 --- a/tools/config_gen.py +++ b/tools/config_gen.py @@ -41,7 +41,7 @@ SKIP_GROUPS = ['api_pecan', 'rbac', 'results_tracker'] -# We group auth options together to nake it a bit more clear what applies where +# We group auth options together to make it a bit more clear what applies where AUTH_OPTIONS = { 'common': [ 'enable', @@ -63,7 +63,7 @@ ] } -# Some of the config values change depenending on the environment where this script is ran so we +# Some of the config values change depending on the environment where this script is run so we # set them to static values to ensure consistent and stable output STATIC_OPTION_VALUES = { 'actionrunner': { diff --git a/tools/migrate_messaging_setup.py b/tools/migrate_messaging_setup.py index ff2396f636..6232e70f7f 100755 --- a/tools/migrate_messaging_setup.py +++ b/tools/migrate_messaging_setup.py @@ -19,9 +19,9 @@ """ from __future__ import absolute_import + import traceback -from kombu import Connection from st2common import config from st2common.transport import reactor from st2common.transport import utils as transport_utils @@ -47,7 +47,7 @@ def migrate(self): self._cleanup_old_queues() def _cleanup_old_queues(self): - with Connection(transport_utils.get_messaging_urls()) as connection: + with transport_utils.get_connection() as connection: for q in self.OLD_QS: bound_q = q(connection.default_channel) try: diff --git a/tools/queue_consumer.py b/tools/queue_consumer.py index 5c7ef19989..91b1aad4cf 100755 --- a/tools/queue_consumer.py +++ b/tools/queue_consumer.py @@ -19,12 +19,13 @@ """ from __future__ import absolute_import + import random import argparse from pprint import pprint from kombu.mixins import ConsumerMixin -from kombu import Connection, Exchange, Queue +from kombu import Exchange, Queue from st2common import config from st2common.transport import utils as transport_utils @@ -59,7 +60,8 @@ def main(queue, exchange, routing_key='#'): queue = Queue(name=queue, exchange=exchange, routing_key=routing_key, auto_delete=True) - with Connection(transport_utils.get_messaging_urls()) as connection: + with transport_utils.get_connection() as connection: + connection.connect() watcher = QueueConsumer(connection=connection, queue=queue) watcher.run() diff --git a/tools/queue_producer.py b/tools/queue_producer.py index c9b01a47cd..9d98c9151e 100755 --- a/tools/queue_producer.py +++ b/tools/queue_producer.py @@ -19,20 +19,21 @@ """ from __future__ import absolute_import + import argparse +import eventlet from kombu import Exchange from st2common import config - -from st2common.transport import utils as transport_utils from st2common.transport.publishers import PoolPublisher def main(exchange, routing_key, payload): exchange = Exchange(exchange, type='topic') - publisher = PoolPublisher(urls=transport_utils.get_messaging_urls()) + publisher = PoolPublisher() publisher.publish(payload=payload, exchange=exchange, routing_key=routing_key) + eventlet.sleep(0.5) if __name__ == '__main__':
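
The new ``[messaging]`` options registered above (``ssl``, ``ssl_keyfile``, ``ssl_certfile``, ``ssl_cert_reqs``, ``ssl_ca_certs``, ``login_method``) map naturally onto kombu's ``ssl`` connection argument, which accepts a dict with ``keyfile``, ``certfile``, ``ca_certs`` and ``cert_reqs`` entries. The sketch below is illustrative only, not code from this patch: the string-to-constant mapping and the broker URL / port are assumptions, while the certificate paths are the test fixtures added here.

import ssl

from kombu import Connection

# Assumed mapping from the string-valued ssl_cert_reqs option to the
# constants the Python ssl module expects (not taken from this patch).
SSL_CERT_REQS_MAP = {
    'none': ssl.CERT_NONE,
    'optional': ssl.CERT_OPTIONAL,
    'required': ssl.CERT_REQUIRED,
}

# Client key / certificate and the CA bundle are the self-signed test
# fixtures added under st2tests/st2tests/fixtures/ssl_certs/ by this patch.
ssl_options = {
    'keyfile': 'st2tests/st2tests/fixtures/ssl_certs/client/private_key.pem',
    'certfile': 'st2tests/st2tests/fixtures/ssl_certs/client/client_certificate.pem',
    'ca_certs': 'st2tests/st2tests/fixtures/ssl_certs/ca/ca_certificate_bundle.pem',
    'cert_reqs': SSL_CERT_REQS_MAP['required'],
}

# 5671 is the conventional RabbitMQ TLS listener port; adjust as needed.
with Connection('amqp://guest:guest@127.0.0.1:5671//', ssl=ssl_options) as connection:
    connection.connect()
    print(connection.connected)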
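
The tools changes above consistently swap direct ``Connection(transport_utils.get_messaging_urls())`` construction for ``transport_utils.get_connection()``, presumably so a single code path applies the full ``[messaging]`` configuration (URL plus the new SSL and login options) rather than only the raw URLs. A minimal sketch of that pattern, mirroring the updated tools/queue_consumer.py:

from st2common import config
from st2common.transport import utils as transport_utils

# Parse the st2 config first so the [messaging] section is registered;
# the empty args list is illustrative, the real tools parse CLI arguments.
config.parse_args(args=[])

# get_connection() yields a kombu Connection built from the messaging config.
with transport_utils.get_connection() as connection:
    connection.connect()
    print('connected: %s' % (connection.connected))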