From b8c1253df7f62ce4034a16bf8de99c67011e0174 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Mon, 13 Dec 2021 10:54:17 -0600 Subject: [PATCH 1/9] Add no_proxy to index searches --- st2common/st2common/services/packs.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/st2common/st2common/services/packs.py b/st2common/st2common/services/packs.py index 6633fbefde..cdaff7704c 100644 --- a/st2common/st2common/services/packs.py +++ b/st2common/st2common/services/packs.py @@ -83,6 +83,7 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): if proxy_config: https_proxy = proxy_config.get("https_proxy", None) http_proxy = proxy_config.get("http_proxy", None) + no_proxy = proxy_config.get("no_proxy", None) ca_bundle_path = proxy_config.get("proxy_ca_bundle_path", None) if https_proxy: @@ -92,6 +93,9 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): if http_proxy: proxies_dict["http"] = http_proxy + if no_proxy: + proxies_dict["no"] = no_proxy + for index_url in index_urls: index_status = { "url": index_url, From 300f09a66b9fe7e8111a9d650cd0d3d8106863e6 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Thu, 16 Dec 2021 16:08:46 -0600 Subject: [PATCH 2/9] Updated changelog --- CHANGELOG.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a787a87496..7a3c4f79f7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -25,6 +25,10 @@ Fixed * Fix ``st2-self-check`` script reporting falsey success when the nested workflows runs failed. 
#5487 +* Fixed issue where pack index searches are ignoring no_proxy #5497 + + Contributed by @minsis + 3.6.0 - October 29, 2021 ------------------------ From 8ad5e5dcc4f7fa14c6ab5f1aae5faf240f389d07 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Sat, 5 Feb 2022 11:58:25 -0600 Subject: [PATCH 3/9] work around for requests bug --- st2common/st2common/services/packs.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/st2common/st2common/services/packs.py b/st2common/st2common/services/packs.py index cdaff7704c..ba7bc58b5d 100644 --- a/st2common/st2common/services/packs.py +++ b/st2common/st2common/services/packs.py @@ -20,6 +20,7 @@ import os import requests +from requests.utils import should_bypass_proxies import six from six.moves import range from oslo_config import cfg @@ -97,6 +98,13 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): proxies_dict["no"] = no_proxy for index_url in index_urls: + + # TODO: + # Bug in requests doesn't bypass proxies, so we do it ourselves + # If this issue ever gets fixed then we can remove it + # https://github.com/psf/requests/issues/4871 + bypass_proxy = should_bypass_proxies(index_url, proxies_dict["no"]) + index_status = { "url": index_url, "packs": 0, @@ -106,7 +114,11 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): index_json = None try: - request = requests.get(index_url, proxies=proxies_dict, verify=verify) + request = requests.get( + index_url, + proxies=proxies_dict if not bypass_proxy else None, + verify=verify if not bypass_proxy else None + ) request.raise_for_status() index_json = request.json() except ValueError as e: From 3d4f538f8dc25a932ddd32157d8cd428b6c44a6c Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Sat, 5 Feb 2022 12:14:03 -0600 Subject: [PATCH 4/9] merge from upstream --- .circleci/config.yml | 14 +- .github/workflows/checks.yaml | 25 ++ .github/workflows/ci.yaml | 23 +- .github/workflows/microbenchmarks.yaml 
| 4 +- .../workflows/orquesta-integration-tests.yaml | 9 +- CHANGELOG.rst | 59 +++++ Makefile | 1 + conf/st2.conf.sample | 6 + contrib/linux/actions/service.py | 3 +- .../orquesta_runner/tests/unit/test_cancel.py | 1 + .../tests/unit/test_pause_and_resume.py | 2 + fixed-requirements.txt | 4 +- requirements.txt | 2 + st2actions/st2actions/cmd/actionrunner.py | 8 +- st2actions/st2actions/cmd/workflow_engine.py | 9 +- st2actions/st2actions/worker.py | 30 +++ st2actions/tests/unit/test_worker.py | 201 ++++++++++++++++ st2api/st2api/controllers/v1/action_views.py | 28 ++- st2api/st2api/controllers/v1/webhooks.py | 1 + .../unit/controllers/v1/test_action_views.py | 14 +- .../unit/controllers/v1/test_webhooks.py | 27 +++ st2client/in-requirements.txt | 3 + st2client/requirements.txt | 2 + st2client/st2client/commands/policy.py | 27 +-- st2client/tests/unit/test_shell.py | 15 ++ st2common/bin/st2ctl | 2 +- st2common/in-requirements.txt | 1 + st2common/requirements.txt | 1 + st2common/st2common/config.py | 22 ++ st2common/st2common/openapi.yaml | 6 +- st2common/st2common/openapi.yaml.j2 | 6 +- st2common/st2common/operators.py | 104 ++++---- st2common/st2common/services/action.py | 34 ++- st2common/st2common/services/executions.py | 2 +- st2common/st2common/services/workflows.py | 2 +- st2common/st2common/transport/consumers.py | 3 + st2common/st2common/util/param.py | 33 +-- st2common/tests/unit/test_operators.py | 225 ++++++++++++++++++ st2common/tests/unit/test_param_utils.py | 20 ++ st2reactor/st2reactor/rules/filter.py | 4 +- st2reactor/tests/unit/test_filter.py | 22 ++ st2tests/st2tests/mocks/datastore.py | 10 +- test-requirements.txt | 8 +- 43 files changed, 873 insertions(+), 150 deletions(-) create mode 100644 .github/workflows/checks.yaml diff --git a/.circleci/config.yml b/.circleci/config.yml index 4e1c4d4105..dc8d4debd0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -127,9 +127,7 @@ jobs: - setup_remote_docker: reusable: true # default - false 
exclusive: true # default - true - # Temporary workaround for Circle CI issue - # https://discuss.circleci.com/t/setup-remote-docker-connection-failures/26434 - version: 18.05.0-ce + version: 19.03.14 - run: name: Docker version command: | @@ -176,7 +174,7 @@ jobs: docker cp . st2-packages-vol:${ST2_GITDIR} - run: name: Pull dependent Docker Images - command: .circle/docker-compose2.sh pull ${DISTRO} + command: .circle/docker-compose2.sh pull ${DISTRO} || .circle/docker-compose2.sh pull ${DISTRO} working_directory: ~/st2-packages - run: name: Build the ${DISTRO} Packages @@ -186,14 +184,6 @@ jobs: mkdir -p ~/st2/packages/${DISTRO}/log/ docker cp st2-packages-vol:/root/build/. ~/st2/packages/${DISTRO} working_directory: ~/st2-packages -# # TODO: It works! (~0.5-1min speed-up) Enable CircleCI2.0 cache for pip and wheelhouse later -# - run: -# name: Build the ${DISTRO} Packages 2nd time (compare with pip/wheelhouse cached) -# command: | -# .circle/docker-compose2.sh build ${DISTRO} -# # Once build container finishes we can copy packages directly from it -# docker cp st2-packages-vol:/root/build /tmp/st2-packages -# working_directory: ~/st2-packages - run: name: Test the Packages command: .circle/docker-compose2.sh test ${DISTRO} diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml new file mode 100644 index 0000000000..976fa5cb05 --- /dev/null +++ b/.github/workflows/checks.yaml @@ -0,0 +1,25 @@ +name: Checks + +on: + pull_request: + types: [assigned, opened, synchronize, reopened, labeled, unlabeled] + branches: + - master + - v[0-9]+.[0-9]+ + +jobs: + # Changelog checker will verify if CHANGELOG.rst was updated for every PR + # See: https://keepachangelog.com/en/1.0.0/ + changelog-checker: + name: Add CHANGELOG.rst + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Changelog check + # https://github.com/marketplace/actions/changelog-checker + uses: Zomzog/changelog-checker@v1.2.0 + with: + fileName: CHANGELOG.rst + 
checkNotification: Simple + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index ec135060be..a5d41d3875 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -21,6 +21,9 @@ on: - cron: '0 0 * * *' jobs: + # TODO: Fix the required checks! + # When the pre_job triggers and skips builds, it prevents merging the PR because + # the required checks are reported as skipped instead of passed. # Special job which automatically cancels old runs for the same branch, prevents runs for the # same file set which has already passed, etc. pre_job: @@ -40,7 +43,7 @@ jobs: needs: pre_job # NOTE: We always want to run job on master since we run some additional checks there (code # coverage, etc) - if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} + # if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} name: '${{ matrix.name }} - Python ${{ matrix.python-version-short }}' runs-on: ubuntu-latest strategy: @@ -92,9 +95,9 @@ jobs: # TODO: maybe make the virtualenv a partial cache to exclude st2*? # !virtualenv/lib/python*/site-packages/st2* # !virtualenv/bin/st2* - key: ${{ runner.os }}-v3-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} + key: ${{ runner.os }}-v4-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} restore-keys: | - ${{ runner.os }}-v2-python-${{ matrix.python }}- + ${{ runner.os }}-v4-python-${{ matrix.python }}- - name: Cache APT Dependencies id: cache-apt-deps uses: actions/cache@v2 @@ -135,7 +138,8 @@ jobs: needs: pre_job # NOTE: We always want to run job on master since we run some additional checks there (code # coverage, etc) - if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} + # NB: disabled. 
See TODO above pre_job + # if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} name: '${{ matrix.name }} - Python ${{ matrix.python-version-short }}' runs-on: ubuntu-latest strategy: @@ -233,9 +237,9 @@ jobs: # TODO: maybe make the virtualenv a partial cache to exclude st2*? # !virtualenv/lib/python*/site-packages/st2* # !virtualenv/bin/st2* - key: ${{ runner.os }}-v3-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} + key: ${{ runner.os }}-v4-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} restore-keys: | - ${{ runner.os }}-python-${{ matrix.python }}- + ${{ runner.os }}-v4-python-${{ matrix.python }}- - name: Cache APT Dependencies id: cache-apt-deps uses: actions/cache@v2 @@ -304,7 +308,7 @@ jobs: needs: pre_job # NOTE: We always want to run job on master since we run some additional checks there (code # coverage, etc) - if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} + # if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} name: '${{ matrix.name }} - Python ${{ matrix.python-version-short }}' runs-on: ubuntu-latest strategy: @@ -428,6 +432,7 @@ jobs: # GitHub is juggling how to set vars for multiple shells. Protect our PATH assumptions. PATH: /home/runner/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + steps: - name: Checkout repository uses: actions/checkout@v2 @@ -448,9 +453,9 @@ jobs: # TODO: maybe make the virtualenv a partial cache to exclude st2*? 
# !virtualenv/lib/python*/site-packages/st2* # !virtualenv/bin/st2* - key: ${{ runner.os }}-v3-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} + key: ${{ runner.os }}-v4-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} restore-keys: | - ${{ runner.os }}-python-${{ matrix.python }}- + ${{ runner.os }}-v4-python-${{ matrix.python }}- - name: Cache APT Dependencies id: cache-apt-deps uses: actions/cache@v2 diff --git a/.github/workflows/microbenchmarks.yaml b/.github/workflows/microbenchmarks.yaml index 7480c13b3a..674985fe28 100644 --- a/.github/workflows/microbenchmarks.yaml +++ b/.github/workflows/microbenchmarks.yaml @@ -86,9 +86,9 @@ jobs: ~/.cache/pip virtualenv ~/virtualenv - key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} + key: ${{ runner.os }}-v4-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} restore-keys: | - ${{ runner.os }}-python-${{ matrix.python }}- + ${{ runner.os }}-v4-python-${{ matrix.python }}- - name: Cache APT Dependencies id: cache-apt-deps uses: actions/cache@v2 diff --git a/.github/workflows/orquesta-integration-tests.yaml b/.github/workflows/orquesta-integration-tests.yaml index a7733b6512..1e083e62a9 100644 --- a/.github/workflows/orquesta-integration-tests.yaml +++ b/.github/workflows/orquesta-integration-tests.yaml @@ -25,6 +25,9 @@ on: - cron: '0 0 * * *' jobs: + # TODO: Fix the required checks! + # When the pre_job triggers and skips builds, it prevents merging the PR because + # the required checks are reported as skipped instead of passed. # Special job which automatically cancels old runs for the same branch, prevents runs for the # same file set which has already passed, etc. 
pre_job: @@ -43,7 +46,7 @@ jobs: needs: pre_job # NOTE: We always want to run job on master since we run some additional checks there (code # coverage, etc) - if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} + # if: ${{ needs.pre_job.outputs.should_skip != 'true' || github.ref == 'refs/heads/master' }} name: '${{ matrix.name }} - Python ${{ matrix.python-version-short }}' runs-on: ubuntu-latest strategy: @@ -139,9 +142,9 @@ jobs: # TODO: maybe make the virtualenv a partial cache to exclude st2*? # !virtualenv/lib/python*/site-packages/st2* # !virtualenv/bin/st2* - key: ${{ runner.os }}-v3-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} + key: ${{ runner.os }}-v4-python-${{ matrix.python-version }}-${{ hashFiles('requirements.txt', 'test-requirements.txt') }} restore-keys: | - ${{ runner.os }}-python-${{ matrix.python }}- + ${{ runner.os }}-v4-python-${{ matrix.python }}- - name: Cache APT Dependencies id: cache-apt-deps uses: actions/cache@v2 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 789bbebf84..61c7d46d6f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,9 +4,38 @@ Changelog in development -------------- +Fixed +~~~~~ + +* Fix Type error for ``time_diff`` critera comparison. convert the timediff value as float to match + ``timedelta.total_seconds()`` return. #5462 + + Contributed by @blackstrip + +* Fix issue with pack option not working when running policy list cli #5534 + + Contributed by @momokuri-3 + +* Fix exception thrown if action parameter contains {{ or {% and no closing jinja characters. #5556 + + contributed by @guzzijones12 + Added ~~~~~ +* Minor updates for RockyLinux. #5552 + Contributed by Amanda McGuinness (@amanda11 intive) + +* Added st2 API get action parameters by ref. #5509 + + API endpoint ``/api/v1/actions/views/parameters/{action_id}`` accepts ``ref_or_id``. + + Contributed by @DavidMeu + +* Enable setting ttl for MockDatastoreService. 
#5468 + + Contributed by @ytjohn + * Added st2 API and CLI command for actions clone operation. API endpoint ``/api/v1/actions/{ref_or_id}/clone`` takes ``ref_or_id`` of source action. @@ -30,6 +59,32 @@ Added Contributed by @khushboobhatia01 +* Added pysocks python package for SOCKS proxy support. #5460 + + Contributed by @kingsleyadam + +* Added support for multiple LDAP hosts to st2-auth-ldap. #5535, https://github.com/StackStorm/st2-auth-ldap/pull/100 + + Contributed by @ktyogurt + +* Implemented graceful shutdown for action runner. Enabled ``graceful_shutdown`` in ``st2.conf`` file. #5428 + + Contributed by @khushboobhatia01 + +* Enhanced 'search' operator to allow complex criteria matching on payload items. #5482 + + Contributed by @erceth + +* Added cancel/pause/resume requester information to execution context. #5554 + + Contributed by @khushboobhatia01 + +* Added `trigger.headers_lower` to webhook trigger payload. This allows rules to match webhook triggers + without dealing with the case-sensitive nature of `trigger.headers`, as `triggers.headers_lower` providers + the same headers, but with the header name lower cased. #5038 + + Contributed by @Rand01ph + Fixed ~~~~~ @@ -43,6 +98,10 @@ Fixed Contributed by @minsis +* Use byte type lock name which is supported by all tooz drivers. 
#5529 + + Contributed by @khushboobhatia01 + 3.6.0 - October 29, 2021 ------------------------ diff --git a/Makefile b/Makefile index 9b43e88c56..f22d8d0e02 100644 --- a/Makefile +++ b/Makefile @@ -269,6 +269,7 @@ check-python-packages-nightly: @echo "" test -f $(VIRTUALENV_COMPONENTS_DIR)/bin/activate || $(PYTHON_VERSION) -m venv $(VIRTUALENV_COMPONENTS_DIR) --system-site-packages + $(VIRTUALENV_COMPONENTS_DIR)/bin/pip install wheel @for component in $(COMPONENTS_WITHOUT_ST2TESTS); do \ echo "==========================================================="; \ echo "Checking component:" $$component; \ diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index 62e1e00f6d..9009cd0199 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -10,6 +10,10 @@ enable = True [actionrunner] # Internal pool size for dispatcher used by regular actions. actions_pool_size = 60 +# How long to wait for process (in seconds) to exit after receiving shutdown signal. +exit_still_active_check = 300 +# This will enable the graceful shutdown and wait for ongoing requests to complete until exit_timeout. +graceful_shutdown = True # location of the logging.conf file logging = /etc/st2/logging.actionrunner.conf # List of pip options to be passed to "pip install" command when installing pack dependencies into pack virtual environment. @@ -18,6 +22,8 @@ pip_opts = # comma separated list allowed here. python_binary = /usr/bin/python # Default log level to use for Python runner actions. Can be overriden on invocation basis using "log_level" runner parameter. python_runner_log_level = DEBUG +# Time interval between subsequent queries to check running executions. +still_active_check_interval = 2 # True to store and stream action output (stdout and stderr) in real-time. stream_output = True # Buffer size to use for real time action output streaming. 
0 means unbuffered 1 means line buffered, -1 means system default, which usually means fully buffered and any other positive value means use a buffer of (approximately) that size diff --git a/contrib/linux/actions/service.py b/contrib/linux/actions/service.py index 70db65773b..0226adeef7 100644 --- a/contrib/linux/actions/service.py +++ b/contrib/linux/actions/service.py @@ -69,7 +69,8 @@ def get_linux_distribution(): elif ( re.search(distro, "Redhat") or re.search(distro, "Fedora") - or re.search(distro, "CentOS Linux") + or re.search(distro, "CentOS") + or re.search(distro, "Rocky Linux") ): cmd_args = ["systemctl", args["act"], args["service"]] diff --git a/contrib/runners/orquesta_runner/tests/unit/test_cancel.py b/contrib/runners/orquesta_runner/tests/unit/test_cancel.py index b49fd0f77b..419ff72a0c 100644 --- a/contrib/runners/orquesta_runner/tests/unit/test_cancel.py +++ b/contrib/runners/orquesta_runner/tests/unit/test_cancel.py @@ -118,6 +118,7 @@ def test_cancel(self): lv_ac_db, ac_ex_db = ac_svc.request_cancellation(lv_ac_db, requester) lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id)) self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING) + self.assertEqual(lv_ac_db.context["cancelled_by"], requester) def test_cancel_workflow_cascade_down_to_subworkflow(self): wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "subworkflow.yaml") diff --git a/contrib/runners/orquesta_runner/tests/unit/test_pause_and_resume.py b/contrib/runners/orquesta_runner/tests/unit/test_pause_and_resume.py index 6ade390029..984887b907 100644 --- a/contrib/runners/orquesta_runner/tests/unit/test_pause_and_resume.py +++ b/contrib/runners/orquesta_runner/tests/unit/test_pause_and_resume.py @@ -118,6 +118,7 @@ def test_pause(self): lv_ac_db, ac_ex_db = ac_svc.request_pause(lv_ac_db, cfg.CONF.system_user.user) lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id)) self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_PAUSING) + 
self.assertEqual(lv_ac_db.context["paused_by"], cfg.CONF.system_user.user) @mock.patch.object(ac_svc, "is_children_active", mock.MagicMock(return_value=True)) def test_pause_with_active_children(self): @@ -525,6 +526,7 @@ def test_resume(self): workflow_execution=str(wf_ex_dbs[0].id) ) self.assertEqual(len(tk_ex_dbs), 2) + self.assertEqual(lv_ac_db.context["resumed_by"], cfg.CONF.system_user.user) def test_resume_cascade_to_subworkflow(self): wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, "subworkflow.yaml") diff --git a/fixed-requirements.txt b/fixed-requirements.txt index 5f85842837..bd8114df22 100644 --- a/fixed-requirements.txt +++ b/fixed-requirements.txt @@ -35,12 +35,13 @@ oslo.config>=1.12.1,<1.13 oslo.utils<5.0,>=4.0.0 paramiko==2.7.2 passlib==1.7.4 -prance==0.9.0 prompt-toolkit==1.0.15 pyinotify==0.9.6; platform_system=="Linux" pymongo==3.11.3 pyparsing<3 zstandard==0.15.2 +# pyOpenSSL 22.0.0 requires cryptography>=35.0 +pyOpenSSL<=21.0.0 python-editor==1.0.4 python-keyczar==0.716 pytz==2021.1 @@ -74,6 +75,5 @@ nose-parallel==0.4.0 psutil==5.8.0 python-dateutil==2.8.1 python-statsd==2.1.0 -ujson==1.35 orjson==3.5.2 udatetime==0.0.16 diff --git a/requirements.txt b/requirements.txt index aa495cf67c..6dd6d81ef7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,10 +46,12 @@ passlib==1.7.4 prettytable==2.1.0 prompt-toolkit==1.0.15 psutil==5.8.0 +pyOpenSSL<=21.0.0 pyinotify==0.9.6; platform_system=="Linux" pymongo==3.11.3 pyparsing<3 pyrabbit +pysocks python-dateutil==2.8.1 python-editor==1.0.4 python-json-logger diff --git a/st2actions/st2actions/cmd/actionrunner.py b/st2actions/st2actions/cmd/actionrunner.py index 76743ab707..05f48f6464 100644 --- a/st2actions/st2actions/cmd/actionrunner.py +++ b/st2actions/st2actions/cmd/actionrunner.py @@ -38,10 +38,10 @@ ACTIONRUNNER = "actionrunner" -def _setup_sigterm_handler(): +def _setup_sigterm_handler(action_worker): def sigterm_handler(signum=None, frame=None): # This will cause SystemExit to be 
throw and allow for component cleanup. - sys.exit(0) + action_worker.kill() # Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to # be thrown. We catch SystemExit and handle cleanup there. @@ -60,14 +60,12 @@ def _setup(): capabilities=capabilities, ) - _setup_sigterm_handler() - def _run_worker(): LOG.info("(PID=%s) Worker started.", os.getpid()) action_worker = worker.get_worker() - + _setup_sigterm_handler(action_worker) try: action_worker.start() action_worker.wait() diff --git a/st2actions/st2actions/cmd/workflow_engine.py b/st2actions/st2actions/cmd/workflow_engine.py index dba392f100..e6eb65d5a8 100644 --- a/st2actions/st2actions/cmd/workflow_engine.py +++ b/st2actions/st2actions/cmd/workflow_engine.py @@ -40,10 +40,10 @@ WORKFLOW_ENGINE = "workflow_engine" -def setup_sigterm_handler(): +def setup_sigterm_handler(engine): def sigterm_handler(signum=None, frame=None): # This will cause SystemExit to be throw and allow for component cleanup. - sys.exit(0) + engine.kill() # Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to # be thrown. We catch SystemExit and handle cleanup there. 
@@ -62,14 +62,12 @@ def setup(): capabilities=capabilities, ) - setup_sigterm_handler() - def run_server(): LOG.info("(PID=%s) Workflow engine started.", os.getpid()) engine = workflows.get_engine() - + setup_sigterm_handler(engine) try: engine.start(wait=True) except (KeyboardInterrupt, SystemExit): @@ -79,7 +77,6 @@ def run_server(): except: LOG.exception("(PID=%s) Workflow engine unexpectedly stopped.", os.getpid()) return 1 - return 0 diff --git a/st2actions/st2actions/worker.py b/st2actions/st2actions/worker.py index 1741d60724..30af0d56a7 100644 --- a/st2actions/st2actions/worker.py +++ b/st2actions/st2actions/worker.py @@ -17,6 +17,9 @@ import sys import traceback +from tooz.coordination import GroupNotCreated +from oslo_config import cfg + from st2actions.container.base import RunnerContainer from st2common import log as logging from st2common.constants import action as action_constants @@ -24,12 +27,14 @@ from st2common.exceptions.db import StackStormDBObjectNotFoundError from st2common.models.db.liveaction import LiveActionDB from st2common.persistence.execution import ActionExecution +from st2common.services import coordination from st2common.services import executions from st2common.services import workflows as wf_svc from st2common.transport.consumers import MessageHandler from st2common.transport.consumers import ActionsQueueConsumer from st2common.transport import utils as transport_utils from st2common.util import action_db as action_utils +from st2common.util import concurrency from st2common.util import system_info from st2common.transport import queues @@ -134,7 +139,32 @@ def process(self, liveaction): def shutdown(self): super(ActionExecutionDispatcher, self).shutdown() + + if cfg.CONF.actionrunner.graceful_shutdown: + + coordinator = coordination.get_coordinator() + member_ids = [] + service = "actionrunner" + exit_timeout = cfg.CONF.actionrunner.exit_still_active_check + sleep_delay = cfg.CONF.actionrunner.still_active_check_interval + 
timeout = 0 + + while timeout < exit_timeout and self._running_liveactions: + try: + member_ids = list( + coordinator.get_members(service.encode("utf-8")).get() + ) + except GroupNotCreated: + pass + + # Check if there are other runners in service registry + if not member_ids: + break + timeout += sleep_delay + concurrency.sleep(sleep_delay) + # Abandon running executions if incomplete + while self._running_liveactions: liveaction_id = self._running_liveactions.pop() try: diff --git a/st2actions/tests/unit/test_worker.py b/st2actions/tests/unit/test_worker.py index d8637b9ac7..0cf4d730f7 100644 --- a/st2actions/tests/unit/test_worker.py +++ b/st2actions/tests/unit/test_worker.py @@ -28,6 +28,7 @@ from st2common.persistence.execution import ActionExecution from st2common.persistence.liveaction import LiveAction from st2common.services import executions +from st2common.services import coordination from st2common.util import date as date_utils from st2common.bootstrap import runnersregistrar as runners_registrar from local_runner.local_shell_command_runner import LocalShellCommandRunner @@ -116,6 +117,9 @@ def test_non_utf8_action_result_string(self): ) def test_worker_shutdown(self): + cfg.CONF.set_override( + name="graceful_shutdown", override=False, group="actionrunner" + ) action_worker = actions_worker.get_worker() temp_file = None @@ -164,3 +168,200 @@ def test_worker_shutdown(self): # _run_action but will not result in KeyError because the discard method is used to # to remove the liveaction from _running_liveactions. 
runner_thread.wait() + + @mock.patch.object( + coordination.NoOpDriver, + "get_members", + mock.MagicMock(return_value=coordination.NoOpAsyncResult("member-1")), + ) + def test_worker_graceful_shutdown_with_multiple_runners(self): + cfg.CONF.set_override( + name="graceful_shutdown", override=True, group="actionrunner" + ) + action_worker = actions_worker.get_worker() + temp_file = None + + # Create a temporary file that is deleted when the file is closed and then set up an + # action to wait for this file to be deleted. This allows this test to run the action + # over a separate thread, run the shutdown sequence on the main thread, and then let + # the local runner to exit gracefully and allow _run_action to finish execution. + with tempfile.NamedTemporaryFile() as fp: + temp_file = fp.name + self.assertIsNotNone(temp_file) + self.assertTrue(os.path.isfile(temp_file)) + + # Launch the action execution in a separate thread. + params = {"cmd": "while [ -e '%s' ]; do sleep 0.1; done" % temp_file} + liveaction_db = self._get_liveaction_model( + WorkerTestCase.local_action_db, params + ) + liveaction_db = LiveAction.add_or_update(liveaction_db) + executions.create_execution_object(liveaction_db) + runner_thread = eventlet.spawn(action_worker._run_action, liveaction_db) + + # Wait for the worker up to 10s to add the liveaction to _running_liveactions. + for i in range(0, int(10 / 0.1)): + eventlet.sleep(0.1) + if len(action_worker._running_liveactions) > 0: + break + + self.assertEqual(len(action_worker._running_liveactions), 1) + + # Shutdown the worker to trigger the abandon process. + shutdown_thread = eventlet.spawn(action_worker.shutdown) + + # Make sure the temporary file has been deleted. + self.assertFalse(os.path.isfile(temp_file)) + + # Wait for the worker up to 10s to remove the liveaction from _running_liveactions. 
+ for i in range(0, int(10 / 0.1)): + eventlet.sleep(0.1) + if len(action_worker._running_liveactions) < 1: + break + liveaction_db = LiveAction.get_by_id(liveaction_db.id) + + # Verify that _running_liveactions is empty and the liveaction is succeeded. + self.assertEqual(len(action_worker._running_liveactions), 0) + self.assertEqual( + liveaction_db.status, + action_constants.LIVEACTION_STATUS_SUCCEEDED, + str(liveaction_db), + ) + + # Wait for the local runner to complete. This will activate the finally block in + # _run_action but will not result in KeyError because the discard method is used to + # to remove the liveaction from _running_liveactions. + runner_thread.wait() + shutdown_thread.kill() + + def test_worker_graceful_shutdown_with_single_runner(self): + cfg.CONF.set_override( + name="graceful_shutdown", override=True, group="actionrunner" + ) + action_worker = actions_worker.get_worker() + temp_file = None + + # Create a temporary file that is deleted when the file is closed and then set up an + # action to wait for this file to be deleted. This allows this test to run the action + # over a separate thread, run the shutdown sequence on the main thread, and then let + # the local runner to exit gracefully and allow _run_action to finish execution. + with tempfile.NamedTemporaryFile() as fp: + temp_file = fp.name + self.assertIsNotNone(temp_file) + self.assertTrue(os.path.isfile(temp_file)) + + # Launch the action execution in a separate thread. + params = {"cmd": "while [ -e '%s' ]; do sleep 0.1; done" % temp_file} + liveaction_db = self._get_liveaction_model( + WorkerTestCase.local_action_db, params + ) + liveaction_db = LiveAction.add_or_update(liveaction_db) + executions.create_execution_object(liveaction_db) + runner_thread = eventlet.spawn(action_worker._run_action, liveaction_db) + + # Wait for the worker up to 10s to add the liveaction to _running_liveactions. 
+ for i in range(0, int(10 / 0.1)): + eventlet.sleep(0.1) + if len(action_worker._running_liveactions) > 0: + break + + self.assertEqual(len(action_worker._running_liveactions), 1) + + # Shutdown the worker to trigger the abandon process. + shutdown_thread = eventlet.spawn(action_worker.shutdown) + # Wait for action runner shutdown sequence to complete + eventlet.sleep(5) + + # Make sure the temporary file has been deleted. + self.assertFalse(os.path.isfile(temp_file)) + + # Wait for the worker up to 10s to remove the liveaction from _running_liveactions. + for i in range(0, int(10 / 0.1)): + eventlet.sleep(0.1) + if len(action_worker._running_liveactions) < 1: + break + liveaction_db = LiveAction.get_by_id(liveaction_db.id) + + # Verify that _running_liveactions is empty and the liveaction is abandoned. + self.assertEqual(len(action_worker._running_liveactions), 0) + self.assertEqual( + liveaction_db.status, + action_constants.LIVEACTION_STATUS_ABANDONED, + str(liveaction_db), + ) + + # Wait for the local runner to complete. This will activate the finally block in + # _run_action but will not result in KeyError because the discard method is used to + # to remove the liveaction from _running_liveactions. + runner_thread.wait() + shutdown_thread.kill() + + @mock.patch.object( + coordination.NoOpDriver, + "get_members", + mock.MagicMock(return_value=coordination.NoOpAsyncResult("member-1")), + ) + def test_worker_graceful_shutdown_exit_timeout(self): + cfg.CONF.set_override( + name="graceful_shutdown", override=True, group="actionrunner" + ) + cfg.CONF.set_override( + name="exit_still_active_check", override=5, group="actionrunner" + ) + action_worker = actions_worker.get_worker() + temp_file = None + + # Create a temporary file that is deleted when the file is closed and then set up an + # action to wait for this file to be deleted. 
This allows this test to run the action + # over a separate thread, run the shutdown sequence on the main thread, and then let + # the local runner to exit gracefully and allow _run_action to finish execution. + with tempfile.NamedTemporaryFile() as fp: + temp_file = fp.name + self.assertIsNotNone(temp_file) + self.assertTrue(os.path.isfile(temp_file)) + + # Launch the action execution in a separate thread. + params = {"cmd": "while [ -e '%s' ]; do sleep 0.1; done" % temp_file} + liveaction_db = self._get_liveaction_model( + WorkerTestCase.local_action_db, params + ) + liveaction_db = LiveAction.add_or_update(liveaction_db) + executions.create_execution_object(liveaction_db) + runner_thread = eventlet.spawn(action_worker._run_action, liveaction_db) + + # Wait for the worker up to 10s to add the liveaction to _running_liveactions. + for i in range(0, int(10 / 0.1)): + eventlet.sleep(0.1) + if len(action_worker._running_liveactions) > 0: + break + + self.assertEqual(len(action_worker._running_liveactions), 1) + + # Shutdown the worker to trigger the abandon process. + shutdown_thread = eventlet.spawn(action_worker.shutdown) + # Continue the execution for 5+ seconds to ensure timeout occurs. + eventlet.sleep(6) + + # Make sure the temporary file has been deleted. + self.assertFalse(os.path.isfile(temp_file)) + + # Wait for the worker up to 10s to remove the liveaction from _running_liveactions. + for i in range(0, int(10 / 0.1)): + eventlet.sleep(0.1) + if len(action_worker._running_liveactions) < 1: + break + liveaction_db = LiveAction.get_by_id(liveaction_db.id) + + # Verify that _running_liveactions is empty and the liveaction is abandoned. + self.assertEqual(len(action_worker._running_liveactions), 0) + self.assertEqual( + liveaction_db.status, + action_constants.LIVEACTION_STATUS_ABANDONED, + str(liveaction_db), + ) + + # Wait for the local runner to complete.
This will activate the finally block in + # _run_action but will not result in KeyError because the discard method is used to + # to remove the liveaction from _running_liveactions. + runner_thread.wait() + shutdown_thread.kill() diff --git a/st2api/st2api/controllers/v1/action_views.py b/st2api/st2api/controllers/v1/action_views.py index 2e528b5b13..ca6afaae4c 100644 --- a/st2api/st2api/controllers/v1/action_views.py +++ b/st2api/st2api/controllers/v1/action_views.py @@ -26,6 +26,7 @@ from st2common.content import utils from st2common.models.api.action import ActionAPI from st2common.models.utils import action_param_utils +from st2common.models.system.common import ResourceReference from st2common.persistence.action import Action from st2common.persistence.runner import RunnerType from st2common.rbac.types import PermissionType @@ -50,6 +51,18 @@ def _get_action_by_id(id): LOG.exception(msg) abort(http_client.NOT_FOUND, msg) + @staticmethod + def _get_action_by_ref(ref): + try: + action_db = Action.get_by_ref(ref) + if not action_db: + raise ValueError('Referenced action "%s" doesnt exist' % (ref)) + return action_db + except Exception as e: + msg = 'Database lookup for ref="%s" resulted in exception. %s' % (ref, e) + LOG.exception(msg) + abort(http_client.NOT_FOUND, msg) + @staticmethod def _get_runner_by_id(id): try: @@ -70,18 +83,21 @@ def _get_runner_by_name(name): class ParametersViewController(object): - def get_one(self, action_id, requester_user): - return self._get_one(action_id, requester_user=requester_user) + def get_one(self, ref_or_id, requester_user): + return self._get_one(ref_or_id, requester_user=requester_user) @staticmethod - def _get_one(action_id, requester_user): + def _get_one(ref_or_id, requester_user): """ - List merged action & runner parameters by action id. + List merged action & runner parameters by action id or ref. 
Handle: GET /actions/views/parameters/1 """ - action_db = LookupUtils._get_action_by_id(action_id) + if ResourceReference.is_resource_reference(ref_or_id): + action_db = LookupUtils._get_action_by_ref(ref_or_id) + else: + action_db = LookupUtils._get_action_by_id(ref_or_id) permission_type = PermissionType.ACTION_VIEW rbac_utils = get_rbac_backend().get_utils_class() @@ -193,7 +209,7 @@ def get_all( def _transform_action_api(action_api, requester_user): action_id = action_api.id result = ParametersViewController._get_one( - action_id=action_id, requester_user=requester_user + ref_or_id=action_id, requester_user=requester_user ) action_api.parameters = result.get("parameters", {}) return action_api diff --git a/st2api/st2api/controllers/v1/webhooks.py b/st2api/st2api/controllers/v1/webhooks.py index be0bb75dd0..548f73ba27 100644 --- a/st2api/st2api/controllers/v1/webhooks.py +++ b/st2api/st2api/controllers/v1/webhooks.py @@ -172,6 +172,7 @@ def post(self, hook, webhook_body_api, headers, requester_user): payload = {} payload["headers"] = headers + payload["headers_lower"] = {k.lower(): v for k, v in headers.items()} payload["body"] = body # Dispatch trigger instance for each of the trigger found diff --git a/st2api/tests/unit/controllers/v1/test_action_views.py b/st2api/tests/unit/controllers/v1/test_action_views.py index a28219c04d..bbc76e3760 100644 --- a/st2api/tests/unit/controllers/v1/test_action_views.py +++ b/st2api/tests/unit/controllers/v1/test_action_views.py @@ -199,7 +199,7 @@ class ActionViewsParametersControllerTestCase(FunctionalTest): @mock.patch.object( action_validator, "validate_action", mock.MagicMock(return_value=True) ) - def test_get_one(self): + def test_get_one_by_id(self): post_resp = self.app.post_json("/v1/actions", ACTION_1) action_id = post_resp.json["id"] try: @@ -208,6 +208,18 @@ def test_get_one(self): finally: self.app.delete("/v1/actions/%s" % action_id) + @mock.patch.object( + action_validator, "validate_action", 
mock.MagicMock(return_value=True) + ) + def test_get_one_by_ref(self): + post_resp = self.app.post_json("/v1/actions", ACTION_1) + action_ref = post_resp.json["ref"] + try: + get_resp = self.app.get("/v1/actions/views/parameters/%s" % action_ref) + self.assertEqual(get_resp.status_int, 200) + finally: + self.app.delete("/v1/actions/%s" % action_ref) + class ActionEntryPointViewControllerTestCase(FunctionalTest): @mock.patch.object( diff --git a/st2api/tests/unit/controllers/v1/test_webhooks.py b/st2api/tests/unit/controllers/v1/test_webhooks.py index 2742b2d09e..f6e17fe39f 100644 --- a/st2api/tests/unit/controllers/v1/test_webhooks.py +++ b/st2api/tests/unit/controllers/v1/test_webhooks.py @@ -388,6 +388,33 @@ def test_authentication_headers_should_be_removed(self, dispatch_mock): ) self.assertNotIn("Cookie", dispatch_mock.call_args[1]["payload"]["headers"]) + @mock.patch.object( + TriggerInstancePublisher, "publish_trigger", mock.MagicMock(return_value=True) + ) + @mock.patch.object( + WebhooksController, "_is_valid_hook", mock.MagicMock(return_value=True) + ) + @mock.patch.object( + HooksHolder, + "get_triggers_for_hook", + mock.MagicMock(return_value=[DUMMY_TRIGGER_DICT]), + ) + @mock.patch("st2common.transport.reactor.TriggerDispatcher.dispatch") + def test_st2_webhook_lower_header(self, dispatch_mock): + data = WEBHOOK_1 + post_resp = self.__do_post( + "git", data, headers={"X-Github-Token": "customvalue"} + ) + self.assertEqual(post_resp.status_int, http_client.ACCEPTED) + self.assertEqual( + dispatch_mock.call_args[1]["payload"]["headers"]["X-Github-Token"], + "customvalue", + ) + self.assertEqual( + dispatch_mock.call_args[1]["payload"]["headers_lower"]["x-github-token"], + "customvalue", + ) + def __do_post(self, hook, webhook, expect_errors=False, headers=None): return self.app.post_json( "/v1/webhooks/" + hook, diff --git a/st2client/in-requirements.txt b/st2client/in-requirements.txt index dc6b73d669..369b36c32b 100644 --- 
a/st2client/in-requirements.txt +++ b/st2client/in-requirements.txt @@ -18,3 +18,6 @@ cryptography orjson # needed by requests chardet +# required for SOCKS proxy support (HTTP_PROXY, HTTPS_PROXY, NO_PROXY) +pyOpenSSL +pysocks diff --git a/st2client/requirements.txt b/st2client/requirements.txt index 03797d7c7a..ed699899e2 100644 --- a/st2client/requirements.txt +++ b/st2client/requirements.txt @@ -15,6 +15,8 @@ jsonschema==2.6.0 orjson==3.5.2 prettytable==2.1.0 prompt-toolkit==1.0.15 +pyOpenSSL<=21.0.0 +pysocks python-dateutil==2.8.1 python-editor==1.0.4 pytz==2021.1 diff --git a/st2client/st2client/commands/policy.py b/st2client/st2client/commands/policy.py index 31d9090cfb..be9a214c84 100644 --- a/st2client/st2client/commands/policy.py +++ b/st2client/st2client/commands/policy.py @@ -109,20 +109,19 @@ def __init__(self, resource, *args, **kwargs): @resource.add_auth_token_to_kwargs_from_cli def run(self, args, **kwargs): - if args.resource_ref or args.policy_type: - filters = {} - - if args.resource_ref: - filters["resource_ref"] = args.resource_ref - - if args.policy_type: - filters["policy_type"] = args.policy_type - - filters.update(**kwargs) - - return self.manager.query(**filters) - else: - return self.manager.get_all(**kwargs) + filters = {} + if args.pack: + filters["pack"] = args.pack + if args.resource_ref: + filters["resource_ref"] = args.resource_ref + if args.policy_type: + filters["policy_type"] = args.policy_type + filters.update(**kwargs) + include_attributes = self._get_include_attributes(args=args) + if include_attributes: + include_attributes = ",".join(include_attributes) + filters["params"] = {"include_attributes": include_attributes} + return self.manager.query(**filters) class PolicyGetCommand(resource.ContentPackResourceGetCommand): diff --git a/st2client/tests/unit/test_shell.py b/st2client/tests/unit/test_shell.py index aa54839ff6..5eb27714ca 100644 --- a/st2client/tests/unit/test_shell.py +++ b/st2client/tests/unit/test_shell.py @@ 
-562,6 +562,21 @@ def test_dont_warn_multiple_times(self): shell.LOG.info.call_args_list[1][0][0], "Skipping parsing CLI config" ) + def test_policy_list_with_pack_option(self): + argv = ["policy", "list", "-p", "test"] + mock_obj = mock.MagicMock( + return_value=base.FakeResponse(json.dumps(base.RESOURCES), 200, "OK") + ) + with mock.patch.object(httpclient.HTTPClient, "get", mock_obj): + self.shell.run(argv) + self.assertEqual( + mock_obj.mock_calls[0], + mock.call( + "/policies/?include_attributes=ref%2Cresource_ref%2C" + "policy_type%2Cenabled&pack=test" + ), + ) + class CLITokenCachingTestCase(unittest2.TestCase): def setUp(self): diff --git a/st2common/bin/st2ctl b/st2common/bin/st2ctl index 4e2ff9a295..0f735aa952 100755 --- a/st2common/bin/st2ctl +++ b/st2common/bin/st2ctl @@ -25,7 +25,7 @@ SYSTEMD_RELOADED="" # load in environment to allow override of COMPONENTS and ST2_CONF above # Ubuntu/Debian [ -r /etc/default/st2ctl ] && source /etc/default/st2ctl -# RHEL/CentOS +# RHEL/CentOS/Rocky [ -r /etc/sysconfig/st2ctl ] && source /etc/sysconfig/st2ctl diff --git a/st2common/in-requirements.txt b/st2common/in-requirements.txt index 2e102a63e0..50b2218af4 100644 --- a/st2common/in-requirements.txt +++ b/st2common/in-requirements.txt @@ -37,6 +37,7 @@ routes flex webob jsonpath-rw +pyOpenSSL python-statsd udatetime orjson diff --git a/st2common/requirements.txt b/st2common/requirements.txt index 315b1031e0..ca133d3ffd 100644 --- a/st2common/requirements.txt +++ b/st2common/requirements.txt @@ -29,6 +29,7 @@ networkx>=2.5.1,<2.6 orjson==3.5.2 oslo.config>=1.12.1,<1.13 paramiko==2.7.2 +pyOpenSSL<=21.0.0 pymongo==3.11.3 python-dateutil==2.8.1 python-statsd==2.1.0 diff --git a/st2common/st2common/config.py b/st2common/st2common/config.py index 0c002a8e2f..8ad2f5d1e8 100644 --- a/st2common/st2common/config.py +++ b/st2common/st2common/config.py @@ -500,6 +500,28 @@ def register_opts(ignore_errors=False): dispatcher_pool_opts, group="actionrunner", 
ignore_errors=ignore_errors ) + graceful_shutdown_opts = [ + cfg.BoolOpt( + "graceful_shutdown", + default=True, + help="This will enable the graceful shutdown and wait for ongoing requests to complete until exit_timeout.", + ), + cfg.IntOpt( + "exit_still_active_check", + default=300, + help="How long to wait for process (in seconds) to exit after receiving shutdown signal.", + ), + cfg.IntOpt( + "still_active_check_interval", + default=2, + help="Time interval between subsequent queries to check running executions.", + ), + ] + + do_register_opts( + graceful_shutdown_opts, group="actionrunner", ignore_errors=ignore_errors + ) + ssh_runner_opts = [ cfg.StrOpt( "remote_dir", diff --git a/st2common/st2common/openapi.yaml b/st2common/st2common/openapi.yaml index edc81eb1ff..107b97a1c2 100644 --- a/st2common/st2common/openapi.yaml +++ b/st2common/st2common/openapi.yaml @@ -465,15 +465,15 @@ paths: description: Unexpected error schema: $ref: '#/definitions/Error' - /api/v1/actions/views/parameters/{action_id}: + /api/v1/actions/views/parameters/{ref_or_id}: get: operationId: st2api.controllers.v1.action_views:parameters_view_controller.get_one description: | Get parameters for an action. parameters: - - name: action_id + - name: ref_or_id in: path - description: Entity id + description: Entity reference or id type: string required: true x-parameters: diff --git a/st2common/st2common/openapi.yaml.j2 b/st2common/st2common/openapi.yaml.j2 index 1397c0d201..a54bb423f1 100644 --- a/st2common/st2common/openapi.yaml.j2 +++ b/st2common/st2common/openapi.yaml.j2 @@ -461,15 +461,15 @@ paths: description: Unexpected error schema: $ref: '#/definitions/Error' - /api/v1/actions/views/parameters/{action_id}: + /api/v1/actions/views/parameters/{ref_or_id}: get: operationId: st2api.controllers.v1.action_views:parameters_view_controller.get_one description: | Get parameters for an action. 
parameters: - - name: action_id + - name: ref_or_id in: path - description: Entity id + description: Entity reference or id type: string required: true x-parameters: diff --git a/st2common/st2common/operators.py b/st2common/st2common/operators.py index 6896e87658..f7cf6fc43f 100644 --- a/st2common/st2common/operators.py +++ b/st2common/st2common/operators.py @@ -58,8 +58,10 @@ def search(value, criteria_pattern, criteria_condition, check_function): value: the payload list to search condition: one of: - * any - return true if any items of the list match and false if none of them match - * all - return true if all items of the list match and false if any of them do not match + * any - return true if any payload items of the list match all criteria items + * all - return true if all payload items of the list match all criteria items + * all2any - return true if all payload items of the list match any criteria items + * any2any - return true if any payload items match any criteria items pattern: a dictionary of criteria to apply to each item of the list This operator has O(n) algorithmic complexity in terms of number of child patterns. @@ -86,18 +88,20 @@ def search(value, criteria_pattern, criteria_condition, check_function): ] } - And an example usage in criteria: + Example #1 --- criteria: trigger.fields: type: search # Controls whether this criteria has to match any or all items of the list - condition: any # or all + condition: any # or all or all2any or any2any pattern: # Here our context is each item of the list # All of these patterns have to match the item for the item to match # These are simply other operators applied to each item in the list + # "#" and text after are ignored. 
+ # This allows dictionary keys to be unique but refer to the same field item.field_name: type: "equals" pattern: "Status" @@ -105,59 +109,53 @@ def search(value, criteria_pattern, criteria_condition, check_function): item.to_value: type: "equals" pattern: "Approved" + + item.field_name#1: + type: "greaterthan" + pattern: 40 + + item.field_name#2: + type: "lessthan" + pattern: 50 """ + if isinstance(value, dict): + value = [value] + payloadItemMatch = all + patternMatch = all if criteria_condition == "any": - # Any item of the list can match all patterns - rtn = any( - [ - # Any payload item can match - all( - [ - # Match all patterns - check_function( - child_criterion_k, - child_criterion_v, - PayloadLookup( - child_payload, prefix=TRIGGER_ITEM_PAYLOAD_PREFIX - ), - ) - for child_criterion_k, child_criterion_v in six.iteritems( - criteria_pattern - ) - ] - ) - for child_payload in value - ] - ) - elif criteria_condition == "all": - # Every item of the list must match all patterns - rtn = all( - [ - # All payload items must match - all( - [ - # Match all patterns - check_function( - child_criterion_k, - child_criterion_v, - PayloadLookup( - child_payload, prefix=TRIGGER_ITEM_PAYLOAD_PREFIX - ), - ) - for child_criterion_k, child_criterion_v in six.iteritems( - criteria_pattern - ) - ] - ) - for child_payload in value - ] - ) - else: + payloadItemMatch = any + elif criteria_condition == "all2any": + patternMatch = any + elif criteria_condition == "any2any": + payloadItemMatch = any + patternMatch = any + elif criteria_condition != "all": raise UnrecognizedConditionError( - "The '%s' search condition is not recognized, only 'any' " - "and 'all' are allowed" % criteria_condition + "The '%s' condition is not recognized for type search, 'any', 'all', 'any2any'" + " and 'all2any' are allowed" % criteria_condition ) + rtn = payloadItemMatch( + [ + # any/all payload item can match + patternMatch( + [ + # Match any/all patterns + check_function( + child_criterion_k, + 
child_criterion_v, + PayloadLookup( + child_payload, prefix=TRIGGER_ITEM_PAYLOAD_PREFIX + ), + ) + for child_criterion_k, child_criterion_v in six.iteritems( + criteria_pattern + ) + ] + ) + for child_payload in value + ] + ) return rtn @@ -314,7 +312,7 @@ def _timediff(diff_target, period_seconds, operator): # Note: date_utils.parse uses dateutil.parse which is way more flexible then strptime and # supports many date formats diff_target_utc = date_utils.parse(diff_target) - return operator((utc_now - diff_target_utc).total_seconds(), period_seconds) + return operator((utc_now - diff_target_utc).total_seconds(), float(period_seconds)) def timediff_lt(value, criteria_pattern): diff --git a/st2common/st2common/services/action.py b/st2common/st2common/services/action.py index cb5c9ebbe6..9c026f5507 100644 --- a/st2common/st2common/services/action.py +++ b/st2common/st2common/services/action.py @@ -27,6 +27,7 @@ from st2common.persistence.workflow import TaskExecution from st2common.persistence.workflow import WorkflowExecution from st2common.models.db.execution import ActionExecutionOutputDB +from st2common.models.db.auth import UserDB from st2common.runners import utils as runners_utils from st2common.services import executions from st2common.services import trace as trace_service @@ -214,7 +215,12 @@ def request(liveaction): def update_status( - liveaction, new_status, result=None, publish=True, set_result_size=False + liveaction, + new_status, + result=None, + publish=True, + set_result_size=False, + context=None, ): if liveaction.status == new_status: return liveaction @@ -226,6 +232,7 @@ def update_status( "status": new_status, "result": result, "publish": False, + "context": context, } if new_status in action_constants.LIVEACTION_COMPLETED_STATES: @@ -304,7 +311,10 @@ def request_cancellation(liveaction, requester): else: status = action_constants.LIVEACTION_STATUS_CANCELED - liveaction = update_status(liveaction, status, result=result) + 
liveaction.context["cancelled_by"] = get_requester(requester) + liveaction = update_status( + liveaction, status, result=result, context=liveaction.context + ) execution = ActionExecution.get(liveaction__id=str(liveaction.id)) @@ -346,7 +356,12 @@ def request_pause(liveaction, requester): % liveaction.id ) - liveaction = update_status(liveaction, action_constants.LIVEACTION_STATUS_PAUSING) + liveaction.context["paused_by"] = get_requester(requester) + liveaction = update_status( + liveaction, + action_constants.LIVEACTION_STATUS_PAUSING, + context=liveaction.context, + ) execution = ActionExecution.get(liveaction__id=str(liveaction.id)) @@ -390,7 +405,12 @@ def request_resume(liveaction, requester): 'not in "paused" state.' % (liveaction.id, liveaction.status) ) - liveaction = update_status(liveaction, action_constants.LIVEACTION_STATUS_RESUMING) + liveaction.context["resumed_by"] = get_requester(requester) + liveaction = update_status( + liveaction, + action_constants.LIVEACTION_STATUS_RESUMING, + context=liveaction.context, + ) execution = ActionExecution.get(liveaction__id=str(liveaction.id)) @@ -608,3 +628,9 @@ def is_action_execution_under_action_chain_context(liveaction): if it contains the chain key in its context dictionary. 
""" return liveaction.context and "chain" in liveaction.context + + +def get_requester(requester): + if type(requester) == UserDB: + return requester["name"] + return requester diff --git a/st2common/st2common/services/executions.py b/st2common/st2common/services/executions.py index 39ce663272..80706e8f79 100644 --- a/st2common/st2common/services/executions.py +++ b/st2common/st2common/services/executions.py @@ -196,7 +196,7 @@ def update_execution(liveaction_db, publish=True, set_result_size=False): """ execution = ActionExecution.get(liveaction__id=str(liveaction_db.id)) - with coordination.get_coordinator().get_lock(str(liveaction_db.id)): + with coordination.get_coordinator().get_lock(str(liveaction_db.id).encode()): # Skip execution object update when action is already in completed state. if execution.status in action_constants.LIVEACTION_COMPLETED_STATES: LOG.debug( diff --git a/st2common/st2common/services/workflows.py b/st2common/st2common/services/workflows.py index 067583f303..b84671f8b1 100644 --- a/st2common/st2common/services/workflows.py +++ b/st2common/st2common/services/workflows.py @@ -938,7 +938,7 @@ def handle_action_execution_completion(ac_ex_db): task_ex_id = ac_ex_db.context["orquesta"]["task_execution_id"] # Acquire lock before write operations. - with coord_svc.get_coordinator(start_heart=True).get_lock(wf_ex_id): + with coord_svc.get_coordinator(start_heart=True).get_lock(str(wf_ex_id).encode()): # Get execution records for logging purposes. 
wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_id) task_ex_db = wf_db_access.TaskExecution.get_by_id(task_ex_id) diff --git a/st2common/st2common/transport/consumers.py b/st2common/st2common/transport/consumers.py index 47752f035f..44d867962d 100644 --- a/st2common/st2common/transport/consumers.py +++ b/st2common/st2common/transport/consumers.py @@ -205,6 +205,9 @@ def shutdown(self): LOG.info("Shutting down %s...", self.__class__.__name__) self._queue_consumer.shutdown() + def kill(self): + self._consumer_thread.kill(SystemExit()) + @abc.abstractmethod def process(self, message): pass diff --git a/st2common/st2common/util/param.py b/st2common/st2common/util/param.py index 52e1f025fd..90617a78d9 100644 --- a/st2common/st2common/util/param.py +++ b/st2common/st2common/util/param.py @@ -19,7 +19,7 @@ import six import networkx as nx -from jinja2 import meta +from jinja2 import meta, exceptions from oslo_config import cfg from st2common import log as logging from st2common.util.config_loader import get_config @@ -133,19 +133,24 @@ def _process(G, name, value): ) or jinja_utils.is_jinja_expression(complex_value_str) if is_jinja_expr: - G.add_node(name, template=value) - - template_ast = ENV.parse(value) - LOG.debug("Template ast: %s", template_ast) - # Dependencies of the node represent jinja variables used in the template - # We're connecting nodes with an edge for every depencency to traverse them - # in the right order and also make sure that we don't have missing or cyclic - # dependencies upfront. 
- dependencies = meta.find_undeclared_variables(template_ast) - LOG.debug("Dependencies: %s", dependencies) - if dependencies: - for dependency in dependencies: - G.add_edge(dependency, name) + try: + template_ast = ENV.parse(value) + G.add_node(name, template=value) + + LOG.debug("Template ast: %s", template_ast) + # Dependencies of the node represent jinja variables used in the template + # We're connecting nodes with an edge for every dependency to traverse them + # in the right order and also make sure that we don't have missing or cyclic + # dependencies upfront. + dependencies = meta.find_undeclared_variables(template_ast) + LOG.debug("Dependencies: %s", dependencies) + if dependencies: + for dependency in dependencies: + G.add_edge(dependency, name) + except exceptions.TemplateSyntaxError: + G.add_node(name, value=value) + # not jinja after all + # is_jinja_expression only checks for {{ or {{% for speed else: G.add_node(name, value=value) diff --git a/st2common/tests/unit/test_operators.py b/st2common/tests/unit/test_operators.py index 5917e4277c..dd00eba6f7 100644 --- a/st2common/tests/unit/test_operators.py +++ b/st2common/tests/unit/test_operators.py @@ -564,6 +564,203 @@ def record_function_args(criterion_k, criterion_v, payload_lookup): ], ) + + def _test_function(self, criterion_k, criterion_v, payload_lookup): + op = operators.get_operator(criterion_v["type"]) + return op(payload_lookup.get_value("item.to_value")[0], criterion_v["pattern"]) + + def test_search_any2any(self): + # true if any payload items match any criteria + op = operators.get_operator("search") + + payload = [ + { + "field_name": "waterLevel", + "to_value": 30, + }, + { + "field_name": "waterLevel", + "to_value": 45, + }, + ] + + criteria_pattern = { + "item.waterLevel#1": { + "type": "lessthan", + "pattern": 40, + }, + "item.waterLevel#2": { + "type": "greaterthan", + "pattern": 50, + }, + } + + result = op(payload, criteria_pattern, "any2any", self._test_function) + 
self.assertTrue(result) + + payload[0]["to_value"] = 44 + + result = op(payload, criteria_pattern, "any2any", self._test_function) + self.assertFalse(result) + + def test_search_any(self): + # true if any payload items match all criteria + op = operators.get_operator("search") + payload = [ + { + "field_name": "waterLevel", + "to_value": 45, + }, + { + "field_name": "waterLevel", + "to_value": 20, + }, + ] + + criteria_pattern = { + "item.waterLevel#1": { + "type": "greaterthan", + "pattern": 40, + }, + "item.waterLevel#2": { + "type": "lessthan", + "pattern": 50, + }, + "item.waterLevel#3": { + "type": "equals", + "pattern": 46, + }, + } + + result = op(payload, criteria_pattern, "any", self._test_function) + self.assertFalse(result) + + payload[0]["to_value"] = 46 + + result = op(payload, criteria_pattern, "any", self._test_function) + self.assertTrue(result) + + payload[0]["to_value"] = 45 + del criteria_pattern["item.waterLevel#3"] + + result = op(payload, criteria_pattern, "any", self._test_function) + self.assertTrue(result) + + def test_search_all2any(self): + # true if all payload items match any criteria + op = operators.get_operator("search") + payload = [ + { + "field_name": "waterLevel", + "to_value": 45, + }, + { + "field_name": "waterLevel", + "to_value": 20, + }, + ] + + criteria_pattern = { + "item.waterLevel#1": { + "type": "greaterthan", + "pattern": 40, + }, + "item.waterLevel#2": { + "type": "lessthan", + "pattern": 50, + }, + "item.waterLevel#3": { + "type": "equals", + "pattern": 46, + }, + } + + result = op(payload, criteria_pattern, "all2any", self._test_function) + self.assertTrue(result) + + criteria_pattern["item.waterLevel#2"]["type"] = "greaterthan" + + result = op(payload, criteria_pattern, "all2any", self._test_function) + self.assertFalse(result) + + def test_search_all(self): + # true if all payload items match all criteria items + op = operators.get_operator("search") + payload = [ + { + "field_name": "waterLevel", + "to_value": 
45, + }, + { + "field_name": "waterLevel", + "to_value": 46, + }, + ] + + criteria_pattern = { + "item.waterLevel#1": { + "type": "greaterthan", + "pattern": 40, + }, + "item.waterLevel#2": { + "type": "lessthan", + "pattern": 50, + }, + } + + result = op(payload, criteria_pattern, "all", self._test_function) + self.assertTrue(result) + + payload[0]["to_value"] = 30 + + result = op(payload, criteria_pattern, "all", self._test_function) + self.assertFalse(result) + + payload[0]["to_value"] = 45 + + criteria_pattern["item.waterLevel#3"] = { + "type": "equals", + "pattern": 46, + } + + result = op(payload, criteria_pattern, "all", self._test_function) + self.assertFalse(result) + + def test_search_payload_dict(self): + op = operators.get_operator("search") + payload = { + "field_name": "waterLevel", + "to_value": 45, + } + + criteria_pattern = { + "item.waterLevel#1": { + "type": "greaterthan", + "pattern": 40, + }, + "item.waterLevel#2": { + "type": "lessthan", + "pattern": 50, + }, + } + + result = op(payload, criteria_pattern, "all", self._test_function) + self.assertTrue(result) + + payload["to_value"] = 30 + + result = op(payload, criteria_pattern, "all", self._test_function) + self.assertFalse(result) + + payload["to_value"] = 45 + + criteria_pattern["item.waterLevel#3"] = { + "type": "equals", + "pattern": 46, + } + + result = op(payload, criteria_pattern, "all", self._test_function) + self.assertFalse(result) + class OperatorTest(unittest2.TestCase): def test_matchwildcard(self): @@ -943,6 +1140,20 @@ def test_timediff_lt_fail(self): "Passed test_timediff_lt with None as criteria_pattern.", ) + def test_timediff_lt_webui_value(self): + op = operators.get_operator("timediff_lt") + self.assertTrue( + op(date_utils.get_datetime_utc_now().isoformat(), "10"), + "Failed test_timediff_lt_webui_value.", + ) + + def test_timediff_lt_webui_value_fail(self): + op = operators.get_operator("timediff_lt") + self.assertFalse( + op("2014-07-01T00:01:01.000000", "10"), + 
"Passed test_timediff_lt_webui_value.", + ) + def test_timediff_gt(self): op = operators.get_operator("timediff_gt") self.assertTrue(op("2014-07-01T00:01:01.000000", 1), "Failed test_timediff_gt.") @@ -958,6 +1169,20 @@ def test_timediff_gt_fail(self): "Passed test_timediff_gt with None as criteria_pattern.", ) + def test_timediff_gt_webui_value(self): + op = operators.get_operator("timediff_gt") + self.assertTrue( + op("2014-07-01T00:01:01.000000", "1"), + "Failed test_timediff_gt_webui_value.", + ) + + def test_timediff_gt_webui_value_fail(self): + op = operators.get_operator("timediff_gt") + self.assertFalse( + op(date_utils.get_datetime_utc_now().isoformat(), "10"), + "Passed test_timediff_gt_webui_value.", + ) + def test_exists(self): op = operators.get_operator("exists") self.assertTrue(op(False, None), "Should return True") diff --git a/st2common/tests/unit/test_param_utils.py b/st2common/tests/unit/test_param_utils.py index c2e5810815..77c88b1cd3 100644 --- a/st2common/tests/unit/test_param_utils.py +++ b/st2common/tests/unit/test_param_utils.py @@ -54,6 +54,26 @@ class ParamsUtilsTest(DbTestCase): action_system_default_db = FIXTURES["actions"]["action_system_default.yaml"] runnertype_db = FIXTURES["runners"]["testrunner1.yaml"] + def test_process_jinja_exception(self): + + action_context = {"api_user": "noob"} + config = {} + G = param_utils._create_graph(action_context, config) + name = "a1" + value = {"test": "http://someurl?value={{a"} + param_utils._process(G, name, value) + self.assertEquals(G.nodes.get(name, {}).get("value"), value) + + def test_process_jinja_template(self): + + action_context = {"api_user": "noob"} + config = {} + G = param_utils._create_graph(action_context, config) + name = "a1" + value = "http://someurl?value={{a}}" + param_utils._process(G, name, value) + self.assertEquals(G.nodes.get(name, {}).get("template"), value) + def test_get_finalized_params(self): params = { "actionstr": "foo", diff --git 
a/st2reactor/st2reactor/rules/filter.py b/st2reactor/st2reactor/rules/filter.py index 838adc5480..f2b4c7b753 100644 --- a/st2reactor/st2reactor/rules/filter.py +++ b/st2reactor/st2reactor/rules/filter.py @@ -154,8 +154,10 @@ def _check_criterion(self, criterion_k, criterion_v, payload_lookup): return (False, None, None) + # Avoids the dict unique keys limitation. Allows multiple evaluations of the same payload item by a rule. + criterion_k_hash_strip = criterion_k.split("#", 1)[0] try: - matches = payload_lookup.get_value(criterion_k) + matches = payload_lookup.get_value(criterion_k_hash_strip) # pick value if only 1 matches else will end up being an array match. if matches: payload_value = matches[0] if len(matches) > 0 else matches diff --git a/st2reactor/tests/unit/test_filter.py b/st2reactor/tests/unit/test_filter.py index d1e42eaece..fd7c9d5741 100644 --- a/st2reactor/tests/unit/test_filter.py +++ b/st2reactor/tests/unit/test_filter.py @@ -414,3 +414,25 @@ class MockSystemLookup(object): } f = RuleFilter(MOCK_TRIGGER_INSTANCE, MOCK_TRIGGER, rule) self.assertTrue(f.filter()) + + def test_hash_strip_int_value(self): + rule = MOCK_RULE_1 + rule.criteria = { + "trigger.int": {"type": "gt", "pattern": 0}, + "trigger.int#2": {"type": "lt", "pattern": 2}, + } + f = RuleFilter(MOCK_TRIGGER_INSTANCE, MOCK_TRIGGER, rule) + self.assertTrue(f.filter(), "equals check should have passed.") + + rule = MOCK_RULE_1 + rule.criteria = { + "trigger.int": {"type": "gt", "pattern": 2}, + "trigger.int#2": {"type": "lt", "pattern": 3}, + } + f = RuleFilter(MOCK_TRIGGER_INSTANCE, MOCK_TRIGGER, rule) + self.assertFalse(f.filter(), "trigger value is gt than 0 but didn't match.") + + rule = MOCK_RULE_1 + rule.criteria = {"trigger.int#1": {"type": "lt", "pattern": 2}} + f = RuleFilter(MOCK_TRIGGER_INSTANCE, MOCK_TRIGGER, rule) + self.assertTrue(f.filter(), "trigger value is gt than 0 but didn't match.") diff --git a/st2tests/st2tests/mocks/datastore.py 
b/st2tests/st2tests/mocks/datastore.py index 0282a18ffd..e1adcde153 100644 --- a/st2tests/st2tests/mocks/datastore.py +++ b/st2tests/st2tests/mocks/datastore.py @@ -34,6 +34,7 @@ def __init__(self, logger, pack_name, class_name, api_username=None): self._pack_name = pack_name self._class_name = class_name self._username = api_username or "admin" + self._logger = logger # Holds mock KeyValuePair objects # Key is a KeyValuePair name and value is the KeyValuePair object @@ -96,10 +97,6 @@ def set_value( """ Store a value in a dictionary which is local to this class. """ - if ttl: - raise ValueError( - 'MockDatastoreService.set_value doesn\'t support "ttl" argument' - ) name = self._get_full_key_name(name=name, local=local) @@ -107,6 +104,11 @@ def set_value( instance.id = name instance.name = name instance.value = value + if ttl: + self._logger.warning( + "MockDatastoreService is not able to expire keys based on ttl." + ) + instance.ttl = ttl self._datastore_items[name] = instance return True diff --git a/test-requirements.txt b/test-requirements.txt index b6b78432b8..bcc8594e0b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,7 +10,7 @@ pre-commit==2.1.0 bandit==1.7.0 ipython<6.0.0 isort>=4.2.5 -mock==3.0.3 +mock==4.0.3 nose>=1.3.7 tabulate unittest2 @@ -22,10 +22,10 @@ nose-timer==1.0.1 # splitting tests run on a separate CI machines nose-parallel==0.4.0 # Required by st2client tests -pyyaml==5.4 +pyyaml==5.4.1 RandomWords -gunicorn==19.9.0 -psutil==5.6.6 +gunicorn==20.1.0 +psutil==5.8.0 webtest==2.0.35 rstcheck>=3.3.1,<3.4 tox==3.23.0 From 6e76bb1a1ccf13eb792d2b4f9f5a5df254b2fff5 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Sat, 5 Feb 2022 12:47:28 -0600 Subject: [PATCH 5/9] check if proxies are supposed to be used --- st2common/st2common/services/packs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/st2common/st2common/services/packs.py b/st2common/st2common/services/packs.py index ba7bc58b5d..e845ce8e76 100644 
--- a/st2common/st2common/services/packs.py +++ b/st2common/st2common/services/packs.py @@ -103,7 +103,8 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): # Bug in requests doesn't bypass proxies, so we do it ourselves # If this issue ever gets fixed then we can remove it # https://github.com/psf/requests/issues/4871 - bypass_proxy = should_bypass_proxies(index_url, proxies_dict["no"]) + if proxies_dict: + bypass_proxy = should_bypass_proxies(index_url, proxies_dict.get("no")) index_status = { "url": index_url, From ae2dbc22402193b51c5d284124297fec42f747d8 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Sat, 5 Feb 2022 12:50:18 -0600 Subject: [PATCH 6/9] Fix finding key if missing --- st2common/st2common/services/packs.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/st2common/st2common/services/packs.py b/st2common/st2common/services/packs.py index e845ce8e76..045c5d4f9e 100644 --- a/st2common/st2common/services/packs.py +++ b/st2common/st2common/services/packs.py @@ -103,8 +103,7 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): # Bug in requests doesn't bypass proxies, so we do it ourselves # If this issue ever gets fixed then we can remove it # https://github.com/psf/requests/issues/4871 - if proxies_dict: - bypass_proxy = should_bypass_proxies(index_url, proxies_dict.get("no")) + bypass_proxy = should_bypass_proxies(index_url, proxies_dict.get("no")) index_status = { "url": index_url, From a7afecef575933fb4150da392f90e274c0710190 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Sat, 5 Feb 2022 13:36:37 -0600 Subject: [PATCH 7/9] fix for linter --- st2common/st2common/services/packs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/st2common/st2common/services/packs.py b/st2common/st2common/services/packs.py index 045c5d4f9e..6c39c308ee 100644 --- a/st2common/st2common/services/packs.py +++ b/st2common/st2common/services/packs.py @@ -117,7 +117,7 @@ def 
_fetch_and_compile_index(index_urls, logger=None, proxy_config=None): request = requests.get( index_url, proxies=proxies_dict if not bypass_proxy else None, - verify=verify if not bypass_proxy else None + verify=verify if not bypass_proxy else None, ) request.raise_for_status() index_json = request.json() From 80de473cf95622970d6d652b0d9def5472442e3a Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Sat, 5 Feb 2022 13:45:52 -0600 Subject: [PATCH 8/9] fix for linter --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 314161f876..e9debee978 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -97,7 +97,7 @@ Fixed * Use byte type lock name which is supported by all tooz drivers. #5529 Contributed by @khushboobhatia01 - + * Fixed issue where pack index searches are ignoring no_proxy #5497 Contributed by @minsis From 224e0e29310a8ebdfba522fb28e59585b8cfc940 Mon Sep 17 00:00:00 2001 From: Dennis Whitney Date: Mon, 7 Mar 2022 10:18:23 -0600 Subject: [PATCH 9/9] Fix ssl verify for bypassing proxy --- st2common/st2common/services/packs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/st2common/st2common/services/packs.py b/st2common/st2common/services/packs.py index 6c39c308ee..ee2d937758 100644 --- a/st2common/st2common/services/packs.py +++ b/st2common/st2common/services/packs.py @@ -117,7 +117,7 @@ def _fetch_and_compile_index(index_urls, logger=None, proxy_config=None): request = requests.get( index_url, proxies=proxies_dict if not bypass_proxy else None, - verify=verify if not bypass_proxy else None, + verify=verify if not bypass_proxy else True, ) request.raise_for_status() index_json = request.json()