From 1d8278ef5e680a4ea57e1c169ff7a29c8c006baa Mon Sep 17 00:00:00 2001 From: guzzijones12 Date: Sun, 15 Mar 2020 12:07:56 -0400 Subject: [PATCH 1/9] add config setting for execution schedule timeout --- CHANGELOG.rst | 1 + conf/st2.conf.sample | 2 ++ st2actions/st2actions/scheduler/config.py | 3 +++ st2actions/st2actions/scheduler/handler.py | 6 ++++-- st2tests/st2tests/config.py | 3 +++ 5 files changed, 13 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 196cb001cc..bd213d07f9 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,6 +17,7 @@ Added * Add ``get_entrypoint()`` method to ``ActionResourceManager`` attribute of st2client. #4791 * Add support for orquesta task retry. (new feature) +* Add config option ``scheduler.execution_scheduling_timeout_threshold_min``. Fixes #4887 Changed ~~~~~~~ diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index 967ed611fa..261f569159 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -279,6 +279,8 @@ pool_size = 10 retry_wait_msec = 3000 # How often (in seconds) to look for zombie execution requests before rescheduling them. gc_interval = 10 +# How often in minutes to check for unscheduled actions +execution_scheduling_timeout_threshold_min = 1 [schema] # Version of JSON schema to use. diff --git a/st2actions/st2actions/scheduler/config.py b/st2actions/st2actions/scheduler/config.py index e507796007..ceee6d0d6c 100644 --- a/st2actions/st2actions/scheduler/config.py +++ b/st2actions/st2actions/scheduler/config.py @@ -50,6 +50,9 @@ def _register_service_opts(): default='/etc/st2/logging.scheduler.conf', help='Location of the logging configuration file.' ), + cfg.FloatOpt( + 'execution_scheduling_timeout_threshold_min', default=1, + help='How long GC to search back in minutes for orphaned scheduled actions'), cfg.IntOpt( 'pool_size', default=10, help='The size of the pool used by the scheduler for scheduling executions.'), diff --git a/st2actions/st2actions/scheduler/handler.py b/st2actions/st2actions/scheduler/handler.py index b252c520e1..173eb965ff 100644 --- a/st2actions/st2actions/scheduler/handler.py +++ b/st2actions/st2actions/scheduler/handler.py @@ -50,7 +50,6 @@ # (< 5 seconds). If an item is still being marked as processing it likely indicates that the # scheduler process which was processing that item crashed or similar so we need to mark it as # "handling=False" so some other scheduler process can pick it up. -EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS = (60 * 1000) # When a policy delayed execution is detected it will be try to be rescheduled by the scheduler # again in this amount of milliseconds. 
@@ -62,6 +61,9 @@ def __init__(self): self.message_type = LiveActionDB self._shutdown = False self._pool = eventlet.GreenPool(size=cfg.CONF.scheduler.pool_size) + self._execution_scheduling_timeout_threshold_ms = \ + cfg.CONF.scheduler.execution_scheduling_timeout_threshold_min \ + * 60 * 1000 self._coordinator = coordination_service.get_coordinator(start_heart=True) self._main_thread = None self._cleanup_thread = None @@ -100,7 +102,7 @@ def _reset_handling_flag(self): query = { 'scheduled_start_timestamp__lte': date.append_milliseconds_to_time( date.get_datetime_utc_now(), - -EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS + -self._execution_scheduling_timeout_threshold_ms ), 'handling': True } diff --git a/st2tests/st2tests/config.py b/st2tests/st2tests/config.py index 3f67d4c119..169af5117a 100644 --- a/st2tests/st2tests/config.py +++ b/st2tests/st2tests/config.py @@ -283,6 +283,9 @@ def _register_ssh_runner_opts(): def _register_scheduler_opts(): scheduler_opts = [ + cfg.FloatOpt( + 'execution_scheduling_timeout_threshold_min', default=1, + help='How long GC to search back in minutes for orphaned scheduled actions'), cfg.IntOpt( 'pool_size', default=10, help='The size of the pool used by the scheduler for scheduling executions.'), From 8d58d966966ca8f9d1bf96dadcc3327102505a4e Mon Sep 17 00:00:00 2001 From: AJ Date: Tue, 14 Apr 2020 13:43:40 -0400 Subject: [PATCH 2/9] Update conf/st2.conf.sample edit comments Co-Authored-By: Eugen C. --- conf/st2.conf.sample | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index 261f569159..2239ea75d2 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -279,7 +279,7 @@ pool_size = 10 retry_wait_msec = 3000 # How often (in seconds) to look for zombie execution requests before rescheduling them. gc_interval = 10 -# How often in minutes to check for unscheduled actions +# How long GC to search back in minutes for orphaned scheduled actions execution_scheduling_timeout_threshold_min = 1 [schema] @@ -381,4 +381,3 @@ retry_max_jitter_msec = 1000 retry_wait_fixed_msec = 1000 # Max seconds to allow workflow execution be idled before it is identified as orphaned and cancelled by the garbage collector. A value of zero means the feature is disabled. This is disabled by default. gc_max_idle_sec = 0 - From 64ff29f9c2be6e896d95fe5493075ea99464a88c Mon Sep 17 00:00:00 2001 From: AJ Date: Tue, 14 Apr 2020 13:44:22 -0400 Subject: [PATCH 3/9] Update CHANGELOG.rst edit sample config Co-Authored-By: Eugen C. --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bd213d07f9..86b68c3d4d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,7 +17,7 @@ Added * Add ``get_entrypoint()`` method to ``ActionResourceManager`` attribute of st2client. #4791 * Add support for orquesta task retry. (new feature) -* Add config option ``scheduler.execution_scheduling_timeout_threshold_min``. Fixes #4887 +* Add config option ``scheduler.execution_scheduling_timeout_threshold_min`` to better control the cleanup of scheduled actions that were orphaned. 
#4886 Changed ~~~~~~~ From 376757aab0074d139f90241c79f9119c6e311b1a Mon Sep 17 00:00:00 2001 From: guzzijones12 Date: Tue, 14 Apr 2020 19:32:29 +0000 Subject: [PATCH 4/9] move comment about scheduling being reprocessed --- st2actions/st2actions/scheduler/handler.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/st2actions/st2actions/scheduler/handler.py b/st2actions/st2actions/scheduler/handler.py index 173eb965ff..01edeb2632 100644 --- a/st2actions/st2actions/scheduler/handler.py +++ b/st2actions/st2actions/scheduler/handler.py @@ -44,13 +44,6 @@ LOG = logging.getLogger(__name__) -# If an ActionExecutionSchedulingQueueItemDB object hasn't been updated fore more than this amount -# of milliseconds, it will be marked as "handled=False". -# As soon as an item is picked by scheduler to be processed, it should be processed very fast -# (< 5 seconds). If an item is still being marked as processing it likely indicates that the -# scheduler process which was processing that item crashed or similar so we need to mark it as -# "handling=False" so some other scheduler process can pick it up. - # When a policy delayed execution is detected it will be try to be rescheduled by the scheduler # again in this amount of milliseconds. POLICY_DELAYED_EXECUTION_RESCHEDULE_TIME_MS = 2500 @@ -61,6 +54,12 @@ def __init__(self): self.message_type = LiveActionDB self._shutdown = False self._pool = eventlet.GreenPool(size=cfg.CONF.scheduler.pool_size) + # If an ActionExecutionSchedulingQueueItemDB object hasn't been updated fore more than this amount + # of milliseconds, it will be marked as "handled=False". + # As soon as an item is picked by scheduler to be processed, it should be processed very fast + # (< 5 seconds). If an item is still being marked as processing it likely indicates that the + # scheduler process which was processing that item crashed or similar so we need to mark it as + # "handling=False" so some other scheduler process can pick it up. self._execution_scheduling_timeout_threshold_ms = \ cfg.CONF.scheduler.execution_scheduling_timeout_threshold_min \ * 60 * 1000 From 4774918517500b699c1a3920116fa8b73010daac Mon Sep 17 00:00:00 2001 From: guzzijones12 Date: Tue, 14 Apr 2020 20:11:02 +0000 Subject: [PATCH 5/9] fix st2.sample.conf --- conf/st2.conf.sample | 1 + 1 file changed, 1 insertion(+) diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index 2239ea75d2..a5e255e5e2 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -381,3 +381,4 @@ retry_max_jitter_msec = 1000 retry_wait_fixed_msec = 1000 # Max seconds to allow workflow execution be idled before it is identified as orphaned and cancelled by the garbage collector. A value of zero means the feature is disabled. This is disabled by default. gc_max_idle_sec = 0 + From cf4de1bb4013cd8f1ce10056b336cbaecf93ed38 Mon Sep 17 00:00:00 2001 From: guzzijones12 Date: Tue, 14 Apr 2020 20:41:15 +0000 Subject: [PATCH 6/9] update st2.sample.conf --- conf/st2.conf.sample | 320 +++++++++++++++++++++---------------------- 1 file changed, 160 insertions(+), 160 deletions(-) diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index a5e255e5e2..ceb67010c0 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -8,239 +8,239 @@ enable = True emit_when = succeeded,failed,timeout,canceled,abandoned # comma separated list allowed here. [actionrunner] -# List of pip options to be passed to "pip install" command when installing pack dependencies into pack virtual environment. 
-pip_opts = # comma separated list allowed here. -# Internal pool size for dispatcher used by regular actions. -actions_pool_size = 60 -# Default log level to use for Python runner actions. Can be overriden on invocation basis using "log_level" runner parameter. -python_runner_log_level = DEBUG -# Internal pool size for dispatcher used by workflow actions. -workflows_pool_size = 40 +# location of the logging.conf file +logging = /etc/st2/logging.actionrunner.conf +# Python binary which will be used by Python actions. +python_binary = /usr/bin/python +# Python 3 binary which will be used by Python actions for packs which use Python 3 virtual environment. +python3_binary = /usr/bin/python3 # Prefix for Python 3 installation (e.g. /opt/python3.6). If not specified, it tries to find Python 3 libraries in /usr/lib and /usr/local/lib. python3_prefix = None # Virtualenv binary which should be used to create pack virtualenvs. virtualenv_binary = /usr/bin/virtualenv -# Python 3 binary which will be used by Python actions for packs which use Python 3 virtual environment. -python3_binary = /usr/bin/python3 -# Buffer size to use for real time action output streaming. 0 means unbuffered 1 means line buffered, -1 means system default, which usually means fully buffered and any other positive value means use a buffer of (approximately) that size -stream_output_buffer_size = -1 +# Default log level to use for Python runner actions. Can be overriden on invocation basis using "log_level" runner parameter. +python_runner_log_level = DEBUG # List of virtualenv options to be passsed to "virtualenv" command that creates pack virtualenv. virtualenv_opts = --system-site-packages # comma separated list allowed here. +# List of pip options to be passed to "pip install" command when installing pack dependencies into pack virtual environment. +pip_opts = # comma separated list allowed here. # True to store and stream action output (stdout and stderr) in real-time. stream_output = True -# location of the logging.conf file -logging = /etc/st2/logging.actionrunner.conf -# Python binary which will be used by Python actions. -python_binary = /usr/bin/python +# Buffer size to use for real time action output streaming. 0 means unbuffered 1 means line buffered, -1 means system default, which usually means fully buffered and any other positive value means use a buffer of (approximately) that size +stream_output_buffer_size = -1 +# Internal pool size for dispatcher used by workflow actions. +workflows_pool_size = 40 +# Internal pool size for dispatcher used by regular actions. +actions_pool_size = 60 [api] +# StackStorm API server host +host = 127.0.0.1 +# StackStorm API server port +port = 9101 # List of origins allowed for api, auth and stream allow_origin = http://127.0.0.1:3000 # comma separated list allowed here. -# location of the logging.conf file -logging = /etc/st2/logging.api.conf -# Maximum limit (page size) argument which can be specified by the user in a query string. -max_page_size = 100 # True to mask secrets in the API responses mask_secrets = True -# StackStorm API server host -host = 127.0.0.1 # None debug = False -# StackStorm API server port -port = 9101 +# location of the logging.conf file +logging = /etc/st2/logging.api.conf +# Maximum limit (page size) argument which can be specified by the user in a query string. +max_page_size = 100 [auth] # Common option - options below apply in both scenarios - when auth service is running as a WSGI # service (e.g. 
under Apache or Nginx) and when it's running in the standalone mode. -# Enable authentication middleware. -enable = True -# Path to the logging config. -logging = /etc/st2/logging.auth.conf # Base URL to the API endpoint excluding the version api_url = None -# Service token ttl in seconds. -service_token_ttl = 86400 +# Enable authentication middleware. +enable = True # Access token ttl in seconds. token_ttl = 86400 -# Authentication mode (proxy,standalone) -mode = standalone +# Service token ttl in seconds. +service_token_ttl = 86400 +# Path to the logging config. +logging = /etc/st2/logging.auth.conf # Specify to enable debug mode. debug = False +# Authentication mode (proxy,standalone) +mode = standalone # Standalone mode options - options below only apply when auth service is running in the standalone # mode. -# Path to the SSL certificate file. Only used when "use_ssl" is specified. -cert = /etc/apache2/ssl/mycert.crt -# JSON serialized arguments which are passed to the authentication backend in a standalone mode. -backend_kwargs = None # Host on which the service should listen on. host = 127.0.0.1 -# Path to the SSL private key file. Only used when "use_ssl" is specified. -key = /etc/apache2/ssl/mycert.key -# Specify to enable SSL / TLS mode -use_ssl = False # Port on which the service should listen on. port = 9100 +# Specify to enable SSL / TLS mode +use_ssl = False +# Path to the SSL certificate file. Only used when "use_ssl" is specified. +cert = /etc/apache2/ssl/mycert.crt +# Path to the SSL private key file. Only used when "use_ssl" is specified. +key = /etc/apache2/ssl/mycert.key # Authentication backend to use in a standalone mode. Available backends: flat_file. backend = flat_file +# JSON serialized arguments which are passed to the authentication backend in a standalone mode. +backend_kwargs = None [content] -# A URL pointing to the pack index. StackStorm Exchange is used by default. Use a comma-separated list for multiple indexes if you want to get other packs discovered with "st2 pack search". -index_url = https://index.stackstorm.org/v1/index.json # comma separated list allowed here. -# Path to the directory which contains system runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 -system_runners_base_path = /opt/stackstorm/runners -# Paths which will be searched for runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 -runners_base_paths = None # User group that can write to packs directory. pack_group = st2packs # Path to the directory which contains system packs. system_packs_base_path = /opt/stackstorm/packs +# Path to the directory which contains system runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 +system_runners_base_path = /opt/stackstorm/runners # Paths which will be searched for integration packs. packs_base_paths = None +# Paths which will be searched for runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 +runners_base_paths = None +# A URL pointing to the pack index. StackStorm Exchange is used by default. Use a comma-separated list for multiple indexes if you want to get other packs discovered with "st2 pack search". +index_url = https://index.stackstorm.org/v1/index.json # comma separated list allowed here. [coordination] # Endpoint for the coordination server. url = None -# True to register StackStorm services in a service registry. -service_registry = False # TTL for the lock if backend suports it. 
lock_timeout = 60 +# True to register StackStorm services in a service registry. +service_registry = False [database] +# host of db server +host = 127.0.0.1 +# port of db server +port = 27017 +# name of database +db_name = st2 # username for db login username = None +# password for db login +password = None +# Connection and server selection timeout (in ms). +connection_timeout = 3000 # Connection retry total time (minutes). connection_retry_max_delay_m = 3 -# ca_certs file contains a set of concatenated CA certificates, which are used to validate certificates passed from MongoDB. -ssl_ca_certs = None -# Certificate file used to identify the localconnection -ssl_certfile = None # Connection retry backoff max (seconds). connection_retry_backoff_max_s = 10 -# If True and `ssl_cert_reqs` is not None, enables hostname verification -ssl_match_hostname = True -# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided -ssl_cert_reqs = None -# Create the connection to mongodb using SSL -ssl = False -# host of db server -host = 127.0.0.1 -# name of database -db_name = st2 # Backoff multiplier (seconds). connection_retry_backoff_mul = 1 -# Specifies database authentication mechanisms. By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, MONGODB-CR (MongoDB Challenge Response protocol) for older servers. -authentication_mechanism = None +# Create the connection to mongodb using SSL +ssl = False # Private keyfile used to identify the local connection against MongoDB. ssl_keyfile = None -# Connection and server selection timeout (in ms). -connection_timeout = 3000 -# password for db login -password = None -# port of db server -port = 27017 +# Certificate file used to identify the localconnection +ssl_certfile = None +# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided +ssl_cert_reqs = None +# ca_certs file contains a set of concatenated CA certificates, which are used to validate certificates passed from MongoDB. +ssl_ca_certs = None +# If True and `ssl_cert_reqs` is not None, enables hostname verification +ssl_match_hostname = True +# Specifies database authentication mechanisms. By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, MONGODB-CR (MongoDB Challenge Response protocol) for older servers. +authentication_mechanism = None [exporter] -# location of the logging.exporter.conf file -logging = /etc/st2/logging.exporter.conf # Directory to dump data to. dump_dir = /opt/stackstorm/exports/ +# location of the logging.exporter.conf file +logging = /etc/st2/logging.exporter.conf [garbagecollector] -# Action executions and related objects (live actions, action output objects) older than this value (days) will be automatically deleted. -action_executions_ttl = None -# Trigger instances older than this value (days) will be automatically deleted. -trigger_instances_ttl = None # Location of the logging configuration file. logging = /etc/st2/logging.garbagecollector.conf +# How often to check database for old data and perform garbage collection. +collection_interval = 600 # How long to wait / sleep (in seconds) between collection of different object types. sleep_delay = 2 -# Set to True to perform garbage collection on Inquiries (based on the TTL value per Inquiry) -purge_inquiries = False +# Action executions and related objects (live actions, action output objects) older than this value (days) will be automatically deleted. 
+action_executions_ttl = None # Action execution output objects (ones generated by action output streaming) older than this value (days) will be automatically deleted. action_executions_output_ttl = 7 -# How often to check database for old data and perform garbage collection. -collection_interval = 600 +# Trigger instances older than this value (days) will be automatically deleted. +trigger_instances_ttl = None +# Set to True to perform garbage collection on Inquiries (based on the TTL value per Inquiry) +purge_inquiries = False [keyvalue] -# Location of the symmetric encryption key for encrypting values in kvstore. This key should be in JSON and should've been generated using st2-generate-symmetric-crypto-key tool. -encryption_key_path = # Allow encryption of values in key value stored qualified as "secret". enable_encryption = True +# Location of the symmetric encryption key for encrypting values in kvstore. This key should be in JSON and should've been generated using st2-generate-symmetric-crypto-key tool. +encryption_key_path = [log] -# Controls if stderr should be redirected to the logs. -redirect_stderr = False # Exclusion list of loggers to omit. excludes = # comma separated list allowed here. -# Blacklist of additional attribute names to mask in the log messages. -mask_secrets_blacklist = # comma separated list allowed here. +# Controls if stderr should be redirected to the logs. +redirect_stderr = False # True to mask secrets in the log files. mask_secrets = True +# Blacklist of additional attribute names to mask in the log messages. +mask_secrets_blacklist = # comma separated list allowed here. [messaging] -# Certificate file used to identify the local connection (client). -ssl_certfile = None -# How many times should we retry connection before failing. -connection_retries = 10 -# Use SSL / TLS to connect to the messaging server. Same as appending "?ssl=true" at the end of the connection URL string. -ssl = False # URL of the messaging server. url = amqp://guest:guest@127.0.0.1:5672// -# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided. -ssl_cert_reqs = None # URL of all the nodes in a messaging service cluster. cluster_urls = # comma separated list allowed here. +# How many times should we retry connection before failing. +connection_retries = 10 # How long should we wait between connection retries. connection_retry_wait = 10000 +# Use SSL / TLS to connect to the messaging server. Same as appending "?ssl=true" at the end of the connection URL string. +ssl = False # Private keyfile used to identify the local connection against RabbitMQ. ssl_keyfile = None +# Certificate file used to identify the local connection (client). +ssl_certfile = None +# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided. +ssl_cert_reqs = None # ca_certs file contains a set of concatenated CA certificates, which are used to validate certificates passed from RabbitMQ. ssl_ca_certs = None # Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.). login_method = None [metrics] -# Randomly sample and only send metrics for X% of metric operations to the backend. Default value of 1 means no sampling is done and all the metrics are sent to the backend. E.g. 0.1 would mean 10% of operations are sampled. -sample_rate = 1 -# Destination server to connect to if driver requires connection. 
-host = 127.0.0.1 -# Optional prefix which is prepended to all the metric names. Comes handy when you want to submit metrics from various environment to the same metric backend instance. -prefix = None # Driver type for metrics collection. driver = noop +# Destination server to connect to if driver requires connection. +host = 127.0.0.1 # Destination port to connect to if driver requires connection. port = 8125 +# Optional prefix which is prepended to all the metric names. Comes handy when you want to submit metrics from various environment to the same metric backend instance. +prefix = None +# Randomly sample and only send metrics for X% of metric operations to the backend. Default value of 1 means no sampling is done and all the metrics are sent to the backend. E.g. 0.1 would mean 10% of operations are sampled. +sample_rate = 1 [mistral] -# URL Mistral uses to talk back to the API.If not provided it defaults to public API URL. Note: This needs to be a base URL without API version (e.g. http://127.0.0.1:9101) -api_url = None +# v2 API root endpoint. +v2_base_url = http://127.0.0.1:8989/v2 # Multiplier for the exponential backoff. retry_exp_msec = 1000 -# Jitter interval to smooth out HTTP requests to mistral tasks and executions API. -jitter_interval = 0.1 -# Allow insecure communication with Mistral. -insecure = False +# Max time for each set of backoff. +retry_exp_max_msec = 300000 +# Max time to stop retrying. +retry_stop_max_msec = 600000 # Username for authentication. keystone_username = None -# Enable results tracking and disable callbacks. -enable_polling = False -# OpenStack project scope. -keystone_project_name = None # Password for authentication. keystone_password = None +# OpenStack project scope. +keystone_project_name = None # Auth endpoint for Keystone. keystone_auth_url = None # Optional certificate to validate endpoint. cacert = None -# v2 API root endpoint. -v2_base_url = http://127.0.0.1:8989/v2 -# Max time to stop retrying. -retry_stop_max_msec = 600000 -# Max time for each set of backoff. -retry_exp_max_msec = 300000 +# Allow insecure communication with Mistral. +insecure = False +# Enable results tracking and disable callbacks. +enable_polling = False +# Jitter interval to smooth out HTTP requests to mistral tasks and executions API. +jitter_interval = 0.1 +# URL Mistral uses to talk back to the API.If not provided it defaults to public API URL. Note: This needs to be a base URL without API version (e.g. http://127.0.0.1:9101) +api_url = None [notifier] # Location of the logging configuration file. @@ -251,36 +251,36 @@ logging = /etc/st2/logging.notifier.conf enable_common_libs = False [resultstracker] +# Number of threads to use to query external workflow systems. +thread_pool_size = 10 # Time interval between queries to external workflow system. query_interval = 5 # Sleep delay in between queries when query queue is empty. empty_q_sleep_time = 1 -# Location of the logging configuration file. -logging = /etc/st2/logging.resultstracker.conf # Sleep delay for query when there is no more worker in pool. no_workers_sleep_time = 1 -# Number of threads to use to query external workflow systems. -thread_pool_size = 10 +# Location of the logging configuration file. +logging = /etc/st2/logging.resultstracker.conf [rulesengine] # Location of the logging configuration file. logging = /etc/st2/logging.rulesengine.conf [scheduler] -# The maximum number of attempts that the scheduler retries on error. -retry_max_attempt = 10 # Location of the logging configuration file. 
logging = /etc/st2/logging.scheduler.conf -# How long (in seconds) to sleep between each action scheduler main loop run interval. -sleep_interval = 0.1 +# How long GC to search back in minutes for orphaned scheduled actions +execution_scheduling_timeout_threshold_min = 1 # The size of the pool used by the scheduler for scheduling executions. pool_size = 10 -# The number of milliseconds to wait in between retries. -retry_wait_msec = 3000 +# How long (in seconds) to sleep between each action scheduler main loop run interval. +sleep_interval = 0.1 # How often (in seconds) to look for zombie execution requests before rescheduling them. gc_interval = 10 -# How long GC to search back in minutes for orphaned scheduled actions -execution_scheduling_timeout_threshold_min = 1 +# The maximum number of attempts that the scheduler retries on error. +retry_max_attempt = 10 +# The number of milliseconds to wait in between retries. +retry_wait_msec = 3000 [schema] # Version of JSON schema to use. @@ -289,96 +289,96 @@ version = 4 draft = http://json-schema.org/draft-04/schema# [sensorcontainer] -# Provider of sensor node partition config. -partition_provider = {'name': 'default'} -# Run in a single sensor mode where parent process exits when a sensor crashes / dies. This is useful in environments where partitioning, sensor process life cycle and failover is handled by a 3rd party service such as kubernetes. -single_sensor_mode = False # location of the logging.conf file logging = /etc/st2/logging.sensorcontainer.conf # name of the sensor node. sensor_node_name = sensornode1 +# Provider of sensor node partition config. +partition_provider = {'name': 'default'} +# Run in a single sensor mode where parent process exits when a sensor crashes / dies. This is useful in environments where partitioning, sensor process life cycle and failover is handled by a 3rd party service such as kubernetes. +single_sensor_mode = False [ssh_runner] -# Max number of parallel remote SSH actions that should be run. Works only with Paramiko SSH runner. -max_parallel_actions = 50 # Location of the script on the remote filesystem. remote_dir = /tmp +# How partial success of actions run on multiple nodes should be treated. +allow_partial_failure = False +# Max number of parallel remote SSH actions that should be run. Works only with Paramiko SSH runner. +max_parallel_actions = 50 # Use the .ssh/config file. Useful to override ports etc. use_ssh_config = False # Path to the ssh config file. ssh_config_file_path = ~/.ssh/config -# How partial success of actions run on multiple nodes should be treated. -allow_partial_failure = False [stream] -# Specify to enable debug mode. -debug = False # Send empty message every N seconds to keep connection open heartbeat = 25 # StackStorm stream API server host host = 127.0.0.1 -# location of the logging.conf file -logging = /etc/st2/logging.stream.conf # StackStorm API stream, server port port = 9102 +# Specify to enable debug mode. +debug = False +# location of the logging.conf file +logging = /etc/st2/logging.stream.conf [syslog] # Host for the syslog server. host = 127.0.0.1 -# Transport protocol to use (udp / tcp). -protocol = udp # Port for the syslog server. port = 514 # Syslog facility level. facility = local7 +# Transport protocol to use (udp / tcp). +protocol = udp [system] # Enable debug mode. debug = False +# Base path to all st2 artifacts. +base_path = /opt/stackstorm # True to validate parameters for non-system trigger types when creatinga rule. 
By default, only parameters for system triggers are validated. validate_trigger_parameters = True -# True to validate action and runner output against schema. -validate_output_schema = False # True to validate payload for non-system trigger types when dispatching a trigger inside the sensor. By default, only payload for system triggers is validated. validate_trigger_payload = True -# Base path to all st2 artifacts. -base_path = /opt/stackstorm +# True to validate action and runner output against schema. +validate_output_schema = False [system_user] -# SSH private key for the system user. -ssh_key_file = /home/stanley/.ssh/stanley_rsa # Default system user. user = stanley +# SSH private key for the system user. +ssh_key_file = /home/stanley/.ssh/stanley_rsa [timer] -# Specify to enable timer service. NOTE: Deprecated in favor of timersengine.enable -enable = None -# Timezone pertaining to the location where st2 is run. NOTE: Deprecated in favor of timersengine.local_timezone -local_timezone = None # Location of the logging configuration file. NOTE: Deprecated in favor of timersengine.logging logging = None +# Timezone pertaining to the location where st2 is run. NOTE: Deprecated in favor of timersengine.local_timezone +local_timezone = None +# Specify to enable timer service. NOTE: Deprecated in favor of timersengine.enable +enable = None [timersengine] -# Specify to enable timer service. -enable = True -# Timezone pertaining to the location where st2 is run. -local_timezone = America/Los_Angeles # Location of the logging configuration file. logging = /etc/st2/logging.timersengine.conf +# Timezone pertaining to the location where st2 is run. +local_timezone = America/Los_Angeles +# Specify to enable timer service. +enable = True [webui] # Base https URL to access st2 Web UI. This is used to construct history URLs that are sent out when chatops is used to kick off executions. webui_base_url = https://localhost [workflow_engine] -# Location of the logging configuration file. -logging = /etc/st2/logging.workflowengine.conf # Max time to stop retrying. retry_stop_max_msec = 60000 -# Max jitter interval to smooth out retries. -retry_max_jitter_msec = 1000 # Interval inbetween retries. retry_wait_fixed_msec = 1000 +# Max jitter interval to smooth out retries. +retry_max_jitter_msec = 1000 # Max seconds to allow workflow execution be idled before it is identified as orphaned and cancelled by the garbage collector. A value of zero means the feature is disabled. This is disabled by default. gc_max_idle_sec = 0 +# Location of the logging configuration file. +logging = /etc/st2/logging.workflowengine.conf From 3c05f5e781653d3af89b1e0d11f7a7792ea809ce Mon Sep 17 00:00:00 2001 From: armab Date: Tue, 14 Apr 2020 22:37:12 +0100 Subject: [PATCH 7/9] Fix st2.conf.sample re-generation --- conf/st2.conf.sample | 316 +++++++++++++++++++++---------------------- 1 file changed, 158 insertions(+), 158 deletions(-) diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index ceb67010c0..3ae734a38f 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -8,239 +8,239 @@ enable = True emit_when = succeeded,failed,timeout,canceled,abandoned # comma separated list allowed here. [actionrunner] -# location of the logging.conf file -logging = /etc/st2/logging.actionrunner.conf -# Python binary which will be used by Python actions. -python_binary = /usr/bin/python -# Python 3 binary which will be used by Python actions for packs which use Python 3 virtual environment. 
-python3_binary = /usr/bin/python3 +# List of pip options to be passed to "pip install" command when installing pack dependencies into pack virtual environment. +pip_opts = # comma separated list allowed here. +# Internal pool size for dispatcher used by regular actions. +actions_pool_size = 60 +# Default log level to use for Python runner actions. Can be overriden on invocation basis using "log_level" runner parameter. +python_runner_log_level = DEBUG +# Internal pool size for dispatcher used by workflow actions. +workflows_pool_size = 40 # Prefix for Python 3 installation (e.g. /opt/python3.6). If not specified, it tries to find Python 3 libraries in /usr/lib and /usr/local/lib. python3_prefix = None # Virtualenv binary which should be used to create pack virtualenvs. virtualenv_binary = /usr/bin/virtualenv -# Default log level to use for Python runner actions. Can be overriden on invocation basis using "log_level" runner parameter. -python_runner_log_level = DEBUG +# Python 3 binary which will be used by Python actions for packs which use Python 3 virtual environment. +python3_binary = /usr/bin/python3 +# Buffer size to use for real time action output streaming. 0 means unbuffered 1 means line buffered, -1 means system default, which usually means fully buffered and any other positive value means use a buffer of (approximately) that size +stream_output_buffer_size = -1 # List of virtualenv options to be passsed to "virtualenv" command that creates pack virtualenv. virtualenv_opts = --system-site-packages # comma separated list allowed here. -# List of pip options to be passed to "pip install" command when installing pack dependencies into pack virtual environment. -pip_opts = # comma separated list allowed here. # True to store and stream action output (stdout and stderr) in real-time. stream_output = True -# Buffer size to use for real time action output streaming. 0 means unbuffered 1 means line buffered, -1 means system default, which usually means fully buffered and any other positive value means use a buffer of (approximately) that size -stream_output_buffer_size = -1 -# Internal pool size for dispatcher used by workflow actions. -workflows_pool_size = 40 -# Internal pool size for dispatcher used by regular actions. -actions_pool_size = 60 +# location of the logging.conf file +logging = /etc/st2/logging.actionrunner.conf +# Python binary which will be used by Python actions. +python_binary = /usr/bin/python [api] -# StackStorm API server host -host = 127.0.0.1 -# StackStorm API server port -port = 9101 # List of origins allowed for api, auth and stream allow_origin = http://127.0.0.1:3000 # comma separated list allowed here. -# True to mask secrets in the API responses -mask_secrets = True -# None -debug = False # location of the logging.conf file logging = /etc/st2/logging.api.conf # Maximum limit (page size) argument which can be specified by the user in a query string. max_page_size = 100 +# True to mask secrets in the API responses +mask_secrets = True +# StackStorm API server host +host = 127.0.0.1 +# None +debug = False +# StackStorm API server port +port = 9101 [auth] # Common option - options below apply in both scenarios - when auth service is running as a WSGI # service (e.g. under Apache or Nginx) and when it's running in the standalone mode. -# Base URL to the API endpoint excluding the version -api_url = None # Enable authentication middleware. enable = True -# Access token ttl in seconds. -token_ttl = 86400 -# Service token ttl in seconds. 
-service_token_ttl = 86400 # Path to the logging config. logging = /etc/st2/logging.auth.conf -# Specify to enable debug mode. -debug = False +# Base URL to the API endpoint excluding the version +api_url = None +# Service token ttl in seconds. +service_token_ttl = 86400 +# Access token ttl in seconds. +token_ttl = 86400 # Authentication mode (proxy,standalone) mode = standalone +# Specify to enable debug mode. +debug = False # Standalone mode options - options below only apply when auth service is running in the standalone # mode. -# Host on which the service should listen on. -host = 127.0.0.1 -# Port on which the service should listen on. -port = 9100 -# Specify to enable SSL / TLS mode -use_ssl = False # Path to the SSL certificate file. Only used when "use_ssl" is specified. cert = /etc/apache2/ssl/mycert.crt +# JSON serialized arguments which are passed to the authentication backend in a standalone mode. +backend_kwargs = None +# Host on which the service should listen on. +host = 127.0.0.1 # Path to the SSL private key file. Only used when "use_ssl" is specified. key = /etc/apache2/ssl/mycert.key +# Specify to enable SSL / TLS mode +use_ssl = False +# Port on which the service should listen on. +port = 9100 # Authentication backend to use in a standalone mode. Available backends: flat_file. backend = flat_file -# JSON serialized arguments which are passed to the authentication backend in a standalone mode. -backend_kwargs = None [content] +# A URL pointing to the pack index. StackStorm Exchange is used by default. Use a comma-separated list for multiple indexes if you want to get other packs discovered with "st2 pack search". +index_url = https://index.stackstorm.org/v1/index.json # comma separated list allowed here. +# Path to the directory which contains system runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 +system_runners_base_path = /opt/stackstorm/runners +# Paths which will be searched for runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 +runners_base_paths = None # User group that can write to packs directory. pack_group = st2packs # Path to the directory which contains system packs. system_packs_base_path = /opt/stackstorm/packs -# Path to the directory which contains system runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 -system_runners_base_path = /opt/stackstorm/runners # Paths which will be searched for integration packs. packs_base_paths = None -# Paths which will be searched for runners. NOTE: This option has been deprecated and it's unused since StackStorm v3.0.0 -runners_base_paths = None -# A URL pointing to the pack index. StackStorm Exchange is used by default. Use a comma-separated list for multiple indexes if you want to get other packs discovered with "st2 pack search". -index_url = https://index.stackstorm.org/v1/index.json # comma separated list allowed here. [coordination] # Endpoint for the coordination server. url = None -# TTL for the lock if backend suports it. -lock_timeout = 60 # True to register StackStorm services in a service registry. service_registry = False +# TTL for the lock if backend suports it. +lock_timeout = 60 [database] -# host of db server -host = 127.0.0.1 -# port of db server -port = 27017 -# name of database -db_name = st2 # username for db login username = None -# password for db login -password = None -# Connection and server selection timeout (in ms). 
-connection_timeout = 3000 # Connection retry total time (minutes). connection_retry_max_delay_m = 3 -# Connection retry backoff max (seconds). -connection_retry_backoff_max_s = 10 -# Backoff multiplier (seconds). -connection_retry_backoff_mul = 1 -# Create the connection to mongodb using SSL -ssl = False -# Private keyfile used to identify the local connection against MongoDB. -ssl_keyfile = None -# Certificate file used to identify the localconnection -ssl_certfile = None -# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided -ssl_cert_reqs = None # ca_certs file contains a set of concatenated CA certificates, which are used to validate certificates passed from MongoDB. ssl_ca_certs = None +# Certificate file used to identify the localconnection +ssl_certfile = None +# Connection retry backoff max (seconds). +connection_retry_backoff_max_s = 10 # If True and `ssl_cert_reqs` is not None, enables hostname verification ssl_match_hostname = True +# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided +ssl_cert_reqs = None +# Create the connection to mongodb using SSL +ssl = False +# host of db server +host = 127.0.0.1 +# name of database +db_name = st2 +# Backoff multiplier (seconds). +connection_retry_backoff_mul = 1 # Specifies database authentication mechanisms. By default, it use SCRAM-SHA-1 with MongoDB 3.0 and later, MONGODB-CR (MongoDB Challenge Response protocol) for older servers. authentication_mechanism = None +# Private keyfile used to identify the local connection against MongoDB. +ssl_keyfile = None +# Connection and server selection timeout (in ms). +connection_timeout = 3000 +# password for db login +password = None +# port of db server +port = 27017 [exporter] -# Directory to dump data to. -dump_dir = /opt/stackstorm/exports/ # location of the logging.exporter.conf file logging = /etc/st2/logging.exporter.conf +# Directory to dump data to. +dump_dir = /opt/stackstorm/exports/ [garbagecollector] -# Location of the logging configuration file. -logging = /etc/st2/logging.garbagecollector.conf -# How often to check database for old data and perform garbage collection. -collection_interval = 600 -# How long to wait / sleep (in seconds) between collection of different object types. -sleep_delay = 2 # Action executions and related objects (live actions, action output objects) older than this value (days) will be automatically deleted. action_executions_ttl = None -# Action execution output objects (ones generated by action output streaming) older than this value (days) will be automatically deleted. -action_executions_output_ttl = 7 # Trigger instances older than this value (days) will be automatically deleted. trigger_instances_ttl = None +# Location of the logging configuration file. +logging = /etc/st2/logging.garbagecollector.conf +# How long to wait / sleep (in seconds) between collection of different object types. +sleep_delay = 2 # Set to True to perform garbage collection on Inquiries (based on the TTL value per Inquiry) purge_inquiries = False +# Action execution output objects (ones generated by action output streaming) older than this value (days) will be automatically deleted. +action_executions_output_ttl = 7 +# How often to check database for old data and perform garbage collection. +collection_interval = 600 [keyvalue] +# Location of the symmetric encryption key for encrypting values in kvstore. 
This key should be in JSON and should've been generated using st2-generate-symmetric-crypto-key tool. +encryption_key_path = # Allow encryption of values in key value stored qualified as "secret". enable_encryption = True -# Location of the symmetric encryption key for encrypting values in kvstore. This key should be in JSON and should've been generated using st2-generate-symmetric-crypto-key tool. -encryption_key_path = [log] -# Exclusion list of loggers to omit. -excludes = # comma separated list allowed here. # Controls if stderr should be redirected to the logs. redirect_stderr = False -# True to mask secrets in the log files. -mask_secrets = True +# Exclusion list of loggers to omit. +excludes = # comma separated list allowed here. # Blacklist of additional attribute names to mask in the log messages. mask_secrets_blacklist = # comma separated list allowed here. +# True to mask secrets in the log files. +mask_secrets = True [messaging] +# Certificate file used to identify the local connection (client). +ssl_certfile = None +# How many times should we retry connection before failing. +connection_retries = 10 +# Use SSL / TLS to connect to the messaging server. Same as appending "?ssl=true" at the end of the connection URL string. +ssl = False # URL of the messaging server. url = amqp://guest:guest@127.0.0.1:5672// +# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided. +ssl_cert_reqs = None # URL of all the nodes in a messaging service cluster. cluster_urls = # comma separated list allowed here. -# How many times should we retry connection before failing. -connection_retries = 10 # How long should we wait between connection retries. connection_retry_wait = 10000 -# Use SSL / TLS to connect to the messaging server. Same as appending "?ssl=true" at the end of the connection URL string. -ssl = False # Private keyfile used to identify the local connection against RabbitMQ. ssl_keyfile = None -# Certificate file used to identify the local connection (client). -ssl_certfile = None -# Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided. -ssl_cert_reqs = None # ca_certs file contains a set of concatenated CA certificates, which are used to validate certificates passed from RabbitMQ. ssl_ca_certs = None # Login method to use (AMQPLAIN, PLAIN, EXTERNAL, etc.). login_method = None [metrics] -# Driver type for metrics collection. -driver = noop +# Randomly sample and only send metrics for X% of metric operations to the backend. Default value of 1 means no sampling is done and all the metrics are sent to the backend. E.g. 0.1 would mean 10% of operations are sampled. +sample_rate = 1 # Destination server to connect to if driver requires connection. host = 127.0.0.1 -# Destination port to connect to if driver requires connection. -port = 8125 # Optional prefix which is prepended to all the metric names. Comes handy when you want to submit metrics from various environment to the same metric backend instance. prefix = None -# Randomly sample and only send metrics for X% of metric operations to the backend. Default value of 1 means no sampling is done and all the metrics are sent to the backend. E.g. 0.1 would mean 10% of operations are sampled. -sample_rate = 1 +# Driver type for metrics collection. +driver = noop +# Destination port to connect to if driver requires connection. +port = 8125 [mistral] -# v2 API root endpoint. 
-v2_base_url = http://127.0.0.1:8989/v2 +# URL Mistral uses to talk back to the API.If not provided it defaults to public API URL. Note: This needs to be a base URL without API version (e.g. http://127.0.0.1:9101) +api_url = None # Multiplier for the exponential backoff. retry_exp_msec = 1000 -# Max time for each set of backoff. -retry_exp_max_msec = 300000 -# Max time to stop retrying. -retry_stop_max_msec = 600000 +# Jitter interval to smooth out HTTP requests to mistral tasks and executions API. +jitter_interval = 0.1 +# Allow insecure communication with Mistral. +insecure = False # Username for authentication. keystone_username = None -# Password for authentication. -keystone_password = None +# Enable results tracking and disable callbacks. +enable_polling = False # OpenStack project scope. keystone_project_name = None +# Password for authentication. +keystone_password = None # Auth endpoint for Keystone. keystone_auth_url = None # Optional certificate to validate endpoint. cacert = None -# Allow insecure communication with Mistral. -insecure = False -# Enable results tracking and disable callbacks. -enable_polling = False -# Jitter interval to smooth out HTTP requests to mistral tasks and executions API. -jitter_interval = 0.1 -# URL Mistral uses to talk back to the API.If not provided it defaults to public API URL. Note: This needs to be a base URL without API version (e.g. http://127.0.0.1:9101) -api_url = None +# v2 API root endpoint. +v2_base_url = http://127.0.0.1:8989/v2 +# Max time to stop retrying. +retry_stop_max_msec = 600000 +# Max time for each set of backoff. +retry_exp_max_msec = 300000 [notifier] # Location of the logging configuration file. @@ -251,36 +251,36 @@ logging = /etc/st2/logging.notifier.conf enable_common_libs = False [resultstracker] -# Number of threads to use to query external workflow systems. -thread_pool_size = 10 # Time interval between queries to external workflow system. query_interval = 5 # Sleep delay in between queries when query queue is empty. empty_q_sleep_time = 1 -# Sleep delay for query when there is no more worker in pool. -no_workers_sleep_time = 1 # Location of the logging configuration file. logging = /etc/st2/logging.resultstracker.conf +# Sleep delay for query when there is no more worker in pool. +no_workers_sleep_time = 1 +# Number of threads to use to query external workflow systems. +thread_pool_size = 10 [rulesengine] # Location of the logging configuration file. logging = /etc/st2/logging.rulesengine.conf [scheduler] +# The maximum number of attempts that the scheduler retries on error. +retry_max_attempt = 10 # Location of the logging configuration file. logging = /etc/st2/logging.scheduler.conf +# How long (in seconds) to sleep between each action scheduler main loop run interval. +sleep_interval = 0.1 # How long GC to search back in minutes for orphaned scheduled actions execution_scheduling_timeout_threshold_min = 1 # The size of the pool used by the scheduler for scheduling executions. pool_size = 10 -# How long (in seconds) to sleep between each action scheduler main loop run interval. -sleep_interval = 0.1 -# How often (in seconds) to look for zombie execution requests before rescheduling them. -gc_interval = 10 -# The maximum number of attempts that the scheduler retries on error. -retry_max_attempt = 10 # The number of milliseconds to wait in between retries. retry_wait_msec = 3000 +# How often (in seconds) to look for zombie execution requests before rescheduling them. 
+gc_interval = 10 [schema] # Version of JSON schema to use. @@ -289,96 +289,96 @@ version = 4 draft = http://json-schema.org/draft-04/schema# [sensorcontainer] -# location of the logging.conf file -logging = /etc/st2/logging.sensorcontainer.conf -# name of the sensor node. -sensor_node_name = sensornode1 # Provider of sensor node partition config. partition_provider = {'name': 'default'} # Run in a single sensor mode where parent process exits when a sensor crashes / dies. This is useful in environments where partitioning, sensor process life cycle and failover is handled by a 3rd party service such as kubernetes. single_sensor_mode = False +# location of the logging.conf file +logging = /etc/st2/logging.sensorcontainer.conf +# name of the sensor node. +sensor_node_name = sensornode1 [ssh_runner] -# Location of the script on the remote filesystem. -remote_dir = /tmp -# How partial success of actions run on multiple nodes should be treated. -allow_partial_failure = False # Max number of parallel remote SSH actions that should be run. Works only with Paramiko SSH runner. max_parallel_actions = 50 +# Location of the script on the remote filesystem. +remote_dir = /tmp # Use the .ssh/config file. Useful to override ports etc. use_ssh_config = False # Path to the ssh config file. ssh_config_file_path = ~/.ssh/config +# How partial success of actions run on multiple nodes should be treated. +allow_partial_failure = False [stream] +# Specify to enable debug mode. +debug = False # Send empty message every N seconds to keep connection open heartbeat = 25 # StackStorm stream API server host host = 127.0.0.1 -# StackStorm API stream, server port -port = 9102 -# Specify to enable debug mode. -debug = False # location of the logging.conf file logging = /etc/st2/logging.stream.conf +# StackStorm API stream, server port +port = 9102 [syslog] # Host for the syslog server. host = 127.0.0.1 +# Transport protocol to use (udp / tcp). +protocol = udp # Port for the syslog server. port = 514 # Syslog facility level. facility = local7 -# Transport protocol to use (udp / tcp). -protocol = udp [system] # Enable debug mode. debug = False -# Base path to all st2 artifacts. -base_path = /opt/stackstorm # True to validate parameters for non-system trigger types when creatinga rule. By default, only parameters for system triggers are validated. validate_trigger_parameters = True -# True to validate payload for non-system trigger types when dispatching a trigger inside the sensor. By default, only payload for system triggers is validated. -validate_trigger_payload = True # True to validate action and runner output against schema. validate_output_schema = False +# True to validate payload for non-system trigger types when dispatching a trigger inside the sensor. By default, only payload for system triggers is validated. +validate_trigger_payload = True +# Base path to all st2 artifacts. +base_path = /opt/stackstorm [system_user] -# Default system user. -user = stanley # SSH private key for the system user. ssh_key_file = /home/stanley/.ssh/stanley_rsa +# Default system user. +user = stanley [timer] -# Location of the logging configuration file. NOTE: Deprecated in favor of timersengine.logging -logging = None -# Timezone pertaining to the location where st2 is run. NOTE: Deprecated in favor of timersengine.local_timezone -local_timezone = None # Specify to enable timer service. NOTE: Deprecated in favor of timersengine.enable enable = None +# Timezone pertaining to the location where st2 is run. 
NOTE: Deprecated in favor of timersengine.local_timezone +local_timezone = None +# Location of the logging configuration file. NOTE: Deprecated in favor of timersengine.logging +logging = None [timersengine] -# Location of the logging configuration file. -logging = /etc/st2/logging.timersengine.conf -# Timezone pertaining to the location where st2 is run. -local_timezone = America/Los_Angeles # Specify to enable timer service. enable = True +# Timezone pertaining to the location where st2 is run. +local_timezone = America/Los_Angeles +# Location of the logging configuration file. +logging = /etc/st2/logging.timersengine.conf [webui] # Base https URL to access st2 Web UI. This is used to construct history URLs that are sent out when chatops is used to kick off executions. webui_base_url = https://localhost [workflow_engine] +# Location of the logging configuration file. +logging = /etc/st2/logging.workflowengine.conf # Max time to stop retrying. retry_stop_max_msec = 60000 -# Interval inbetween retries. -retry_wait_fixed_msec = 1000 # Max jitter interval to smooth out retries. retry_max_jitter_msec = 1000 +# Interval inbetween retries. +retry_wait_fixed_msec = 1000 # Max seconds to allow workflow execution be idled before it is identified as orphaned and cancelled by the garbage collector. A value of zero means the feature is disabled. This is disabled by default. gc_max_idle_sec = 0 -# Location of the logging configuration file. -logging = /etc/st2/logging.workflowengine.conf From 49b3960e9977b3b60f18793e98519ef2b7b90cfe Mon Sep 17 00:00:00 2001 From: armab Date: Tue, 14 Apr 2020 22:42:15 +0100 Subject: [PATCH 8/9] Fix st2.conf.sample re-generation one more time "Thanks" to IDE "fixing" trailing spaces --- conf/st2.conf.sample | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/st2.conf.sample b/conf/st2.conf.sample index 3ae734a38f..bd99093c24 100644 --- a/conf/st2.conf.sample +++ b/conf/st2.conf.sample @@ -166,7 +166,7 @@ collection_interval = 600 [keyvalue] # Location of the symmetric encryption key for encrypting values in kvstore. This key should be in JSON and should've been generated using st2-generate-symmetric-crypto-key tool. -encryption_key_path = +encryption_key_path = # Allow encryption of values in key value stored qualified as "secret". enable_encryption = True From 580da94c48b4b79a33368030d2477991e924b211 Mon Sep 17 00:00:00 2001 From: armab Date: Tue, 14 Apr 2020 23:13:16 +0100 Subject: [PATCH 9/9] Fix lint warnings for scheduler/handler.py --- st2actions/st2actions/scheduler/handler.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/st2actions/st2actions/scheduler/handler.py b/st2actions/st2actions/scheduler/handler.py index 01edeb2632..c0a1e4e8b4 100644 --- a/st2actions/st2actions/scheduler/handler.py +++ b/st2actions/st2actions/scheduler/handler.py @@ -54,15 +54,14 @@ def __init__(self): self.message_type = LiveActionDB self._shutdown = False self._pool = eventlet.GreenPool(size=cfg.CONF.scheduler.pool_size) - # If an ActionExecutionSchedulingQueueItemDB object hasn't been updated fore more than this amount - # of milliseconds, it will be marked as "handled=False". - # As soon as an item is picked by scheduler to be processed, it should be processed very fast - # (< 5 seconds). If an item is still being marked as processing it likely indicates that the - # scheduler process which was processing that item crashed or similar so we need to mark it as - # "handling=False" so some other scheduler process can pick it up. 
+ # If an ActionExecutionSchedulingQueueItemDB object hasn't been updated fore more than + # this amount of milliseconds, it will be marked as "handled=False". + # As soon as an item is picked by scheduler to be processed, it should be processed very + # fast (< 5 seconds). If an item is still being marked as processing it likely indicates + # that the scheduler process which was processing that item crashed or similar so we need + # to mark it as "handling=False" so some other scheduler process can pick it up. self._execution_scheduling_timeout_threshold_ms = \ - cfg.CONF.scheduler.execution_scheduling_timeout_threshold_min \ - * 60 * 1000 + cfg.CONF.scheduler.execution_scheduling_timeout_threshold_min * 60 * 1000 self._coordinator = coordination_service.get_coordinator(start_heart=True) self._main_thread = None self._cleanup_thread = None
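For reference, a minimal standalone sketch (not part of the patch series) of how the new scheduler.execution_scheduling_timeout_threshold_min option introduced above is registered and consumed with oslo.config, mirroring scheduler/config.py and scheduler/handler.py. The option name, group, default, and help string are taken from the patches; the rest is illustrative only.

    from oslo_config import cfg

    # Register the option in the [scheduler] group, as scheduler/config.py does.
    scheduler_opts = [
        cfg.FloatOpt(
            'execution_scheduling_timeout_threshold_min', default=1,
            help='How long GC to search back in minutes for orphaned scheduled actions'),
    ]
    cfg.CONF.register_opts(scheduler_opts, group='scheduler')
    cfg.CONF([])  # parse an empty argv so the default (or an st2.conf override) is available

    # The handler converts the configured minutes into the millisecond threshold used
    # when resetting the "handling" flag on stale scheduling queue items.
    threshold_ms = cfg.CONF.scheduler.execution_scheduling_timeout_threshold_min * 60 * 1000
    print(threshold_ms)  # 60000.0 with the default of 1 minute

With the default of 1 minute this reproduces the 60 * 1000 ms value of the removed EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS constant, while letting operators tune it via st2.conf.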