diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4df35d515..5e2872a76 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,6 +55,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 
 - Added `APICAST_CLIENT_REQUEST_HEADER_BUFFERS` variable to allow configure of the NGINX `client_request_header_buffers` directive: [PR #1446](https://github.com/3scale/APIcast/pull/1446), [THREESCALE-10164](https://issues.redhat.com/browse/THREESCALE-10164)
 
+- Added the `APICAST_POLICY_BATCHER_SHARED_MEMORY_SIZE` variable to allow configuration of the batcher policy's shared memory size. [PR #1452](https://github.com/3scale/APIcast/pull/1452), [THREESCALE-9537](https://issues.redhat.com/browse/THREESCALE-9537)
+
 ## [3.14.0] 2023-07-25
 
 ### Fixed
diff --git a/doc/parameters.md b/doc/parameters.md
index ac0b40809..362617b32 100644
--- a/doc/parameters.md
+++ b/doc/parameters.md
@@ -517,6 +517,13 @@ Sets the maximum number and size of buffers used for reading large client requests
 
 The format for this value is defined by the [`large_client_header_buffers` NGINX directive](https://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers)
 
+### `APICAST_POLICY_BATCHER_SHARED_MEMORY_SIZE`
+
+**Default:** 20m
+**Value:** string
+
+Sets the maximum size of the shared memory used by the batcher policy. The accepted [size units](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_shared_dict) are `k` and `m`.
+
 ### `OPENTELEMETRY`
 
 This environment variable enables NGINX instrumentation using OpenTelemetry tracing library.
diff --git a/gateway/conf.d/backend.conf b/gateway/conf.d/backend.conf
index 8c9da83fa..7fb7681ec 100644
--- a/gateway/conf.d/backend.conf
+++ b/gateway/conf.d/backend.conf
@@ -21,3 +21,15 @@
 location /transactions/oauth_authrep.xml {
   echo "transactions oauth_authrep!";
 }
+
+location /transactions/authorize.xml {
+  access_by_lua_block {
+    local delay = tonumber(ngx.var.arg_delay) or 0
+
+    if delay > 0 then
+      ngx.sleep(delay)
+    end
+  }
+
+  echo "transactions authorize!";
+}
diff --git a/gateway/conf/nginx.conf.liquid b/gateway/conf/nginx.conf.liquid
index 71caf5721..87fda8569 100644
--- a/gateway/conf/nginx.conf.liquid
+++ b/gateway/conf/nginx.conf.liquid
@@ -94,15 +94,6 @@ http {
   {% include file %}
   {% endfor %}
 
-  lua_shared_dict limiter 1m;
-
-  # This shared dictionaries are only used in the 3scale batcher policy.
-  # This is not ideal, but they'll need to be here until we allow policies to
-  # modify this template.
-  lua_shared_dict cached_auths 20m;
-  lua_shared_dict batched_reports 20m;
-  lua_shared_dict batched_reports_locks 1m;
-
 {% for file in "sites.d/*.conf" | filesystem %}
   {% include file %}
 {% endfor %}
diff --git a/gateway/http.d/shdict.conf b/gateway/http.d/shdict.conf
index fd1404395..9bb314143 100644
--- a/gateway/http.d/shdict.conf
+++ b/gateway/http.d/shdict.conf
@@ -2,3 +2,11 @@ lua_shared_dict api_keys 30m;
 lua_shared_dict rate_limit_headers 20m;
 lua_shared_dict configuration 10m;
 lua_shared_dict locks 1m;
+lua_shared_dict limiter 1m;
+
+# These shared dictionaries are only used in the 3scale batcher policy.
+# They will need to stay here until we allow policies to
+# modify this template.
+lua_shared_dict cached_auths 20m;
+lua_shared_dict batched_reports {{env.APICAST_POLICY_BATCHER_SHARED_MEMORY_SIZE | default: "20m"}};
+lua_shared_dict batched_reports_locks 1m;
diff --git a/gateway/src/apicast/policy/3scale_batcher/3scale_batcher.lua b/gateway/src/apicast/policy/3scale_batcher/3scale_batcher.lua
index 917d7d4fb..844aa982d 100644
--- a/gateway/src/apicast/policy/3scale_batcher/3scale_batcher.lua
+++ b/gateway/src/apicast/policy/3scale_batcher/3scale_batcher.lua
@@ -127,20 +127,12 @@ local function update_downtime_cache(cache, transaction, backend_status, cache_handler)
   cache_handler(cache, key, { status = backend_status })
 end
 
-local function handle_backend_ok(self, transaction, cache_handler)
-  if cache_handler then
-    update_downtime_cache(self.backend_downtime_cache, transaction, 200, cache_handler)
-  end
-
+local function handle_backend_ok(self, transaction)
   self.auths_cache:set(transaction, 200)
   self.reports_batcher:add(transaction)
 end
 
-local function handle_backend_denied(self, service, transaction, status, headers, cache_handler)
-  if cache_handler then
-    update_downtime_cache(self.backend_downtime_cache, transaction, status, cache_handler)
-  end
-
+local function handle_backend_denied(self, service, transaction, status, headers)
   local rejection_reason = rejection_reason_from_headers(headers)
   self.auths_cache:set(transaction, status, rejection_reason)
   return error(service, rejection_reason)
@@ -182,7 +174,7 @@ end
 -- might want to introduce a mechanism to avoid this and reduce the number of
 -- calls to backend.
 function _M:access(context)
-  local backend = backend_client:new(context.service, http_ng_resty)
+  local backend = assert(backend_client:new(context.service, http_ng_resty), 'missing backend')
   local usage = context.usage or {}
   local service = context.service
   local service_id = service.id
@@ -216,17 +208,20 @@
   local formatted_usage = usage:format()
   local backend_res = backend:authorize(formatted_usage, credentials)
   context:publish_backend_auth(backend_res)
-  local backend_status = backend_res.status
+  local backend_status = backend_res and backend_res.status
   local cache_handler = context.cache_handler -- Set by Caching policy
   -- this is needed, because in allow mode, the status maybe is always 200, so
   -- Request need to go to the Upstream API
-  update_downtime_cache(self.backend_downtime_cache, transaction, backend_status, cache_handler)
+  if cache_handler then
+    update_downtime_cache(
+      self.backend_downtime_cache, transaction, backend_status, cache_handler)
+  end
 
   if backend_status == 200 then
-    handle_backend_ok(self, transaction, cache_handler)
+    handle_backend_ok(self, transaction)
   elseif backend_status >= 400 and backend_status < 500 then
     handle_backend_denied(
-      self, service, transaction, backend_status, backend_res.headers, cache_handler)
+      self, service, transaction, backend_status, backend_res.headers)
   else
     handle_backend_error(self, service, transaction, cache_handler)
   end
diff --git a/gateway/src/apicast/policy/3scale_batcher/README.md b/gateway/src/apicast/policy/3scale_batcher/README.md
index f3cbd575f..abaeeff78 100644
--- a/gateway/src/apicast/policy/3scale_batcher/README.md
+++ b/gateway/src/apicast/policy/3scale_batcher/README.md
@@ -63,3 +63,29 @@ The effectiveness of this policy will depend on the cache hit ratio. For use
 cases where the variety of services, apps, metrics, etc. is relatively low,
 caching and batching will be very effective and will increase the throughput
 of the system significantly.
+
+### How many key/value pairs can be contained in 20m of shared memory?
+
+On a Linux x86_64 system, `1m` of shared memory can hold 4033 key/value pairs with 64-byte keys
+and 15-byte values. Increasing the shared storage to `10m` gives 40673 key/value pairs.
+Growth is roughly linear, as expected.
+
+The following table shows the number of key/value pairs for different storage sizes:
+
+| Shared storage size | Key length (bytes) | Value length (bytes) | Key/value pairs |
+| ---- | ---- | ---- | ---- |
+| 10m | 64 | 15 | 40672 |
+| | 128 | 15 | 40672 |
+| | 256 | 15 | 20336 |
+| | 512 | 15 | 10168 |
+| 20m | 64 | 15 | 81408 |
+| | 128 | 15 | 81408 |
+| | 256 | 15 | 40704 |
+| | 512 | 15 | 20352 |
+| 40m | 64 | 15 | 162848 |
+| | 128 | 15 | 162848 |
+| | 256 | 15 | 81424 |
+| | 512 | 15 | 40712 |
+
+In practice, the actual number will depend on the size of each key/value pair, the
+underlying operating system (OS) architecture, memory segment sizes, and so on. More details can be found [here](https://blog.openresty.com/en/nginx-shm-frag/).
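
The numbers above can be approximated empirically. Below is a minimal sketch (not part of the diff) of a throwaway OpenResty config that counts how many pairs fit in a zone; the dictionary name `capacity_test`, the `/count` endpoint, and the listen port are invented for illustration. It relies on `safe_set`, which, unlike `set`, never evicts older entries and instead fails with a `no memory` error once the zone is full:

```nginx
# Throwaway capacity probe; dictionary name, endpoint, and port are illustrative.
worker_processes 1;
events {}

http {
    lua_shared_dict capacity_test 10m;

    server {
        listen 8080;

        # GET /count?key_len=64 reports how many 15-byte values with
        # keys of the given length fit into the 10m zone.
        location /count {
            content_by_lua_block {
                local dict = ngx.shared.capacity_test

                -- start from an empty zone on every request
                dict:flush_all()
                dict:flush_expired()

                local key_len = tonumber(ngx.var.arg_key_len) or 64
                local value = string.rep("v", 15)
                local count = 0

                while true do
                    -- build a unique key padded to the requested length
                    local id = tostring(count)
                    local key = string.rep("k", key_len - #id) .. id

                    -- safe_set fails instead of evicting when the zone is full
                    local ok, err = dict:safe_set(key, value)
                    if not ok then
                        ngx.say(count, " pairs stored before: ", err)
                        return
                    end
                    count = count + 1
                end
            }
        }
    }
}
```

Requesting `/count?key_len=64`, `/count?key_len=128`, and so on reproduces one row of the table per call; changing the size in `lua_shared_dict capacity_test 10m;` covers the other rows.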
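
For reference, a sketch of how the new Liquid expression in `gateway/http.d/shdict.conf` renders (illustrative output, not captured from a running gateway):

```nginx
# With APICAST_POLICY_BATCHER_SHARED_MEMORY_SIZE=40m in the gateway
# environment, the template line renders as:
lua_shared_dict batched_reports 40m;

# With the variable unset, the `default: "20m"` filter keeps the
# previously hard-coded size:
lua_shared_dict batched_reports 20m;
```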