From b696a78793fc8c3af93a80dabaf884a83be1468b Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 22 Jul 2024 09:58:33 +0200 Subject: [PATCH 01/57] In CITGM, skip tests that are flaky there (#3413) Signed-off-by: Matteo Collina --- test/connect-timeout.js | 8 +++++--- test/node-test/debug.js | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/test/connect-timeout.js b/test/connect-timeout.js index 0a28e1f250d..ff50eb777a6 100644 --- a/test/connect-timeout.js +++ b/test/connect-timeout.js @@ -6,8 +6,10 @@ const { Client, Pool, errors } = require('..') const net = require('node:net') const assert = require('node:assert') +const skip = !!process.env.CITGM + // Using describe instead of test to avoid the timeout -describe('prioritize socket errors over timeouts', async () => { +describe('prioritize socket errors over timeouts', { skip }, async () => { const t = tspl({ ...assert, after: () => {} }, { plan: 1 }) const client = new Pool('http://foorbar.invalid:1234', { connectTimeout: 1 }) @@ -29,7 +31,7 @@ net.connect = function (options) { return new net.Socket(options) } -test('connect-timeout', async t => { +test('connect-timeout', { skip }, async t => { t = tspl(t, { plan: 1 }) const client = new Client('http://localhost:9000', { @@ -52,7 +54,7 @@ test('connect-timeout', async t => { await t.completed }) -test('connect-timeout', async t => { +test('connect-timeout', { skip }, async t => { t = tspl(t, { plan: 1 }) const client = new Pool('http://localhost:9000', { diff --git a/test/node-test/debug.js b/test/node-test/debug.js index 16de2f2883f..fbef523f5f5 100644 --- a/test/node-test/debug.js +++ b/test/node-test/debug.js @@ -8,7 +8,9 @@ const { tspl } = require('@matteo.collina/tspl') // eslint-disable-next-line no-control-regex const removeEscapeColorsRE = /[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g -test('debug#websocket', { skip: !process.versions.icu }, async t => { +const isCITGM = !!process.env.CITGM + +test('debug#websocket', { skip: !process.versions.icu || isCITGM }, async t => { const assert = tspl(t, { plan: 6 }) const child = spawn( process.execPath, @@ -44,7 +46,7 @@ test('debug#websocket', { skip: !process.versions.icu }, async t => { await assert.completed }) -test('debug#fetch', async t => { +test('debug#fetch', { skip: isCITGM }, async t => { const assert = tspl(t, { plan: 7 }) const child = spawn( process.execPath, @@ -78,7 +80,7 @@ test('debug#fetch', async t => { await assert.completed }) -test('debug#undici', async t => { +test('debug#undici', { skip: isCITGM }, async t => { // Due to Node.js webpage redirect const assert = tspl(t, { plan: 7 }) const child = spawn( From 99102ccf646f08708d49187ddc835a016cbfd6f9 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 22 Jul 2024 10:11:42 +0200 Subject: [PATCH 02/57] Bumped v6.19.3 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 8187dad6884..3b3450ebfb2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.2", + "version": "6.19.3", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From e51aa88e2564558d324adb288d9d806fa74f06a4 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 22 Jul 2024 15:56:18 +0200 Subject: [PATCH 03/57] Update esbuild to 0.19.10 (#3415) Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 3b3450ebfb2..2840382f19b 100644 --- a/package.json +++ b/package.json @@ -62,7 +62,7 @@ "main": "index.js", "types": "index.d.ts", "scripts": { - "build:node": "npx esbuild@0.19.4 index-fetch.js --bundle --platform=node --outfile=undici-fetch.js --define:esbuildDetection=1 --keep-names && node scripts/strip-comments.js", + "build:node": "npx esbuild@0.19.10 index-fetch.js --bundle --platform=node --outfile=undici-fetch.js --define:esbuildDetection=1 --keep-names && node scripts/strip-comments.js", "prebuild:wasm": "node build/wasm.js --prebuild", "build:wasm": "node build/wasm.js --docker", "lint": "standard | snazzy", From 62241c3600513cf0e8eac11cf16ed9dca98a80ac Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 22 Jul 2024 15:57:07 +0200 Subject: [PATCH 04/57] Bumped v6.19.4 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 2840382f19b..60679e07fa9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.3", + "version": "6.19.4", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 93605ab267564f8de6769411797fc6e0b0b9431b Mon Sep 17 00:00:00 2001 From: Richard Lau Date: Mon, 29 Jul 2024 16:02:58 +0100 Subject: [PATCH 05/57] fix: restore externalized Node.js dep compatibility (#3421) * fix: restore externalized Node.js dep compatibility Restore the ability to build Undici compatible with Node.js' `configure --shared-builtin-undici/undici-path ...` build option. Scopes the `hasApk` conditional to only cover the part that requires `apk`. Makes the WASM optimizer (binaryen) optional to allow building on Linux distributions that do not package `binaryen` and must be able to rebuild everything (including tooling) from source. * ci: add workflow for externalized Node.js dep Add a workflow to test building Undici in a way compatible with Node.js built with `configure --shared-builtin-undici/undici-path ...`. This configuration is used by downstream Node.js packagers (e.g. Fedora) who require the ability to be able to build everything from source. --- .github/workflows/nodejs-shared.yml | 102 ++++++++++++++++++++++++++++ CONTRIBUTING.md | 3 +- build/wasm.js | 48 +++++++++---- 3 files changed, 138 insertions(+), 15 deletions(-) create mode 100644 .github/workflows/nodejs-shared.yml diff --git a/.github/workflows/nodejs-shared.yml b/.github/workflows/nodejs-shared.yml new file mode 100644 index 00000000000..c13a793362e --- /dev/null +++ b/.github/workflows/nodejs-shared.yml @@ -0,0 +1,102 @@ +name: Node.js compiled --shared-builtin-undici/undici-path CI + +on: + push: + branches: + - main + - current + - next + - 'v*' + pull_request: + +permissions: + contents: read + +jobs: + test-shared-builtin: + name: Test with Node.js ${{ matrix.version }} compiled --shared-builtin-undici/undici-path + strategy: + fail-fast: false + max-parallel: 0 + matrix: + version: [20, 22] + runs-on: ubuntu-latest + timeout-minutes: 120 + steps: + # Checkout into a subdirectory otherwise Node.js tests will break due to finding Undici's package.json in a parent directory. 
+ - name: Checkout + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + path: ./undici + persist-credentials: false + + # Setup node, install deps, and build undici prior to building node with `--shared-builtin-undici/undici-path` and testing + - name: Setup Node.js@${{ inputs.version }} + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version: ${{ inputs.version }} + + - name: Install dependencies + working-directory: ./undici + run: npm install + + - name: Install wasi-libc + run: sudo apt-get install -y wasi-libc + + - name: Build WASM + working-directory: ./undici + run: | + export EXTERNAL_PATH=${{ github.workspace }}/undici + export WASM_CC=clang + export WASM_CFLAGS='--target=wasm32-wasi --sysroot=/usr' + export WASM_LDFLAGS='-nodefaultlibs' + export WASM_LDLIBS='-lc' + node build/wasm.js + + - name: Determine latest release + id: release + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + result-encoding: string + script: | + const req = await fetch('https://nodejs.org/download/release/index.json') + const releases = await req.json() + + const latest = releases.find((r) => r.version.startsWith('v${{ matrix.version }}')) + return latest.version + + - name: Download and extract source + run: curl https://nodejs.org/download/release/${{ steps.release.outputs.result }}/node-${{ steps.release.outputs.result }}.tar.xz | tar xfJ - + + - name: Install ninja + run: sudo apt-get install ninja-build + + - name: ccache + uses: hendrikmuhs/ccache-action@c92f40bee50034e84c763e33b317c77adaa81c92 #v1.2.13 + with: + key: node(external_undici)${{ matrix.version }} + + - name: Build node + working-directory: ./node-${{ steps.release.outputs.result }} + run: | + export CC="ccache gcc" + export CXX="ccache g++" + rm -rf deps/undici + ./configure --shared-builtin-undici/undici-path ${{ github.workspace }}/undici/loader.js --ninja --prefix=./final + make + make install + echo "$(pwd)/final/bin" >> $GITHUB_PATH + + - name: Print version information + run: | + echo OS: $(node -p "os.version()") + echo Node.js: $(node --version) + echo npm: $(npm --version) + echo git: $(git --version) + echo external config: $(node -e "console.log(process.config)" | grep NODE_SHARED_BUILTIN_UNDICI_UNDICI_PATH) + echo Node.js built-in undici version: $(node -p "process.versions.undici") # undefined for external Undici + + - name: Run tests + working-directory: ./node-${{ steps.release.outputs.result }} + run: tools/test.py -p dots --flaky-tests=dontcare + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index febb3bcae02..6b6c01d2264 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -157,7 +157,8 @@ If you are packaging `undici` for a distro, this might help if you would like to an unbundled version instead of bundling one in `libnode.so`. To enable this, pass `EXTERNAL_PATH=/path/to/global/node_modules/undici` to `build/wasm.js`. -You shall also pass this path to `--shared-builtin-undici/undici-path` in Node.js's `configure.py`. +Pass this path with `loader.js` appended to `--shared-builtin-undici/undici-path` in Node.js's `configure.py`. +If building on a non-Alpine Linux distribution, you may need to also set the `WASM_CC`, `WASM_CFLAGS`, `WASM_LDFLAGS` and `WASM_LDLIBS` environment variables before running `build/wasm.js`. 
### Benchmarks diff --git a/build/wasm.js b/build/wasm.js index 9c88427874a..0e86e11f195 100644 --- a/build/wasm.js +++ b/build/wasm.js @@ -15,6 +15,9 @@ let WASM_CFLAGS = process.env.WASM_CFLAGS || '--sysroot=/usr/share/wasi-sysroot let WASM_LDFLAGS = process.env.WASM_LDFLAGS || '' const WASM_LDLIBS = process.env.WASM_LDLIBS || '' +// For compatibility with Node.js' `configure --shared-builtin-undici/undici-path ...` +const EXTERNAL_PATH = process.env.EXTERNAL_PATH + // These are relevant for undici and should not be overridden WASM_CFLAGS += ' -Ofast -fno-exceptions -fvisibility=hidden -mexec-model=reactor' WASM_LDFLAGS += ' -Wl,-error-limit=0 -Wl,-O3 -Wl,--lto-O3 -Wl,--strip-all' @@ -73,6 +76,9 @@ if (process.argv[2] === '--docker') { const hasApk = (function () { try { execSync('command -v apk'); return true } catch (error) { return false } })() +const hasOptimizer = (function () { + try { execSync('./wasm-opt --version'); return true } catch (error) { return false } +})() if (hasApk) { // Gather information about the tools used for the build const buildInfo = execSync('apk info -v').toString() @@ -81,24 +87,38 @@ if (hasApk) { process.exit(-1) } console.log(buildInfo) +} - // Build wasm binary - execSync(`${WASM_CC} ${WASM_CFLAGS} ${WASM_LDFLAGS} \ - ${join(WASM_SRC, 'src')}/*.c \ - -I${join(WASM_SRC, 'include')} \ - -o ${join(WASM_OUT, 'llhttp.wasm')} \ - ${WASM_LDLIBS}`, { stdio: 'inherit' }) +// Build wasm binary +execSync(`${WASM_CC} ${WASM_CFLAGS} ${WASM_LDFLAGS} \ +${join(WASM_SRC, 'src')}/*.c \ +-I${join(WASM_SRC, 'include')} \ +-o ${join(WASM_OUT, 'llhttp.wasm')} \ +${WASM_LDLIBS}`, { stdio: 'inherit' }) +if (hasOptimizer) { execSync(`./wasm-opt ${WASM_OPT_FLAGS} -o ${join(WASM_OUT, 'llhttp.wasm')} ${join(WASM_OUT, 'llhttp.wasm')}`, { stdio: 'inherit' }) - writeWasmChunk('llhttp.wasm', 'llhttp-wasm.js') +} +writeWasmChunk('llhttp.wasm', 'llhttp-wasm.js') - // Build wasm simd binary - execSync(`${WASM_CC} ${WASM_CFLAGS} -msimd128 ${WASM_LDFLAGS} \ - ${join(WASM_SRC, 'src')}/*.c \ - -I${join(WASM_SRC, 'include')} \ - -o ${join(WASM_OUT, 'llhttp_simd.wasm')} \ - ${WASM_LDLIBS}`, { stdio: 'inherit' }) +// Build wasm simd binary +execSync(`${WASM_CC} ${WASM_CFLAGS} -msimd128 ${WASM_LDFLAGS} \ +${join(WASM_SRC, 'src')}/*.c \ +-I${join(WASM_SRC, 'include')} \ +-o ${join(WASM_OUT, 'llhttp_simd.wasm')} \ +${WASM_LDLIBS}`, { stdio: 'inherit' }) +if (hasOptimizer) { execSync(`./wasm-opt ${WASM_OPT_FLAGS} --enable-simd -o ${join(WASM_OUT, 'llhttp_simd.wasm')} ${join(WASM_OUT, 'llhttp_simd.wasm')}`, { stdio: 'inherit' }) - writeWasmChunk('llhttp_simd.wasm', 'llhttp_simd-wasm.js') +} +writeWasmChunk('llhttp_simd.wasm', 'llhttp_simd-wasm.js') + +// For compatibility with Node.js' `configure --shared-builtin-undici/undici-path ...` +if (EXTERNAL_PATH) { + writeFileSync(join(ROOT, 'loader.js'), ` +'use strict' +globalThis.__UNDICI_IS_NODE__ = true +module.exports = require('node:module').createRequire('${EXTERNAL_PATH}/loader.js')('./index-fetch.js') +delete globalThis.__UNDICI_IS_NODE__ +`) } From 8499c4bbdc9dcff3a5ce36fac41b4e9497814a16 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 31 Jul 2024 12:32:26 +0200 Subject: [PATCH 06/57] Bumped v6.19.5 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 60679e07fa9..88aa14df839 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.4", + "version": "6.19.5", "description": "An HTTP/1.1 client, 
written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 638ee3235de2ce4c2096016e0c54ce66e4c24e5e Mon Sep 17 00:00:00 2001 From: Suneil Nyamathi Date: Thu, 8 Aug 2024 23:31:53 -0700 Subject: [PATCH 07/57] fix: memory leak in finalization first appearing in v6.16.0 (#3445) * fix: memory leak Holding a reference to the stream in the finalization registry causes a memory leak, even when consuming the body. * docs: add comment explaining the strong reference * typo --- lib/web/fetch/response.js | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/web/fetch/response.js b/lib/web/fetch/response.js index 8c00835698e..603410a4a63 100644 --- a/lib/web/fetch/response.js +++ b/lib/web/fetch/response.js @@ -34,8 +34,9 @@ const hasFinalizationRegistry = globalThis.FinalizationRegistry && process.versi let registry if (hasFinalizationRegistry) { - registry = new FinalizationRegistry((stream) => { - if (!stream.locked && !isDisturbed(stream) && !isErrored(stream)) { + registry = new FinalizationRegistry((weakRef) => { + const stream = weakRef.deref() + if (stream && !stream.locked && !isDisturbed(stream) && !isErrored(stream)) { stream.cancel('Response object has been garbage collected').catch(noop) } }) @@ -526,7 +527,12 @@ function fromInnerResponse (innerResponse, guard) { setHeadersGuard(response[kHeaders], guard) if (hasFinalizationRegistry && innerResponse.body?.stream) { - registry.register(response, innerResponse.body.stream) + // If the target (response) is reclaimed, the cleanup callback may be called at some point with + // the held value provided for it (innerResponse.body.stream). The held value can be any value: + // a primitive or an object, even undefined. If the held value is an object, the registry keeps + // a strong reference to it (so it can pass it to the cleanup callback later). 
Reworded from + // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry + registry.register(response, new WeakRef(innerResponse.body.stream)) } return response From b9bf7adfddeb9cd93a9433a0a1d38e39afa5dfb5 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 9 Aug 2024 10:36:54 +0200 Subject: [PATCH 08/57] Bumped v6.19.6 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 88aa14df839..8dc767de3f7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.5", + "version": "6.19.6", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 09c566756b042798728b59ee4d4d2033d939e4f1 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 9 Aug 2024 17:47:49 +0200 Subject: [PATCH 09/57] build: remove -i flag to docker run to allow command being run without a TTY Signed-off-by: Matteo Collina --- build/wasm.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/wasm.js b/build/wasm.js index 0e86e11f195..2f65c0e1d74 100644 --- a/build/wasm.js +++ b/build/wasm.js @@ -62,7 +62,7 @@ if (process.argv[2] === '--prebuild') { } if (process.argv[2] === '--docker') { - let cmd = `docker run --rm -it --platform=${platform.toString().trim()}` + let cmd = `docker run --rm -t --platform=${platform.toString().trim()}` if (process.platform === 'linux') { cmd += ` --user ${process.getuid()}:${process.getegid()}` } From c81f5a72d653917da08eb7ccc99e332aed51768c Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Fri, 9 Aug 2024 17:48:30 +0200 Subject: [PATCH 10/57] Bumped v6.19.7 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 8dc767de3f7..86fbc7e26b3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.6", + "version": "6.19.7", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 52870545df66d9b391bb7200dfccf4490406bf46 Mon Sep 17 00:00:00 2001 From: Suneil Nyamathi Date: Wed, 14 Aug 2024 01:02:46 -0700 Subject: [PATCH 11/57] test: add test for memory leak (#3450) * test: add test for memory leak * lint --- test/fetch/response.js | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/test/fetch/response.js b/test/fetch/response.js index 912c24a40e3..86c57b13658 100644 --- a/test/fetch/response.js +++ b/test/fetch/response.js @@ -2,6 +2,8 @@ const { test } = require('node:test') const assert = require('node:assert') +const { setImmediate } = require('node:timers/promises') +const { AsyncLocalStorage } = require('node:async_hooks') const { tspl } = require('@matteo.collina/tspl') const { Response, @@ -285,3 +287,29 @@ test('fromInnerResponse', () => { assert.strictEqual(getHeadersList(response[kHeaders]), innerResponse.headersList) assert.strictEqual(getHeadersGuard(response[kHeaders]), 'immutable') }) + +test('clone body garbage collection', async () => { + const asyncLocalStorage = new AsyncLocalStorage() + let ref + + await new Promise(resolve => { + asyncLocalStorage.run(new Map(), async () => { + const res = new Response('hello world') + const clone = res.clone() + + asyncLocalStorage.getStore().set('key', clone) + ref = new WeakRef(clone.body) + + await res.text() + await clone.text() // consume 
body + + resolve() + }) + }) + + await setImmediate() + global.gc() + + const cloneBody = ref.deref() + assert.equal(cloneBody, undefined, 'clone body was not garbage collected') +}) From bc463323b41bf2cfa1855236e2579d0961025e10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Stan=C4=9Bk?= Date: Wed, 14 Aug 2024 14:48:58 +0200 Subject: [PATCH 12/57] build: parametrize the location of wasm-opt (#3454) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The wasm-opt binary may be available in different place than the local directory (`./wasm-opt`) – for example, in /usr/bin/wasm-opt. Similarly to the other parametrized WASM build options, use WASM_OPT environment variable to identify the wanted binary, with fallback to the previous value. Even with the environment variable available, the `hasOptimizer` is kept to make the optimization optional. Signed-off-by: Jan Staněk --- CONTRIBUTING.md | 1 + build/wasm.js | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6b6c01d2264..68c9b977f1a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -159,6 +159,7 @@ an unbundled version instead of bundling one in `libnode.so`. To enable this, pass `EXTERNAL_PATH=/path/to/global/node_modules/undici` to `build/wasm.js`. Pass this path with `loader.js` appended to `--shared-builtin-undici/undici-path` in Node.js's `configure.py`. If building on a non-Alpine Linux distribution, you may need to also set the `WASM_CC`, `WASM_CFLAGS`, `WASM_LDFLAGS` and `WASM_LDLIBS` environment variables before running `build/wasm.js`. +Similarly, you can set the `WASM_OPT` environment variable to utilize your own `wasm-opt` optimizer. ### Benchmarks diff --git a/build/wasm.js b/build/wasm.js index 2f65c0e1d74..1880ce3dfe4 100644 --- a/build/wasm.js +++ b/build/wasm.js @@ -14,6 +14,7 @@ const WASM_CC = process.env.WASM_CC || 'clang' let WASM_CFLAGS = process.env.WASM_CFLAGS || '--sysroot=/usr/share/wasi-sysroot -target wasm32-unknown-wasi' let WASM_LDFLAGS = process.env.WASM_LDFLAGS || '' const WASM_LDLIBS = process.env.WASM_LDLIBS || '' +const WASM_OPT = process.env.WASM_OPT || './wasm-opt' // For compatibility with Node.js' `configure --shared-builtin-undici/undici-path ...` const EXTERNAL_PATH = process.env.EXTERNAL_PATH @@ -77,7 +78,7 @@ const hasApk = (function () { try { execSync('command -v apk'); return true } catch (error) { return false } })() const hasOptimizer = (function () { - try { execSync('./wasm-opt --version'); return true } catch (error) { return false } + try { execSync(`${WASM_OPT} --version`); return true } catch (error) { return false } })() if (hasApk) { // Gather information about the tools used for the build @@ -97,7 +98,7 @@ ${join(WASM_SRC, 'src')}/*.c \ ${WASM_LDLIBS}`, { stdio: 'inherit' }) if (hasOptimizer) { - execSync(`./wasm-opt ${WASM_OPT_FLAGS} -o ${join(WASM_OUT, 'llhttp.wasm')} ${join(WASM_OUT, 'llhttp.wasm')}`, { stdio: 'inherit' }) + execSync(`${WASM_OPT} ${WASM_OPT_FLAGS} -o ${join(WASM_OUT, 'llhttp.wasm')} ${join(WASM_OUT, 'llhttp.wasm')}`, { stdio: 'inherit' }) } writeWasmChunk('llhttp.wasm', 'llhttp-wasm.js') @@ -109,7 +110,7 @@ ${join(WASM_SRC, 'src')}/*.c \ ${WASM_LDLIBS}`, { stdio: 'inherit' }) if (hasOptimizer) { - execSync(`./wasm-opt ${WASM_OPT_FLAGS} --enable-simd -o ${join(WASM_OUT, 'llhttp_simd.wasm')} ${join(WASM_OUT, 'llhttp_simd.wasm')}`, { stdio: 'inherit' }) + execSync(`${WASM_OPT} ${WASM_OPT_FLAGS} --enable-simd -o ${join(WASM_OUT, 'llhttp_simd.wasm')} ${join(WASM_OUT, 
'llhttp_simd.wasm')}`, { stdio: 'inherit' }) } writeWasmChunk('llhttp_simd.wasm', 'llhttp_simd-wasm.js') From 405a2ce3da0a5f7642b8b1df520d4bb26f91be87 Mon Sep 17 00:00:00 2001 From: Khafra Date: Thu, 15 Aug 2024 01:21:38 -0400 Subject: [PATCH 13/57] use bodyUnusable to check if body is unusable (#3460) --- lib/web/fetch/body.js | 9 ++++++--- lib/web/fetch/request.js | 6 +++--- lib/web/fetch/response.js | 4 ++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/lib/web/fetch/body.js b/lib/web/fetch/body.js index 55718ac7c81..a8c996b50eb 100644 --- a/lib/web/fetch/body.js +++ b/lib/web/fetch/body.js @@ -414,7 +414,7 @@ async function consumeBody (object, convertBytesToJSValue, instance) { // 1. If object is unusable, then return a promise rejected // with a TypeError. - if (bodyUnusable(object[kState].body)) { + if (bodyUnusable(object)) { throw new TypeError('Body is unusable: Body has already been read') } @@ -454,7 +454,9 @@ async function consumeBody (object, convertBytesToJSValue, instance) { } // https://fetch.spec.whatwg.org/#body-unusable -function bodyUnusable (body) { +function bodyUnusable (object) { + const body = object[kState].body + // An object including the Body interface mixin is // said to be unusable if its body is non-null and // its body’s stream is disturbed or locked. @@ -496,5 +498,6 @@ module.exports = { extractBody, safelyExtractBody, cloneBody, - mixinBody + mixinBody, + bodyUnusable } diff --git a/lib/web/fetch/request.js b/lib/web/fetch/request.js index bc436aa9705..b63d12efd38 100644 --- a/lib/web/fetch/request.js +++ b/lib/web/fetch/request.js @@ -2,7 +2,7 @@ 'use strict' -const { extractBody, mixinBody, cloneBody } = require('./body') +const { extractBody, mixinBody, cloneBody, bodyUnusable } = require('./body') const { Headers, fill: fillHeaders, HeadersList, setHeadersGuard, getHeadersGuard, setHeadersList, getHeadersList } = require('./headers') const { FinalizationRegistry } = require('./dispatcher-weakref')() const util = require('../../core/util') @@ -557,7 +557,7 @@ class Request { // 40. If initBody is null and inputBody is non-null, then: if (initBody == null && inputBody != null) { // 1. If input is unusable, then throw a TypeError. - if (util.isDisturbed(inputBody.stream) || inputBody.stream.locked) { + if (bodyUnusable(input)) { throw new TypeError( 'Cannot construct a Request with a Request object that has already been used.' ) @@ -759,7 +759,7 @@ class Request { webidl.brandCheck(this, Request) // 1. If this is unusable, then throw a TypeError. - if (this.bodyUsed || this.body?.locked) { + if (bodyUnusable(this)) { throw new TypeError('unusable') } diff --git a/lib/web/fetch/response.js b/lib/web/fetch/response.js index 603410a4a63..9909066fd38 100644 --- a/lib/web/fetch/response.js +++ b/lib/web/fetch/response.js @@ -1,7 +1,7 @@ 'use strict' const { Headers, HeadersList, fill, getHeadersGuard, setHeadersGuard, setHeadersList } = require('./headers') -const { extractBody, cloneBody, mixinBody } = require('./body') +const { extractBody, cloneBody, mixinBody, bodyUnusable } = require('./body') const util = require('../../core/util') const nodeUtil = require('node:util') const { kEnumerableProperty } = util @@ -244,7 +244,7 @@ class Response { webidl.brandCheck(this, Response) // 1. If this is unusable, then throw a TypeError. - if (this.bodyUsed || this.body?.locked) { + if (bodyUnusable(this)) { throw webidl.errors.exception({ header: 'Response.clone', message: 'Body has already been consumed.' 
From dbb6b40e69b5e7863c95fb439b9a95c66ba3e9f4 Mon Sep 17 00:00:00 2001 From: Aras Abbasi Date: Thu, 15 Aug 2024 08:32:38 +0200 Subject: [PATCH 14/57] perf: non-recursive implementation of euclidian gcd in balanced pool (#3461) --- lib/dispatcher/balanced-pool.js | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/lib/dispatcher/balanced-pool.js b/lib/dispatcher/balanced-pool.js index 15a7e7b5879..1e2de289cb7 100644 --- a/lib/dispatcher/balanced-pool.js +++ b/lib/dispatcher/balanced-pool.js @@ -25,9 +25,23 @@ const kWeight = Symbol('kWeight') const kMaxWeightPerServer = Symbol('kMaxWeightPerServer') const kErrorPenalty = Symbol('kErrorPenalty') +/** + * Calculate the greatest common divisor of two numbers by + * using the Euclidean algorithm. + * + * @param {number} a + * @param {number} b + * @returns {number} + */ function getGreatestCommonDivisor (a, b) { - if (b === 0) return a - return getGreatestCommonDivisor(b, a % b) + if (a === 0) return b + + while (b !== 0) { + const t = b + b = a % b + a = t + } + return a } function defaultFactory (origin, opts) { @@ -105,7 +119,12 @@ class BalancedPool extends PoolBase { } _updateBalancedPoolStats () { - this[kGreatestCommonDivisor] = this[kClients].map(p => p[kWeight]).reduce(getGreatestCommonDivisor, 0) + let result = 0 + for (let i = 0; i < this[kClients].length; i++) { + result = getGreatestCommonDivisor(this[kClients][i][kWeight], result) + } + + this[kGreatestCommonDivisor] = result } removeUpstream (upstream) { From cde714f7f5b8469c5b598bb9d52455891a84386b Mon Sep 17 00:00:00 2001 From: Khafra Date: Thu, 15 Aug 2024 13:27:28 -0400 Subject: [PATCH 15/57] use FinalizationRegistry for cloned response body (#3458) Signed-off-by: Matteo Collina --- lib/web/fetch/body.js | 23 +++++++++++++++++++++-- lib/web/fetch/request.js | 2 +- lib/web/fetch/response.js | 21 +++------------------ test/fetch/fire-and-forget.js | 3 ++- 4 files changed, 27 insertions(+), 22 deletions(-) diff --git a/lib/web/fetch/body.js b/lib/web/fetch/body.js index a8c996b50eb..464e7b50e5c 100644 --- a/lib/web/fetch/body.js +++ b/lib/web/fetch/body.js @@ -16,12 +16,25 @@ const { kState } = require('./symbols') const { webidl } = require('./webidl') const { Blob } = require('node:buffer') const assert = require('node:assert') -const { isErrored } = require('../../core/util') +const { isErrored, isDisturbed } = require('node:stream') const { isArrayBuffer } = require('node:util/types') const { serializeAMimeType } = require('./data-url') const { multipartFormDataParser } = require('./formdata-parser') const textEncoder = new TextEncoder() +function noop () {} + +const hasFinalizationRegistry = globalThis.FinalizationRegistry && process.version.indexOf('v18') !== 0 +let streamRegistry + +if (hasFinalizationRegistry) { + streamRegistry = new FinalizationRegistry((weakRef) => { + const stream = weakRef.deref() + if (stream && !stream.locked && !isDisturbed(stream) && !isErrored(stream)) { + stream.cancel('Response object has been garbage collected').catch(noop) + } + }) +} // https://fetch.spec.whatwg.org/#concept-bodyinit-extract function extractBody (object, keepalive = false) { @@ -264,7 +277,7 @@ function safelyExtractBody (object, keepalive = false) { return extractBody(object, keepalive) } -function cloneBody (body) { +function cloneBody (instance, body) { // To clone a body body, run these steps: // https://fetch.spec.whatwg.org/#concept-body-clone @@ -272,6 +285,10 @@ function cloneBody (body) { // 1. 
Let « out1, out2 » be the result of teeing body’s stream. const [out1, out2] = body.stream.tee() + if (hasFinalizationRegistry) { + streamRegistry.register(instance, new WeakRef(out1)) + } + // 2. Set body’s stream to out1. body.stream = out1 @@ -499,5 +516,7 @@ module.exports = { safelyExtractBody, cloneBody, mixinBody, + streamRegistry, + hasFinalizationRegistry, bodyUnusable } diff --git a/lib/web/fetch/request.js b/lib/web/fetch/request.js index b63d12efd38..542ea7fb28a 100644 --- a/lib/web/fetch/request.js +++ b/lib/web/fetch/request.js @@ -877,7 +877,7 @@ function cloneRequest (request) { // 2. If request’s body is non-null, set newRequest’s body to the // result of cloning request’s body. if (request.body != null) { - newRequest.body = cloneBody(request.body) + newRequest.body = cloneBody(newRequest, request.body) } // 3. Return newRequest. diff --git a/lib/web/fetch/response.js b/lib/web/fetch/response.js index 9909066fd38..155dbadd1ad 100644 --- a/lib/web/fetch/response.js +++ b/lib/web/fetch/response.js @@ -1,7 +1,7 @@ 'use strict' const { Headers, HeadersList, fill, getHeadersGuard, setHeadersGuard, setHeadersList } = require('./headers') -const { extractBody, cloneBody, mixinBody, bodyUnusable } = require('./body') +const { extractBody, cloneBody, mixinBody, hasFinalizationRegistry, streamRegistry, bodyUnusable } = require('./body') const util = require('../../core/util') const nodeUtil = require('node:util') const { kEnumerableProperty } = util @@ -26,24 +26,9 @@ const { URLSerializer } = require('./data-url') const { kConstruct } = require('../../core/symbols') const assert = require('node:assert') const { types } = require('node:util') -const { isDisturbed, isErrored } = require('node:stream') const textEncoder = new TextEncoder('utf-8') -const hasFinalizationRegistry = globalThis.FinalizationRegistry && process.version.indexOf('v18') !== 0 -let registry - -if (hasFinalizationRegistry) { - registry = new FinalizationRegistry((weakRef) => { - const stream = weakRef.deref() - if (stream && !stream.locked && !isDisturbed(stream) && !isErrored(stream)) { - stream.cancel('Response object has been garbage collected').catch(noop) - } - }) -} - -function noop () {} - // https://fetch.spec.whatwg.org/#response-class class Response { // Creates network error Response. @@ -327,7 +312,7 @@ function cloneResponse (response) { // 3. If response’s body is non-null, then set newResponse’s body to the // result of cloning response’s body. if (response.body != null) { - newResponse.body = cloneBody(response.body) + newResponse.body = cloneBody(newResponse, response.body) } // 4. Return newResponse. @@ -532,7 +517,7 @@ function fromInnerResponse (innerResponse, guard) { // a primitive or an object, even undefined. If the held value is an object, the registry keeps // a strong reference to it (so it can pass it to the cleanup callback later). 
Reworded from // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry - registry.register(response, new WeakRef(innerResponse.body.stream)) + streamRegistry.register(response, new WeakRef(innerResponse.body.stream)) } return response diff --git a/test/fetch/fire-and-forget.js b/test/fetch/fire-and-forget.js index d8090885eb8..d356729b14d 100644 --- a/test/fetch/fire-and-forget.js +++ b/test/fetch/fire-and-forget.js @@ -37,8 +37,9 @@ test('does not need the body to be consumed to continue', { timeout: 180_000, sk // eslint-disable-next-line no-undef gc(true) const array = new Array(batch) - for (let i = 0; i < batch; i++) { + for (let i = 0; i < batch; i += 2) { array[i] = fetch(url).catch(() => {}) + array[i + 1] = fetch(url).then(r => r.clone()).catch(() => {}) } await Promise.all(array) await sleep(delay) From 3d3ce0695c8c3f9a8f3c8af90dd42d0569d3f0bb Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Mon, 19 Aug 2024 19:18:39 +0200 Subject: [PATCH 16/57] Bumped v6.19.8 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 86fbc7e26b3..bf777b24a2b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.7", + "version": "6.19.8", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 8d28413b33019f80847faed1a37976e70dc6c78e Mon Sep 17 00:00:00 2001 From: eXhumer <62310242+eXhumer@users.noreply.github.com> Date: Mon, 2 Sep 2024 11:18:41 -0600 Subject: [PATCH 17/57] Remove patched dom types (`v6.x` branch) (#3531) --- package.json | 2 +- types/eventsource.d.ts | 2 -- types/filereader.d.ts | 2 +- types/patch.d.ts | 38 -------------------------------------- types/websocket.d.ts | 2 -- 5 files changed, 2 insertions(+), 44 deletions(-) diff --git a/package.json b/package.json index bf777b24a2b..3bfb6d148ac 100644 --- a/package.json +++ b/package.json @@ -105,7 +105,7 @@ "@fastify/busboy": "2.1.1", "@matteo.collina/tspl": "^0.1.1", "@sinonjs/fake-timers": "^11.1.0", - "@types/node": "^18.0.3", + "@types/node": "~18.17.19", "abort-controller": "^3.0.0", "borp": "^0.15.0", "c8": "^10.0.0", diff --git a/types/eventsource.d.ts b/types/eventsource.d.ts index eecda8c0151..deccd730041 100644 --- a/types/eventsource.d.ts +++ b/types/eventsource.d.ts @@ -2,8 +2,6 @@ import { MessageEvent, ErrorEvent } from './websocket' import Dispatcher from './dispatcher' import { - EventTarget, - Event, EventListenerOptions, AddEventListenerOptions, EventListenerOrEventListenerObject diff --git a/types/filereader.d.ts b/types/filereader.d.ts index f05d231b2ff..d1c0f9ef723 100644 --- a/types/filereader.d.ts +++ b/types/filereader.d.ts @@ -1,7 +1,7 @@ /// import { Blob } from 'buffer' -import { DOMException, Event, EventInit, EventTarget } from './patch' +import { DOMException, EventInit } from './patch' export declare class FileReader { __proto__: EventTarget & FileReader diff --git a/types/patch.d.ts b/types/patch.d.ts index 3871acfebc6..4ac38450e67 100644 --- a/types/patch.d.ts +++ b/types/patch.d.ts @@ -6,44 +6,6 @@ export type DOMException = typeof globalThis extends { DOMException: infer T } ? T : any -export type EventTarget = typeof globalThis extends { EventTarget: infer T } - ? 
T - : { - addEventListener( - type: string, - listener: any, - options?: any, - ): void - dispatchEvent(event: Event): boolean - removeEventListener( - type: string, - listener: any, - options?: any | boolean, - ): void - } - -export type Event = typeof globalThis extends { Event: infer T } - ? T - : { - readonly bubbles: boolean - cancelBubble: () => void - readonly cancelable: boolean - readonly composed: boolean - composedPath(): [EventTarget?] - readonly currentTarget: EventTarget | null - readonly defaultPrevented: boolean - readonly eventPhase: 0 | 2 - readonly isTrusted: boolean - preventDefault(): void - returnValue: boolean - readonly srcElement: EventTarget | null - stopImmediatePropagation(): void - stopPropagation(): void - readonly target: EventTarget | null - readonly timeStamp: number - readonly type: string - } - export interface EventInit { bubbles?: boolean cancelable?: boolean diff --git a/types/websocket.d.ts b/types/websocket.d.ts index d1be45235d4..dfdd8156cce 100644 --- a/types/websocket.d.ts +++ b/types/websocket.d.ts @@ -3,8 +3,6 @@ import type { Blob } from 'buffer' import type { MessagePort } from 'worker_threads' import { - EventTarget, - Event, EventInit, EventListenerOptions, AddEventListenerOptions, From be0c7793257accadf8db545e8b80310513c80a44 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 12 Sep 2024 22:26:51 +0200 Subject: [PATCH 18/57] docs(Backport v6.x): Fix signature of RetryHandler (#3594) Co-authored-by: Jean-Baptiste Richardet --- docs/docs/api/RetryHandler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/api/RetryHandler.md b/docs/docs/api/RetryHandler.md index 8988ee53010..0dd9f29533f 100644 --- a/docs/docs/api/RetryHandler.md +++ b/docs/docs/api/RetryHandler.md @@ -19,7 +19,7 @@ Extends: [`Dispatch.DispatchOptions`](Dispatcher.md#parameter-dispatchoptions). #### `RetryOptions` -- **retry** `(err: Error, context: RetryContext, callback: (err?: Error | null) => void) => void` (optional) - Function to be called after every retry. It should pass error if no more retries should be performed. +- **retry** `(err: Error, context: RetryContext, callback: (err?: Error | null) => void) => number | null` (optional) - Function to be called after every retry. It should pass error if no more retries should be performed. - **maxRetries** `number` (optional) - Maximum number of retries. Default: `5` - **maxTimeout** `number` (optional) - Maximum number of milliseconds to wait before retrying. Default: `30000` (30 seconds) - **minTimeout** `number` (optional) - Minimum number of milliseconds to wait before retrying. 
Default: `500` (half a second) From f93d857f8b944d730a0a8c8d8e54cfc7236be11b Mon Sep 17 00:00:00 2001 From: Carlos Fuentes Date: Wed, 18 Sep 2024 12:42:07 +0200 Subject: [PATCH 19/57] deps(dev): update @types/node (#3618) --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 3bfb6d148ac..516e3bec9b3 100644 --- a/package.json +++ b/package.json @@ -105,7 +105,7 @@ "@fastify/busboy": "2.1.1", "@matteo.collina/tspl": "^0.1.1", "@sinonjs/fake-timers": "^11.1.0", - "@types/node": "~18.17.19", + "@types/node": "~18.19.50", "abort-controller": "^3.0.0", "borp": "^0.15.0", "c8": "^10.0.0", From 993618468a1364d2305392334fbe2f1234edad58 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 23:41:46 +0200 Subject: [PATCH 20/57] fix: throw on retry when payload is consume by downstream (#3596) Co-authored-by: KaKa <23028015+climba03003@users.noreply.github.com> Co-authored-by: Carlos Fuentes --- lib/handler/retry-handler.js | 14 +++++++-- test/issue-3356.js | 57 ++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 2 deletions(-) create mode 100644 test/issue-3356.js diff --git a/lib/handler/retry-handler.js b/lib/handler/retry-handler.js index f7dedfa4bac..e00e82faf08 100644 --- a/lib/handler/retry-handler.js +++ b/lib/handler/retry-handler.js @@ -192,8 +192,18 @@ class RetryHandler { if (this.resume != null) { this.resume = null - if (statusCode !== 206) { - return true + // Only Partial Content 206 supposed to provide Content-Range, + // any other status code that partially consumed the payload + // should not be retry because it would result in downstream + // wrongly concatanete multiple responses. + if (statusCode !== 206 && (this.start > 0 || statusCode !== 200)) { + this.abort( + new RequestRetryError('server does not support the range header and the payload was partially consumed', statusCode, { + headers, + data: { count: this.retryCount } + }) + ) + return false } const contentRange = parseRangeHeader(headers['content-range']) diff --git a/test/issue-3356.js b/test/issue-3356.js new file mode 100644 index 00000000000..927208583a9 --- /dev/null +++ b/test/issue-3356.js @@ -0,0 +1,57 @@ +'use strict' + +const { tspl } = require('@matteo.collina/tspl') +const { test, after } = require('node:test') +const { createServer } = require('node:http') +const { once } = require('node:events') + +const { fetch, Agent, RetryAgent } = require('..') + +test('https://github.com/nodejs/undici/issues/3356', async (t) => { + t = tspl(t, { plan: 3 }) + + let shouldRetry = true + const server = createServer() + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + if (shouldRetry) { + shouldRetry = false + + res.flushHeaders() + res.write('h') + setTimeout(() => { res.end('ello world!') }, 100) + } else { + res.end('hello world!') + } + }) + + server.listen(0) + + await once(server, 'listening') + + after(async () => { + server.close() + + await once(server, 'close') + }) + + const agent = new RetryAgent(new Agent({ bodyTimeout: 50 }), { + errorCodes: ['UND_ERR_BODY_TIMEOUT'] + }) + + const response = await fetch(`http://localhost:${server.address().port}`, { + dispatcher: agent + }) + setTimeout(async () => { + try { + t.equal(response.status, 200) + // consume response + await response.text() + } catch (err) { + t.equal(err.name, 'TypeError') + t.equal(err.cause.code, 'UND_ERR_REQ_RETRY') + } + }, 200) + + await 
t.completed +}) From 52ae2f0a99b1d5290e570384d29448cb5f4726e4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 23:42:06 +0200 Subject: [PATCH 21/57] feat(Backport v6.x): move throwOnError to interceptor (#3595) Co-authored-by: Mert Can Altin Co-authored-by: Carlos Fuentes --- docs/docs/api/Dispatcher.md | 197 ++++++++++++++++++++++++++++ lib/core/errors.js | 13 ++ lib/interceptor/response-error.js | 86 ++++++++++++ test/interceptors/response-error.js | 67 ++++++++++ types/interceptors.d.ts | 4 +- 5 files changed, 366 insertions(+), 1 deletion(-) create mode 100644 lib/interceptor/response-error.js create mode 100644 test/interceptors/response-error.js diff --git a/docs/docs/api/Dispatcher.md b/docs/docs/api/Dispatcher.md index ecc3cfd61f7..574030bf686 100644 --- a/docs/docs/api/Dispatcher.md +++ b/docs/docs/api/Dispatcher.md @@ -984,6 +984,203 @@ client.dispatch( ); ``` +##### `Response Error Interceptor` + +**Introduction** + +The Response Error Interceptor is designed to handle HTTP response errors efficiently. It intercepts responses and throws detailed errors for responses with status codes indicating failure (4xx, 5xx). This interceptor enhances error handling by providing structured error information, including response headers, data, and status codes. + +**ResponseError Class** + +The `ResponseError` class extends the `UndiciError` class and encapsulates detailed error information. It captures the response status code, headers, and data, providing a structured way to handle errors. + +**Definition** + +```js +class ResponseError extends UndiciError { + constructor (message, code, { headers, data }) { + super(message); + this.name = 'ResponseError'; + this.message = message || 'Response error'; + this.code = 'UND_ERR_RESPONSE'; + this.statusCode = code; + this.data = data; + this.headers = headers; + } +} +``` + +**Interceptor Handler** + +The interceptor's handler class extends `DecoratorHandler` and overrides methods to capture response details and handle errors based on the response status code. + +**Methods** + +- **onConnect**: Initializes response properties. +- **onHeaders**: Captures headers and status code. Decodes body if content type is `application/json` or `text/plain`. +- **onData**: Appends chunks to the body if status code indicates an error. +- **onComplete**: Finalizes error handling, constructs a `ResponseError`, and invokes the `onError` method. +- **onError**: Propagates errors to the handler. 
+ +**Definition** + +```js +class Handler extends DecoratorHandler { + // Private properties + #handler; + #statusCode; + #contentType; + #decoder; + #headers; + #body; + + constructor (opts, { handler }) { + super(handler); + this.#handler = handler; + } + + onConnect (abort) { + this.#statusCode = 0; + this.#contentType = null; + this.#decoder = null; + this.#headers = null; + this.#body = ''; + return this.#handler.onConnect(abort); + } + + onHeaders (statusCode, rawHeaders, resume, statusMessage, headers = parseHeaders(rawHeaders)) { + this.#statusCode = statusCode; + this.#headers = headers; + this.#contentType = headers['content-type']; + + if (this.#statusCode < 400) { + return this.#handler.onHeaders(statusCode, rawHeaders, resume, statusMessage, headers); + } + + if (this.#contentType === 'application/json' || this.#contentType === 'text/plain') { + this.#decoder = new TextDecoder('utf-8'); + } + } + + onData (chunk) { + if (this.#statusCode < 400) { + return this.#handler.onData(chunk); + } + this.#body += this.#decoder?.decode(chunk, { stream: true }) ?? ''; + } + + onComplete (rawTrailers) { + if (this.#statusCode >= 400) { + this.#body += this.#decoder?.decode(undefined, { stream: false }) ?? ''; + if (this.#contentType === 'application/json') { + try { + this.#body = JSON.parse(this.#body); + } catch { + // Do nothing... + } + } + + let err; + const stackTraceLimit = Error.stackTraceLimit; + Error.stackTraceLimit = 0; + try { + err = new ResponseError('Response Error', this.#statusCode, this.#headers, this.#body); + } finally { + Error.stackTraceLimit = stackTraceLimit; + } + + this.#handler.onError(err); + } else { + this.#handler.onComplete(rawTrailers); + } + } + + onError (err) { + this.#handler.onError(err); + } +} + +module.exports = (dispatch) => (opts, handler) => opts.throwOnError + ? dispatch(opts, new Handler(opts, { handler })) + : dispatch(opts, handler); +``` + +**Tests** + +Unit tests ensure the interceptor functions correctly, handling both error and non-error responses appropriately. 
+ +**Example Tests** + +- **No Error if `throwOnError` is False**: + +```js +test('should not error if request is not meant to throw error', async (t) => { + const opts = { throwOnError: false }; + const handler = { onError: () => {}, onData: () => {}, onComplete: () => {} }; + const interceptor = createResponseErrorInterceptor((opts, handler) => handler.onComplete()); + assert.doesNotThrow(() => interceptor(opts, handler)); +}); +``` + +- **Error if Status Code is in Specified Error Codes**: + +```js +test('should error if request status code is in the specified error codes', async (t) => { + const opts = { throwOnError: true, statusCodes: [500] }; + const response = { statusCode: 500 }; + let capturedError; + const handler = { + onError: (err) => { capturedError = err; }, + onData: () => {}, + onComplete: () => {} + }; + + const interceptor = createResponseErrorInterceptor((opts, handler) => { + if (opts.throwOnError && opts.statusCodes.includes(response.statusCode)) { + handler.onError(new Error('Response Error')); + } else { + handler.onComplete(); + } + }); + + interceptor({ ...opts, response }, handler); + + await new Promise(resolve => setImmediate(resolve)); + + assert(capturedError, 'Expected error to be captured but it was not.'); + assert.strictEqual(capturedError.message, 'Response Error'); + assert.strictEqual(response.statusCode, 500); +}); +``` + +- **No Error if Status Code is Not in Specified Error Codes**: + +```js +test('should not error if request status code is not in the specified error codes', async (t) => { + const opts = { throwOnError: true, statusCodes: [500] }; + const response = { statusCode: 404 }; + const handler = { + onError: () => {}, + onData: () => {}, + onComplete: () => {} + }; + + const interceptor = createResponseErrorInterceptor((opts, handler) => { + if (opts.throwOnError && opts.statusCodes.includes(response.statusCode)) { + handler.onError(new Error('Response Error')); + } else { + handler.onComplete(); + } + }); + + assert.doesNotThrow(() => interceptor({ ...opts, response }, handler)); +}); +``` + +**Conclusion** + +The Response Error Interceptor provides a robust mechanism for handling HTTP response errors by capturing detailed error information and propagating it through a structured `ResponseError` class. This enhancement improves error handling and debugging capabilities in applications using the interceptor. + ## Instance Events ### Event: `'connect'` diff --git a/lib/core/errors.js b/lib/core/errors.js index 3d69fdbecba..9257875c1c3 100644 --- a/lib/core/errors.js +++ b/lib/core/errors.js @@ -195,6 +195,18 @@ class RequestRetryError extends UndiciError { } } +class ResponseError extends UndiciError { + constructor (message, code, { headers, data }) { + super(message) + this.name = 'ResponseError' + this.message = message || 'Response error' + this.code = 'UND_ERR_RESPONSE' + this.statusCode = code + this.data = data + this.headers = headers + } +} + class SecureProxyConnectionError extends UndiciError { constructor (cause, message, options) { super(message, { cause, ...(options ?? 
{}) }) @@ -227,5 +239,6 @@ module.exports = { BalancedPoolMissingUpstreamError, ResponseExceededMaxSizeError, RequestRetryError, + ResponseError, SecureProxyConnectionError } diff --git a/lib/interceptor/response-error.js b/lib/interceptor/response-error.js new file mode 100644 index 00000000000..3ded9c87fb7 --- /dev/null +++ b/lib/interceptor/response-error.js @@ -0,0 +1,86 @@ +'use strict' + +const { parseHeaders } = require('../core/util') +const DecoratorHandler = require('../handler/decorator-handler') +const { ResponseError } = require('../core/errors') + +class Handler extends DecoratorHandler { + #handler + #statusCode + #contentType + #decoder + #headers + #body + + constructor (opts, { handler }) { + super(handler) + this.#handler = handler + } + + onConnect (abort) { + this.#statusCode = 0 + this.#contentType = null + this.#decoder = null + this.#headers = null + this.#body = '' + + return this.#handler.onConnect(abort) + } + + onHeaders (statusCode, rawHeaders, resume, statusMessage, headers = parseHeaders(rawHeaders)) { + this.#statusCode = statusCode + this.#headers = headers + this.#contentType = headers['content-type'] + + if (this.#statusCode < 400) { + return this.#handler.onHeaders(statusCode, rawHeaders, resume, statusMessage, headers) + } + + if (this.#contentType === 'application/json' || this.#contentType === 'text/plain') { + this.#decoder = new TextDecoder('utf-8') + } + } + + onData (chunk) { + if (this.#statusCode < 400) { + return this.#handler.onData(chunk) + } + + this.#body += this.#decoder?.decode(chunk, { stream: true }) ?? '' + } + + onComplete (rawTrailers) { + if (this.#statusCode >= 400) { + this.#body += this.#decoder?.decode(undefined, { stream: false }) ?? '' + + if (this.#contentType === 'application/json') { + try { + this.#body = JSON.parse(this.#body) + } catch { + // Do nothing... + } + } + + let err + const stackTraceLimit = Error.stackTraceLimit + Error.stackTraceLimit = 0 + try { + err = new ResponseError('Response Error', this.#statusCode, this.#headers, this.#body) + } finally { + Error.stackTraceLimit = stackTraceLimit + } + + this.#handler.onError(err) + } else { + this.#handler.onComplete(rawTrailers) + } + } + + onError (err) { + this.#handler.onError(err) + } +} + +module.exports = (dispatch) => (opts, handler) => opts.throwOnError + ? 
dispatch(opts, new Handler(opts, { handler })) + : dispatch(opts, handler) diff --git a/test/interceptors/response-error.js b/test/interceptors/response-error.js new file mode 100644 index 00000000000..afd9c00a500 --- /dev/null +++ b/test/interceptors/response-error.js @@ -0,0 +1,67 @@ +'use strict' + +const assert = require('assert') +const { test } = require('node:test') +const createResponseErrorInterceptor = require('../../lib/interceptor/response-error') + +test('should not error if request is not meant to throw error', async (t) => { + const opts = { throwOnError: false } + const handler = { + onError: () => {}, + onData: () => {}, + onComplete: () => {} + } + + const interceptor = createResponseErrorInterceptor((opts, handler) => handler.onComplete()) + + assert.doesNotThrow(() => interceptor(opts, handler)) +}) + +test('should error if request status code is in the specified error codes', async (t) => { + const opts = { throwOnError: true, statusCodes: [500] } + const response = { statusCode: 500 } + let capturedError + const handler = { + onError: (err) => { + capturedError = err + }, + onData: () => {}, + onComplete: () => {} + } + + const interceptor = createResponseErrorInterceptor((opts, handler) => { + if (opts.throwOnError && opts.statusCodes.includes(response.statusCode)) { + handler.onError(new Error('Response Error')) + } else { + handler.onComplete() + } + }) + + interceptor({ ...opts, response }, handler) + + await new Promise(resolve => setImmediate(resolve)) + + assert(capturedError, 'Expected error to be captured but it was not.') + assert.strictEqual(capturedError.message, 'Response Error') + assert.strictEqual(response.statusCode, 500) +}) + +test('should not error if request status code is not in the specified error codes', async (t) => { + const opts = { throwOnError: true, statusCodes: [500] } + const response = { statusCode: 404 } + const handler = { + onError: () => {}, + onData: () => {}, + onComplete: () => {} + } + + const interceptor = createResponseErrorInterceptor((opts, handler) => { + if (opts.throwOnError && opts.statusCodes.includes(response.statusCode)) { + handler.onError(new Error('Response Error')) + } else { + handler.onComplete() + } + }) + + assert.doesNotThrow(() => interceptor({ ...opts, response }, handler)) +}) diff --git a/types/interceptors.d.ts b/types/interceptors.d.ts index fab6da08c0d..24166b61f4f 100644 --- a/types/interceptors.d.ts +++ b/types/interceptors.d.ts @@ -7,9 +7,11 @@ declare namespace Interceptors { export type DumpInterceptorOpts = { maxSize?: number } export type RetryInterceptorOpts = RetryHandler.RetryOptions export type RedirectInterceptorOpts = { maxRedirections?: number } - + export type ResponseErrorInterceptorOpts = { throwOnError: boolean } + export function createRedirectInterceptor(opts: RedirectInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function dump(opts?: DumpInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function retry(opts?: RetryInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function redirect(opts?: RedirectInterceptorOpts): Dispatcher.DispatcherComposeInterceptor + export function responseError(opts?: ResponseErrorInterceptorOpts): Dispatcher.DispatcherComposeInterceptor } From 746667272220dca19c272511f65b1c9a1d7b8511 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 3 Oct 2024 22:16:21 +0200 Subject: [PATCH 22/57] fix: avoid memoryleak in client-h1 (#3510) (#3672) (cherry 
picked from commit b7254574e54d135d41baa5193f89007e78a3c710) Co-authored-by: Aras Abbasi --- lib/dispatcher/client-h1.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/dispatcher/client-h1.js b/lib/dispatcher/client-h1.js index 2f1c96724d3..1b0995a74c9 100644 --- a/lib/dispatcher/client-h1.js +++ b/lib/dispatcher/client-h1.js @@ -169,7 +169,7 @@ class Parser { if (value !== this.timeoutValue) { timers.clearTimeout(this.timeout) if (value) { - this.timeout = timers.setTimeout(onParserTimeout, value, this) + this.timeout = timers.setTimeout(onParserTimeout, value, new WeakRef(this)) // istanbul ignore else: only for jest if (this.timeout.unref) { this.timeout.unref() @@ -613,16 +613,16 @@ class Parser { } function onParserTimeout (parser) { - const { socket, timeoutType, client } = parser + const { socket, timeoutType, client, paused } = parser.deref() /* istanbul ignore else */ if (timeoutType === TIMEOUT_HEADERS) { if (!socket[kWriting] || socket.writableNeedDrain || client[kRunning] > 1) { - assert(!parser.paused, 'cannot be paused while waiting for headers') + assert(!paused, 'cannot be paused while waiting for headers') util.destroy(socket, new HeadersTimeoutError()) } } else if (timeoutType === TIMEOUT_BODY) { - if (!parser.paused) { + if (!paused) { util.destroy(socket, new BodyTimeoutError()) } } else if (timeoutType === TIMEOUT_IDLE) { From 6764626346aecbc74202d0b9bff061ff3f944013 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 09:17:48 +0200 Subject: [PATCH 23/57] fix: refactor fast timers, fix UND_ERR_CONNECT_TIMEOUT on event loop blocking (#3495) (#3673) * fix: refactor fast timers, fix UND_ERR_CONNECT_TIMEOUT on event loop blocking * also test native timers * improve the commentary of fasttimers * revert changes in client-h1.js * make fasttimers independent from performance.now * less flaky? 
(cherry picked from commit 8a09d77037024b80e4efc6df53bf8cd1d36b14eb) Co-authored-by: Aras Abbasi --- benchmarks/timers/compare-timer-getters.mjs | 18 + lib/core/connect.js | 63 ++-- lib/dispatcher/client-h1.js | 14 +- lib/util/timers.js | 368 +++++++++++++++++--- test/issue-3410.js | 88 +++++ test/socket-timeout.js | 7 - test/timers.js | 198 +++++++++++ test/util.js | 78 ----- test/utils/event-loop-blocker.js | 10 + test/utils/hello-world-server.js | 30 ++ 10 files changed, 714 insertions(+), 160 deletions(-) create mode 100644 benchmarks/timers/compare-timer-getters.mjs create mode 100644 test/issue-3410.js create mode 100644 test/timers.js create mode 100644 test/utils/event-loop-blocker.js create mode 100644 test/utils/hello-world-server.js diff --git a/benchmarks/timers/compare-timer-getters.mjs b/benchmarks/timers/compare-timer-getters.mjs new file mode 100644 index 00000000000..aadff558f1f --- /dev/null +++ b/benchmarks/timers/compare-timer-getters.mjs @@ -0,0 +1,18 @@ +import { bench, group, run } from 'mitata' + +group('timers', () => { + bench('Date.now()', () => { + Date.now() + }) + bench('performance.now()', () => { + performance.now() + }) + bench('Math.trunc(performance.now())', () => { + Math.trunc(performance.now()) + }) + bench('process.uptime()', () => { + process.uptime() + }) +}) + +await run() diff --git a/lib/core/connect.js b/lib/core/connect.js index b388f022298..e40fb90d81c 100644 --- a/lib/core/connect.js +++ b/lib/core/connect.js @@ -4,6 +4,7 @@ const net = require('node:net') const assert = require('node:assert') const util = require('./util') const { InvalidArgumentError, ConnectTimeoutError } = require('./errors') +const timers = require('../util/timers') let tls // include tls conditionally since it is not always available @@ -130,12 +131,12 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess socket.setKeepAlive(true, keepAliveInitialDelay) } - const cancelTimeout = setupTimeout(() => onConnectTimeout(socket), timeout) + const cancelConnectTimeout = setupConnectTimeout(new WeakRef(socket), timeout) socket .setNoDelay(true) .once(protocol === 'https:' ? 'secureConnect' : 'connect', function () { - cancelTimeout() + cancelConnectTimeout() if (callback) { const cb = callback @@ -144,7 +145,7 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess } }) .on('error', function (err) { - cancelTimeout() + cancelConnectTimeout() if (callback) { const cb = callback @@ -157,30 +158,44 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess } } -function setupTimeout (onConnectTimeout, timeout) { - if (!timeout) { - return () => {} - } +const setupConnectTimeout = process.platform === 'win32' + ? 
(socket, timeout) => { + if (!timeout) { + return () => { } + } - let s1 = null - let s2 = null - const timeoutId = setTimeout(() => { - // setImmediate is added to make sure that we prioritize socket error events over timeouts - s1 = setImmediate(() => { - if (process.platform === 'win32') { + let s1 = null + let s2 = null + const timer = timers.setTimeout(() => { + // setImmediate is added to make sure that we prioritize socket error events over timeouts + s1 = setImmediate(() => { // Windows needs an extra setImmediate probably due to implementation differences in the socket logic - s2 = setImmediate(() => onConnectTimeout()) - } else { - onConnectTimeout() + s2 = setImmediate(() => onConnectTimeout(socket.deref())) + }) + }, timeout) + return () => { + timers.clearTimeout(timer) + clearImmediate(s1) + clearImmediate(s2) } - }) - }, timeout) - return () => { - clearTimeout(timeoutId) - clearImmediate(s1) - clearImmediate(s2) - } -} + } + : (socket, timeout) => { + if (!timeout) { + return () => { } + } + + let s1 = null + const timer = timers.setTimeout(() => { + // setImmediate is added to make sure that we prioritize socket error events over timeouts + s1 = setImmediate(() => { + onConnectTimeout(socket.deref()) + }) + }, timeout) + return () => { + timers.clearTimeout(timer) + clearImmediate(s1) + } + } function onConnectTimeout (socket) { let message = 'Connect Timeout Error' diff --git a/lib/dispatcher/client-h1.js b/lib/dispatcher/client-h1.js index 1b0995a74c9..db5fd6c3746 100644 --- a/lib/dispatcher/client-h1.js +++ b/lib/dispatcher/client-h1.js @@ -164,12 +164,12 @@ class Parser { this.maxResponseSize = client[kMaxResponseSize] } - setTimeout (value, type) { + setTimeout (delay, type) { this.timeoutType = type - if (value !== this.timeoutValue) { - timers.clearTimeout(this.timeout) - if (value) { - this.timeout = timers.setTimeout(onParserTimeout, value, new WeakRef(this)) + if (delay !== this.timeoutValue) { + this.timeout && timers.clearTimeout(this.timeout) + if (delay) { + this.timeout = timers.setTimeout(onParserTimeout, delay, new WeakRef(this)) // istanbul ignore else: only for jest if (this.timeout.unref) { this.timeout.unref() @@ -177,7 +177,7 @@ class Parser { } else { this.timeout = null } - this.timeoutValue = value + this.timeoutValue = delay } else if (this.timeout) { // istanbul ignore else: only for jest if (this.timeout.refresh) { @@ -288,7 +288,7 @@ class Parser { this.llhttp.llhttp_free(this.ptr) this.ptr = null - timers.clearTimeout(this.timeout) + this.timeout && timers.clearTimeout(this.timeout) this.timeout = null this.timeoutValue = null this.timeoutType = null diff --git a/lib/util/timers.js b/lib/util/timers.js index d0091cc15f7..8fa3ac56b7b 100644 --- a/lib/util/timers.js +++ b/lib/util/timers.js @@ -1,99 +1,379 @@ 'use strict' -const TICK_MS = 499 +/** + * This module offers an optimized timer implementation designed for scenarios + * where high precision is not critical. + * + * The timer achieves faster performance by using a low-resolution approach, + * with an accuracy target of within 500ms. This makes it particularly useful + * for timers with delays of 1 second or more, where exact timing is less + * crucial. + * + * It's important to note that Node.js timers are inherently imprecise, as + * delays can occur due to the event loop being blocked by other operations. + * Consequently, timers may trigger later than their scheduled time. 
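The `setupConnectTimeout` helper above routes the timeout callback through `setImmediate` so that a socket 'error' or 'connect' event already sitting in the queue is handled before the timeout, with Windows getting a second hop. A reduced sketch of that ordering trick, using plain timers instead of the fast-timer module and illustrative names only:

    function scheduleConnectTimeout (socketWeakRef, timeoutMs, onConnectTimeout) {
      if (!timeoutMs) {
        return () => {}
      }

      let immediate = null
      const timer = setTimeout(() => {
        // Defer via setImmediate so socket events already in the queue run first.
        immediate = setImmediate(() => onConnectTimeout(socketWeakRef.deref()))
      }, timeoutMs)

      return function cancelConnectTimeout () {
        clearTimeout(timer)
        clearImmediate(immediate)
      }
    }

Returning a single canceller keeps the caller's cleanup to one function call on both the success path and the error path.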
+ */ -let fastNow = Date.now() +const nativeSetTimeout = global.setTimeout +const nativeClearTimeout = global.clearTimeout + +/** + * The fastNow variable contains the internal fast timer clock value. + * + * @type {number} + */ +let fastNow = 0 + +/** + * RESOLUTION_MS represents the target resolution time in milliseconds. + * + * @type {number} + * @default 1000 + */ +const RESOLUTION_MS = 1e3 + +/** + * TICK_MS defines the desired interval in milliseconds between each tick. + * The target value is set to half the resolution time, minus 1 ms, to account + * for potential event loop overhead. + * + * @type {number} + * @default 499 + */ +const TICK_MS = (RESOLUTION_MS >> 1) - 1 + +/** + * fastNowTimeout is a Node.js timer used to manage and process + * the FastTimers stored in the `fastTimers` array. + * + * @type {NodeJS.Timeout} + */ let fastNowTimeout +/** + * The kFastTimer symbol is used to identify FastTimer instances. + * + * @type {Symbol} + */ +const kFastTimer = Symbol('kFastTimer') + +/** + * The fastTimers array contains all active FastTimers. + * + * @type {FastTimer[]} + */ const fastTimers = [] -function onTimeout () { - fastNow = Date.now() +/** + * These constants represent the various states of a FastTimer. + */ - let len = fastTimers.length +/** + * The `NOT_IN_LIST` constant indicates that the FastTimer is not included + * in the `fastTimers` array. Timers with this status will not be processed + * during the next tick by the `onTick` function. + * + * A FastTimer can be re-added to the `fastTimers` array by invoking the + * `refresh` method on the FastTimer instance. + * + * @type {-2} + */ +const NOT_IN_LIST = -2 + +/** + * The `TO_BE_CLEARED` constant indicates that the FastTimer is scheduled + * for removal from the `fastTimers` array. A FastTimer in this state will + * be removed in the next tick by the `onTick` function and will no longer + * be processed. + * + * This status is also set when the `clear` method is called on the FastTimer instance. + * + * @type {-1} + */ +const TO_BE_CLEARED = -1 + +/** + * The `PENDING` constant signifies that the FastTimer is awaiting processing + * in the next tick by the `onTick` function. Timers with this status will have + * their `_idleStart` value set and their status updated to `ACTIVE` in the next tick. + * + * @type {0} + */ +const PENDING = 0 + +/** + * The `ACTIVE` constant indicates that the FastTimer is active and waiting + * for its timer to expire. During the next tick, the `onTick` function will + * check if the timer has expired, and if so, it will execute the associated callback. + * + * @type {1} + */ +const ACTIVE = 1 + +/** + * The onTick function processes the fastTimers array. + * + * @returns {void} + */ +function onTick () { + /** + * Increment the fastNow value by the TICK_MS value, despite the actual time + * that has passed since the last tick. This approach ensures independence + * from the system clock and delays caused by a blocked event loop. + * + * @type {number} + */ + fastNow += TICK_MS + + /** + * The `idx` variable is used to iterate over the `fastTimers` array. + * Expired timers are removed by replacing them with the last element in the array. + * Consequently, `idx` is only incremented when the current element is not removed. + * + * @type {number} + */ let idx = 0 + + /** + * The len variable will contain the length of the fastTimers array + * and will be decremented when a FastTimer should be removed from the + * fastTimers array. 
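As the comments above spell out, each tick advances the internal clock by a fixed TICK_MS rather than reading `Date.now()`, so expiry checks are unaffected by system-clock adjustments and by time spent with the event loop blocked. A toy, self-contained model of that bookkeeping; the numbers mirror the patch, but none of this is undici code:

    const TICK_MS = 499 // (1000 >> 1) - 1, as in the patch

    let fastNow = 0

    function onTick () {
      fastNow += TICK_MS // advances only when the tick actually runs
    }

    function hasExpired (idleStart, idleTimeout) {
      return fastNow >= idleStart + idleTimeout
    }

    onTick()
    console.assert(fastNow === 499)
    console.assert(hasExpired(0, 1000) === false)

    onTick()
    onTick()
    console.assert(fastNow === 1497)
    console.assert(hasExpired(0, 1000) === true)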
+ * + * @type {number} + */ + let len = fastTimers.length + while (idx < len) { + /** + * @type {FastTimer} + */ const timer = fastTimers[idx] - if (timer.state === 0) { - timer.state = fastNow + timer.delay - TICK_MS - } else if (timer.state > 0 && fastNow >= timer.state) { - timer.state = -1 - timer.callback(timer.opaque) + // If the timer is in the ACTIVE state and the timer has expired, it will + // be processed in the next tick. + if (timer._state === PENDING) { + // Set the _idleStart value to the fastNow value minus the TICK_MS value + // to account for the time the timer was in the PENDING state. + timer._idleStart = fastNow - TICK_MS + timer._state = ACTIVE + } else if ( + timer._state === ACTIVE && + fastNow >= timer._idleStart + timer._idleTimeout + ) { + timer._state = TO_BE_CLEARED + timer._idleStart = -1 + timer._onTimeout(timer._timerArg) } - if (timer.state === -1) { - timer.state = -2 - if (idx !== len - 1) { - fastTimers[idx] = fastTimers.pop() - } else { - fastTimers.pop() + if (timer._state === TO_BE_CLEARED) { + timer._state = NOT_IN_LIST + + // Move the last element to the current index and decrement len if it is + // not the only element in the array. + if (--len !== 0) { + fastTimers[idx] = fastTimers[len] } - len -= 1 } else { - idx += 1 + ++idx } } - if (fastTimers.length > 0) { + // Set the length of the fastTimers array to the new length and thus + // removing the excess FastTimers elements from the array. + fastTimers.length = len + + // If there are still active FastTimers in the array, refresh the Timer. + // If there are no active FastTimers, the timer will be refreshed again + // when a new FastTimer is instantiated. + if (fastTimers.length !== 0) { refreshTimeout() } } function refreshTimeout () { - if (fastNowTimeout?.refresh) { + // If the fastNowTimeout is already set, refresh it. + if (fastNowTimeout) { fastNowTimeout.refresh() + // fastNowTimeout is not instantiated yet, create a new Timer. } else { clearTimeout(fastNowTimeout) - fastNowTimeout = setTimeout(onTimeout, TICK_MS) + fastNowTimeout = setTimeout(onTick, TICK_MS) + + // If the Timer has an unref method, call it to allow the process to exit if + // there are no other active handles. if (fastNowTimeout.unref) { fastNowTimeout.unref() } } } -class Timeout { - constructor (callback, delay, opaque) { - this.callback = callback - this.delay = delay - this.opaque = opaque +/** + * The `FastTimer` class is a data structure designed to store and manage + * timer information. + */ +class FastTimer { + [kFastTimer] = true + + /** + * The state of the timer, which can be one of the following: + * - NOT_IN_LIST (-2) + * - TO_BE_CLEARED (-1) + * - PENDING (0) + * - ACTIVE (1) + * + * @type {-2|-1|0|1} + * @private + */ + _state = NOT_IN_LIST + + /** + * The number of milliseconds to wait before calling the callback. + * + * @type {number} + * @private + */ + _idleTimeout = -1 + + /** + * The time in milliseconds when the timer was started. This value is used to + * calculate when the timer should expire. + * + * @type {number} + * @default -1 + * @private + */ + _idleStart = -1 + + /** + * The function to be executed when the timer expires. + * @type {Function} + * @private + */ + _onTimeout + + /** + * The argument to be passed to the callback when the timer expires. 
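The removal strategy inside `onTick`, overwriting a finished slot with the last element and truncating once at the end instead of splicing, is worth seeing on its own. A hedged sketch of just that idiom, detached from the timer state machine; `compact` and `shouldRemove` are invented names:

    // Compact an unordered list in place, O(1) per removal, order not preserved.
    function compact (items, shouldRemove) {
      let idx = 0
      let len = items.length

      while (idx < len) {
        if (shouldRemove(items[idx])) {
          // Move the last live element into this slot and shrink the window.
          if (--len !== 0) {
            items[idx] = items[len]
          }
        } else {
          ++idx
        }
      }

      items.length = len // drop the now-unused tail in one step
      return items
    }

    console.log(compact([1, 2, 3, 4, 5], (n) => n % 2 === 0)) // e.g. [ 1, 5, 3 ]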
+ * + * @type {*} + * @private + */ + _timerArg - // -2 not in timer list - // -1 in timer list but inactive - // 0 in timer list waiting for time - // > 0 in timer list waiting for time to expire - this.state = -2 + /** + * @constructor + * @param {Function} callback A function to be executed after the timer + * expires. + * @param {number} delay The time, in milliseconds that the timer should wait + * before the specified function or code is executed. + * @param {*} arg + */ + constructor (callback, delay, arg) { + this._onTimeout = callback + this._idleTimeout = delay + this._timerArg = arg this.refresh() } + /** + * Sets the timer's start time to the current time, and reschedules the timer + * to call its callback at the previously specified duration adjusted to the + * current time. + * Using this on a timer that has already called its callback will reactivate + * the timer. + * + * @returns {void} + */ refresh () { - if (this.state === -2) { + // In the special case that the timer is not in the list of active timers, + // add it back to the array to be processed in the next tick by the onTick + // function. + if (this._state === NOT_IN_LIST) { fastTimers.push(this) - if (!fastNowTimeout || fastTimers.length === 1) { - refreshTimeout() - } } - this.state = 0 + // If the timer is the only active timer, refresh the fastNowTimeout for + // better resolution. + if (!fastNowTimeout || fastTimers.length === 1) { + refreshTimeout() + } + + // Setting the state to PENDING will cause the timer to be reset in the + // next tick by the onTick function. + this._state = PENDING } + /** + * The `clear` method cancels the timer, preventing it from executing. + * + * @returns {void} + * @private + */ clear () { - this.state = -1 + // Set the state to TO_BE_CLEARED to mark the timer for removal in the next + // tick by the onTick function. + this._state = TO_BE_CLEARED + + // Reset the _idleStart value to -1 to indicate that the timer is no longer + // active. + this._idleStart = -1 } } +/** + * This module exports a setTimeout and clearTimeout function that can be + * used as a drop-in replacement for the native functions. + */ module.exports = { - setTimeout (callback, delay, opaque) { - return delay <= 1e3 - ? setTimeout(callback, delay, opaque) - : new Timeout(callback, delay, opaque) + /** + * The setTimeout() method sets a timer which executes a function once the + * timer expires. + * @param {Function} callback A function to be executed after the timer + * expires. + * @param {number} delay The time, in milliseconds that the timer should + * wait before the specified function or code is executed. + * @param {*} [arg] An optional argument to be passed to the callback function + * when the timer expires. + * @returns {NodeJS.Timeout|FastTimer} + */ + setTimeout (callback, delay, arg) { + // If the delay is less than or equal to the RESOLUTION_MS value return a + // native Node.js Timer instance. + return delay <= RESOLUTION_MS + ? nativeSetTimeout(callback, delay, arg) + : new FastTimer(callback, delay, arg) }, + /** + * The clearTimeout method cancels an instantiated Timer previously created + * by calling setTimeout. + * + * @param {FastTimer} timeout + */ clearTimeout (timeout) { - if (timeout instanceof Timeout) { + // If the timeout is a FastTimer, call its own clear method. + if (timeout[kFastTimer]) { + /** + * @type {FastTimer} + */ timeout.clear() + // Otherwise it is an instance of a native NodeJS.Timeout, so call the + // Node.js native clearTimeout function. 
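Seen from a caller, the module stays a drop-in for the global functions; a short usage sketch, assuming it is required with the same relative path the tests use:

    const timers = require('../lib/util/timers')

    // Delays up to 1000 ms get a real NodeJS.Timeout and full precision.
    const precise = timers.setTimeout(() => {}, 500)

    // Longer delays get a FastTimer, accurate only to roughly 500 ms.
    const coarse = timers.setTimeout(() => {}, 5000)

    // clearTimeout accepts either kind and routes to the matching implementation.
    timers.clearTimeout(precise)
    timers.clearTimeout(coarse)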
} else { - clearTimeout(timeout) + nativeClearTimeout(timeout) } - } + }, + /** + * The now method returns the value of the internal fast timer clock. + * + * @returns {number} + */ + now () { + return fastNow + }, + /** + * Exporting for testing purposes only. + * Marking as deprecated to discourage any use outside of testing. + * @deprecated + */ + kFastTimer } diff --git a/test/issue-3410.js b/test/issue-3410.js new file mode 100644 index 00000000000..d5bb8093ef2 --- /dev/null +++ b/test/issue-3410.js @@ -0,0 +1,88 @@ +'use strict' + +const { tspl } = require('@matteo.collina/tspl') +const { fork } = require('node:child_process') +const { resolve: pathResolve } = require('node:path') +const { describe, test } = require('node:test') +const { Agent, fetch, setGlobalDispatcher } = require('..') +const { eventLoopBlocker } = require('./utils/event-loop-blocker') + +describe('https://github.com/nodejs/undici/issues/3410', () => { + test('FastTimers', async (t) => { + t = tspl(t, { plan: 1 }) + + // Spawn a server in a new process to avoid effects from the blocking event loop + const { + serverProcess, + address + } = await new Promise((resolve, reject) => { + const childProcess = fork( + pathResolve(__dirname, './utils/hello-world-server.js'), + [], + { windowsHide: true } + ) + + childProcess.on('message', (address) => { + resolve({ + serverProcess: childProcess, + address + }) + }) + childProcess.on('error', err => { + reject(err) + }) + }) + + const connectTimeout = 2000 + setGlobalDispatcher(new Agent({ connectTimeout })) + + const fetchPromise = fetch(address) + + eventLoopBlocker(3000) + + const response = await fetchPromise + + t.equal(await response.text(), 'Hello World') + + serverProcess.kill('SIGKILL') + }) + + test('native Timers', async (t) => { + t = tspl(t, { plan: 1 }) + + // Spawn a server in a new process to avoid effects from the blocking event loop + const { + serverProcess, + address + } = await new Promise((resolve, reject) => { + const childProcess = fork( + pathResolve(__dirname, './utils/hello-world-server.js'), + [], + { windowsHide: true } + ) + + childProcess.on('message', (address) => { + resolve({ + serverProcess: childProcess, + address + }) + }) + childProcess.on('error', err => { + reject(err) + }) + }) + + const connectTimeout = 900 + setGlobalDispatcher(new Agent({ connectTimeout })) + + const fetchPromise = fetch(address) + + eventLoopBlocker(1500) + + const response = await fetchPromise + + t.equal(await response.text(), 'Hello World') + + serverProcess.kill('SIGKILL') + }) +}) diff --git a/test/socket-timeout.js b/test/socket-timeout.js index 5be62fe4119..43ee1576615 100644 --- a/test/socket-timeout.js +++ b/test/socket-timeout.js @@ -3,7 +3,6 @@ const { tspl } = require('@matteo.collina/tspl') const { test, after } = require('node:test') const { Client, errors } = require('..') -const timers = require('../lib/util/timers') const { createServer } = require('node:http') const FakeTimers = require('@sinonjs/fake-timers') @@ -68,12 +67,6 @@ test('Disable socket timeout', async (t) => { const clock = FakeTimers.install() after(clock.uninstall.bind(clock)) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - server.once('request', (req, res) => { setTimeout(() => { res.end('hello') diff --git a/test/timers.js b/test/timers.js new file mode 100644 index 00000000000..621e1aa74bb --- /dev/null +++ b/test/timers.js @@ -0,0 +1,198 @@ +'use strict' + +const { tspl } = 
require('@matteo.collina/tspl') +const { describe, test } = require('node:test') + +const timers = require('../lib/util/timers') +const { eventLoopBlocker } = require('./utils/event-loop-blocker') + +describe('timers', () => { + test('timers exports a clearTimeout', (t) => { + t = tspl(t, { plan: 1 }) + + t.ok(typeof timers.clearTimeout === 'function') + }) + + test('timers exports a setTimeout', (t) => { + t = tspl(t, { plan: 1 }) + + t.ok(typeof timers.setTimeout === 'function') + }) + + test('setTimeout instantiates a native NodeJS.Timeout when delay is lower or equal 1e3 ms', (t) => { + t = tspl(t, { plan: 2 }) + + t.strictEqual(timers.setTimeout(() => { }, 999)[timers.kFastTimer], undefined) + t.strictEqual(timers.setTimeout(() => { }, 1e3)[timers.kFastTimer], undefined) + }) + + test('setTimeout instantiates a FastTimer when delay is smaller than 1e3 ms', (t) => { + t = tspl(t, { plan: 1 }) + + const timeout = timers.setTimeout(() => { }, 1001) + t.strictEqual(timeout[timers.kFastTimer], true) + }) + + test('clearTimeout can clear a node native Timeout', (t) => { + t = tspl(t, { plan: 3 }) + + const nativeTimeoutId = setTimeout(() => { }, 1e6) + t.equal(nativeTimeoutId._idleTimeout, 1e6) + t.ok(timers.clearTimeout(nativeTimeoutId) === undefined) + t.equal(nativeTimeoutId._idleTimeout, -1) + }) + + test('a FastTimer will get a _idleStart value after short time', async (t) => { + t = tspl(t, { plan: 3 }) + + const timer = timers.setTimeout(() => { + t.fail('timer should not have fired') + }, 1e4) + + t.strictEqual(timer[timers.kFastTimer], true) + t.strictEqual(timer._idleStart, -1) + await new Promise((resolve) => setTimeout(resolve, 750)) + t.notStrictEqual(timer._idleStart, -1) + + timers.clearTimeout(timer) + }) + + test('a cleared FastTimer will reset the _idleStart value to -1', async (t) => { + t = tspl(t, { plan: 4 }) + + const timer = timers.setTimeout(() => { + t.fail('timer should not have fired') + }, 1e4) + + t.strictEqual(timer[timers.kFastTimer], true) + t.strictEqual(timer._idleStart, -1) + await new Promise((resolve) => setTimeout(resolve, 750)) + t.notStrictEqual(timer._idleStart, -1) + timers.clearTimeout(timer) + t.strictEqual(timer._idleStart, -1) + }) + + test('a FastTimer can be cleared', async (t) => { + t = tspl(t, { plan: 3 }) + + const timer = timers.setTimeout(() => { + t.fail('timer should not have fired') + }, 1001) + + t.strictEqual(timer[timers.kFastTimer], true) + timers.clearTimeout(timer) + + t.strictEqual(timer._idleStart, -1) + await new Promise((resolve) => setTimeout(resolve, 750)) + t.strictEqual(timer._idleStart, -1) + }) + + test('a cleared FastTimer can be refreshed', async (t) => { + t = tspl(t, { plan: 2 }) + + const timer = timers.setTimeout(() => { + t.ok('pass') + }, 1001) + + t.strictEqual(timer[timers.kFastTimer], true) + timers.clearTimeout(timer) + timer.refresh() + await new Promise((resolve) => setTimeout(resolve, 2000)) + timers.clearTimeout(timer) + }) + + const getDelta = (start, target) => { + const end = process.hrtime.bigint() + const actual = (end - start) / 1_000_000n + return actual - BigInt(target) + } + + // timers.setTimeout implements a low resolution timer with a 500 ms granularity + // It is expected that in the worst case, a timer will fire about 500 ms after the + // intended amount of time, an extra 200 ms is added to account event loop overhead + // Timers should never fire excessively early, 1ms early is tolerated + const ACCEPTABLE_DELTA = 700n + + test('meet acceptable resolution time', async (t) => { + const 
testTimeouts = [0, 1, 499, 500, 501, 990, 999, 1000, 1001, 1100, 1400, 1499, 1500, 4000, 5000] + + t = tspl(t, { plan: 1 + testTimeouts.length * 2 }) + + const start = process.hrtime.bigint() + + for (const target of testTimeouts) { + timers.setTimeout(() => { + const delta = getDelta(start, target) + + t.ok(delta >= -1n, `${target}ms fired early`) + t.ok(delta < ACCEPTABLE_DELTA, `${target}ms fired late, got difference of ${delta}ms`) + }, target) + } + + setTimeout(() => t.ok(true), 6000) + await t.completed + }) + + test('refresh correctly with timeout < TICK_MS', async (t) => { + t = tspl(t, { plan: 3 }) + + const start = process.hrtime.bigint() + + const timeout = timers.setTimeout(() => { + // 400 ms timer was refreshed after 600ms; total target is 1000 + const delta = getDelta(start, 1000) + + t.ok(delta >= -1n, 'refreshed timer fired early') + t.ok(delta < ACCEPTABLE_DELTA, 'refreshed timer fired late') + }, 400) + + setTimeout(() => timeout.refresh(), 200) + setTimeout(() => timeout.refresh(), 400) + setTimeout(() => timeout.refresh(), 600) + + setTimeout(() => t.ok(true), 1500) + await t.completed + }) + + test('refresh correctly with timeout > TICK_MS', async (t) => { + t = tspl(t, { plan: 3 }) + + const start = process.hrtime.bigint() + + const timeout = timers.setTimeout(() => { + // 501ms timer was refreshed after 1250ms; total target is 1751 + const delta = getDelta(start, 1751) + + t.ok(delta >= -1n, 'refreshed timer fired early') + t.ok(delta < ACCEPTABLE_DELTA, 'refreshed timer fired late') + }, 501) + + setTimeout(() => timeout.refresh(), 250) + setTimeout(() => timeout.refresh(), 750) + setTimeout(() => timeout.refresh(), 1250) + + setTimeout(() => t.ok(true), 3000) + await t.completed + }) + + test('a FastTimer will only increment by the defined TICK_MS value', async (t) => { + t = tspl(t, { plan: 2 }) + + const startInternalClock = timers.now() + + // The long running FastTimer will ensure that the internal clock is + // incremented by the TICK_MS value in the onTick function + const longRunningFastTimer = timers.setTimeout(() => {}, 1e10) + + eventLoopBlocker(1000) + + // wait to ensure the timer has fired in the next loop + await new Promise((resolve) => setTimeout(resolve, 1)) + + t.strictEqual(timers.now() - startInternalClock, 499) + await new Promise((resolve) => setTimeout(resolve, 1000)) + t.ok(timers.now() - startInternalClock <= 1497) + + timers.clearTimeout(longRunningFastTimer) + }) +}) diff --git a/test/util.js b/test/util.js index f646ec4ff25..b3e1193e506 100644 --- a/test/util.js +++ b/test/util.js @@ -1,12 +1,10 @@ 'use strict' -const { tspl } = require('@matteo.collina/tspl') const { strictEqual, throws, doesNotThrow } = require('node:assert') const { test, describe } = require('node:test') const { isBlobLike, parseURL, isHttpOrHttpsPrefixed, isValidPort } = require('../lib/core/util') const { Blob, File } = require('node:buffer') const { InvalidArgumentError } = require('../lib/core/errors') -const timers = require('../lib/util/timers') describe('isBlobLike', () => { test('buffer', () => { @@ -255,79 +253,3 @@ describe('parseURL', () => { }) }) }) - -describe('timers', () => { - const getDelta = (start, target) => { - const end = process.hrtime.bigint() - const actual = (end - start) / 1_000_000n - return actual - BigInt(target) - } - - // timers.setTimeout implements a low resolution timer with a 500 ms granularity - // It is expected that in the worst case, a timer will fire about 500 ms after the - // intended amount of time, an extra 200 ms is 
added to account event loop overhead - // Timers should never fire excessively early, 1ms early is tolerated - const ACCEPTABLE_DELTA = 700n - - test('meet acceptable resolution time', async (t) => { - const testTimeouts = [0, 1, 499, 500, 501, 990, 999, 1000, 1001, 1100, 1400, 1499, 1500, 4000, 5000] - - t = tspl(t, { plan: 1 + testTimeouts.length * 2 }) - - const start = process.hrtime.bigint() - - for (const target of testTimeouts) { - timers.setTimeout(() => { - const delta = getDelta(start, target) - - t.ok(delta >= -1n, `${target}ms fired early`) - t.ok(delta < ACCEPTABLE_DELTA, `${target}ms fired late`) - }, target) - } - - setTimeout(() => t.ok(true), 6000) - await t.completed - }) - - test('refresh correctly with timeout < TICK_MS', async (t) => { - t = tspl(t, { plan: 3 }) - - const start = process.hrtime.bigint() - - const timeout = timers.setTimeout(() => { - // 400 ms timer was refreshed after 600ms; total target is 1000 - const delta = getDelta(start, 1000) - - t.ok(delta >= -1n, 'refreshed timer fired early') - t.ok(delta < ACCEPTABLE_DELTA, 'refreshed timer fired late') - }, 400) - - setTimeout(() => timeout.refresh(), 200) - setTimeout(() => timeout.refresh(), 400) - setTimeout(() => timeout.refresh(), 600) - - setTimeout(() => t.ok(true), 1500) - await t.completed - }) - - test('refresh correctly with timeout > TICK_MS', async (t) => { - t = tspl(t, { plan: 3 }) - - const start = process.hrtime.bigint() - - const timeout = timers.setTimeout(() => { - // 501ms timer was refreshed after 1250ms; total target is 1751 - const delta = getDelta(start, 1751) - - t.ok(delta >= -1n, 'refreshed timer fired early') - t.ok(delta < ACCEPTABLE_DELTA, 'refreshed timer fired late') - }, 501) - - setTimeout(() => timeout.refresh(), 250) - setTimeout(() => timeout.refresh(), 750) - setTimeout(() => timeout.refresh(), 1250) - - setTimeout(() => t.ok(true), 3000) - await t.completed - }) -}) diff --git a/test/utils/event-loop-blocker.js b/test/utils/event-loop-blocker.js new file mode 100644 index 00000000000..9c2ec5075f0 --- /dev/null +++ b/test/utils/event-loop-blocker.js @@ -0,0 +1,10 @@ +'use strict' + +function eventLoopBlocker (ms) { + const nil = new Int32Array(new SharedArrayBuffer(4)) + Atomics.wait(nil, 0, 0, ms) +} + +module.exports = { + eventLoopBlocker +} diff --git a/test/utils/hello-world-server.js b/test/utils/hello-world-server.js new file mode 100644 index 00000000000..520ed3a734d --- /dev/null +++ b/test/utils/hello-world-server.js @@ -0,0 +1,30 @@ +'use strict' + +const { createServer } = require('node:http') +const hostname = '127.0.0.1' + +const server = createServer(async (req, res) => { + res.statusCode = 200 + res.setHeader('Content-Type', 'text/plain') + + await sendInDelayedChunks(res, 'Hello World', 125) + res.end() +}) + +async function sendInDelayedChunks (res, payload, delay) { + const chunks = payload.split('') + + for (const chunk of chunks) { + await new Promise(resolve => setTimeout(resolve, delay)) + + res.write(chunk) + } +} + +server.listen(0, hostname, () => { + if (process.send) { + process.send(`http://${hostname}:${server.address().port}/`) + } else { + console.log(`http://${hostname}:${server.address().port}/`) + } +}) From a7bffd4339d1310bec0d86603efbb2a8297bcffd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 13:38:05 +0200 Subject: [PATCH 24/57] fix: run asserts first if possible (#3541) (#3674) (cherry picked from commit 3faf1400c401e19807cebc058a050ea985711776) 
Co-authored-by: Aras Abbasi --- lib/api/api-upgrade.js | 4 ++-- lib/core/connect.js | 4 ++-- lib/core/util.js | 2 +- lib/dispatcher/client-h1.js | 41 ++++++++++++++++++------------------- lib/dispatcher/client.js | 4 ++++ 5 files changed, 29 insertions(+), 26 deletions(-) diff --git a/lib/api/api-upgrade.js b/lib/api/api-upgrade.js index 019fe1efa2d..7effcf21049 100644 --- a/lib/api/api-upgrade.js +++ b/lib/api/api-upgrade.js @@ -50,9 +50,9 @@ class UpgradeHandler extends AsyncResource { } onUpgrade (statusCode, rawHeaders, socket) { - const { callback, opaque, context } = this + assert(statusCode === 101) - assert.strictEqual(statusCode, 101) + const { callback, opaque, context } = this removeSignal(this) diff --git a/lib/core/connect.js b/lib/core/connect.js index e40fb90d81c..50578534ae8 100644 --- a/lib/core/connect.js +++ b/lib/core/connect.js @@ -92,10 +92,10 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess servername = servername || options.servername || util.getServerName(host) || null const sessionKey = servername || hostname - const session = customSession || sessionCache.get(sessionKey) || null - assert(sessionKey) + const session = customSession || sessionCache.get(sessionKey) || null + socket = tls.connect({ highWaterMark: 16384, // TLS in node can't have bigger HWM anyway... ...options, diff --git a/lib/core/util.js b/lib/core/util.js index 00f8a9b200a..9ee7ec23c52 100644 --- a/lib/core/util.js +++ b/lib/core/util.js @@ -233,7 +233,7 @@ function getServerName (host) { return null } - assert.strictEqual(typeof host, 'string') + assert(typeof host === 'string') const servername = getHostname(host) if (net.isIP(servername)) { diff --git a/lib/dispatcher/client-h1.js b/lib/dispatcher/client-h1.js index db5fd6c3746..4a26f641283 100644 --- a/lib/dispatcher/client-h1.js +++ b/lib/dispatcher/client-h1.js @@ -85,35 +85,35 @@ async function lazyllhttp () { return 0 }, wasm_on_status: (p, at, len) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) const start = at - currentBufferPtr + currentBufferRef.byteOffset return currentParser.onStatus(new FastBuffer(currentBufferRef.buffer, start, len)) || 0 }, wasm_on_message_begin: (p) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) return currentParser.onMessageBegin() || 0 }, wasm_on_header_field: (p, at, len) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) const start = at - currentBufferPtr + currentBufferRef.byteOffset return currentParser.onHeaderField(new FastBuffer(currentBufferRef.buffer, start, len)) || 0 }, wasm_on_header_value: (p, at, len) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) const start = at - currentBufferPtr + currentBufferRef.byteOffset return currentParser.onHeaderValue(new FastBuffer(currentBufferRef.buffer, start, len)) || 0 }, wasm_on_headers_complete: (p, statusCode, upgrade, shouldKeepAlive) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) return currentParser.onHeadersComplete(statusCode, Boolean(upgrade), Boolean(shouldKeepAlive)) || 0 }, wasm_on_body: (p, at, len) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) const start = at - currentBufferPtr + currentBufferRef.byteOffset return currentParser.onBody(new FastBuffer(currentBufferRef.buffer, start, len)) || 0 }, wasm_on_message_complete: (p) => { - assert.strictEqual(currentParser.ptr, p) + assert(currentParser.ptr === p) 
return currentParser.onMessageComplete() || 0 } @@ -363,20 +363,19 @@ class Parser { const { upgrade, client, socket, headers, statusCode } = this assert(upgrade) + assert(client[kSocket] === socket) + assert(!socket.destroyed) + assert(!this.paused) + assert((headers.length & 1) === 0) const request = client[kQueue][client[kRunningIdx]] assert(request) - - assert(!socket.destroyed) - assert(socket === client[kSocket]) - assert(!this.paused) assert(request.upgrade || request.method === 'CONNECT') this.statusCode = null this.statusText = '' this.shouldKeepAlive = null - assert(this.headers.length % 2 === 0) this.headers = [] this.headersSize = 0 @@ -433,7 +432,7 @@ class Parser { return -1 } - assert.strictEqual(this.timeoutType, TIMEOUT_HEADERS) + assert(this.timeoutType === TIMEOUT_HEADERS) this.statusCode = statusCode this.shouldKeepAlive = ( @@ -466,7 +465,7 @@ class Parser { return 2 } - assert(this.headers.length % 2 === 0) + assert((this.headers.length & 1) === 0) this.headers = [] this.headersSize = 0 @@ -523,7 +522,7 @@ class Parser { const request = client[kQueue][client[kRunningIdx]] assert(request) - assert.strictEqual(this.timeoutType, TIMEOUT_BODY) + assert(this.timeoutType === TIMEOUT_BODY) if (this.timeout) { // istanbul ignore else: only for jest if (this.timeout.refresh) { @@ -556,11 +555,12 @@ class Parser { return } + assert(statusCode >= 100) + assert((this.headers.length & 1) === 0) + const request = client[kQueue][client[kRunningIdx]] assert(request) - assert(statusCode >= 100) - this.statusCode = null this.statusText = '' this.bytesRead = 0 @@ -568,7 +568,6 @@ class Parser { this.keepAlive = '' this.connection = '' - assert(this.headers.length % 2 === 0) this.headers = [] this.headersSize = 0 @@ -587,7 +586,7 @@ class Parser { client[kQueue][client[kRunningIdx]++] = null if (socket[kWriting]) { - assert.strictEqual(client[kRunning], 0) + assert(client[kRunning] === 0) // Response completed before request. util.destroy(socket, new InformationalError('reset')) return constants.ERROR.PAUSED @@ -646,10 +645,10 @@ async function connectH1 (client, socket) { socket[kParser] = new Parser(client, socket, llhttpInstance) addListener(socket, 'error', function (err) { - const parser = this[kParser] - assert(err.code !== 'ERR_TLS_CERT_ALTNAME_INVALID') + const parser = this[kParser] + // On Mac OS, we get an ECONNRESET even if there is a full body to be forwarded // to the user. 
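The hunks above move assertions ahead of unrelated work, replace `assert.strictEqual(a, b)` with the plain boolean form `assert(a === b)`, and spell the even-length check on raw headers as `(length & 1) === 0`. A small hypothetical before/after sketch of that intent; the handler and its arguments are invented for illustration:

    const assert = require('node:assert')

    // Before: destructuring happens even when an invariant is already broken,
    // and assert.strictEqual also builds a more descriptive failure object.
    function onUpgradeBefore (handler, statusCode, rawHeaders) {
      const { callback, opaque } = handler
      assert.strictEqual(statusCode, 101)
      assert.strictEqual(rawHeaders.length % 2, 0)
      return { callback, opaque }
    }

    // After: assert first, using plain boolean checks; `& 1` is the bitwise
    // spelling of the same "even number of entries" test on name/value pairs.
    function onUpgradeAfter (handler, statusCode, rawHeaders) {
      assert(statusCode === 101)
      assert((rawHeaders.length & 1) === 0)
      const { callback, opaque } = handler
      return { callback, opaque }
    }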
if (err.code === 'ECONNRESET' && parser.statusCode && !parser.shouldKeepAlive) { diff --git a/lib/dispatcher/client.js b/lib/dispatcher/client.js index cb61206b1ed..7e22aa000ba 100644 --- a/lib/dispatcher/client.js +++ b/lib/dispatcher/client.js @@ -385,6 +385,10 @@ function onError (client, err) { } } +/** + * @param {Client} client + * @returns + */ async function connect (client) { assert(!client[kConnecting]) assert(!client[kHTTPContext]) From d047e9907e9f6ca45714f5a02471200cce111143 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:43:54 +0200 Subject: [PATCH 25/57] fix: use fasttimers for all connection timeouts (#3552) (#3675) * fix: use fasttimers for all connection timeouts * Apply suggestions from code review * activate some tests * also use fastTimers in connect.js * fix tests * fix tests * fix tests * fix: use native timeouts for TIMEOUT_IDLE, rename TIMEOUT_IDLE to TIMEOUT_KEEP_ALIVE (#3554) * fix: use native timeouts for TIMEOUT_IDLE, rename TIMEOUT_IDLE to TIMEOUT_KEEP_ALIVE * ensure that fast timers and native timers are set properly * . * . * rename import * more informative connection error * ignore request-timeout binary file, rename clearAll to reset * fix * add test * use queueMicrotask earlier in the socket callbacks (cherry picked from commit dca0aa0998cbdef28916b23d6300beb2fd979140) Co-authored-by: Aras Abbasi --- .gitignore | 3 + .npmignore | 3 + lib/core/connect.js | 69 +++++++++++----- lib/dispatcher/client-h1.js | 49 ++++++++---- lib/util/timers.js | 49 +++++++++++- test/connect-timeout.js | 11 ++- test/issue-3356.js | 5 +- test/request-timeout.js | 155 ++++++++++-------------------------- 8 files changed, 191 insertions(+), 153 deletions(-) diff --git a/.gitignore b/.gitignore index 60aa663c838..7cba7df889f 100644 --- a/.gitignore +++ b/.gitignore @@ -84,3 +84,6 @@ undici-fetch.js .npmrc .tap + +# File generated by /test/request-timeout.js +test/request-timeout.10mb.bin diff --git a/.npmignore b/.npmignore index 879c6669f03..461d334ef36 100644 --- a/.npmignore +++ b/.npmignore @@ -11,3 +11,6 @@ lib/llhttp/llhttp.wasm !index.d.ts !docs/docs/**/* !scripts/strip-comments.js + +# File generated by /test/request-timeout.js +test/request-timeout.10mb.bin diff --git a/lib/core/connect.js b/lib/core/connect.js index 50578534ae8..8ab21fcd5fc 100644 --- a/lib/core/connect.js +++ b/lib/core/connect.js @@ -6,6 +6,8 @@ const util = require('./util') const { InvalidArgumentError, ConnectTimeoutError } = require('./errors') const timers = require('../util/timers') +function noop () {} + let tls // include tls conditionally since it is not always available // TODO: session re-use does not wait for the first @@ -96,6 +98,8 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess const session = customSession || sessionCache.get(sessionKey) || null + port = port || 443 + socket = tls.connect({ highWaterMark: 16384, // TLS in node can't have bigger HWM anyway... ...options, @@ -105,7 +109,7 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess // TODO(HTTP/2): Add support for h2c ALPNProtocols: allowH2 ? 
['http/1.1', 'h2'] : ['http/1.1'], socket: httpSocket, // upgrade socket connection - port: port || 443, + port, host: hostname }) @@ -116,11 +120,14 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess }) } else { assert(!httpSocket, 'httpSocket can only be sent on TLS update') + + port = port || 80 + socket = net.connect({ highWaterMark: 64 * 1024, // Same as nodejs fs streams. ...options, localAddress, - port: port || 80, + port, host: hostname }) } @@ -131,12 +138,12 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess socket.setKeepAlive(true, keepAliveInitialDelay) } - const cancelConnectTimeout = setupConnectTimeout(new WeakRef(socket), timeout) + const clearConnectTimeout = setupConnectTimeout(new WeakRef(socket), { timeout, hostname, port }) socket .setNoDelay(true) .once(protocol === 'https:' ? 'secureConnect' : 'connect', function () { - cancelConnectTimeout() + queueMicrotask(clearConnectTimeout) if (callback) { const cb = callback @@ -145,7 +152,7 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess } }) .on('error', function (err) { - cancelConnectTimeout() + queueMicrotask(clearConnectTimeout) if (callback) { const cb = callback @@ -158,50 +165,70 @@ function buildConnector ({ allowH2, maxCachedSessions, socketPath, timeout, sess } } +/** + * @param {WeakRef} socketWeakRef + * @param {object} opts + * @param {number} opts.timeout + * @param {string} opts.hostname + * @param {number} opts.port + * @returns {() => void} + */ const setupConnectTimeout = process.platform === 'win32' - ? (socket, timeout) => { - if (!timeout) { - return () => { } + ? (socketWeakRef, opts) => { + if (!opts.timeout) { + return noop } let s1 = null let s2 = null - const timer = timers.setTimeout(() => { + const fastTimer = timers.setFastTimeout(() => { // setImmediate is added to make sure that we prioritize socket error events over timeouts s1 = setImmediate(() => { // Windows needs an extra setImmediate probably due to implementation differences in the socket logic - s2 = setImmediate(() => onConnectTimeout(socket.deref())) + s2 = setImmediate(() => onConnectTimeout(socketWeakRef.deref(), opts)) }) - }, timeout) + }, opts.timeout) return () => { - timers.clearTimeout(timer) + timers.clearFastTimeout(fastTimer) clearImmediate(s1) clearImmediate(s2) } } - : (socket, timeout) => { - if (!timeout) { - return () => { } + : (socketWeakRef, opts) => { + if (!opts.timeout) { + return noop } let s1 = null - const timer = timers.setTimeout(() => { + const fastTimer = timers.setFastTimeout(() => { // setImmediate is added to make sure that we prioritize socket error events over timeouts s1 = setImmediate(() => { - onConnectTimeout(socket.deref()) + onConnectTimeout(socketWeakRef.deref(), opts) }) - }, timeout) + }, opts.timeout) return () => { - timers.clearTimeout(timer) + timers.clearFastTimeout(fastTimer) clearImmediate(s1) } } -function onConnectTimeout (socket) { +/** + * @param {net.Socket} socket + * @param {object} opts + * @param {number} opts.timeout + * @param {string} opts.hostname + * @param {number} opts.port + */ +function onConnectTimeout (socket, opts) { let message = 'Connect Timeout Error' if (Array.isArray(socket.autoSelectFamilyAttemptedAddresses)) { - message += ` (attempted addresses: ${socket.autoSelectFamilyAttemptedAddresses.join(', ')})` + message += ` (attempted addresses: ${socket.autoSelectFamilyAttemptedAddresses.join(', ')},` + } else { + message += ` (attempted address: 
${opts.hostname}:${opts.port},` } + + message += ` timeout: ${opts.timeout}ms)` + util.destroy(socket, new ConnectTimeoutError(message)) } diff --git a/lib/dispatcher/client-h1.js b/lib/dispatcher/client-h1.js index 4a26f641283..40628e53d4c 100644 --- a/lib/dispatcher/client-h1.js +++ b/lib/dispatcher/client-h1.js @@ -131,9 +131,17 @@ let currentBufferRef = null let currentBufferSize = 0 let currentBufferPtr = null -const TIMEOUT_HEADERS = 1 -const TIMEOUT_BODY = 2 -const TIMEOUT_IDLE = 3 +const USE_NATIVE_TIMER = 0 +const USE_FAST_TIMER = 1 + +// Use fast timers for headers and body to take eventual event loop +// latency into account. +const TIMEOUT_HEADERS = 2 | USE_FAST_TIMER +const TIMEOUT_BODY = 4 | USE_FAST_TIMER + +// Use native timers to ignore event loop latency for keep-alive +// handling. +const TIMEOUT_KEEP_ALIVE = 8 | USE_NATIVE_TIMER class Parser { constructor (client, socket, { exports }) { @@ -165,18 +173,29 @@ class Parser { } setTimeout (delay, type) { - this.timeoutType = type - if (delay !== this.timeoutValue) { - this.timeout && timers.clearTimeout(this.timeout) + // If the existing timer and the new timer are of different timer type + // (fast or native) or have different delay, we need to clear the existing + // timer and set a new one. + if ( + delay !== this.timeoutValue || + (type & USE_FAST_TIMER) ^ (this.timeoutType & USE_FAST_TIMER) + ) { + // If a timeout is already set, clear it with clearTimeout of the fast + // timer implementation, as it can clear fast and native timers. + if (this.timeout) { + timers.clearTimeout(this.timeout) + this.timeout = null + } + if (delay) { - this.timeout = timers.setTimeout(onParserTimeout, delay, new WeakRef(this)) - // istanbul ignore else: only for jest - if (this.timeout.unref) { + if (type & USE_FAST_TIMER) { + this.timeout = timers.setFastTimeout(onParserTimeout, delay, new WeakRef(this)) + } else { + this.timeout = setTimeout(onParserTimeout, delay, new WeakRef(this)) this.timeout.unref() } - } else { - this.timeout = null } + this.timeoutValue = delay } else if (this.timeout) { // istanbul ignore else: only for jest @@ -184,6 +203,8 @@ class Parser { this.timeout.refresh() } } + + this.timeoutType = type } resume () { @@ -624,7 +645,7 @@ function onParserTimeout (parser) { if (!paused) { util.destroy(socket, new BodyTimeoutError()) } - } else if (timeoutType === TIMEOUT_IDLE) { + } else if (timeoutType === TIMEOUT_KEEP_ALIVE) { assert(client[kRunning] === 0 && client[kKeepAliveTimeoutValue]) util.destroy(socket, new InformationalError('socket idle timeout')) } @@ -802,8 +823,8 @@ function resumeH1 (client) { } if (client[kSize] === 0) { - if (socket[kParser].timeoutType !== TIMEOUT_IDLE) { - socket[kParser].setTimeout(client[kKeepAliveTimeoutValue], TIMEOUT_IDLE) + if (socket[kParser].timeoutType !== TIMEOUT_KEEP_ALIVE) { + socket[kParser].setTimeout(client[kKeepAliveTimeoutValue], TIMEOUT_KEEP_ALIVE) } } else if (client[kRunning] > 0 && socket[kParser].statusCode < 200) { if (socket[kParser].timeoutType !== TIMEOUT_HEADERS) { diff --git a/lib/util/timers.js b/lib/util/timers.js index 8fa3ac56b7b..36b6bbf985e 100644 --- a/lib/util/timers.js +++ b/lib/util/timers.js @@ -347,7 +347,7 @@ module.exports = { * The clearTimeout method cancels an instantiated Timer previously created * by calling setTimeout. * - * @param {FastTimer} timeout + * @param {NodeJS.Timeout|FastTimer} timeout */ clearTimeout (timeout) { // If the timeout is a FastTimer, call its own clear method. 
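The timeout-type constants introduced above fold the chosen timer implementation into the low bit, so `type & USE_FAST_TIMER` selects the clock and an XOR of the old and new type detects an implementation switch. A standalone sketch of that encoding; the constant values mirror the patch, while the helper names are made up:

    const USE_NATIVE_TIMER = 0
    const USE_FAST_TIMER = 1

    const TIMEOUT_HEADERS = 2 | USE_FAST_TIMER       // 3 -> fast timer
    const TIMEOUT_BODY = 4 | USE_FAST_TIMER          // 5 -> fast timer
    const TIMEOUT_KEEP_ALIVE = 8 | USE_NATIVE_TIMER  // 8 -> native timer

    const usesFastTimer = (type) => (type & USE_FAST_TIMER) === USE_FAST_TIMER

    // Non-zero exactly when the old and new type disagree on the implementation.
    const implementationChanged = (oldType, newType) =>
      ((oldType & USE_FAST_TIMER) ^ (newType & USE_FAST_TIMER)) !== 0

    console.assert(usesFastTimer(TIMEOUT_HEADERS))
    console.assert(!usesFastTimer(TIMEOUT_KEEP_ALIVE))
    console.assert(implementationChanged(TIMEOUT_BODY, TIMEOUT_KEEP_ALIVE))
    console.assert(!implementationChanged(TIMEOUT_HEADERS, TIMEOUT_BODY))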
@@ -362,6 +362,29 @@ module.exports = { nativeClearTimeout(timeout) } }, + /** + * The setFastTimeout() method sets a fastTimer which executes a function once + * the timer expires. + * @param {Function} callback A function to be executed after the timer + * expires. + * @param {number} delay The time, in milliseconds that the timer should + * wait before the specified function or code is executed. + * @param {*} [arg] An optional argument to be passed to the callback function + * when the timer expires. + * @returns {FastTimer} + */ + setFastTimeout (callback, delay, arg) { + return new FastTimer(callback, delay, arg) + }, + /** + * The clearTimeout method cancels an instantiated FastTimer previously + * created by calling setFastTimeout. + * + * @param {FastTimer} timeout + */ + clearFastTimeout (timeout) { + timeout.clear() + }, /** * The now method returns the value of the internal fast timer clock. * @@ -370,6 +393,30 @@ module.exports = { now () { return fastNow }, + /** + * Trigger the onTick function to process the fastTimers array. + * Exported for testing purposes only. + * Marking as deprecated to discourage any use outside of testing. + * @deprecated + * @param {number} [delay=0] The delay in milliseconds to add to the now value. + */ + tick (delay = 0) { + fastNow += delay - RESOLUTION_MS + 1 + onTick() + onTick() + }, + /** + * Reset FastTimers. + * Exported for testing purposes only. + * Marking as deprecated to discourage any use outside of testing. + * @deprecated + */ + reset () { + fastNow = 0 + fastTimers.length = 0 + clearTimeout(fastNowTimeout) + fastNowTimeout = null + }, /** * Exporting for testing purposes only. * Marking as deprecated to discourage any use outside of testing. diff --git a/test/connect-timeout.js b/test/connect-timeout.js index ff50eb777a6..186067f80ac 100644 --- a/test/connect-timeout.js +++ b/test/connect-timeout.js @@ -10,12 +10,13 @@ const skip = !!process.env.CITGM // Using describe instead of test to avoid the timeout describe('prioritize socket errors over timeouts', { skip }, async () => { - const t = tspl({ ...assert, after: () => {} }, { plan: 1 }) + const t = tspl({ ...assert, after: () => {} }, { plan: 2 }) const client = new Pool('http://foorbar.invalid:1234', { connectTimeout: 1 }) client.request({ method: 'GET', path: '/foobar' }) .then(() => t.fail()) .catch((err) => { + t.strictEqual(err.code, 'ENOTFOUND') t.strictEqual(err.code !== 'UND_ERR_CONNECT_TIMEOUT', true) }) @@ -32,7 +33,7 @@ net.connect = function (options) { } test('connect-timeout', { skip }, async t => { - t = tspl(t, { plan: 1 }) + t = tspl(t, { plan: 3 }) const client = new Client('http://localhost:9000', { connectTimeout: 1e3 @@ -48,6 +49,8 @@ test('connect-timeout', { skip }, async t => { method: 'GET' }, (err) => { t.ok(err instanceof errors.ConnectTimeoutError) + t.strictEqual(err.code, 'UND_ERR_CONNECT_TIMEOUT') + t.strictEqual(err.message, 'Connect Timeout Error (attempted address: localhost:9000, timeout: 1000ms)') clearTimeout(timeout) }) @@ -55,7 +58,7 @@ test('connect-timeout', { skip }, async t => { }) test('connect-timeout', { skip }, async t => { - t = tspl(t, { plan: 1 }) + t = tspl(t, { plan: 3 }) const client = new Pool('http://localhost:9000', { connectTimeout: 1e3 @@ -71,6 +74,8 @@ test('connect-timeout', { skip }, async t => { method: 'GET' }, (err) => { t.ok(err instanceof errors.ConnectTimeoutError) + t.strictEqual(err.code, 'UND_ERR_CONNECT_TIMEOUT') + t.strictEqual(err.message, 'Connect Timeout Error (attempted address: localhost:9000, 
timeout: 1000ms)') clearTimeout(timeout) }) diff --git a/test/issue-3356.js b/test/issue-3356.js index 927208583a9..fd7bf59656f 100644 --- a/test/issue-3356.js +++ b/test/issue-3356.js @@ -4,7 +4,7 @@ const { tspl } = require('@matteo.collina/tspl') const { test, after } = require('node:test') const { createServer } = require('node:http') const { once } = require('node:events') - +const { tick: fastTimersTick } = require('../lib/util/timers') const { fetch, Agent, RetryAgent } = require('..') test('https://github.com/nodejs/undici/issues/3356', async (t) => { @@ -42,6 +42,9 @@ test('https://github.com/nodejs/undici/issues/3356', async (t) => { const response = await fetch(`http://localhost:${server.address().port}`, { dispatcher: agent }) + + fastTimersTick() + setTimeout(async () => { try { t.equal(response.status, 200) diff --git a/test/request-timeout.js b/test/request-timeout.js index 03d34c9bef5..dfe0de1967c 100644 --- a/test/request-timeout.js +++ b/test/request-timeout.js @@ -1,11 +1,11 @@ 'use strict' const { tspl } = require('@matteo.collina/tspl') -const { test, after } = require('node:test') +const { resolve: pathResolve } = require('node:path') +const { test, after, beforeEach } = require('node:test') const { createReadStream, writeFileSync, unlinkSync } = require('node:fs') const { Client, errors } = require('..') const { kConnect } = require('../lib/core/symbols') -const timers = require('../lib/util/timers') const { createServer } = require('node:http') const EventEmitter = require('node:events') const FakeTimers = require('@sinonjs/fake-timers') @@ -16,6 +16,14 @@ const { Writable, PassThrough } = require('node:stream') +const { + tick: fastTimersTick, + reset: resetFastTimers +} = require('../lib/util/timers') + +beforeEach(() => { + resetFastTimers() +}) test('request timeout', async (t) => { t = tspl(t, { plan: 1 }) @@ -46,7 +54,7 @@ test('request timeout with readable body', async (t) => { }) after(() => server.close()) - const tempfile = `${__filename}.10mb.txt` + const tempfile = pathResolve(__dirname, 'request-timeout.10mb.bin') writeFileSync(tempfile, Buffer.alloc(10 * 1024 * 1024)) after(() => unlinkSync(tempfile)) @@ -72,12 +80,6 @@ test('body timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { res.write('hello') }) @@ -91,12 +93,14 @@ test('body timeout', async (t) => { t.ifError(err) body.on('data', () => { clock.tick(100) + fastTimersTick(100) }).on('error', (err) => { t.ok(err instanceof errors.BodyTimeoutError) }) }) clock.tick(50) + fastTimersTick(50) }) await t.completed @@ -111,17 +115,12 @@ test('overridden request timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 100) clock.tick(100) + fastTimersTick(100) }) after(() => server.close()) @@ -134,6 +133,7 @@ test('overridden request timeout', async (t) => { }) clock.tick(50) + fastTimersTick(50) }) await t.completed @@ -148,12 +148,6 @@ test('overridden body timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = 
createServer((req, res) => { res.write('hello') }) @@ -166,13 +160,15 @@ test('overridden body timeout', async (t) => { client.request({ path: '/', method: 'GET', bodyTimeout: 50 }, (err, { body }) => { t.ifError(err) body.on('data', () => { - clock.tick(100) + fastTimersTick() + fastTimersTick() }).on('error', (err) => { t.ok(err instanceof errors.BodyTimeoutError) }) }) - clock.tick(50) + fastTimersTick() + fastTimersTick() }) await t.completed @@ -187,17 +183,12 @@ test('With EE signal', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 100) clock.tick(100) + fastTimersTick(100) }) after(() => server.close()) @@ -213,6 +204,7 @@ test('With EE signal', async (t) => { }) clock.tick(50) + fastTimersTick(50) }) await t.completed @@ -227,17 +219,12 @@ test('With abort-controller signal', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 100) clock.tick(100) + fastTimersTick(100) }) after(() => server.close()) @@ -253,6 +240,7 @@ test('With abort-controller signal', async (t) => { }) clock.tick(50) + fastTimersTick(50) }) await t.completed @@ -267,12 +255,6 @@ test('Abort before timeout (EE)', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const ee = new EventEmitter() const server = createServer((req, res) => { setTimeout(() => { @@ -280,6 +262,7 @@ test('Abort before timeout (EE)', async (t) => { }, 100) ee.emit('abort') clock.tick(50) + fastTimersTick(50) }) after(() => server.close()) @@ -292,6 +275,7 @@ test('Abort before timeout (EE)', async (t) => { client.request({ path: '/', method: 'GET', signal: ee }, (err, response) => { t.ok(err instanceof errors.RequestAbortedError) clock.tick(100) + fastTimersTick(100) }) }) @@ -307,12 +291,6 @@ test('Abort before timeout (abort-controller)', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const abortController = new AbortController() const server = createServer((req, res) => { setTimeout(() => { @@ -320,6 +298,7 @@ test('Abort before timeout (abort-controller)', async (t) => { }, 100) abortController.abort() clock.tick(50) + fastTimersTick(50) }) after(() => server.close()) @@ -332,6 +311,7 @@ test('Abort before timeout (abort-controller)', async (t) => { client.request({ path: '/', method: 'GET', signal: abortController.signal }, (err, response) => { t.ok(err instanceof errors.RequestAbortedError) clock.tick(100) + fastTimersTick(100) }) }) @@ -347,17 +327,12 @@ test('Timeout with pipelining', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 100) clock.tick(50) + fastTimersTick(50) }) after(() => server.close()) @@ -393,17 +368,12 @@ test('Global option', async (t) => { }) after(() 
=> clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 100) clock.tick(100) + fastTimersTick(100) }) after(() => server.close()) @@ -418,6 +388,7 @@ test('Global option', async (t) => { }) clock.tick(50) + fastTimersTick(50) }) await t.completed @@ -432,17 +403,12 @@ test('Request options overrides global option', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 100) clock.tick(100) + fastTimersTick(100) }) after(() => server.close()) @@ -457,6 +423,7 @@ test('Request options overrides global option', async (t) => { }) clock.tick(50) + fastTimersTick(50) }) await t.completed @@ -496,12 +463,6 @@ test('client.close should wait for the timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { }) after(() => server.close()) @@ -523,6 +484,7 @@ test('client.close should wait for the timeout', async (t) => { client.on('connect', () => { process.nextTick(() => { clock.tick(100) + fastTimersTick(100) }) }) }) @@ -581,17 +543,12 @@ test('Disable request timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 32e3) clock.tick(33e3) + fastTimersTick(33e3) }) after(() => server.close()) @@ -614,6 +571,7 @@ test('Disable request timeout', async (t) => { }) clock.tick(31e3) + fastTimersTick(31e3) }) await t.completed @@ -628,17 +586,12 @@ test('Disable request timeout for a single request', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 32e3) clock.tick(33e3) + fastTimersTick(33e3) }) after(() => server.close()) @@ -661,6 +614,7 @@ test('Disable request timeout for a single request', async (t) => { }) clock.tick(31e3) + fastTimersTick(31e3) }) await t.completed @@ -675,17 +629,12 @@ test('stream timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 301e3) clock.tick(301e3) + fastTimersTick(301e3) }) after(() => server.close()) @@ -716,17 +665,12 @@ test('stream custom timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { res.end('hello') }, 31e3) clock.tick(31e3) + fastTimersTick(31e3) }) after(() => server.close()) @@ -759,17 +703,12 @@ test('pipeline timeout', async (t) => { }) after(() => 
clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { req.pipe(res) }, 301e3) clock.tick(301e3) + fastTimersTick(301e3) }) after(() => server.close()) @@ -819,17 +758,12 @@ test('pipeline timeout', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { setTimeout(() => { req.pipe(res) }, 31e3) clock.tick(31e3) + fastTimersTick(31e3) }) after(() => server.close()) @@ -881,12 +815,6 @@ test('client.close should not deadlock', async (t) => { }) after(() => clock.uninstall()) - const orgTimers = { ...timers } - Object.assign(timers, { setTimeout, clearTimeout }) - after(() => { - Object.assign(timers, orgTimers) - }) - const server = createServer((req, res) => { }) after(() => server.close()) @@ -911,6 +839,7 @@ test('client.close should not deadlock', async (t) => { }) clock.tick(100) + fastTimersTick(100) }) }) await t.completed From 5b1e534d04839057e0ff7fe1f5dbea0cdf9119e8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 23:46:21 +0200 Subject: [PATCH 26/57] ci: less flaky test/request-timeout.js test (#3580) (#3678) (cherry picked from commit 990df2c7e37cbe5bb44fe2f576dddeaeb5916590) Co-authored-by: Aras Abbasi --- test/request-timeout.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/request-timeout.js b/test/request-timeout.js index dfe0de1967c..19f602fff8b 100644 --- a/test/request-timeout.js +++ b/test/request-timeout.js @@ -31,7 +31,7 @@ test('request timeout', async (t) => { const server = createServer((req, res) => { setTimeout(() => { res.end('hello') - }, 1000) + }, 2000) }) after(() => server.close()) From 5c0846d1495388ff0796049b78e99026e2c80726 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 23:47:44 +0200 Subject: [PATCH 27/57] test: less flaky timers acceptance test, rework fast timer tests to pass them faster (#3656) (#3679) * test: less flaky timers acceptance test * use fake timers * tick 10 * one tick --------- Co-authored-by: Carlos Fuentes (cherry picked from commit 5308d2924e1d9fa26a79f492b327af7f07a6e40c) Co-authored-by: Aras Abbasi --- lib/util/timers.js | 7 +- test/timers.js | 170 ++++++++++++++++++++++++++++++--------------- 2 files changed, 117 insertions(+), 60 deletions(-) diff --git a/lib/util/timers.js b/lib/util/timers.js index 36b6bbf985e..c15bbc37aa1 100644 --- a/lib/util/timers.js +++ b/lib/util/timers.js @@ -14,9 +14,6 @@ * Consequently, timers may trigger later than their scheduled time. */ -const nativeSetTimeout = global.setTimeout -const nativeClearTimeout = global.clearTimeout - /** * The fastNow variable contains the internal fast timer clock value. * @@ -340,7 +337,7 @@ module.exports = { // If the delay is less than or equal to the RESOLUTION_MS value return a // native Node.js Timer instance. return delay <= RESOLUTION_MS - ? nativeSetTimeout(callback, delay, arg) + ? setTimeout(callback, delay, arg) : new FastTimer(callback, delay, arg) }, /** @@ -359,7 +356,7 @@ module.exports = { // Otherwise it is an instance of a native NodeJS.Timeout, so call the // Node.js native clearTimeout function. 
} else { - nativeClearTimeout(timeout) + clearTimeout(timeout) } }, /** diff --git a/test/timers.js b/test/timers.js index 621e1aa74bb..9d8cd596e90 100644 --- a/test/timers.js +++ b/test/timers.js @@ -2,10 +2,25 @@ const { tspl } = require('@matteo.collina/tspl') const { describe, test } = require('node:test') +const FakeTimers = require('@sinonjs/fake-timers') + +const clock = FakeTimers.install() const timers = require('../lib/util/timers') const { eventLoopBlocker } = require('./utils/event-loop-blocker') +// timers.setTimeout implements a low resolution timer with a 500 ms granularity +// It is expected that in the worst case, a timer will fire about 500 ms after the +// intended amount of time, an extra 200 ms is added to account event loop overhead +// Timers should never fire excessively early, 1ms early is tolerated +const ACCEPTABLE_DELTA = 700 + +function tick (duration) { + for (let i = 0; i < duration; ++i) { + clock.tick(1) + } +} + describe('timers', () => { test('timers exports a clearTimeout', (t) => { t = tspl(t, { plan: 1 }) @@ -26,7 +41,7 @@ describe('timers', () => { t.strictEqual(timers.setTimeout(() => { }, 1e3)[timers.kFastTimer], undefined) }) - test('setTimeout instantiates a FastTimer when delay is smaller than 1e3 ms', (t) => { + test('setTimeout instantiates a FastTimer when delay is bigger than 1e3 ms', (t) => { t = tspl(t, { plan: 1 }) const timeout = timers.setTimeout(() => { }, 1001) @@ -34,12 +49,11 @@ describe('timers', () => { }) test('clearTimeout can clear a node native Timeout', (t) => { - t = tspl(t, { plan: 3 }) + t = tspl(t, { plan: 1 }) - const nativeTimeoutId = setTimeout(() => { }, 1e6) - t.equal(nativeTimeoutId._idleTimeout, 1e6) + const nativeTimeoutId = setTimeout(() => { t.fail() }, 1) t.ok(timers.clearTimeout(nativeTimeoutId) === undefined) - t.equal(nativeTimeoutId._idleTimeout, -1) + tick(10) }) test('a FastTimer will get a _idleStart value after short time', async (t) => { @@ -51,7 +65,8 @@ describe('timers', () => { t.strictEqual(timer[timers.kFastTimer], true) t.strictEqual(timer._idleStart, -1) - await new Promise((resolve) => setTimeout(resolve, 750)) + + tick(1e3) t.notStrictEqual(timer._idleStart, -1) timers.clearTimeout(timer) @@ -66,7 +81,7 @@ describe('timers', () => { t.strictEqual(timer[timers.kFastTimer], true) t.strictEqual(timer._idleStart, -1) - await new Promise((resolve) => setTimeout(resolve, 750)) + tick(750) t.notStrictEqual(timer._idleStart, -1) timers.clearTimeout(timer) t.strictEqual(timer._idleStart, -1) @@ -83,87 +98,63 @@ describe('timers', () => { timers.clearTimeout(timer) t.strictEqual(timer._idleStart, -1) - await new Promise((resolve) => setTimeout(resolve, 750)) + tick(750) t.strictEqual(timer._idleStart, -1) }) test('a cleared FastTimer can be refreshed', async (t) => { t = tspl(t, { plan: 2 }) - const timer = timers.setTimeout(() => { + const timer = timers.setFastTimeout(() => { t.ok('pass') }, 1001) t.strictEqual(timer[timers.kFastTimer], true) timers.clearTimeout(timer) timer.refresh() - await new Promise((resolve) => setTimeout(resolve, 2000)) + tick(2000) timers.clearTimeout(timer) }) const getDelta = (start, target) => { - const end = process.hrtime.bigint() - const actual = (end - start) / 1_000_000n - return actual - BigInt(target) + const end = performance.now() + const actual = end - start + return actual - target } - // timers.setTimeout implements a low resolution timer with a 500 ms granularity - // It is expected that in the worst case, a timer will fire about 500 ms after the - // intended 
amount of time, an extra 200 ms is added to account event loop overhead - // Timers should never fire excessively early, 1ms early is tolerated - const ACCEPTABLE_DELTA = 700n - - test('meet acceptable resolution time', async (t) => { - const testTimeouts = [0, 1, 499, 500, 501, 990, 999, 1000, 1001, 1100, 1400, 1499, 1500, 4000, 5000] - - t = tspl(t, { plan: 1 + testTimeouts.length * 2 }) - - const start = process.hrtime.bigint() - - for (const target of testTimeouts) { - timers.setTimeout(() => { - const delta = getDelta(start, target) - - t.ok(delta >= -1n, `${target}ms fired early`) - t.ok(delta < ACCEPTABLE_DELTA, `${target}ms fired late, got difference of ${delta}ms`) - }, target) - } - - setTimeout(() => t.ok(true), 6000) - await t.completed - }) - test('refresh correctly with timeout < TICK_MS', async (t) => { t = tspl(t, { plan: 3 }) - const start = process.hrtime.bigint() + const start = performance.now() const timeout = timers.setTimeout(() => { - // 400 ms timer was refreshed after 600ms; total target is 1000 - const delta = getDelta(start, 1000) + // 80 ms timer was refreshed after 120 ms; total target is 200 ms + const delta = getDelta(start, 200) - t.ok(delta >= -1n, 'refreshed timer fired early') + t.ok(delta >= -1, 'refreshed timer fired early') t.ok(delta < ACCEPTABLE_DELTA, 'refreshed timer fired late') - }, 400) + }, 80) - setTimeout(() => timeout.refresh(), 200) - setTimeout(() => timeout.refresh(), 400) - setTimeout(() => timeout.refresh(), 600) + setTimeout(() => timeout.refresh(), 40) + setTimeout(() => timeout.refresh(), 80) + setTimeout(() => timeout.refresh(), 120) - setTimeout(() => t.ok(true), 1500) + setTimeout(() => t.ok(true), 260) + + tick(500) await t.completed }) test('refresh correctly with timeout > TICK_MS', async (t) => { t = tspl(t, { plan: 3 }) - const start = process.hrtime.bigint() + const start = performance.now() const timeout = timers.setTimeout(() => { // 501ms timer was refreshed after 1250ms; total target is 1751 const delta = getDelta(start, 1751) - t.ok(delta >= -1n, 'refreshed timer fired early') + t.ok(delta >= -1, 'refreshed timer fired early') t.ok(delta < ACCEPTABLE_DELTA, 'refreshed timer fired late') }, 501) @@ -171,12 +162,49 @@ describe('timers', () => { setTimeout(() => timeout.refresh(), 750) setTimeout(() => timeout.refresh(), 1250) - setTimeout(() => t.ok(true), 3000) + setTimeout(() => t.ok(true), 1800) + + tick(2000) + await t.completed + }) + + test('refresh correctly FastTimer with timeout > TICK_MS', async (t) => { + t = tspl(t, { plan: 3 }) + + // The long running FastTimer will ensure that the internal clock is + // incremented by the TICK_MS value in the onTick function + const longRunningFastTimer = timers.setTimeout(() => {}, 1e10) + + const start = timers.now() + + const timeout = timers.setFastTimeout(() => { + const delta = (timers.now() - start) - 2493 + + t.ok(delta >= -1, `refreshed timer fired early (${delta} ms)`) + t.ok(delta < ACCEPTABLE_DELTA, `refreshed timer fired late (${delta} ms)`) + }, 1001) + + tick(250) + timeout.refresh() + + tick(250) + timeout.refresh() + + tick(250) + timeout.refresh() + + tick(250) + timeout.refresh() + + timers.clearTimeout(longRunningFastTimer) + setTimeout(() => t.ok(true), 500) + + tick(5000) await t.completed }) test('a FastTimer will only increment by the defined TICK_MS value', async (t) => { - t = tspl(t, { plan: 2 }) + t = tspl(t, { plan: 6 }) const startInternalClock = timers.now() @@ -187,12 +215,44 @@ describe('timers', () => { eventLoopBlocker(1000) // wait to 
ensure the timer has fired in the next loop - await new Promise((resolve) => setTimeout(resolve, 1)) + await new Promise((resolve) => resolve()) + tick(250) + t.strictEqual(timers.now() - startInternalClock, 0) + tick(250) + t.strictEqual(timers.now() - startInternalClock, 499) + tick(250) t.strictEqual(timers.now() - startInternalClock, 499) - await new Promise((resolve) => setTimeout(resolve, 1000)) - t.ok(timers.now() - startInternalClock <= 1497) + tick(250) + t.strictEqual(timers.now() - startInternalClock, 998) + tick(250) + t.strictEqual(timers.now() - startInternalClock, 998) + tick(250) + t.strictEqual(timers.now() - startInternalClock, 1497) timers.clearTimeout(longRunningFastTimer) }) + + test('meet acceptable resolution time', async (t) => { + const testTimeouts = [0, 1, 499, 500, 501, 990, 999, 1000, 1001, 1100, 1400, 1499, 1500, 4000, 5000] + + t = tspl(t, { plan: testTimeouts.length * 2 }) + + const start = performance.now() + + for (const target of testTimeouts) { + timers.setTimeout(() => { + const delta = getDelta(start, target) + + t.ok(delta >= -1, `${target}ms fired early`) + t.ok(delta < ACCEPTABLE_DELTA, `${target}ms fired late, got difference of ${delta}ms`) + }, target) + } + + for (let i = 0; i < 6000; ++i) { + clock.tick(1) + } + + await t.completed + }) }) From de943c429271aa6f1fdefd81be0cea8d1f787ac9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 5 Oct 2024 11:10:08 +0200 Subject: [PATCH 28/57] ignore leading and trailing crlfs in formdata body (#3677) (#3681) (cherry picked from commit 773ba01efb8ecc1b21ee9ffe9cb3c0f83b499c6e) Co-authored-by: Khafra --- lib/web/fetch/formdata-parser.js | 14 ++++++++++++-- test/busboy/issue-3676.js | 24 ++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) create mode 100644 test/busboy/issue-3676.js diff --git a/lib/web/fetch/formdata-parser.js b/lib/web/fetch/formdata-parser.js index 7e567e9ec65..315a4626da5 100644 --- a/lib/web/fetch/formdata-parser.js +++ b/lib/web/fetch/formdata-parser.js @@ -87,11 +87,21 @@ function multipartFormDataParser (input, mimeType) { // the first byte. const position = { position: 0 } - // Note: undici addition, allow \r\n before the body. - if (input[0] === 0x0d && input[1] === 0x0a) { + // Note: undici addition, allows leading and trailing CRLFs. + while (input[position.position] === 0x0d && input[position.position + 1] === 0x0a) { position.position += 2 } + let trailing = input.length + + while (input[trailing - 1] === 0x0a && input[trailing - 2] === 0x0d) { + trailing -= 2 + } + + if (trailing !== input.length) { + input = input.subarray(0, trailing) + } + // 5. While true: while (true) { // 5.1. 
If position points to a sequence of bytes starting with 0x2D 0x2D diff --git a/test/busboy/issue-3676.js b/test/busboy/issue-3676.js new file mode 100644 index 00000000000..4b74af88767 --- /dev/null +++ b/test/busboy/issue-3676.js @@ -0,0 +1,24 @@ +'use strict' + +const { test } = require('node:test') +const assert = require('node:assert') +const { Response } = require('../..') + +// https://github.com/nodejs/undici/issues/3676 +test('Leading and trailing CRLFs are ignored', async (t) => { + const response = new Response([ + '--axios-1.7.7-boundary-bPgZ9x77LfApGVUN839vui4V7\r\n' + + 'Content-Disposition: form-data; name="file"; filename="doc.txt"\r\n' + + 'Content-Type: text/plain\r\n' + + '\r\n' + + 'Helloworld\r\n' + + '--axios-1.7.7-boundary-bPgZ9x77LfApGVUN839vui4V7--\r\n' + + '\r\n' + ].join(''), { + headers: { + 'content-type': 'multipart/form-data; boundary=axios-1.7.7-boundary-bPgZ9x77LfApGVUN839vui4V7' + } + }) + + await assert.doesNotReject(response.formData()) +}) From 1df3923b19ec1b72068998a23115d5bec80b5823 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 15:45:18 +0200 Subject: [PATCH 29/57] mock: fix mocking of Uint8Array and ArrayBuffers as provided mock-reponses (#3662) (#3689) (cherry picked from commit 2d524cf58a1dd21c1e6e7008d305e64e57018af3) Co-authored-by: Aras Abbasi --- lib/mock/mock-utils.js | 4 ++++ test/mock-interceptor.js | 49 ++++++++++++++++++++++++++++++++++++++++ test/mock-utils.js | 12 ++++++++++ 3 files changed, 65 insertions(+) diff --git a/lib/mock/mock-utils.js b/lib/mock/mock-utils.js index f3c284d7891..8f18db31ee2 100644 --- a/lib/mock/mock-utils.js +++ b/lib/mock/mock-utils.js @@ -118,6 +118,10 @@ function matchKey (mockDispatch, { path, method, body, headers }) { function getResponseData (data) { if (Buffer.isBuffer(data)) { return data + } else if (data instanceof Uint8Array) { + return data + } else if (data instanceof ArrayBuffer) { + return data } else if (typeof data === 'object') { return JSON.stringify(data) } else { diff --git a/test/mock-interceptor.js b/test/mock-interceptor.js index 0d16290f579..8364fb40415 100644 --- a/test/mock-interceptor.js +++ b/test/mock-interceptor.js @@ -6,6 +6,7 @@ const { MockInterceptor, MockScope } = require('../lib/mock/mock-interceptor') const MockAgent = require('../lib/mock/mock-agent') const { kDispatchKey } = require('../lib/mock/mock-symbols') const { InvalidArgumentError } = require('../lib/core/errors') +const { fetch } = require('../lib/web/fetch/index') describe('MockInterceptor - path', () => { test('should remove hash fragment from paths', t => { @@ -257,3 +258,51 @@ describe('MockInterceptor - replyContentLength', () => { t.ok(result instanceof MockInterceptor) }) }) + +describe('MockInterceptor - different payloads', () => { + [ + // Buffer + ['arrayBuffer', 'ArrayBuffer', 'ArrayBuffer', new TextEncoder().encode('{"test":true}').buffer, new TextEncoder().encode('{"test":true}').buffer], + ['json', 'ArrayBuffer', 'Object', new TextEncoder().encode('{"test":true}').buffer, { test: true }], + ['bytes', 'ArrayBuffer', 'Uint8Array', new TextEncoder().encode('{"test":true}').buffer, new TextEncoder().encode('{"test":true}')], + ['text', 'ArrayBuffer', 'string', new TextEncoder().encode('{"test":true}').buffer, '{"test":true}'], + + // Buffer + ['arrayBuffer', 'Buffer', 'ArrayBuffer', Buffer.from('{"test":true}'), new TextEncoder().encode('{"test":true}').buffer], + ['json', 'Buffer', 'Object', 
Buffer.from('{"test":true}'), { test: true }], + ['bytes', 'Buffer', 'Uint8Array', Buffer.from('{"test":true}'), new TextEncoder().encode('{"test":true}')], + ['text', 'Buffer', 'string', Buffer.from('{"test":true}'), '{"test":true}'], + + // Uint8Array + ['arrayBuffer', 'Uint8Array', 'ArrayBuffer', new TextEncoder().encode('{"test":true}'), new TextEncoder().encode('{"test":true}').buffer], + ['json', 'Uint8Array', 'Object', new TextEncoder().encode('{"test":true}'), { test: true }], + ['bytes', 'Uint8Array', 'Uint8Array', new TextEncoder().encode('{"test":true}'), new TextEncoder().encode('{"test":true}')], + ['text', 'Uint8Array', 'string', new TextEncoder().encode('{"test":true}'), '{"test":true}'], + + // string + ['arrayBuffer', 'string', 'ArrayBuffer', '{"test":true}', new TextEncoder().encode('{"test":true}').buffer], + ['json', 'string', 'Object', '{"test":true}', { test: true }], + ['bytes', 'string', 'Uint8Array', '{"test":true}', new TextEncoder().encode('{"test":true}')], + ['text', 'string', 'string', '{"test":true}', '{"test":true}'], + + // object + ['arrayBuffer', 'Object', 'ArrayBuffer', { test: true }, new TextEncoder().encode('{"test":true}').buffer], + ['json', 'Object', 'Object', { test: true }, { test: true }], + ['bytes', 'Object', 'Uint8Array', { test: true }, new TextEncoder().encode('{"test":true}')], + ['text', 'Object', 'string', { test: true }, '{"test":true}'] + ].forEach(([method, inputType, outputType, input, output]) => { + test(`${inputType} will be returned as ${outputType} via ${method}()`, async (t) => { + t = tspl(t, { plan: 1 }) + + const mockAgent = new MockAgent() + mockAgent.disableNetConnect() + mockAgent + .get('https://localhost') + .intercept({ path: '/' }).reply(200, input) + + const response = await fetch('https://localhost', { dispatcher: mockAgent }) + + t.deepStrictEqual(await response[method](), output) + }) + }) +}) diff --git a/test/mock-utils.js b/test/mock-utils.js index f27a6763ae9..b80db2a401c 100644 --- a/test/mock-utils.js +++ b/test/mock-utils.js @@ -162,6 +162,18 @@ describe('getResponseData', () => { const responseData = getResponseData(Buffer.from('test')) t.ok(Buffer.isBuffer(responseData)) }) + + test('it should return Uint8Array untouched', (t) => { + t = tspl(t, { plan: 1 }) + const responseData = getResponseData(new TextEncoder().encode('{"test":true}')) + t.ok(responseData instanceof Uint8Array) + }) + + test('it should return ArrayBuffers untouched', (t) => { + t = tspl(t, { plan: 1 }) + const responseData = getResponseData(new TextEncoder().encode('{"test":true}').buffer) + t.ok(responseData instanceof ArrayBuffer) + }) }) test('getStatusText', (t) => { From e4439e9647ef8bd094dfd848769d62410a23955a Mon Sep 17 00:00:00 2001 From: Aras Abbasi Date: Tue, 8 Oct 2024 17:55:50 +0200 Subject: [PATCH 30/57] handle body errors (#3632) (#3700) Co-authored-by: Aras Abbasi (cherry picked from commit 862c0354ce8a9d9972029a7154255eaabed0bb51) Co-authored-by: Khafra --- lib/web/fetch/index.js | 20 +++++++++++++---- lib/web/fetch/util.js | 20 +++++++++++++---- test/fetch/issue-3616.js | 48 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 8 deletions(-) create mode 100644 test/fetch/issue-3616.js diff --git a/lib/web/fetch/index.js b/lib/web/fetch/index.js index 784e0c2cdbb..3c7ca211c99 100644 --- a/lib/web/fetch/index.js +++ b/lib/web/fetch/index.js @@ -2150,9 +2150,15 @@ async function httpNetworkFetch ( finishFlush: zlib.constants.Z_SYNC_FLUSH })) } else if (coding === 'deflate') { - 
decoders.push(createInflate()) + decoders.push(createInflate({ + flush: zlib.constants.Z_SYNC_FLUSH, + finishFlush: zlib.constants.Z_SYNC_FLUSH + })) } else if (coding === 'br') { - decoders.push(zlib.createBrotliDecompress()) + decoders.push(zlib.createBrotliDecompress({ + flush: zlib.constants.BROTLI_OPERATION_FLUSH, + finishFlush: zlib.constants.BROTLI_OPERATION_FLUSH + })) } else { decoders.length = 0 break @@ -2160,13 +2166,19 @@ async function httpNetworkFetch ( } } + const onError = this.onError.bind(this) + resolve({ status, statusText, headersList, body: decoders.length - ? pipeline(this.body, ...decoders, () => { }) - : this.body.on('error', () => { }) + ? pipeline(this.body, ...decoders, (err) => { + if (err) { + this.onError(err) + } + }).on('error', onError) + : this.body.on('error', onError) }) return true diff --git a/lib/web/fetch/util.js b/lib/web/fetch/util.js index dc5ce9b392a..5101324a80c 100644 --- a/lib/web/fetch/util.js +++ b/lib/web/fetch/util.js @@ -1340,6 +1340,14 @@ function buildContentRange (rangeStart, rangeEnd, fullLength) { // interpreted as a zlib stream, otherwise it's interpreted as a // raw deflate stream. class InflateStream extends Transform { + #zlibOptions + + /** @param {zlib.ZlibOptions} [zlibOptions] */ + constructor (zlibOptions) { + super() + this.#zlibOptions = zlibOptions + } + _transform (chunk, encoding, callback) { if (!this._inflateStream) { if (chunk.length === 0) { @@ -1347,8 +1355,8 @@ class InflateStream extends Transform { return } this._inflateStream = (chunk[0] & 0x0F) === 0x08 - ? zlib.createInflate() - : zlib.createInflateRaw() + ? zlib.createInflate(this.#zlibOptions) + : zlib.createInflateRaw(this.#zlibOptions) this._inflateStream.on('data', this.push.bind(this)) this._inflateStream.on('end', () => this.push(null)) @@ -1367,8 +1375,12 @@ class InflateStream extends Transform { } } -function createInflate () { - return new InflateStream() +/** + * @param {zlib.ZlibOptions} [zlibOptions] + * @returns {InflateStream} + */ +function createInflate (zlibOptions) { + return new InflateStream(zlibOptions) } /** diff --git a/test/fetch/issue-3616.js b/test/fetch/issue-3616.js new file mode 100644 index 00000000000..ed9f739bba1 --- /dev/null +++ b/test/fetch/issue-3616.js @@ -0,0 +1,48 @@ +'use strict' + +const { createServer } = require('node:http') +const { tspl } = require('@matteo.collina/tspl') +const { describe, test, after } = require('node:test') +const { fetch } = require('../..') +const { once } = require('node:events') + +describe('https://github.com/nodejs/undici/issues/3616', () => { + const cases = [ + 'x-gzip', + 'gzip', + 'deflate', + 'br' + ] + + for (const encoding of cases) { + test(encoding, async t => { + t = tspl(t, { plan: 2 }) + const server = createServer((req, res) => { + res.writeHead(200, { + 'Content-Length': '0', + Connection: 'close', + 'Content-Encoding': encoding + }) + res.end() + }) + + after(() => { + server.close() + }) + + server.listen(0) + + await once(server, 'listening') + const result = await fetch(`http://localhost:${server.address().port}/`) + + t.ok(result.body.getReader()) + + process.on('uncaughtException', (reason) => { + t.fail('Uncaught Exception:', reason, encoding) + }) + + await new Promise(resolve => setTimeout(resolve, 100)) + t.ok(true) + }) + } +}) From 24b940329af4ad7b72fad89824a3d0cee924d23f Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 9 Oct 2024 09:48:28 +0200 Subject: [PATCH 31/57] Bumped v6.20.0 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 516e3bec9b3..80b5b95e3f1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.19.8", + "version": "6.20.0", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 5be8ebfbda1a3076bd78ebe1eadb45a2c952bb05 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 14:56:49 +0200 Subject: [PATCH 32/57] jsdoc: add jsdoc to lib/web/fetch/constants.js (#3597) (#3710) (cherry picked from commit 3d0ce67ff75ec061497d6092e5820b6721f6de66) Co-authored-by: Aras Abbasi --- lib/web/fetch/constants.js | 61 ++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/lib/web/fetch/constants.js b/lib/web/fetch/constants.js index dad8d0b5776..1f285e06283 100644 --- a/lib/web/fetch/constants.js +++ b/lib/web/fetch/constants.js @@ -1,27 +1,30 @@ 'use strict' -const corsSafeListedMethods = ['GET', 'HEAD', 'POST'] +const corsSafeListedMethods = /** @type {const} */ (['GET', 'HEAD', 'POST']) const corsSafeListedMethodsSet = new Set(corsSafeListedMethods) -const nullBodyStatus = [101, 204, 205, 304] +const nullBodyStatus = /** @type {const} */ ([101, 204, 205, 304]) -const redirectStatus = [301, 302, 303, 307, 308] +const redirectStatus = /** @type {const} */ ([301, 302, 303, 307, 308]) const redirectStatusSet = new Set(redirectStatus) -// https://fetch.spec.whatwg.org/#block-bad-port -const badPorts = [ +/** + * @see https://fetch.spec.whatwg.org/#block-bad-port + */ +const badPorts = /** @type {const} */ ([ '1', '7', '9', '11', '13', '15', '17', '19', '20', '21', '22', '23', '25', '37', '42', '43', '53', '69', '77', '79', '87', '95', '101', '102', '103', '104', '109', '110', '111', '113', '115', '117', '119', '123', '135', '137', '139', '143', '161', '179', '389', '427', '465', '512', '513', '514', '515', '526', '530', '531', '532', '540', '548', '554', '556', '563', '587', '601', '636', '989', '990', '993', '995', '1719', '1720', '1723', '2049', '3659', '4045', '4190', '5060', '5061', '6000', '6566', '6665', '6666', '6667', '6668', '6669', '6679', '6697', '10080' -] - +]) const badPortsSet = new Set(badPorts) -// https://w3c.github.io/webappsec-referrer-policy/#referrer-policies -const referrerPolicy = [ +/** + * @see https://w3c.github.io/webappsec-referrer-policy/#referrer-policies + */ +const referrerPolicy = /** @type {const} */ ([ '', 'no-referrer', 'no-referrer-when-downgrade', @@ -31,29 +34,31 @@ const referrerPolicy = [ 'origin-when-cross-origin', 'strict-origin-when-cross-origin', 'unsafe-url' -] +]) const referrerPolicySet = new Set(referrerPolicy) -const requestRedirect = ['follow', 'manual', 'error'] +const requestRedirect = /** @type {const} */ (['follow', 'manual', 'error']) -const safeMethods = ['GET', 'HEAD', 'OPTIONS', 'TRACE'] +const safeMethods = /** @type {const} */ (['GET', 'HEAD', 'OPTIONS', 'TRACE']) const safeMethodsSet = new Set(safeMethods) -const requestMode = ['navigate', 'same-origin', 'no-cors', 'cors'] +const requestMode = /** @type {const} */ (['navigate', 'same-origin', 'no-cors', 'cors']) -const requestCredentials = ['omit', 'same-origin', 'include'] +const requestCredentials = /** @type {const} */ (['omit', 'same-origin', 'include']) -const requestCache = [ +const requestCache = /** @type {const} */ ([ 'default', 'no-store', 'reload', 'no-cache', 'force-cache', 
'only-if-cached' -] +]) -// https://fetch.spec.whatwg.org/#request-body-header-name -const requestBodyHeader = [ +/** + * @see https://fetch.spec.whatwg.org/#request-body-header-name + */ +const requestBodyHeader = /** @type {const} */ ([ 'content-encoding', 'content-language', 'content-location', @@ -63,18 +68,22 @@ const requestBodyHeader = [ // removed in the Headers implementation. However, undici doesn't // filter out headers, so we add it here. 'content-length' -] +]) -// https://fetch.spec.whatwg.org/#enumdef-requestduplex -const requestDuplex = [ +/** + * @see https://fetch.spec.whatwg.org/#enumdef-requestduplex + */ +const requestDuplex = /** @type {const} */ ([ 'half' -] +]) -// http://fetch.spec.whatwg.org/#forbidden-method -const forbiddenMethods = ['CONNECT', 'TRACE', 'TRACK'] +/** + * @see http://fetch.spec.whatwg.org/#forbidden-method + */ +const forbiddenMethods = /** @type {const} */ (['CONNECT', 'TRACE', 'TRACK']) const forbiddenMethodsSet = new Set(forbiddenMethods) -const subresource = [ +const subresource = /** @type {const} */ ([ 'audio', 'audioworklet', 'font', @@ -87,7 +96,7 @@ const subresource = [ 'video', 'xslt', '' -] +]) const subresourceSet = new Set(subresource) module.exports = { From 4869e5edcfafe0f926c807a6080343e2b97ce2f4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 22:15:57 +0900 Subject: [PATCH 33/57] feat: implement `BodyReadable.bytes` (#3391) (#3711) (cherry picked from commit db8e6422f3f3d4ff2dfd4742a0e39974618bdd8b) Co-authored-by: tsctx <91457664+tsctx@users.noreply.github.com> --- README.md | 1 + docs/docs/api/Dispatcher.md | 12 +++++----- docs/docs/api/Fetch.md | 1 + lib/api/readable.js | 42 +++++++++++++++++++++++++++-------- test/client-request.js | 26 ++++++++++++++++++++++ test/readable.js | 21 ++++++++++++++++++ test/types/readable.test-d.ts | 3 +++ types/dispatcher.d.ts | 1 + types/readable.d.ts | 5 +++++ 9 files changed, 98 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 4336ef06836..2ac58b6695e 100644 --- a/README.md +++ b/README.md @@ -84,6 +84,7 @@ The `body` mixins are the most common way to format the request/response body. M - [`.arrayBuffer()`](https://fetch.spec.whatwg.org/#dom-body-arraybuffer) - [`.blob()`](https://fetch.spec.whatwg.org/#dom-body-blob) +- [`.bytes()`](https://fetch.spec.whatwg.org/#dom-body-bytes) - [`.json()`](https://fetch.spec.whatwg.org/#dom-body-json) - [`.text()`](https://fetch.spec.whatwg.org/#dom-body-text) diff --git a/docs/docs/api/Dispatcher.md b/docs/docs/api/Dispatcher.md index 574030bf686..67819ecd525 100644 --- a/docs/docs/api/Dispatcher.md +++ b/docs/docs/api/Dispatcher.md @@ -488,11 +488,13 @@ The `RequestOptions.method` property should not be value `'CONNECT'`. `body` contains the following additional [body mixin](https://fetch.spec.whatwg.org/#body-mixin) methods and properties: -- `text()` -- `json()` -- `arrayBuffer()` -- `body` -- `bodyUsed` +* [`.arrayBuffer()`](https://fetch.spec.whatwg.org/#dom-body-arraybuffer) +* [`.blob()`](https://fetch.spec.whatwg.org/#dom-body-blob) +* [`.bytes()`](https://fetch.spec.whatwg.org/#dom-body-bytes) +* [`.json()`](https://fetch.spec.whatwg.org/#dom-body-json) +* [`.text()`](https://fetch.spec.whatwg.org/#dom-body-text) +* `body` +* `bodyUsed` `body` can not be consumed twice. For example, calling `text()` after `json()` throws `TypeError`. 
diff --git a/docs/docs/api/Fetch.md b/docs/docs/api/Fetch.md index c3406f128dc..00c349847dc 100644 --- a/docs/docs/api/Fetch.md +++ b/docs/docs/api/Fetch.md @@ -28,6 +28,7 @@ This API is implemented as per the standard, you can find documentation on [MDN] - [`.arrayBuffer()`](https://fetch.spec.whatwg.org/#dom-body-arraybuffer) - [`.blob()`](https://fetch.spec.whatwg.org/#dom-body-blob) +- [`.bytes()`](https://fetch.spec.whatwg.org/#dom-body-bytes) - [`.formData()`](https://fetch.spec.whatwg.org/#dom-body-formdata) - [`.json()`](https://fetch.spec.whatwg.org/#dom-body-json) - [`.text()`](https://fetch.spec.whatwg.org/#dom-body-text) diff --git a/lib/api/readable.js b/lib/api/readable.js index a65a7fcb557..47fbf3e0ef1 100644 --- a/lib/api/readable.js +++ b/lib/api/readable.js @@ -121,6 +121,11 @@ class BodyReadable extends Readable { return consume(this, 'blob') } + // https://fetch.spec.whatwg.org/#dom-body-bytes + async bytes () { + return consume(this, 'bytes') + } + // https://fetch.spec.whatwg.org/#dom-body-arraybuffer async arrayBuffer () { return consume(this, 'arrayBuffer') @@ -306,6 +311,31 @@ function chunksDecode (chunks, length) { return buffer.utf8Slice(start, bufferLength) } +/** + * @param {Buffer[]} chunks + * @param {number} length + * @returns {Uint8Array} + */ +function chunksConcat (chunks, length) { + if (chunks.length === 0 || length === 0) { + return new Uint8Array(0) + } + if (chunks.length === 1) { + // fast-path + return new Uint8Array(chunks[0]) + } + const buffer = new Uint8Array(Buffer.allocUnsafeSlow(length).buffer) + + let offset = 0 + for (let i = 0; i < chunks.length; ++i) { + const chunk = chunks[i] + buffer.set(chunk, offset) + offset += chunk.length + } + + return buffer +} + function consumeEnd (consume) { const { type, body, resolve, stream, length } = consume @@ -315,17 +345,11 @@ function consumeEnd (consume) { } else if (type === 'json') { resolve(JSON.parse(chunksDecode(body, length))) } else if (type === 'arrayBuffer') { - const dst = new Uint8Array(length) - - let pos = 0 - for (const buf of body) { - dst.set(buf, pos) - pos += buf.byteLength - } - - resolve(dst.buffer) + resolve(chunksConcat(body, length).buffer) } else if (type === 'blob') { resolve(new Blob(body, { type: stream[kContentType] })) + } else if (type === 'bytes') { + resolve(chunksConcat(body, length)) } consumeFinish(consume) diff --git a/test/client-request.js b/test/client-request.js index 8cbad5ccb48..c67cecdb7f3 100644 --- a/test/client-request.js +++ b/test/client-request.js @@ -655,6 +655,32 @@ test('request arrayBuffer', async (t) => { await t.completed }) +test('request bytes', async (t) => { + t = tspl(t, { plan: 2 }) + + const obj = { asd: true } + const server = createServer((req, res) => { + res.end(JSON.stringify(obj)) + }) + after(() => server.close()) + + server.listen(0, async () => { + const client = new Client(`http://localhost:${server.address().port}`) + after(() => client.destroy()) + + const { body } = await client.request({ + path: '/', + method: 'GET' + }) + const bytes = await body.bytes() + + t.deepStrictEqual(new TextEncoder().encode(JSON.stringify(obj)), bytes) + t.ok(bytes instanceof Uint8Array) + }) + + await t.completed +}) + test('request body', async (t) => { t = tspl(t, { plan: 1 }) diff --git a/test/readable.js b/test/readable.js index dd0631daf8b..e6a6ed0dccd 100644 --- a/test/readable.js +++ b/test/readable.js @@ -83,6 +83,27 @@ describe('Readable', () => { t.deepStrictEqual(arrayBuffer, expected) }) + test('.bytes()', async function (t) { + t 
= tspl(t, { plan: 1 }) + + function resume () { + } + function abort () { + } + const r = new Readable({ resume, abort }) + + r.push(Buffer.from('hello')) + r.push(Buffer.from(' world')) + + process.nextTick(() => { + r.push(null) + }) + + const bytes = await r.bytes() + + t.deepStrictEqual(bytes, new TextEncoder().encode('hello world')) + }) + test('.json()', async function (t) { t = tspl(t, { plan: 1 }) diff --git a/test/types/readable.test-d.ts b/test/types/readable.test-d.ts index d004b706569..b5d32f6c221 100644 --- a/test/types/readable.test-d.ts +++ b/test/types/readable.test-d.ts @@ -20,6 +20,9 @@ expectAssignable(new BodyReadable()) // blob expectAssignable>(readable.blob()) + // bytes + expectAssignable>(readable.bytes()) + // arrayBuffer expectAssignable>(readable.arrayBuffer()) diff --git a/types/dispatcher.d.ts b/types/dispatcher.d.ts index 0aa2aba00e3..1b4c9c74a5d 100644 --- a/types/dispatcher.d.ts +++ b/types/dispatcher.d.ts @@ -244,6 +244,7 @@ declare namespace Dispatcher { readonly bodyUsed: boolean; arrayBuffer(): Promise; blob(): Promise; + bytes(): Promise; formData(): Promise; json(): Promise; text(): Promise; diff --git a/types/readable.d.ts b/types/readable.d.ts index a5fce8a20d3..c4f052af05e 100644 --- a/types/readable.d.ts +++ b/types/readable.d.ts @@ -25,6 +25,11 @@ declare class BodyReadable extends Readable { */ blob(): Promise + /** Consumes and returns the body as an Uint8Array + * https://fetch.spec.whatwg.org/#dom-body-bytes + */ + bytes(): Promise + /** Consumes and returns the body as an ArrayBuffer * https://fetch.spec.whatwg.org/#dom-body-arraybuffer */ From 39c59746c7b6e83b31a54eb4f715b86a475c903b Mon Sep 17 00:00:00 2001 From: Robert Nagy Date: Fri, 11 Oct 2024 07:16:07 +0200 Subject: [PATCH 34/57] fix: add more expectsPayload methods (#3715) --- lib/dispatcher/client-h1.js | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/dispatcher/client-h1.js b/lib/dispatcher/client-h1.js index 40628e53d4c..2b8fa05da29 100644 --- a/lib/dispatcher/client-h1.js +++ b/lib/dispatcher/client-h1.js @@ -860,7 +860,10 @@ function writeH1 (client, request) { const expectsPayload = ( method === 'PUT' || method === 'POST' || - method === 'PATCH' + method === 'PATCH' || + method === 'QUERY' || + method === 'PROPFIND' || + method === 'PROPPATCH' ) if (util.isFormDataLike(body)) { @@ -1139,7 +1142,7 @@ function writeBuffer (abort, body, client, request, socket, contentLength, heade socket.uncork() request.onBodySent(body) - if (!expectsPayload) { + if (!expectsPayload && request.reset !== false) { socket[kReset] = true } } @@ -1169,7 +1172,7 @@ async function writeBlob (abort, body, client, request, socket, contentLength, h request.onBodySent(buffer) request.onRequestSent() - if (!expectsPayload) { + if (!expectsPayload && request.reset !== false) { socket[kReset] = true } @@ -1270,7 +1273,7 @@ class AsyncWriter { socket.cork() if (bytesWritten === 0) { - if (!expectsPayload) { + if (!expectsPayload && request.reset !== false) { socket[kReset] = true } From a699105aab5afcf1f31c8c282bce0a9b0424b403 Mon Sep 17 00:00:00 2001 From: Aras Abbasi Date: Sat, 12 Oct 2024 12:14:28 +0200 Subject: [PATCH 35/57] chore(H2): onboard H2 into Undici queueing system (#3707) (#3724) (cherry picked from commit d6c44f3b49949dbdd66d9c56e729d503e6b09ddc) Co-authored-by: Carlos Fuentes --- lib/dispatcher/client-h2.js | 75 +++++-- package.json | 3 + test/http2.js | 322 +++++++++++++++--------------- test/node-test/client-dispatch.js | 5 +- 4 files changed, 224 
insertions(+), 181 deletions(-) diff --git a/lib/dispatcher/client-h2.js b/lib/dispatcher/client-h2.js index 6c5155717d1..0448fa00736 100644 --- a/lib/dispatcher/client-h2.js +++ b/lib/dispatcher/client-h2.js @@ -24,7 +24,9 @@ const { kOnError, kMaxConcurrentStreams, kHTTP2Session, - kResume + kResume, + kSize, + kHTTPContext } = require('../core/symbols.js') const kOpenStreams = Symbol('open streams') @@ -160,11 +162,10 @@ async function connectH2 (client, socket) { version: 'h2', defaultPipelining: Infinity, write (...args) { - // TODO (fix): return - writeH2(client, ...args) + return writeH2(client, ...args) }, resume () { - + resumeH2(client) }, destroy (err, callback) { if (closed) { @@ -183,6 +184,20 @@ async function connectH2 (client, socket) { } } +function resumeH2 (client) { + const socket = client[kSocket] + + if (socket?.destroyed === false) { + if (client[kSize] === 0 && client[kMaxConcurrentStreams] === 0) { + socket.unref() + client[kHTTP2Session].unref() + } else { + socket.ref() + client[kHTTP2Session].ref() + } + } +} + function onHttp2SessionError (err) { assert(err.code !== 'ERR_TLS_CERT_ALTNAME_INVALID') @@ -210,17 +225,32 @@ function onHttp2SessionEnd () { * along with the socket right away */ function onHTTP2GoAway (code) { - const err = new RequestAbortedError(`HTTP/2: "GOAWAY" frame received with code ${code}`) + // We cannot recover, so best to close the session and the socket + const err = this[kError] || new SocketError(`HTTP/2: "GOAWAY" frame received with code ${code}`, util.getSocketInfo(this)) + const client = this[kClient] - // We need to trigger the close cycle right away - // We need to destroy the session and the socket - // Requests should be failed with the error after the current one is handled - this[kSocket][kError] = err - this[kClient][kOnError](err) + client[kSocket] = null + client[kHTTPContext] = null - this.unref() + if (this[kHTTP2Session] != null) { + this[kHTTP2Session].destroy(err) + this[kHTTP2Session] = null + } util.destroy(this[kSocket], err) + + // Fail head of pipeline. 
+ const request = client[kQueue][client[kRunningIdx]] + client[kQueue][client[kRunningIdx]++] = null + util.errorRequest(client, request, err) + + client[kPendingIdx] = client[kRunningIdx] + + assert(client[kRunning] === 0) + + client.emit('disconnect', client[kUrl], [client], err) + + client[kResume]() } // https://www.rfc-editor.org/rfc/rfc7230#section-3.3.2 @@ -237,10 +267,6 @@ function writeH2 (client, request) { return false } - if (request.aborted) { - return false - } - const headers = {} for (let n = 0; n < reqHeaders.length; n += 2) { const key = reqHeaders[n + 0] @@ -283,6 +309,8 @@ function writeH2 (client, request) { // We do not destroy the socket as we can continue using the session // the stream get's destroyed and the session remains to create new streams util.destroy(body, err) + client[kQueue][client[kRunningIdx]++] = null + client[kResume]() } try { @@ -293,6 +321,10 @@ function writeH2 (client, request) { util.errorRequest(client, request, err) } + if (request.aborted) { + return false + } + if (method === 'CONNECT') { session.ref() // We are already connected, streams are pending, first request @@ -304,10 +336,12 @@ function writeH2 (client, request) { if (stream.id && !stream.pending) { request.onUpgrade(null, null, stream) ++session[kOpenStreams] + client[kQueue][client[kRunningIdx]++] = null } else { stream.once('ready', () => { request.onUpgrade(null, null, stream) ++session[kOpenStreams] + client[kQueue][client[kRunningIdx]++] = null }) } @@ -428,17 +462,20 @@ function writeH2 (client, request) { // Present specially when using pipeline or stream if (stream.state?.state == null || stream.state.state < 6) { request.onComplete([]) - return } - // Stream is closed or half-closed-remote (6), decrement counter and cleanup - // It does not have sense to continue working with the stream as we do not - // have yet RST_STREAM support on client-side if (session[kOpenStreams] === 0) { + // Stream is closed or half-closed-remote (6), decrement counter and cleanup + // It does not have sense to continue working with the stream as we do not + // have yet RST_STREAM support on client-side + session.unref() } abort(new InformationalError('HTTP/2: stream half-closed (remote)')) + client[kQueue][client[kRunningIdx]++] = null + client[kPendingIdx] = client[kRunningIdx] + client[kResume]() }) stream.once('close', () => { diff --git a/package.json b/package.json index 80b5b95e3f1..7170c4e8320 100644 --- a/package.json +++ b/package.json @@ -78,6 +78,9 @@ "test:fuzzing": "node test/fuzzing/fuzzing.test.js", "test:fetch": "npm run build:node && npm run test:fetch:nobuild", "test:fetch:nobuild": "borp --timeout 180000 --expose-gc --concurrency 1 -p \"test/fetch/*.js\" && npm run test:webidl && npm run test:busboy", + "test:h2": "npm run test:h2:core && npm run test:h2:fetch", + "test:h2:core": "borp -p \"test/http2*.js\"", + "test:h2:fetch": "npm run build:node && borp -p \"test/fetch/http2*.js\"", "test:interceptors": "borp -p \"test/interceptors/*.js\"", "test:jest": "cross-env NODE_V8_COVERAGE= jest", "test:unit": "borp --expose-gc -p \"test/*.js\"", diff --git a/test/http2.js b/test/http2.js index a43700574b8..d6840a1bd15 100644 --- a/test/http2.js +++ b/test/http2.js @@ -217,66 +217,6 @@ test('Should support H2 connection(POST Buffer)', async t => { t.strictEqual(Buffer.concat(body).toString('utf8'), 'hello h2!') }) -test('Should support H2 GOAWAY (server-side)', async t => { - const body = [] - const server = createSecureServer(pem) - - server.on('stream', (stream, headers) => { - 
t.strictEqual(headers['x-my-header'], 'foo') - t.strictEqual(headers[':method'], 'GET') - stream.respond({ - 'content-type': 'text/plain; charset=utf-8', - 'x-custom-h2': 'hello', - ':status': 200 - }) - stream.end('hello h2!') - }) - - server.on('session', session => { - setTimeout(() => { - session.goaway(0) - }, 1000) - }) - - server.listen(0) - await once(server, 'listening') - - const client = new Client(`https://localhost:${server.address().port}`, { - connect: { - rejectUnauthorized: false - }, - allowH2: true - }) - - t = tspl(t, { plan: 9 }) - after(() => server.close()) - after(() => client.close()) - - const response = await client.request({ - path: '/', - method: 'GET', - headers: { - 'x-my-header': 'foo' - } - }) - - response.body.on('data', chunk => { - body.push(chunk) - }) - - await once(response.body, 'end') - t.strictEqual(response.statusCode, 200) - t.strictEqual(response.headers['content-type'], 'text/plain; charset=utf-8') - t.strictEqual(response.headers['x-custom-h2'], 'hello') - t.strictEqual(Buffer.concat(body).toString('utf8'), 'hello h2!') - - const [url, disconnectClient, err] = await once(client, 'disconnect') - - t.ok(url instanceof URL) - t.deepStrictEqual(disconnectClient, [client]) - t.strictEqual(err.message, 'HTTP/2: "GOAWAY" frame received with code 0') -}) - test('Should throw if bad allowH2 has been passed', async t => { t = tspl(t, { plan: 1 }) @@ -852,7 +792,10 @@ test('Should handle h2 request with body (string or buffer) - dispatch', async t onHeaders (statusCode, headers) { t.strictEqual(statusCode, 200) t.strictEqual(headers[0].toString('utf-8'), 'content-type') - t.strictEqual(headers[1].toString('utf-8'), 'text/plain; charset=utf-8') + t.strictEqual( + headers[1].toString('utf-8'), + 'text/plain; charset=utf-8' + ) t.strictEqual(headers[2].toString('utf-8'), 'x-custom-h2') t.strictEqual(headers[3].toString('utf-8'), 'foo') }, @@ -1183,56 +1126,53 @@ test('Agent should support H2 connection', async t => { t.strictEqual(Buffer.concat(body).toString('utf8'), 'hello h2!') }) -test( - 'Should provide pseudo-headers in proper order', - async t => { - t = tspl(t, { plan: 2 }) +test('Should provide pseudo-headers in proper order', async t => { + t = tspl(t, { plan: 2 }) - const server = createSecureServer(pem) - server.on('stream', (stream, _headers, _flags, rawHeaders) => { - t.deepStrictEqual(rawHeaders, [ - ':authority', - `localhost:${server.address().port}`, - ':method', - 'GET', - ':path', - '/', - ':scheme', - 'https' - ]) + const server = createSecureServer(pem) + server.on('stream', (stream, _headers, _flags, rawHeaders) => { + t.deepStrictEqual(rawHeaders, [ + ':authority', + `localhost:${server.address().port}`, + ':method', + 'GET', + ':path', + '/', + ':scheme', + 'https' + ]) - stream.respond({ - 'content-type': 'text/plain; charset=utf-8', - ':status': 200 - }) - stream.end() + stream.respond({ + 'content-type': 'text/plain; charset=utf-8', + ':status': 200 }) + stream.end() + }) - server.listen(0) - await once(server, 'listening') + server.listen(0) + await once(server, 'listening') - const client = new Client(`https://localhost:${server.address().port}`, { - connect: { - rejectUnauthorized: false - }, - allowH2: true - }) + const client = new Client(`https://localhost:${server.address().port}`, { + connect: { + rejectUnauthorized: false + }, + allowH2: true + }) - after(() => server.close()) - after(() => client.close()) + after(() => server.close()) + after(() => client.close()) - const response = await client.request({ - path: '/', - 
method: 'GET' - }) + const response = await client.request({ + path: '/', + method: 'GET' + }) - t.strictEqual(response.statusCode, 200) + t.strictEqual(response.statusCode, 200) - await response.body.dump() + await response.body.dump() - await t.complete - } -) + await t.complete +}) test('The h2 pseudo-headers is not included in the headers', async t => { const server = createSecureServer(pem) @@ -1287,16 +1227,20 @@ test('Should throw informational error on half-closed streams (remote)', async t }) t = tspl(t, { plan: 2 }) - after(() => server.close()) - after(() => client.close()) - - await client.request({ - path: '/', - method: 'GET' - }).catch(err => { - t.strictEqual(err.message, 'HTTP/2: stream half-closed (remote)') - t.strictEqual(err.code, 'UND_ERR_INFO') + after(async () => { + server.close() + await client.close() }) + + await client + .request({ + path: '/', + method: 'GET' + }) + .catch(err => { + t.strictEqual(err.message, 'HTTP/2: stream half-closed (remote)') + t.strictEqual(err.code, 'UND_ERR_INFO') + }) }) test('#2364 - Concurrent aborts', async t => { @@ -1325,62 +1269,76 @@ test('#2364 - Concurrent aborts', async t => { allowH2: true }) - t = tspl(t, { plan: 18 }) + t = tspl(t, { plan: 14 }) after(() => server.close()) after(() => client.close()) - const controller = new AbortController() + const signal = AbortSignal.timeout(50) - client.request({ - path: '/1', - method: 'GET', - headers: { - 'x-my-header': 'foo' + client.request( + { + path: '/1', + method: 'GET', + headers: { + 'x-my-header': 'foo' + } + }, + (err, response) => { + t.ifError(err) + t.strictEqual( + response.headers['content-type'], + 'text/plain; charset=utf-8' + ) + t.strictEqual(response.headers['x-custom-h2'], 'hello') + t.strictEqual(response.statusCode, 200) } - }, (err, response) => { - t.ifError(err) - t.strictEqual(response.headers['content-type'], 'text/plain; charset=utf-8') - t.strictEqual(response.headers['x-custom-h2'], 'hello') - t.strictEqual(response.statusCode, 200) - response.body.dump() - }) + ) - client.request({ - path: '/2', - method: 'GET', - headers: { - 'x-my-header': 'foo' + client.request( + { + path: '/2', + method: 'GET', + headers: { + 'x-my-header': 'foo' + }, + signal }, - signal: controller.signal - }, (err, response) => { - t.strictEqual(err.name, 'AbortError') - }) - - client.request({ - path: '/3', - method: 'GET', - headers: { - 'x-my-header': 'foo' + (err, response) => { + t.strictEqual(err.name, 'TimeoutError') } - }, (err, response) => { - t.ifError(err) - t.strictEqual(response.headers['content-type'], 'text/plain; charset=utf-8') - t.strictEqual(response.headers['x-custom-h2'], 'hello') - t.strictEqual(response.statusCode, 200) - response.body.dump() - }) + ) - client.request({ - path: '/4', - method: 'GET', - headers: { - 'x-my-header': 'foo' + client.request( + { + path: '/3', + method: 'GET', + headers: { + 'x-my-header': 'foo' + } }, - signal: controller.signal - }, (err, response) => { - t.strictEqual(err.name, 'AbortError') - }) + (err, response) => { + t.ifError(err) + t.strictEqual( + response.headers['content-type'], + 'text/plain; charset=utf-8' + ) + t.strictEqual(response.headers['x-custom-h2'], 'hello') + t.strictEqual(response.statusCode, 200) + } + ) - controller.abort() + client.request( + { + path: '/4', + method: 'GET', + headers: { + 'x-my-header': 'foo' + }, + signal + }, + (err, response) => { + t.strictEqual(err.name, 'TimeoutError') + } + ) await t.completed }) @@ -1418,8 +1376,8 @@ test('#3046 - GOAWAY Frame', async t => { }) t = 
tspl(t, { plan: 7 }) - after(() => server.close()) after(() => client.close()) + after(() => server.close()) client.on('disconnect', (url, disconnectClient, err) => { t.ok(url instanceof URL) @@ -1439,10 +1397,56 @@ test('#3046 - GOAWAY Frame', async t => { t.strictEqual(response.headers['x-custom-h2'], 'hello') t.strictEqual(response.statusCode, 200) - t.rejects(response.body.text.bind(response.body), { + t.rejects(response.body.text(), { message: 'HTTP/2: "GOAWAY" frame received with code 0', - code: 'UND_ERR_ABORTED' + code: 'UND_ERR_SOCKET' + }) + + await t.completed +}) + +test('#3671 - Graceful close', async (t) => { + const server = createSecureServer(pem) + + server.on('stream', (stream, headers) => { + setTimeout(() => { + stream.respond({ + 'content-type': 'text/plain; charset=utf-8', + 'x-custom-h2': 'hello', + ':status': 200 + }) + stream.end('Hello World') + }, 200) }) + server.listen(0) + await once(server, 'listening') + + const client = new Client(`https://localhost:${server.address().port}`, { + connect: { + rejectUnauthorized: false + }, + allowH2: true + }) + + t = tspl(t, { plan: 5 }) + after(() => server.close()) + + client.request({ + path: '/', + method: 'GET', + headers: { + 'x-my-header': 'foo' + } + }, async (err, response) => { + t.ifError(err) + t.strictEqual(response.headers['content-type'], 'text/plain; charset=utf-8') + t.strictEqual(response.headers['x-custom-h2'], 'hello') + t.strictEqual(response.statusCode, 200) + t.equal(await response.body.text(), 'Hello World') + }) + + await client.close() + await t.completed }) diff --git a/test/node-test/client-dispatch.js b/test/node-test/client-dispatch.js index f9ed888d44b..296e3b8d075 100644 --- a/test/node-test/client-dispatch.js +++ b/test/node-test/client-dispatch.js @@ -1051,7 +1051,7 @@ test('Issue#3065 - fix bad destroy handling', async (t) => { test('Issue#3065 - fix bad destroy handling (h2)', async (t) => { // Due to we handle the session, the request for h2 will fail on servername change - const p = tspl(t, { plan: 5 }) + const p = tspl(t, { plan: 4 }) const server = createSecureServer(pem) server.on('stream', (stream) => { stream.respond({ @@ -1105,8 +1105,7 @@ test('Issue#3065 - fix bad destroy handling (h2)', async (t) => { p.deepStrictEqual(dispatches, ['onConnect', 'onBodySent', 'onResponseStarted', 'onHeaders1', 'onData', 'onComplete']) }, onError (err) { - p.strictEqual(err.code, 'UND_ERR_INFO') - p.strictEqual(err.message, 'servername changed') + p.ifError(err) } }) From fd32a55ccefa59f6795a798f869a29f8fa1bb0c6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 12 Oct 2024 12:15:29 +0200 Subject: [PATCH 36/57] fix: PoolBase kClose and kDestroy should await and not return the Promise (#3716) (#3723) (cherry picked from commit cda5f94f87c0462f3be11fa9d5dad88550881c6c) Co-authored-by: Aras Abbasi --- lib/dispatcher/pool-base.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/dispatcher/pool-base.js b/lib/dispatcher/pool-base.js index ff3108a4da2..d0ba2c3c53a 100644 --- a/lib/dispatcher/pool-base.js +++ b/lib/dispatcher/pool-base.js @@ -113,9 +113,9 @@ class PoolBase extends DispatcherBase { async [kClose] () { if (this[kQueue].isEmpty()) { - return Promise.all(this[kClients].map(c => c.close())) + await Promise.all(this[kClients].map(c => c.close())) } else { - return new Promise((resolve) => { + await new Promise((resolve) => { this[kClosedResolve] = resolve }) } @@ -130,7 +130,7 @@ class PoolBase 
extends DispatcherBase { item.handler.onError(err) } - return Promise.all(this[kClients].map(c => c.destroy(err))) + await Promise.all(this[kClients].map(c => c.destroy(err))) } [kDispatch] (opts, handler) { From 541591142c67b17c54fa694b0d162a1da1246dec Mon Sep 17 00:00:00 2001 From: Aras Abbasi Date: Sun, 13 Oct 2024 11:40:58 +0200 Subject: [PATCH 37/57] fix: extract noop everywhere (#3559) (#3727) --- lib/dispatcher/client.js | 6 ++++-- lib/dispatcher/proxy-agent.js | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/dispatcher/client.js b/lib/dispatcher/client.js index 7e22aa000ba..3dc356618ba 100644 --- a/lib/dispatcher/client.js +++ b/lib/dispatcher/client.js @@ -63,6 +63,8 @@ let deprecatedInterceptorWarned = false const kClosedResolve = Symbol('kClosedResolve') +const noop = () => {} + function getPipelining (client) { return client[kPipelining] ?? client[kHTTPContext]?.defaultPipelining ?? 1 } @@ -442,7 +444,7 @@ async function connect (client) { }) if (client.destroyed) { - util.destroy(socket.on('error', () => {}), new ClientDestroyedError()) + util.destroy(socket.on('error', noop), new ClientDestroyedError()) return } @@ -453,7 +455,7 @@ async function connect (client) { ? await connectH2(client, socket) : await connectH1(client, socket) } catch (err) { - socket.destroy().on('error', () => {}) + socket.destroy().on('error', noop) throw err } diff --git a/lib/dispatcher/proxy-agent.js b/lib/dispatcher/proxy-agent.js index 226b67846da..c439d7cd555 100644 --- a/lib/dispatcher/proxy-agent.js +++ b/lib/dispatcher/proxy-agent.js @@ -23,6 +23,8 @@ function defaultFactory (origin, opts) { return new Pool(origin, opts) } +const noop = () => {} + class ProxyAgent extends DispatcherBase { constructor (opts) { super() @@ -81,7 +83,7 @@ class ProxyAgent extends DispatcherBase { servername: this[kProxyTls]?.servername || proxyHostname }) if (statusCode !== 200) { - socket.on('error', () => {}).destroy() + socket.on('error', noop).destroy() callback(new RequestAbortedError(`Proxy response (${statusCode}) !== 200 when HTTP Tunneling`)) } if (opts.protocol !== 'https:') { From 5344aa5a234bc6d1aa9ab99e24700cf8e1a031a0 Mon Sep 17 00:00:00 2001 From: Robert Nagy Date: Mon, 14 Oct 2024 14:16:33 +0200 Subject: [PATCH 38/57] 6.20.1 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 7170c4e8320..97535702e65 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.20.0", + "version": "6.20.1", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From e2e3fd294f2d5b24446726b61b9996f9c1ffe1e8 Mon Sep 17 00:00:00 2001 From: Jason Zhang Date: Fri, 18 Oct 2024 02:16:28 +1030 Subject: [PATCH 39/57] web: mark as uncloneable when possible (#3709) (#3744) * web: mark as uncloneable when possible This tells node that the marked instances from undici are not cloneable, so that attempts to cloning those throw `DataCloneError`. 
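A minimal illustrative sketch of the behaviour described above, not taken from this patch: it assumes undici with this change installed and a Node.js build that exposes `worker_threads.markAsUncloneable` (on older versions `structuredClone` may produce a broken plain-object copy instead of throwing). `Response` stands in for any of the marked classes; the patch's own test covers the full list.

```js
const { Response } = require('undici')

try {
  // Marked instances are rejected by the structured clone algorithm.
  structuredClone(new Response('hello'))
} catch (err) {
  console.log(err.name) // 'DataCloneError' (a DOMException)
}
```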
* test: add test cases for platform objects uncloneable * fix: move to webidl * fix: move it under webidl * fixup: rename it to markAsUncloneable * fix: mark more web instances uncloneable * fixup --------- Co-authored-by: Khafra (cherry picked from commit 1ab238207d84196479428494d52fc922833e8e5b) --- lib/web/cache/cache.js | 1 + lib/web/cache/cachestorage.js | 2 ++ lib/web/eventsource/eventsource.js | 2 ++ lib/web/fetch/formdata.js | 2 ++ lib/web/fetch/headers.js | 2 ++ lib/web/fetch/request.js | 1 + lib/web/fetch/response.js | 1 + lib/web/fetch/webidl.js | 2 ++ lib/web/websocket/events.js | 4 ++++ lib/web/websocket/websocket.js | 2 ++ test/node-platform-objects.js | 33 ++++++++++++++++++++++++++++++ types/webidl.d.ts | 6 ++++++ 12 files changed, 58 insertions(+) create mode 100644 test/node-platform-objects.js diff --git a/lib/web/cache/cache.js b/lib/web/cache/cache.js index 45c6fec3467..1c1a5911242 100644 --- a/lib/web/cache/cache.js +++ b/lib/web/cache/cache.js @@ -37,6 +37,7 @@ class Cache { webidl.illegalConstructor() } + webidl.util.markAsUncloneable(this) this.#relevantRequestResponseList = arguments[1] } diff --git a/lib/web/cache/cachestorage.js b/lib/web/cache/cachestorage.js index cc773b94b49..55dba352e99 100644 --- a/lib/web/cache/cachestorage.js +++ b/lib/web/cache/cachestorage.js @@ -16,6 +16,8 @@ class CacheStorage { if (arguments[0] !== kConstruct) { webidl.illegalConstructor() } + + webidl.util.markAsUncloneable(this) } async match (request, options = {}) { diff --git a/lib/web/eventsource/eventsource.js b/lib/web/eventsource/eventsource.js index 51634c1779f..5a488ffce27 100644 --- a/lib/web/eventsource/eventsource.js +++ b/lib/web/eventsource/eventsource.js @@ -105,6 +105,8 @@ class EventSource extends EventTarget { // 1. Let ev be a new EventSource object. super() + webidl.util.markAsUncloneable(this) + const prefix = 'EventSource constructor' webidl.argumentLengthCheck(arguments, 1, prefix) diff --git a/lib/web/fetch/formdata.js b/lib/web/fetch/formdata.js index 94a84b03ab3..544e4125519 100644 --- a/lib/web/fetch/formdata.js +++ b/lib/web/fetch/formdata.js @@ -14,6 +14,8 @@ const File = globalThis.File ?? 
NativeFile // https://xhr.spec.whatwg.org/#formdata class FormData { constructor (form) { + webidl.util.markAsUncloneable(this) + if (form !== undefined) { throw webidl.errors.conversionFailed({ prefix: 'FormData constructor', diff --git a/lib/web/fetch/headers.js b/lib/web/fetch/headers.js index 816aceacce4..a68daf4a5d4 100644 --- a/lib/web/fetch/headers.js +++ b/lib/web/fetch/headers.js @@ -359,6 +359,8 @@ class Headers { #headersList constructor (init = undefined) { + webidl.util.markAsUncloneable(this) + if (init === kConstruct) { return } diff --git a/lib/web/fetch/request.js b/lib/web/fetch/request.js index 542ea7fb28a..ee3ce488774 100644 --- a/lib/web/fetch/request.js +++ b/lib/web/fetch/request.js @@ -82,6 +82,7 @@ let patchMethodWarning = false class Request { // https://fetch.spec.whatwg.org/#dom-request constructor (input, init = {}) { + webidl.util.markAsUncloneable(this) if (input === kConstruct) { return } diff --git a/lib/web/fetch/response.js b/lib/web/fetch/response.js index 155dbadd1ad..3b6af35fbed 100644 --- a/lib/web/fetch/response.js +++ b/lib/web/fetch/response.js @@ -110,6 +110,7 @@ class Response { // https://fetch.spec.whatwg.org/#dom-response constructor (body = null, init = {}) { + webidl.util.markAsUncloneable(this) if (body === kConstruct) { return } diff --git a/lib/web/fetch/webidl.js b/lib/web/fetch/webidl.js index 13cafae6f1b..cd5cb14454c 100644 --- a/lib/web/fetch/webidl.js +++ b/lib/web/fetch/webidl.js @@ -1,6 +1,7 @@ 'use strict' const { types, inspect } = require('node:util') +const { markAsUncloneable } = require('node:worker_threads') const { toUSVString } = require('../../core/util') /** @type {import('../../../types/webidl').Webidl} */ @@ -86,6 +87,7 @@ webidl.util.Type = function (V) { } } +webidl.util.markAsUncloneable = markAsUncloneable || (() => {}) // https://webidl.spec.whatwg.org/#abstract-opdef-converttoint webidl.util.ConvertToInt = function (V, bitLength, signedness, opts) { let upperBound diff --git a/lib/web/websocket/events.js b/lib/web/websocket/events.js index 760b7297359..f899c21d42b 100644 --- a/lib/web/websocket/events.js +++ b/lib/web/websocket/events.js @@ -14,6 +14,7 @@ class MessageEvent extends Event { constructor (type, eventInitDict = {}) { if (type === kConstruct) { super(arguments[1], arguments[2]) + webidl.util.markAsUncloneable(this) return } @@ -26,6 +27,7 @@ class MessageEvent extends Event { super(type, eventInitDict) this.#eventInit = eventInitDict + webidl.util.markAsUncloneable(this) } get data () { @@ -112,6 +114,7 @@ class CloseEvent extends Event { super(type, eventInitDict) this.#eventInit = eventInitDict + webidl.util.markAsUncloneable(this) } get wasClean () { @@ -142,6 +145,7 @@ class ErrorEvent extends Event { webidl.argumentLengthCheck(arguments, 1, prefix) super(type, eventInitDict) + webidl.util.markAsUncloneable(this) type = webidl.converters.DOMString(type, prefix, 'type') eventInitDict = webidl.converters.ErrorEventInit(eventInitDict ?? 
{}) diff --git a/lib/web/websocket/websocket.js b/lib/web/websocket/websocket.js index 109d7be2e2f..e4053024756 100644 --- a/lib/web/websocket/websocket.js +++ b/lib/web/websocket/websocket.js @@ -51,6 +51,8 @@ class WebSocket extends EventTarget { constructor (url, protocols = []) { super() + webidl.util.markAsUncloneable(this) + const prefix = 'WebSocket constructor' webidl.argumentLengthCheck(arguments, 1, prefix) diff --git a/test/node-platform-objects.js b/test/node-platform-objects.js new file mode 100644 index 00000000000..19e2b01a507 --- /dev/null +++ b/test/node-platform-objects.js @@ -0,0 +1,33 @@ +'use strict' + +const { tspl } = require('@matteo.collina/tspl') +const { test } = require('node:test') +const { markAsUncloneable } = require('node:worker_threads') +const { Response, Request, FormData, Headers, ErrorEvent, MessageEvent, CloseEvent, EventSource, WebSocket } = require('..') +const { CacheStorage } = require('../lib/web/cache/cachestorage') +const { Cache } = require('../lib/web/cache/cache') +const { kConstruct } = require('../lib/core/symbols') + +test('unserializable web instances should be uncloneable if node exposes the api', (t) => { + if (markAsUncloneable !== undefined) { + t = tspl(t, { plan: 11 }) + const uncloneables = [ + { Uncloneable: Response, brand: 'Response' }, + { Uncloneable: Request, value: 'http://localhost', brand: 'Request' }, + { Uncloneable: FormData, brand: 'FormData' }, + { Uncloneable: MessageEvent, value: 'dummy event', brand: 'MessageEvent' }, + { Uncloneable: CloseEvent, value: 'dummy event', brand: 'CloseEvent' }, + { Uncloneable: ErrorEvent, value: 'dummy event', brand: 'ErrorEvent' }, + { Uncloneable: EventSource, value: 'http://localhost', brand: 'EventSource' }, + { Uncloneable: Headers, brand: 'Headers' }, + { Uncloneable: WebSocket, value: 'http://localhost', brand: 'WebSocket' }, + { Uncloneable: Cache, value: kConstruct, brand: 'Cache' }, + { Uncloneable: CacheStorage, value: kConstruct, brand: 'CacheStorage' } + ] + uncloneables.forEach((platformEntity) => { + t.throws(() => structuredClone(new platformEntity.Uncloneable(platformEntity.value)), + DOMException, + `Cloning ${platformEntity.brand} should throw DOMException`) + }) + } +}) diff --git a/types/webidl.d.ts b/types/webidl.d.ts index 8a23a85bf01..fd83b68af90 100644 --- a/types/webidl.d.ts +++ b/types/webidl.d.ts @@ -67,6 +67,12 @@ interface WebidlUtil { * Stringifies {@param V} */ Stringify (V: any): string + + /** + * Mark a value as uncloneable for Node.js. + * This is only effective in some newer Node.js versions. 
+ */ + markAsUncloneable (V: any): void } interface WebidlConverters { From f21da440eacd9b7010e66cdfca358ec03806792c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 23 Oct 2024 22:14:06 +0900 Subject: [PATCH 40/57] fetch: fix content-encoding order (#3343) (#3764) (cherry picked from commit e5c242d2762ff63e7834c34999b7489507a0432f) Co-authored-by: tsctx <91457664+tsctx@users.noreply.github.com> --- lib/web/fetch/index.js | 2 +- test/fetch/encoding.js | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/web/fetch/index.js b/lib/web/fetch/index.js index 3c7ca211c99..b74ccf9deb8 100644 --- a/lib/web/fetch/index.js +++ b/lib/web/fetch/index.js @@ -2137,7 +2137,7 @@ async function httpNetworkFetch ( // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding if (codings.length !== 0 && request.method !== 'HEAD' && request.method !== 'CONNECT' && !nullBodyStatus.includes(status) && !willFollow) { - for (let i = 0; i < codings.length; ++i) { + for (let i = codings.length - 1; i >= 0; --i) { const coding = codings[i] // https://www.rfc-editor.org/rfc/rfc9112.html#section-7.2 if (coding === 'x-gzip' || coding === 'gzip') { diff --git a/test/fetch/encoding.js b/test/fetch/encoding.js index 3986e03e505..93c98274b08 100644 --- a/test/fetch/encoding.js +++ b/test/fetch/encoding.js @@ -19,10 +19,10 @@ test('content-encoding header is case-iNsENsITIve', async (t) => { res.setHeader('Content-Encoding', contentCodings) res.setHeader('Content-Type', 'text/plain') - brotli.pipe(gzip).pipe(res) + gzip.pipe(brotli).pipe(res) - brotli.write(text) - brotli.end() + gzip.write(text) + gzip.end() }).listen(0) t.after(closeServerAsPromise(server)) @@ -45,10 +45,10 @@ test('response decompression according to content-encoding should be handled in res.setHeader('Content-Encoding', contentCodings) res.setHeader('Content-Type', 'text/plain') - gzip.pipe(deflate).pipe(res) + deflate.pipe(gzip).pipe(res) - gzip.write(text) - gzip.end() + deflate.write(text) + deflate.end() }).listen(0) t.after(closeServerAsPromise(server)) From 98d1b1b2bdbe6137d4b4156b5fbe33af44bdb293 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:45:51 +0100 Subject: [PATCH 41/57] fix: handle undefined deref() of WeakRef(socket) (#3751) (#3822) * fix: handle undefined deref of weakref socket * exit early --------- Co-authored-by: Aras Abbasi (cherry picked from commit f98fbef4a990ec74c871ddbd3454472ee0355670) Co-authored-by: hochoy --- lib/core/connect.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/core/connect.js b/lib/core/connect.js index 8ab21fcd5fc..8cd8abccc54 100644 --- a/lib/core/connect.js +++ b/lib/core/connect.js @@ -220,6 +220,11 @@ const setupConnectTimeout = process.platform === 'win32' * @param {number} opts.port */ function onConnectTimeout (socket, opts) { + // The socket could be already garbage collected + if (socket == null) { + return + } + let message = 'Connect Timeout Error' if (Array.isArray(socket.autoSelectFamilyAttemptedAddresses)) { message += ` (attempted addresses: ${socket.autoSelectFamilyAttemptedAddresses.join(', ')},` From 11e31a4fdc92b09dc47f2cc5c30f5e975e6b6499 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 20:20:12 +0100 Subject: [PATCH 42/57] fix: range end is zero-indexed (#3826) (#3827) * fix: range end is 
zero-indexed * tests: update tests for retry handler (cherry picked from commit 2b81fbc845cb0f7e8a621674a3c7aa5c59642daa) Co-authored-by: D Trombett --- lib/handler/retry-handler.js | 6 +++--- test/interceptors/retry.js | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/handler/retry-handler.js b/lib/handler/retry-handler.js index e00e82faf08..5d1ccf00538 100644 --- a/lib/handler/retry-handler.js +++ b/lib/handler/retry-handler.js @@ -229,7 +229,7 @@ class RetryHandler { return false } - const { start, size, end = size } = contentRange + const { start, size, end = size - 1 } = contentRange assert(this.start === start, 'content-range mismatch') assert(this.end == null || this.end === end, 'content-range mismatch') @@ -252,7 +252,7 @@ class RetryHandler { ) } - const { start, size, end = size } = range + const { start, size, end = size - 1 } = range assert( start != null && Number.isFinite(start), 'content-range mismatch' @@ -266,7 +266,7 @@ class RetryHandler { // We make our best to checkpoint the body for further range headers if (this.end == null) { const contentLength = headers['content-length'] - this.end = contentLength != null ? Number(contentLength) : null + this.end = contentLength != null ? Number(contentLength) - 1 : null } assert(Number.isFinite(this.start)) diff --git a/test/interceptors/retry.js b/test/interceptors/retry.js index 6e6f997cafa..b510ddba6c0 100644 --- a/test/interceptors/retry.js +++ b/test/interceptors/retry.js @@ -253,14 +253,15 @@ test('Should handle 206 partial content', async t => { const server = createServer((req, res) => { if (x === 0) { t.ok(true, 'pass') + res.setHeader('content-length', '6') res.setHeader('etag', 'asd') res.write('abc') setTimeout(() => { res.destroy() }, 1e2) } else if (x === 1) { - t.deepStrictEqual(req.headers.range, 'bytes=3-') - res.setHeader('content-range', 'bytes 3-6/6') + t.deepStrictEqual(req.headers.range, 'bytes=3-5') + res.setHeader('content-range', 'bytes 3-5/6') res.setHeader('etag', 'asd') res.statusCode = 206 res.end('def') From 61ec3531a64ffeec953a990c11735ff09455de4e Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 13 Nov 2024 14:36:28 +0100 Subject: [PATCH 43/57] Bumped v6.21.0 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 97535702e65..021466738c9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.20.1", + "version": "6.21.0", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From 353ab63188af904a17030d96018d6193247d7d18 Mon Sep 17 00:00:00 2001 From: Geoff Goodman Date: Thu, 21 Nov 2024 02:48:48 -0500 Subject: [PATCH 44/57] fix(#3736): back-port 183f8e9 to v6.x (#3855) * fix(#3736): back-port 183f8e9 to v6.x * Backport #3769: fix http2 test --- lib/api/api-request.js | 2 +- test/client-request.js | 36 ++++++++++++++++++++++++++++++++++++ test/http2.js | 24 ++++++++---------------- 3 files changed, 45 insertions(+), 17 deletions(-) diff --git a/lib/api/api-request.js b/lib/api/api-request.js index ced5590d210..be17d628774 100644 --- a/lib/api/api-request.js +++ b/lib/api/api-request.js @@ -73,7 +73,7 @@ class RequestHandler extends AsyncResource { this.removeAbortListener = util.addAbortListener(this.signal, () => { this.reason = this.signal.reason ?? 
new RequestAbortedError() if (this.res) { - util.destroy(this.res, this.reason) + util.destroy(this.res.on('error', util.nop), this.reason) } else if (this.abort) { this.abort(this.reason) } diff --git a/test/client-request.js b/test/client-request.js index c67cecdb7f3..c5e90e840ee 100644 --- a/test/client-request.js +++ b/test/client-request.js @@ -1252,3 +1252,39 @@ test('request post body DataView', async (t) => { await t.completed }) + +test('#3736 - Aborted Response (without consuming body)', async (t) => { + const plan = tspl(t, { plan: 1 }) + + const controller = new AbortController() + const server = createServer((req, res) => { + setTimeout(() => { + res.writeHead(200, 'ok', { + 'content-type': 'text/plain' + }) + res.write('hello from server') + res.end() + }, 100) + }) + + server.listen(0) + + await EE.once(server, 'listening') + const client = new Client(`http://localhost:${server.address().port}`) + + after(server.close.bind(server)) + after(client.destroy.bind(client)) + + const { signal } = controller + const promise = client.request({ + path: '/', + method: 'GET', + signal + }) + + controller.abort() + + await plan.rejects(promise, { message: 'This operation was aborted' }) + + await plan.completed +}) diff --git a/test/http2.js b/test/http2.js index d6840a1bd15..3f7ab3deb24 100644 --- a/test/http2.js +++ b/test/http2.js @@ -334,23 +334,15 @@ test( after(() => server.close()) after(() => client.close()) - t = tspl(t, { plan: 2 }) + t = tspl(t, { plan: 1 }) - try { - await client.request({ - path: '/', - method: 'GET', - headers: { - 'x-my-header': 'foo' - } - }) - } catch (error) { - t.strictEqual( - error.message, - 'Client network socket disconnected before secure TLS connection was established' - ) - t.strictEqual(error.code, 'ECONNRESET') - } + await t.rejects(client.request({ + path: '/', + method: 'GET', + headers: { + 'x-my-header': 'foo' + } + })) } ) From a0220f14bfd2a404173eacc94aa1722829075283 Mon Sep 17 00:00:00 2001 From: Carlos Fuentes Date: Fri, 22 Nov 2024 11:59:49 +0100 Subject: [PATCH 45/57] fix(#3817): send servername for SNI on TLS (#3821) [backport] (#3864) * fix(#3817): send servername for SNI on TLS (#3821) * fix(#3817): send servername for SNI on TLS * fix: set host header to servername * refactor: attach regardless (cherry picked from commit b93a83447a99fecf41a09c1b6857ae855a2254c9) * feat: missing interceptor * fix: lint --- docs/docs/api/Dispatcher.md | 51 ++ index.js | 3 +- lib/interceptor/dns.js | 375 ++++++++ test/interceptors/dns.js | 1721 +++++++++++++++++++++++++++++++++++ types/interceptors.d.ts | 14 + 5 files changed, 2163 insertions(+), 1 deletion(-) create mode 100644 lib/interceptor/dns.js create mode 100644 test/interceptors/dns.js diff --git a/docs/docs/api/Dispatcher.md b/docs/docs/api/Dispatcher.md index 67819ecd525..ce14bfe1cd2 100644 --- a/docs/docs/api/Dispatcher.md +++ b/docs/docs/api/Dispatcher.md @@ -986,6 +986,57 @@ client.dispatch( ); ``` +##### `dns` + +The `dns` interceptor enables you to cache DNS lookups for a given duration, per origin. + +>It is well suited for scenarios where you want to cache DNS lookups to avoid the overhead of resolving the same domain multiple times + +**Options** +- `maxTTL` - The maximum time-to-live (in milliseconds) of the DNS cache. It should be a positive integer. Default: `10000`. + - Set `0` to disable TTL. +- `maxItems` - The maximum number of items to cache. It should be a positive integer. Default: `Infinity`. +- `dualStack` - Whether to resolve both IPv4 and IPv6 addresses. 
Default: `true`. + - It will also attempt a happy-eyeballs-like approach to connect to the available addresses in case of a connection failure. +- `affinity` - Whether to use IPv4 or IPv6 addresses. Default: `4`. + - It can be either `'4` or `6`. + - It will only take effect if `dualStack` is `false`. +- `lookup: (hostname: string, options: LookupOptions, callback: (err: NodeJS.ErrnoException | null, addresses: DNSInterceptorRecord[]) => void) => void` - Custom lookup function. Default: `dns.lookup`. + - For more info see [dns.lookup](https://nodejs.org/api/dns.html#dns_dns_lookup_hostname_options_callback). +- `pick: (origin: URL, records: DNSInterceptorRecords, affinity: 4 | 6) => DNSInterceptorRecord` - Custom pick function. Default: `RoundRobin`. + - The function should return a single record from the records array. + - By default a simplified version of Round Robin is used. + - The `records` property can be mutated to store the state of the balancing algorithm. + +> The `Dispatcher#options` also gets extended with the options `dns.affinity`, `dns.dualStack`, `dns.lookup` and `dns.pick` which can be used to configure the interceptor at a request-per-request basis. + + +**DNSInterceptorRecord** +It represents a DNS record. +- `family` - (`number`) The IP family of the address. It can be either `4` or `6`. +- `address` - (`string`) The IP address. + +**DNSInterceptorOriginRecords** +It represents a map of DNS IP addresses records for a single origin. +- `4.ips` - (`DNSInterceptorRecord[] | null`) The IPv4 addresses. +- `6.ips` - (`DNSInterceptorRecord[] | null`) The IPv6 addresses. + +**Example - Basic DNS Interceptor** + +```js +const { Client, interceptors } = require("undici"); +const { dns } = interceptors; + +const client = new Agent().compose([ + dns({ ...opts }) +]) + +const response = await client.request({ + origin: `http://localhost:3030`, + ...requestOpts +}) +``` + ##### `Response Error Interceptor` **Introduction** diff --git a/index.js b/index.js index 7a68d04abb3..0c37ed4853b 100644 --- a/index.js +++ b/index.js @@ -41,7 +41,8 @@ module.exports.createRedirectInterceptor = createRedirectInterceptor module.exports.interceptors = { redirect: require('./lib/interceptor/redirect'), retry: require('./lib/interceptor/retry'), - dump: require('./lib/interceptor/dump') + dump: require('./lib/interceptor/dump'), + dns: require('./lib/interceptor/dns') } module.exports.buildConnector = buildConnector diff --git a/lib/interceptor/dns.js b/lib/interceptor/dns.js new file mode 100644 index 00000000000..917732646e6 --- /dev/null +++ b/lib/interceptor/dns.js @@ -0,0 +1,375 @@ +'use strict' +const { isIP } = require('node:net') +const { lookup } = require('node:dns') +const DecoratorHandler = require('../handler/decorator-handler') +const { InvalidArgumentError, InformationalError } = require('../core/errors') +const maxInt = Math.pow(2, 31) - 1 + +class DNSInstance { + #maxTTL = 0 + #maxItems = 0 + #records = new Map() + dualStack = true + affinity = null + lookup = null + pick = null + + constructor (opts) { + this.#maxTTL = opts.maxTTL + this.#maxItems = opts.maxItems + this.dualStack = opts.dualStack + this.affinity = opts.affinity + this.lookup = opts.lookup ?? this.#defaultLookup + this.pick = opts.pick ?? 
this.#defaultPick + } + + get full () { + return this.#records.size === this.#maxItems + } + + runLookup (origin, opts, cb) { + const ips = this.#records.get(origin.hostname) + + // If full, we just return the origin + if (ips == null && this.full) { + cb(null, origin.origin) + return + } + + const newOpts = { + affinity: this.affinity, + dualStack: this.dualStack, + lookup: this.lookup, + pick: this.pick, + ...opts.dns, + maxTTL: this.#maxTTL, + maxItems: this.#maxItems + } + + // If no IPs we lookup + if (ips == null) { + this.lookup(origin, newOpts, (err, addresses) => { + if (err || addresses == null || addresses.length === 0) { + cb(err ?? new InformationalError('No DNS entries found')) + return + } + + this.setRecords(origin, addresses) + const records = this.#records.get(origin.hostname) + + const ip = this.pick( + origin, + records, + newOpts.affinity + ) + + let port + if (typeof ip.port === 'number') { + port = `:${ip.port}` + } else if (origin.port !== '') { + port = `:${origin.port}` + } else { + port = '' + } + + cb( + null, + `${origin.protocol}//${ + ip.family === 6 ? `[${ip.address}]` : ip.address + }${port}` + ) + }) + } else { + // If there's IPs we pick + const ip = this.pick( + origin, + ips, + newOpts.affinity + ) + + // If no IPs we lookup - deleting old records + if (ip == null) { + this.#records.delete(origin.hostname) + this.runLookup(origin, opts, cb) + return + } + + let port + if (typeof ip.port === 'number') { + port = `:${ip.port}` + } else if (origin.port !== '') { + port = `:${origin.port}` + } else { + port = '' + } + + cb( + null, + `${origin.protocol}//${ + ip.family === 6 ? `[${ip.address}]` : ip.address + }${port}` + ) + } + } + + #defaultLookup (origin, opts, cb) { + lookup( + origin.hostname, + { + all: true, + family: this.dualStack === false ? this.affinity : 0, + order: 'ipv4first' + }, + (err, addresses) => { + if (err) { + return cb(err) + } + + const results = new Map() + + for (const addr of addresses) { + // On linux we found duplicates, we attempt to remove them with + // the latest record + results.set(`${addr.address}:${addr.family}`, addr) + } + + cb(null, results.values()) + } + ) + } + + #defaultPick (origin, hostnameRecords, affinity) { + let ip = null + const { records, offset } = hostnameRecords + + let family + if (this.dualStack) { + if (affinity == null) { + // Balance between ip families + if (offset == null || offset === maxInt) { + hostnameRecords.offset = 0 + affinity = 4 + } else { + hostnameRecords.offset++ + affinity = (hostnameRecords.offset & 1) === 1 ? 6 : 4 + } + } + + if (records[affinity] != null && records[affinity].ips.length > 0) { + family = records[affinity] + } else { + family = records[affinity === 4 ? 6 : 4] + } + } else { + family = records[affinity] + } + + // If no IPs we return null + if (family == null || family.ips.length === 0) { + return ip + } + + if (family.offset == null || family.offset === maxInt) { + family.offset = 0 + } else { + family.offset++ + } + + const position = family.offset % family.ips.length + ip = family.ips[position] ?? 
null + + if (ip == null) { + return ip + } + + if (Date.now() - ip.timestamp > ip.ttl) { // record TTL is already in ms + // We delete expired records + // It is possible that they have different TTL, so we manage them individually + family.ips.splice(position, 1) + return this.pick(origin, hostnameRecords, affinity) + } + + return ip + } + + setRecords (origin, addresses) { + const timestamp = Date.now() + const records = { records: { 4: null, 6: null } } + for (const record of addresses) { + record.timestamp = timestamp + if (typeof record.ttl === 'number') { + // The record TTL is expected to be in ms + record.ttl = Math.min(record.ttl, this.#maxTTL) + } else { + record.ttl = this.#maxTTL + } + + const familyRecords = records.records[record.family] ?? { ips: [] } + + familyRecords.ips.push(record) + records.records[record.family] = familyRecords + } + + this.#records.set(origin.hostname, records) + } + + getHandler (meta, opts) { + return new DNSDispatchHandler(this, meta, opts) + } +} + +class DNSDispatchHandler extends DecoratorHandler { + #state = null + #opts = null + #dispatch = null + #handler = null + #origin = null + + constructor (state, { origin, handler, dispatch }, opts) { + super(handler) + this.#origin = origin + this.#handler = handler + this.#opts = { ...opts } + this.#state = state + this.#dispatch = dispatch + } + + onError (err) { + switch (err.code) { + case 'ETIMEDOUT': + case 'ECONNREFUSED': { + if (this.#state.dualStack) { + // We delete the record and retry + this.#state.runLookup(this.#origin, this.#opts, (err, newOrigin) => { + if (err) { + return this.#handler.onError(err) + } + + const dispatchOpts = { + ...this.#opts, + origin: newOrigin + } + + this.#dispatch(dispatchOpts, this) + }) + + // if dual-stack disabled, we error out + return + } + + this.#handler.onError(err) + return + } + case 'ENOTFOUND': + this.#state.deleteRecord(this.#origin) + // eslint-disable-next-line no-fallthrough + default: + this.#handler.onError(err) + break + } + } +} + +module.exports = interceptorOpts => { + if ( + interceptorOpts?.maxTTL != null && + (typeof interceptorOpts?.maxTTL !== 'number' || interceptorOpts?.maxTTL < 0) + ) { + throw new InvalidArgumentError('Invalid maxTTL. Must be a positive number') + } + + if ( + interceptorOpts?.maxItems != null && + (typeof interceptorOpts?.maxItems !== 'number' || + interceptorOpts?.maxItems < 1) + ) { + throw new InvalidArgumentError( + 'Invalid maxItems. Must be a positive number and greater than zero' + ) + } + + if ( + interceptorOpts?.affinity != null && + interceptorOpts?.affinity !== 4 && + interceptorOpts?.affinity !== 6 + ) { + throw new InvalidArgumentError('Invalid affinity. Must be either 4 or 6') + } + + if ( + interceptorOpts?.dualStack != null && + typeof interceptorOpts?.dualStack !== 'boolean' + ) { + throw new InvalidArgumentError('Invalid dualStack. Must be a boolean') + } + + if ( + interceptorOpts?.lookup != null && + typeof interceptorOpts?.lookup !== 'function' + ) { + throw new InvalidArgumentError('Invalid lookup. Must be a function') + } + + if ( + interceptorOpts?.pick != null && + typeof interceptorOpts?.pick !== 'function' + ) { + throw new InvalidArgumentError('Invalid pick. Must be a function') + } + + const dualStack = interceptorOpts?.dualStack ?? true + let affinity + if (dualStack) { + affinity = interceptorOpts?.affinity ?? null + } else { + affinity = interceptorOpts?.affinity ?? 4 + } + + const opts = { + maxTTL: interceptorOpts?.maxTTL ?? 
10e3, // Expressed in ms + lookup: interceptorOpts?.lookup ?? null, + pick: interceptorOpts?.pick ?? null, + dualStack, + affinity, + maxItems: interceptorOpts?.maxItems ?? Infinity + } + + const instance = new DNSInstance(opts) + + return dispatch => { + return function dnsInterceptor (origDispatchOpts, handler) { + const origin = + origDispatchOpts.origin.constructor === URL + ? origDispatchOpts.origin + : new URL(origDispatchOpts.origin) + + if (isIP(origin.hostname) !== 0) { + return dispatch(origDispatchOpts, handler) + } + + instance.runLookup(origin, origDispatchOpts, (err, newOrigin) => { + if (err) { + return handler.onError(err) + } + + let dispatchOpts = null + dispatchOpts = { + ...origDispatchOpts, + servername: origin.hostname, // For SNI on TLS + origin: newOrigin, + headers: { + host: origin.hostname, + ...origDispatchOpts.headers + } + } + + dispatch( + dispatchOpts, + instance.getHandler({ origin, dispatch, handler }, origDispatchOpts) + ) + }) + + return true + } + } +} diff --git a/test/interceptors/dns.js b/test/interceptors/dns.js new file mode 100644 index 00000000000..6b4b30b13cc --- /dev/null +++ b/test/interceptors/dns.js @@ -0,0 +1,1721 @@ +'use strict' + +const { test, after } = require('node:test') +const { isIP } = require('node:net') +const { lookup } = require('node:dns') +const { createServer } = require('node:http') +const { createServer: createSecureServer } = require('node:https') +const { once } = require('node:events') +const { setTimeout: sleep } = require('node:timers/promises') + +const { tspl } = require('@matteo.collina/tspl') +const pem = require('https-pem') + +const { interceptors, Agent } = require('../..') +const { dns } = interceptors + +test('Should validate options', t => { + t = tspl(t, { plan: 10 }) + + t.throws(() => dns({ dualStack: 'true' }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ dualStack: 0 }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ affinity: '4' }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ affinity: 7 }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ maxTTL: -1 }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ maxTTL: '0' }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ maxItems: '1' }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ maxItems: -1 }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ lookup: {} }), { code: 'UND_ERR_INVALID_ARG' }) + t.throws(() => dns({ pick: [] }), { code: 'UND_ERR_INVALID_ARG' }) +}) + +test('Should automatically resolve IPs (dual stack)', async t => { + t = tspl(t, { plan: 8 }) + + const hostsnames = [] + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + const url = new URL(opts.origin) + + t.equal(hostsnames.includes(url.hostname), false) + + if (url.hostname[0] === '[') { + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + } else { + t.equal(isIP(url.hostname), 4) + } + + hostsnames.push(url.hostname) + + return dispatch(opts, handler) + } + }, + dns({ + lookup: (_origin, _opts, cb) => { + cb(null, [ + { + address: '::1', + family: 6 + }, + { + address: '127.0.0.1', + family: 4 + } + ]) + } + }) + ]) + + after(async () => { + 
await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') +}) + +test('Should respect DNS origin hostname for SNI on TLS', async t => { + t = tspl(t, { plan: 12 }) + + const hostsnames = [] + const server = createSecureServer(pem) + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + t.equal(req.headers.host, 'localhost') + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent({ + connect: { + rejectUnauthorized: false + } + }).compose([ + dispatch => { + return (opts, handler) => { + const url = new URL(opts.origin) + + t.equal(hostsnames.includes(url.hostname), false) + t.equal(opts.servername, 'localhost') + + if (url.hostname[0] === '[') { + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + } else { + t.equal(isIP(url.hostname), 4) + } + + hostsnames.push(url.hostname) + + return dispatch(opts, handler) + } + }, + dns({ + lookup: (_origin, _opts, cb) => { + cb(null, [ + { + address: '::1', + family: 6 + }, + { + address: '127.0.0.1', + family: 4 + } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `https://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `https://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') +}) + +test('Should recover on network errors (dual stack - 4)', async t => { + t = tspl(t, { plan: 8 }) + + let counter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0, '::1') + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + case 3: + // [::1] -> ::1 + t.equal(isIP(url.hostname), 4) + break + + case 4: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ + lookup: (_origin, _opts, cb) => { + cb(null, [ + { + address: '::1', + family: 6 + }, + { + address: '127.0.0.1', + family: 4 + } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: 
`http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') +}) + +test('Should recover on network errors (dual stack - 6)', async t => { + t = tspl(t, { plan: 7 }) + + let counter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0, '127.0.0.1') + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + case 3: + // [::1] -> ::1 + t.equal(isIP(url.hostname), 4) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ + lookup: (_origin, _opts, cb) => { + cb(null, [ + { + address: '::1', + family: 6 + }, + { + address: '127.0.0.1', + family: 4 + } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') +}) + +test('Should throw when on dual-stack disabled (4)', async t => { + t = tspl(t, { plan: 2 }) + + let counter = 0 + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ dualStack: false, affinity: 4 }) + ]) + + const promise = client.request({ + ...requestOptions, + origin: 'http://localhost:1234' + }) + + await t.rejects(promise, 'ECONNREFUSED') + + await t.complete +}) + +test('Should throw when on dual-stack disabled (6)', async t => { + t = tspl(t, { plan: 2 }) + + let counter = 0 + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ dualStack: false, affinity: 6 }) + ]) + + const promise = client.request({ + ...requestOptions, + origin: 'http://localhost:9999' + }) + + await t.rejects(promise, 'ECONNREFUSED') + + await t.complete +}) + +test('Should automatically 
resolve IPs (dual stack disabled - 4)', async t => { + t = tspl(t, { plan: 6 }) + + let counter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname), 4) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ dualStack: false }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') +}) + +test('Should automatically resolve IPs (dual stack disabled - 6)', async t => { + t = tspl(t, { plan: 6 }) + + let counter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ dualStack: false, affinity: 6 }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') +}) + +test('Should we handle TTL (4)', async t => { + t = tspl(t, { plan: 10 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0, '127.0.0.1') + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + case 2: + 
t.equal(isIP(url.hostname), 4) + break + + case 3: + t.equal(isIP(url.hostname), 4) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ + dualStack: false, + affinity: 4, + maxTTL: 400, + lookup: (origin, opts, cb) => { + ++lookupCounter + lookup( + origin.hostname, + { all: true, family: opts.affinity }, + cb + ) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + await sleep(200) + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + await sleep(300) + + const response3 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world!') + + t.equal(lookupCounter, 2) +}) + +test('Should we handle TTL (6)', async t => { + t = tspl(t, { plan: 10 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0, '::1') + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + case 3: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ + dualStack: false, + affinity: 6, + maxTTL: 400, + lookup: (origin, opts, cb) => { + ++lookupCounter + lookup( + origin.hostname, + { all: true, family: opts.affinity }, + cb + ) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + await sleep(200) + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + await sleep(300) + + const response3 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world!') + t.equal(lookupCounter, 2) +}) + +test('Should set lowest TTL between resolved and option maxTTL', async t => { + t = tspl(t, { plan: 9 }) + + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + 
res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0, '127.0.0.1') + + await once(server, 'listening') + + const client = new Agent().compose( + dns({ + dualStack: false, + affinity: 4, + maxTTL: 200, + lookup: (origin, opts, cb) => { + ++lookupCounter + cb(null, [ + { + address: '127.0.0.1', + family: 4, + ttl: lookupCounter === 1 ? 50 : 500 + } + ]) + } + }) + ) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + await sleep(100) + + // 100ms: lookup since ttl = Math.min(50, maxTTL: 200) + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + await sleep(100) + + // 100ms: cached since ttl = Math.min(500, maxTTL: 200) + const response3 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world!') + + await sleep(150) + + // 250ms: lookup since ttl = Math.min(500, maxTTL: 200) + const response4 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response4.statusCode, 200) + t.equal(await response4.body.text(), 'hello world!') + + t.equal(lookupCounter, 3) +}) + +test('Should use all dns entries (dual stack)', async t => { + t = tspl(t, { plan: 16 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + switch (counter) { + case 1: + t.equal(url.hostname, '1.1.1.1') + break + + case 2: + t.equal(url.hostname, '[::1]') + break + + case 3: + t.equal(url.hostname, '2.2.2.2') + break + + case 4: + t.equal(url.hostname, '[::2]') + break + + case 5: + t.equal(url.hostname, '1.1.1.1') + break + default: + t.fail('should not reach this point') + } + + url.hostname = '127.0.0.1' + opts.origin = url.toString() + return dispatch(opts, handler) + } + }, + dns({ + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '::1', family: 6 }, + { address: '::2', family: 6 }, + { address: '1.1.1.1', family: 4 }, + { address: '2.2.2.2', family: 4 } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + for (let i = 0; i < 5; i++) { + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + } + + t.equal(lookupCounter, 1) +}) + +test('Should use all dns entries (dual stack disabled - 4)', async t => { + t = tspl(t, { plan: 10 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + 
path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(url.hostname, '1.1.1.1') + break + + case 2: + t.equal(url.hostname, '2.2.2.2') + break + + case 3: + t.equal(url.hostname, '1.1.1.1') + break + default: + t.fail('should not reach this point') + } + + url.hostname = '127.0.0.1' + opts.origin = url.toString() + return dispatch(opts, handler) + } + }, + dns({ + dualStack: false, + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '1.1.1.1', family: 4 }, + { address: '2.2.2.2', family: 4 } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response1 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response1.statusCode, 200) + t.equal(await response1.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + const response3 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world!') + + t.equal(lookupCounter, 1) +}) + +test('Should use all dns entries (dual stack disabled - 6)', async t => { + t = tspl(t, { plan: 10 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(url.hostname, '[::1]') + break + + case 2: + t.equal(url.hostname, '[::2]') + break + + case 3: + t.equal(url.hostname, '[::1]') + break + default: + t.fail('should not reach this point') + } + + url.hostname = '127.0.0.1' + opts.origin = url.toString() + return dispatch(opts, handler) + } + }, + dns({ + dualStack: false, + affinity: 6, + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '::1', family: 6 }, + { address: '::2', family: 6 } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response1 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response1.statusCode, 200) + t.equal(await response1.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + const response3 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + 
t.equal(await response3.body.text(), 'hello world!') + + t.equal(lookupCounter, 1) +}) + +test('Should handle single family resolved (dual stack)', async t => { + t = tspl(t, { plan: 7 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ + lookup (origin, opts, cb) { + lookupCounter++ + if (lookupCounter === 1) { + cb(null, [ + { address: '127.0.0.1', family: 4, ttl: 50 } + ]) + } else { + cb(null, [ + { address: '::1', family: 6, ttl: 50 } + ]) + } + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + await sleep(100) + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + t.equal(lookupCounter, 2) +}) + +test('Should prefer affinity (dual stack - 4)', async t => { + t = tspl(t, { plan: 10 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(url.hostname, '1.1.1.1') + break + + case 2: + t.equal(url.hostname, '2.2.2.2') + break + + case 3: + t.equal(url.hostname, '1.1.1.1') + break + default: + t.fail('should not reach this point') + } + + url.hostname = '127.0.0.1' + opts.origin = url.toString() + return dispatch(opts, handler) + } + }, + dns({ + affinity: 4, + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '1.1.1.1', family: 4 }, + { address: '2.2.2.2', family: 4 }, + { address: '::1', family: 6 }, + { address: '::2', family: 6 } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + await sleep(100) + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + const response3 = await 
client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world!') + + t.equal(lookupCounter, 1) +}) + +test('Should prefer affinity (dual stack - 6)', async t => { + t = tspl(t, { plan: 10 }) + + let counter = 0 + let lookupCounter = 0 + const server = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server.listen(0) + + await once(server, 'listening') + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(url.hostname, '[::1]') + break + + case 2: + t.equal(url.hostname, '[::2]') + break + + case 3: + t.equal(url.hostname, '[::1]') + break + default: + t.fail('should not reach this point') + } + + url.hostname = '127.0.0.1' + opts.origin = url.toString() + return dispatch(opts, handler) + } + }, + dns({ + affinity: 6, + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '1.1.1.1', family: 4 }, + { address: '2.2.2.2', family: 4 }, + { address: '::1', family: 6 }, + { address: '::2', family: 6 } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server.close() + + await once(server, 'close') + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + await sleep(100) + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + const response3 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server.address().port}` + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world!') + + t.equal(lookupCounter, 1) +}) + +test('Should use resolved ports (4)', async t => { + t = tspl(t, { plan: 5 }) + + let lookupCounter = 0 + const server1 = createServer() + const server2 = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server1.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server1.listen(0) + + server2.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world! 
(x2)') + }) + server2.listen(0) + + await Promise.all([once(server1, 'listening'), once(server2, 'listening')]) + + const client = new Agent().compose([ + dns({ + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '127.0.0.1', family: 4, port: server1.address().port }, + { address: '127.0.0.1', family: 4, port: server2.address().port } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server1.close() + server2.close() + + await Promise.all([once(server1, 'close'), once(server2, 'close')]) + }) + + const response = await client.request({ + ...requestOptions, + origin: 'http://localhost' + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: 'http://localhost' + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world! (x2)') + + t.equal(lookupCounter, 1) +}) + +test('Should use resolved ports (6)', async t => { + t = tspl(t, { plan: 5 }) + + let lookupCounter = 0 + const server1 = createServer() + const server2 = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server1.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server1.listen(0, '::1') + + server2.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world! (x2)') + }) + server2.listen(0, '::1') + + await Promise.all([once(server1, 'listening'), once(server2, 'listening')]) + + const client = new Agent().compose([ + dns({ + lookup (origin, opts, cb) { + lookupCounter++ + cb(null, [ + { address: '::1', family: 6, port: server1.address().port }, + { address: '::1', family: 6, port: server2.address().port } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server1.close() + server2.close() + + await Promise.all([once(server1, 'close'), once(server2, 'close')]) + }) + + const response = await client.request({ + ...requestOptions, + origin: 'http://localhost' + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: 'http://localhost' + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world! (x2)') + + t.equal(lookupCounter, 1) +}) + +test('Should handle max cached items', async t => { + t = tspl(t, { plan: 9 }) + + let counter = 0 + const server1 = createServer() + const server2 = createServer() + const requestOptions = { + method: 'GET', + path: '/', + headers: { + 'content-type': 'application/json' + } + } + + server1.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world!') + }) + + server1.listen(0) + + server2.on('request', (req, res) => { + res.writeHead(200, { 'content-type': 'text/plain' }) + res.end('hello world! 
(x2)') + }) + server2.listen(0) + + await Promise.all([once(server1, 'listening'), once(server2, 'listening')]) + + const client = new Agent().compose([ + dispatch => { + return (opts, handler) => { + ++counter + const url = new URL(opts.origin) + + switch (counter) { + case 1: + t.equal(isIP(url.hostname), 4) + break + + case 2: + // [::1] -> ::1 + t.equal(isIP(url.hostname.slice(1, 4)), 6) + break + + case 3: + t.equal(url.hostname, 'developer.mozilla.org') + // Rewrite origin to avoid reaching internet + opts.origin = `http://127.0.0.1:${server2.address().port}` + break + default: + t.fail('should not reach this point') + } + + return dispatch(opts, handler) + } + }, + dns({ + maxItems: 1, + lookup: (_origin, _opts, cb) => { + cb(null, [ + { + address: '::1', + family: 6 + }, + { + address: '127.0.0.1', + family: 4 + } + ]) + } + }) + ]) + + after(async () => { + await client.close() + server1.close() + server2.close() + + await Promise.all([once(server1, 'close'), once(server2, 'close')]) + }) + + const response = await client.request({ + ...requestOptions, + origin: `http://localhost:${server1.address().port}` + }) + + t.equal(response.statusCode, 200) + t.equal(await response.body.text(), 'hello world!') + + const response2 = await client.request({ + ...requestOptions, + origin: `http://localhost:${server1.address().port}` + }) + + t.equal(response2.statusCode, 200) + t.equal(await response2.body.text(), 'hello world!') + + const response3 = await client.request({ + ...requestOptions, + origin: 'https://developer.mozilla.org' + }) + + t.equal(response3.statusCode, 200) + t.equal(await response3.body.text(), 'hello world! (x2)') +}) diff --git a/types/interceptors.d.ts b/types/interceptors.d.ts index 24166b61f4f..65e9397554e 100644 --- a/types/interceptors.d.ts +++ b/types/interceptors.d.ts @@ -1,3 +1,5 @@ +import { LookupOptions } from 'node:dns' + import Dispatcher from "./dispatcher"; import RetryHandler from "./retry-handler"; @@ -9,6 +11,18 @@ declare namespace Interceptors { export type RedirectInterceptorOpts = { maxRedirections?: number } export type ResponseErrorInterceptorOpts = { throwOnError: boolean } + // DNS interceptor + export type DNSInterceptorRecord = { address: string, ttl: number, family: 4 | 6 } + export type DNSInterceptorOriginRecords = { 4: { ips: DNSInterceptorRecord[] } | null, 6: { ips: DNSInterceptorRecord[] } | null } + export type DNSInterceptorOpts = { + maxTTL?: number + maxItems?: number + lookup?: (hostname: string, options: LookupOptions, callback: (err: NodeJS.ErrnoException | null, addresses: DNSInterceptorRecord[]) => void) => void + pick?: (origin: URL, records: DNSInterceptorOriginRecords, affinity: 4 | 6) => DNSInterceptorRecord + dualStack?: boolean + affinity?: 4 | 6 + } + export function createRedirectInterceptor(opts: RedirectInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function dump(opts?: DumpInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function retry(opts?: RetryInterceptorOpts): Dispatcher.DispatcherComposeInterceptor From ee6176cd2e09853c868bf5bc1a34bf0500963e4d Mon Sep 17 00:00:00 2001 From: Carlos Fuentes Date: Fri, 22 Nov 2024 12:00:13 +0100 Subject: [PATCH 46/57] fix: sending formdata bodies with http2 (#3863) [backport] (#3866) * fix: sending formdata bodies with http2 (#3863) (cherry picked from commit e49b5751ddfe726ebc6498f07a4af86de319b691) * fix: bad merge --------- Co-authored-by: Khafra --- lib/dispatcher/client-h2.js | 15 ++++++++++- test/http2.js | 53
++++++++++++++++++++++++++++++++++++- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/lib/dispatcher/client-h2.js b/lib/dispatcher/client-h2.js index 0448fa00736..00084738a1c 100644 --- a/lib/dispatcher/client-h2.js +++ b/lib/dispatcher/client-h2.js @@ -31,6 +31,8 @@ const { const kOpenStreams = Symbol('open streams') +let extractBody + // Experimental let h2ExperimentalWarned = false @@ -260,7 +262,8 @@ function shouldSendContentLength (method) { function writeH2 (client, request) { const session = client[kHTTP2Session] - const { body, method, path, host, upgrade, expectContinue, signal, headers: reqHeaders } = request + const { method, path, host, upgrade, expectContinue, signal, headers: reqHeaders } = request + let { body } = request if (upgrade) { util.errorRequest(client, request, new Error('Upgrade not supported for H2')) @@ -381,6 +384,16 @@ function writeH2 (client, request) { let contentLength = util.bodyLength(body) + if (util.isFormDataLike(body)) { + extractBody ??= require('../web/fetch/body.js').extractBody + + const [bodyStream, contentType] = extractBody(body) + headers['content-type'] = contentType + + body = bodyStream.stream + contentLength = bodyStream.length + } + if (contentLength == null) { contentLength = request.contentLength } diff --git a/test/http2.js b/test/http2.js index 3f7ab3deb24..7d130e670a9 100644 --- a/test/http2.js +++ b/test/http2.js @@ -10,7 +10,7 @@ const { Writable, pipeline, PassThrough, Readable } = require('node:stream') const pem = require('https-pem') -const { Client, Agent } = require('..') +const { Client, Agent, FormData } = require('..') const isGreaterThanv20 = process.versions.node.split('.').map(Number)[0] >= 20 @@ -1442,3 +1442,54 @@ test('#3671 - Graceful close', async (t) => { await t.completed }) + +test('#3803 - sending FormData bodies works', async (t) => { + const assert = tspl(t, { plan: 4 }) + + const server = createSecureServer(pem).listen(0) + server.on('stream', async (stream, headers) => { + const contentLength = Number(headers['content-length']) + + assert.ok(!Number.isNaN(contentLength)) + assert.ok(headers['content-type']?.startsWith('multipart/form-data; boundary=')) + + stream.respond({ ':status': 200 }) + + const fd = await new Response(stream, { + headers: { + 'content-type': headers['content-type'] + } + }).formData() + + assert.deepEqual(fd.get('a'), 'b') + assert.deepEqual(fd.get('c').name, 'e.fgh') + + stream.end() + }) + + await once(server, 'listening') + + const client = new Client(`https://localhost:${server.address().port}`, { + connect: { + rejectUnauthorized: false + }, + allowH2: true + }) + + t.after(async () => { + server.close() + await client.close() + }) + + const fd = new FormData() + fd.set('a', 'b') + fd.set('c', new Blob(['d']), 'e.fgh') + + await client.request({ + path: '/', + method: 'POST', + body: fd + }) + + await assert.completed +}) From be8cd0afa0cf8207d8849026f2ee6fdc6dc9dcec Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 24 Nov 2024 19:17:38 +0100 Subject: [PATCH 47/57] [Backport v6.x] fix: Fixed the issue that there is no running request when http2 goaway (#3877) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 沈鸿飞 --- lib/dispatcher/client-h2.js | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/dispatcher/client-h2.js b/lib/dispatcher/client-h2.js index 00084738a1c..4a52effb1f3 100644 --- 
a/lib/dispatcher/client-h2.js +++ b/lib/dispatcher/client-h2.js @@ -242,11 +242,12 @@ function onHTTP2GoAway (code) { util.destroy(this[kSocket], err) // Fail head of pipeline. - const request = client[kQueue][client[kRunningIdx]] - client[kQueue][client[kRunningIdx]++] = null - util.errorRequest(client, request, err) - - client[kPendingIdx] = client[kRunningIdx] + if (client[kRunningIdx] < client[kQueue].length) { + const request = client[kQueue][client[kRunningIdx]] + client[kQueue][client[kRunningIdx]++] = null + util.errorRequest(client, request, err) + client[kPendingIdx] = client[kRunningIdx] + } assert(client[kRunning] === 0) From 2414bc9f7d651f830902af00238e1b11d9a389dc Mon Sep 17 00:00:00 2001 From: Carlos Fuentes Date: Mon, 25 Nov 2024 18:17:29 +0100 Subject: [PATCH 48/57] Update return type of RetryCallback (#3851) (#3876) * Update return type * add type tests (cherry picked from commit a1fb2cc17b4055898bdc7251a903d7e7d62a4387) Co-authored-by: Qayyuum Harun <69181532+mqayyuum@users.noreply.github.com> --- test/types/retry-handler.test-d.ts | 49 ++++++++++++++++++++++++++++++ types/retry-handler.d.ts | 2 +- 2 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 test/types/retry-handler.test-d.ts diff --git a/test/types/retry-handler.test-d.ts b/test/types/retry-handler.test-d.ts new file mode 100644 index 00000000000..8dac930fa98 --- /dev/null +++ b/test/types/retry-handler.test-d.ts @@ -0,0 +1,49 @@ +import { expectType, expectAssignable, expectNotAssignable } from 'tsd' +import { Dispatcher, RetryHandler } from '../..' + +// Test the basic structure of RetryCallback +expectType<RetryHandler.RetryCallback>((err, context, callback) => { + expectType<Error>(err) + expectType<{ + state: RetryHandler.RetryState; + opts: Dispatcher.DispatchOptions & { + retryOptions?: RetryHandler.RetryOptions; + }; + }>(context) + expectType<RetryHandler.OnRetryCallback>(callback) +}) + +// Test that RetryCallback returns void +const testCallback = (() => {}) as RetryHandler.RetryCallback +const testContext = { + state: {} as RetryHandler.RetryState, + opts: {} as Dispatcher.DispatchOptions & { + retryOptions?: RetryHandler.RetryOptions; + } +} + +expectType<void>(testCallback(new Error(), testContext, () => {})) + +// Test that the function is assignable to RetryCallback +expectAssignable<RetryHandler.RetryCallback>(testCallback) + +// Test that an incorrectly typed function is not assignable to RetryCallback +expectNotAssignable<RetryHandler.RetryCallback>((() => {}) as ( + err: string, + context: number, + callback: boolean +) => void) + +// Test the nested types +const contextTest: Parameters<RetryHandler.RetryCallback>[1] = { + state: {} as RetryHandler.RetryState, + opts: { + method: 'GET', + path: 'some-path', + retryOptions: {} as RetryHandler.RetryOptions + } +} +expectType<RetryHandler.RetryState>(contextTest.state) +expectType< + Dispatcher.DispatchOptions & { retryOptions?: RetryHandler.RetryOptions } +>(contextTest.opts) diff --git a/types/retry-handler.d.ts b/types/retry-handler.d.ts index e44b207c221..6cb34c12d6d 100644 --- a/types/retry-handler.d.ts +++ b/types/retry-handler.d.ts @@ -32,7 +32,7 @@ declare namespace RetryHandler { }; }, callback: OnRetryCallback - ) => number | null; + ) => void export interface RetryOptions { /** From c3acc6050b781b827d80c86cbbab34f14458d385 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Thu, 16 Jan 2025 10:13:25 +0100 Subject: [PATCH 49/57] Merge commit from fork * Use crypto.randomInt() Signed-off-by: Matteo Collina * fixup Signed-off-by: Matteo Collina --------- Signed-off-by: Matteo Collina --- lib/web/fetch/body.js | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git
a/lib/web/fetch/body.js b/lib/web/fetch/body.js index 464e7b50e5c..aa6e2973532 100644 --- a/lib/web/fetch/body.js +++ b/lib/web/fetch/body.js @@ -20,6 +20,14 @@ const { isErrored, isDisturbed } = require('node:stream') const { isArrayBuffer } = require('node:util/types') const { serializeAMimeType } = require('./data-url') const { multipartFormDataParser } = require('./formdata-parser') +let random + +try { + const crypto = require('node:crypto') + random = (max) => crypto.randomInt(0, max) +} catch { + random = (max) => Math.floor(Math.random(max)) +} const textEncoder = new TextEncoder() function noop () {} @@ -113,7 +121,7 @@ function extractBody (object, keepalive = false) { // Set source to a copy of the bytes held by object. source = new Uint8Array(object.buffer.slice(object.byteOffset, object.byteOffset + object.byteLength)) } else if (util.isFormDataLike(object)) { - const boundary = `----formdata-undici-0${`${Math.floor(Math.random() * 1e11)}`.padStart(11, '0')}` + const boundary = `----formdata-undici-0${`${random(1e11)}`.padStart(11, '0')}` const prefix = `--${boundary}\r\nContent-Disposition: form-data` /*! formdata-polyfill. MIT License. Jimmy Wärting */ From e260e7bb173abe3399dabd61338ca4a71fcf8825 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Thu, 16 Jan 2025 10:19:29 +0100 Subject: [PATCH 50/57] Bumped v6.21.1 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 021466738c9..a465327b1bc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.21.0", + "version": "6.21.1", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From a0e76c73a8ecb913beea7e2210e40d12b7c5cf69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Szymon=20=C5=81=C4=85giewka?= Date: Sun, 16 Feb 2025 10:03:54 +0100 Subject: [PATCH 51/57] fix(types): add missing DNS interceptor (#4024) --- test/types/interceptor.test-d.ts | 4 ++++ types/interceptors.d.ts | 1 + 2 files changed, 5 insertions(+) diff --git a/test/types/interceptor.test-d.ts b/test/types/interceptor.test-d.ts index ba242bfcadb..3a09fc290a4 100644 --- a/test/types/interceptor.test-d.ts +++ b/test/types/interceptor.test-d.ts @@ -1,5 +1,9 @@ import {expectAssignable} from "tsd"; import Undici from "../.."; import Dispatcher from "../../types/dispatcher"; +import Interceptors from "../../types/interceptors"; expectAssignable(Undici.createRedirectInterceptor({ maxRedirections: 3 })) + +expectAssignable(new Dispatcher().compose([Interceptors.dns({maxTTL: 2_000})])); + diff --git a/types/interceptors.d.ts b/types/interceptors.d.ts index 65e9397554e..9f6af128d5e 100644 --- a/types/interceptors.d.ts +++ b/types/interceptors.d.ts @@ -28,4 +28,5 @@ declare namespace Interceptors { export function retry(opts?: RetryInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function redirect(opts?: RedirectInterceptorOpts): Dispatcher.DispatcherComposeInterceptor export function responseError(opts?: ResponseErrorInterceptorOpts): Dispatcher.DispatcherComposeInterceptor + export function dns (opts?: DNSInterceptorOpts): Dispatcher.DispatcherComposeInterceptor } From 133387138c9158d3b6e9493833986c34837035ad Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Wed, 12 Mar 2025 17:07:10 +0100 Subject: [PATCH 52/57] Removed clients with unrecoverable errors from the Pool (#4088) --- lib/dispatcher/pool.js | 14 ++ test/pool-connection-error-memory-leak.js | 
158 ++++++++++++++++ test/tls-cert-leak.js | 209 ++++++++++++++++++++++ 3 files changed, 381 insertions(+) create mode 100644 test/pool-connection-error-memory-leak.js create mode 100644 test/tls-cert-leak.js diff --git a/lib/dispatcher/pool.js b/lib/dispatcher/pool.js index 2d84cd96488..0b8a2da6da4 100644 --- a/lib/dispatcher/pool.js +++ b/lib/dispatcher/pool.js @@ -73,6 +73,20 @@ class Pool extends PoolBase { ? { ...options.interceptors } : undefined this[kFactory] = factory + + this.on('connectionError', (origin, targets, error) => { + // If a connection error occurs, we remove the client from the pool, + // and emit a connectionError event. They will not be re-used. + // Fixes https://github.com/nodejs/undici/issues/3895 + for (const target of targets) { + // Do not use kRemoveClient here, as it will close the client, + // but the client cannot be closed in this state. + const idx = this[kClients].indexOf(target) + if (idx !== -1) { + this[kClients].splice(idx, 1) + } + } + }) } [kGetDispatcher] () { diff --git a/test/pool-connection-error-memory-leak.js b/test/pool-connection-error-memory-leak.js new file mode 100644 index 00000000000..fc9c7deb4cf --- /dev/null +++ b/test/pool-connection-error-memory-leak.js @@ -0,0 +1,158 @@ +'use strict' + +const { test } = require('node:test') +const assert = require('node:assert') +const { Pool } = require('..') +const { createServer } = require('node:http') +const { kClients } = require('../lib/dispatcher/pool-base') + +// This test verifies that clients are properly removed from the pool when they encounter connection errors, +// which is the fix implemented for issue #3895 (memory leak with connection errors) +test('Pool client count does not grow on repeated connection errors', async (t) => { + // Setup a pool pointing to a non-existent server + const pool = new Pool('http://localhost:1', { + connections: 10, + connectTimeout: 100, // Short timeout to speed up the test + bodyTimeout: 100, + headersTimeout: 100 + }) + + try { + const clientCounts = [] + + // Track initial client count + clientCounts.push(pool[kClients].length) + + // Make several requests that will fail with connection errors + const requests = 5 + for (let i = 0; i < requests; i++) { + try { + await pool.request({ + path: `/${i}`, + method: 'GET' + }) + assert.fail('Request should have failed with a connection error') + } catch (err) { + // We expect connection errors, but the error might be wrapped + assert.ok( + err.code === 'ECONNREFUSED' || + err.cause?.code === 'ECONNREFUSED' || + err.code === 'UND_ERR_CONNECT', + `Expected connection error but got: ${err.message} (${err.code})` + ) + } + + // Track client count after each request + clientCounts.push(pool[kClients].length) + + // Small delay to allow for client cleanup + await new Promise(resolve => setTimeout(resolve, 10)) + } + + // The key test: verify that client count doesn't increase monotonically, + // which would indicate the memory leak that was fixed + const maxCount = Math.max(...clientCounts) + assert.ok( + clientCounts[clientCounts.length - 1] <= maxCount, + `Client count should not increase continuously. Counts: ${clientCounts.join(', ')}` + ) + + // Ensure the last two counts are similar (stabilized) + const lastCount = clientCounts[clientCounts.length - 1] + const secondLastCount = clientCounts[clientCounts.length - 2] + + assert.ok( + Math.abs(lastCount - secondLastCount) <= 1, + `Client count should stabilize. 
Last counts: ${secondLastCount}, ${lastCount}` + ) + + // Additional verification: make many more requests to check for significant growth + const moreRequests = 10 + const startCount = pool[kClients].length + + for (let i = 0; i < moreRequests; i++) { + try { + await pool.request({ + path: `/more-${i}`, + method: 'GET' + }) + } catch (err) { + // Expected error + } + + // Small delay to allow for client cleanup + await new Promise(resolve => setTimeout(resolve, 10)) + } + + const endCount = pool[kClients].length + + // The maximum tolerable growth - some growth may occur due to timing issues, + // but it should be limited and not proportional to the number of requests + const maxGrowth = 3 + + assert.ok( + endCount - startCount <= maxGrowth, + `Client count should not grow significantly after many failed requests. Start: ${startCount}, End: ${endCount}` + ) + } finally { + await pool.close() + } +}) + +// This test specifically verifies the fix in pool-base.js for connectionError event +test('Pool clients are removed on connectionError event', async (t) => { + // Create a server we'll use to track connection events + const server = createServer((req, res) => { + res.writeHead(200, { 'Content-Type': 'text/plain' }) + res.end('ok') + }) + + await new Promise(resolve => server.listen(0, resolve)) + const port = server.address().port + + const pool = new Pool(`http://localhost:${port}`, { + connections: 3 // Small pool to make testing easier + }) + + try { + // Make an initial successful request to create a client + await pool.request({ + path: '/', + method: 'GET' + }) + + // Save the initial number of clients + const initialCount = pool[kClients].length + assert.ok(initialCount > 0, 'Should have at least one client after a successful request') + + // Manually trigger a connectionError on all clients + for (const client of [...pool[kClients]]) { + client.emit('connectionError', 'origin', [client], new Error('Simulated connection error')) + } + + // Allow some time for the event to be processed + await new Promise(resolve => setTimeout(resolve, 50)) + + // After the fix, all clients should be removed when they emit a connectionError + assert.strictEqual( + pool[kClients].length, + 0, + 'All clients should be removed from pool after connectionError events' + ) + + // Make another request to verify the pool can create new clients + await pool.request({ + path: '/after-error', + method: 'GET' + }) + + // Verify new clients were created + assert.ok( + pool[kClients].length > 0, + 'Pool should create new clients after previous ones were removed' + ) + } finally { + await pool.close() + await new Promise(resolve => server.close(resolve)) + } +}) diff --git a/test/tls-cert-leak.js b/test/tls-cert-leak.js new file mode 100644 index 00000000000..2a579e7a0a6 --- /dev/null +++ b/test/tls-cert-leak.js @@ -0,0 +1,209 @@ +'use strict' + +const { test } = require('node:test') +const assert = require('node:assert') +const { tspl } = require('@matteo.collina/tspl') +const { fetch } = require('..') +const https = require('node:https') +const fs = require('node:fs') +const path = require('node:path') +const { closeServerAsPromise } = require('./utils/node-http') + +const hasGC = typeof global.gc !== 'undefined' + +// This test verifies that there is no memory leak when handling TLS certificate errors. +// It simulates the error by using a server with a self-signed certificate. 
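As background for the error handling exercised in the test that follows, here is a minimal illustrative sketch, not part of the patch itself, of how such a certificate failure typically surfaces through undici's fetch: the request rejects with a TypeError whose cause carries the underlying TLS error code. The URL and port are made up, and the exact code (for example DEPTH_ZERO_SELF_SIGNED_CERT rather than CERT_SIGNATURE_FAILURE) depends on the certificate and platform; the sketch assumes the published undici package.

'use strict'

const { fetch } = require('undici')

async function main () {
  try {
    // Hypothetical HTTPS endpoint presenting an untrusted, self-signed certificate.
    await fetch('https://localhost:8443/')
  } catch (err) {
    // undici's fetch wraps network and TLS failures in TypeError('fetch failed')
    // and attaches the original error as `cause`, which is what the test matches on.
    console.log(err.cause && err.cause.code)
  }
}

main()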
+test('no memory leak with TLS certificate errors', { timeout: 20000 }, async (t) => { + if (!hasGC) { + throw new Error('gc is not available. Run with \'--expose-gc\'.') + } + + const { ok } = tspl(t, { plan: 1 }) + + // Create HTTPS server with self-signed certificate + const serverOptions = { + key: fs.readFileSync(path.join(__dirname, 'fixtures', 'key.pem')), + cert: fs.readFileSync(path.join(__dirname, 'fixtures', 'cert.pem')) + } + + // Create a server that always responds with a simple message + const server = https.createServer(serverOptions, (req, res) => { + res.writeHead(200) + res.end('test response') + }) + + // Start server on a random port + await new Promise(resolve => server.listen(0, resolve)) + const serverUrl = `https://localhost:${server.address().port}` + + t.after(closeServerAsPromise(server)) + + // Function to make a request that will trigger a certificate error + async function makeRequest (i) { + try { + // The request will fail with CERT_SIGNATURE_FAILURE or similar + // because we're using a self-signed certificate and not telling + // Node.js to accept it + const res = await fetch(`${serverUrl}/request-${i}`, { + signal: AbortSignal.timeout(2000) // Short timeout to prevent hanging + }) + const text = await res.text() + return { status: res.status, text } + } catch (e) { + // In real code, without the fix, this would leak memory + if (e?.cause?.code === 'CERT_SIGNATURE_FAILURE' || + e?.cause?.code === 'DEPTH_ZERO_SELF_SIGNED_CERT' || + e?.cause?.code === 'ERR_TLS_CERT_ALTNAME_INVALID') { + return { status: 524, text: 'Certificate Error' } + } + // Return for any other error to avoid test interruption + return { status: 500, text: e.message } + } + } + + // Counter for completed requests + let complete = 0 + const requestCount = 400 + + // Track memory usage + const measurements = [] + let baselineMemory = 0 + + // Process a batch of requests + async function processBatch (start, batchSize) { + const promises = [] + const end = Math.min(start + batchSize, requestCount) + + for (let i = start; i < end; i++) { + promises.push(makeRequest(i)) + } + + await Promise.all(promises) + complete += promises.length + + // Measure memory after each batch + if (complete % 50 === 0 || complete === end) { + // Run GC multiple times to get more stable readings + global.gc() + await new Promise(resolve => setTimeout(resolve, 50)) + global.gc() + + const memUsage = process.memoryUsage() + + // Establish baseline after first batch + if (measurements.length === 0) { + baselineMemory = memUsage.heapUsed + } + + measurements.push({ + complete, + heapUsed: memUsage.heapUsed + }) + + console.log(`Completed ${complete}/${requestCount}: Heap: ${Math.round(memUsage.heapUsed / 1024 / 1024)}MB`) + + // Check memory trend after we have enough data + if (measurements.length >= 4) { + const hasLeak = checkMemoryTrend() + if (hasLeak) { + return true // Indicates a leak was detected + } + } + } + + return false // No leak detected + } + + // Main test logic + async function runTest () { + const batchSize = 50 + + for (let i = 0; i < requestCount; i += batchSize) { + const leakDetected = await processBatch(i, batchSize) + if (leakDetected) { + // If a leak is detected, fail the test + assert.fail('Memory leak detected: heap usage is consistently increasing at a significant rate') + return + } + + // Check if we have sufficient measurements or have done 350 requests + if (measurements.length >= 7 || complete >= 350) { + break + } + } + + // Final check + const finalCheckResult = 
finalMemoryCheck() + if (finalCheckResult) { + assert.fail(`Memory leak detected: ${finalCheckResult}`) + } else { + ok(true, 'Memory usage has stabilized') + } + } + + // Check if memory usage has a concerning trend + function checkMemoryTrend () { + // Calculate memory growth between each measurement + const growthRates = [] + for (let i = 1; i < measurements.length; i++) { + const prev = measurements[i - 1].heapUsed + const current = measurements[i].heapUsed + growthRates.push((current - prev) / prev) + } + + // Calculate growth from baseline + const totalGrowthFromBaseline = (measurements[measurements.length - 1].heapUsed - baselineMemory) / baselineMemory + + // Calculate average growth rate + const avgGrowthRate = growthRates.reduce((sum, rate) => sum + rate, 0) / growthRates.length + + console.log(`Growth from baseline: ${(totalGrowthFromBaseline * 100).toFixed(2)}%`) + console.log(`Average growth rate: ${(avgGrowthRate * 100).toFixed(2)}%`) + console.log(`Growth rates: ${growthRates.map(r => (r * 100).toFixed(2) + '%').join(', ')}`) + + // Only flag as leak if all conditions are met: + // 1. Consistent growth (majority of measurements show growth) + // 2. Average growth rate is significant (>2%) + // 3. Total growth from baseline is significant (>20%) + + // Count how many positive growth rates we have + const positiveGrowthRates = growthRates.filter(rate => rate > 0.01).length + + return ( + positiveGrowthRates >= Math.ceil(growthRates.length * 0.75) && // 75% of measurements show growth >1% + avgGrowthRate > 0.02 && // Average growth >2% + totalGrowthFromBaseline > 0.2 // Total growth >20% + ) + } + + // Final memory check with adjusted requirements + function finalMemoryCheck () { + if (measurements.length < 4) return false + + // Calculate growth from baseline to the last measurement + const totalGrowthFromBaseline = (measurements[measurements.length - 1].heapUsed - baselineMemory) / baselineMemory + console.log(`Final growth from baseline: ${(totalGrowthFromBaseline * 100).toFixed(2)}%`) + + // Calculate final slope over the last 150 requests + const lastMeasurements = measurements.slice(-3) + const finalSlope = (lastMeasurements[2].heapUsed - lastMeasurements[0].heapUsed) / + (lastMeasurements[2].complete - lastMeasurements[0].complete) + + console.log(`Final memory slope: ${finalSlope.toFixed(2)} bytes per request`) + + // Only consider it a leak if: + // 1. Total growth is very significant (>25%) + if (totalGrowthFromBaseline > 0.25) { + return `Excessive memory growth of ${(totalGrowthFromBaseline * 100).toFixed(2)}%` + } + + // 2. Memory is still growing rapidly at the end (>2000 bytes per request) + if (finalSlope > 2000) { + return `Memory still growing rapidly at ${finalSlope.toFixed(2)} bytes per request` + } + + return false + } + + await runTest() +}) From 4e07dda835ffb2ff7a1b1323dd94c61b8feaa3c5 Mon Sep 17 00:00:00 2001 From: Carlos Fuentes Date: Sun, 9 Feb 2025 17:30:46 +0100 Subject: [PATCH 53/57] test: fix windows wpt (#4050) * test: fixup * chore: narrow to windows * test: another batch * test: one moreq * chore: narrow to wpt * test: one moreq * Revert "chore: narrow to wpt" This reverts commit cb56079a2434b0c7f2840131838de9a7d1d70ec2. * Revert "chore: narrow to windows" This reverts commit 02d9c315d6b93a9f8b1971e73725d9c87d4f82d4. 
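Before the diffstat, a small illustrative sketch, not part of the patch series, of what this Windows fix (and the follow-up patch further below) is doing. The absolute path in the comment is invented; the relative './server/server.mjs' segment mirrors the files touched here.

// windows-path-sketch.mjs — illustrative only
import { join } from 'node:path'
import { fileURLToPath } from 'node:url'

// Suppose import.meta.url is 'file:///C:/work/undici/test/wpt/start-fetch.mjs' (invented path).

// Problematic ordering: path.join() treats the file: URL as an ordinary string, so on
// Windows the joined result mixes backslashes into the URL and no longer round-trips
// cleanly through fileURLToPath().
// const broken = fileURLToPath(join(import.meta.url, '../server/server.mjs'))

// This patch: convert the module URL to a filesystem path first, then join.
const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs')

// The follow-up patch goes one step further and resolves against the URL directly.
const serverPath2 = fileURLToPath(new URL('./server/server.mjs', import.meta.url))

console.log(serverPath, serverPath2)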
--- test/wpt/runner/runner.mjs | 4 ++-- test/wpt/server/server.mjs | 2 +- test/wpt/start-cacheStorage.mjs | 2 +- test/wpt/start-eventsource.mjs | 2 +- test/wpt/start-fetch.mjs | 2 +- test/wpt/start-mimesniff.mjs | 2 +- test/wpt/start-websockets.mjs | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/wpt/runner/runner.mjs b/test/wpt/runner/runner.mjs index a800cfff9bf..4012c71eb69 100644 --- a/test/wpt/runner/runner.mjs +++ b/test/wpt/runner/runner.mjs @@ -7,7 +7,7 @@ import { colors, handlePipes, normalizeName, parseMeta, resolveStatusPath } from const alwaysExit0 = process.env.GITHUB_WORKFLOW === 'Daily WPT report' -const basePath = fileURLToPath(join(import.meta.url, '../..')) +const basePath = join(fileURLToPath(import.meta.url), '../..') const testPath = join(basePath, '../fixtures/wpt') const statusPath = join(basePath, 'status') @@ -135,7 +135,7 @@ export class WPTRunner extends EventEmitter { } async run () { - const workerPath = fileURLToPath(join(import.meta.url, '../worker.mjs')) + const workerPath = join(fileURLToPath(import.meta.url), '../worker.mjs') /** @type {Set} */ const activeWorkers = new Set() let finishedFiles = 1 diff --git a/test/wpt/server/server.mjs b/test/wpt/server/server.mjs index cc6d4317048..a7f7cb2c295 100644 --- a/test/wpt/server/server.mjs +++ b/test/wpt/server/server.mjs @@ -10,7 +10,7 @@ import { route as redirectRoute } from './routes/redirect.mjs' import { Pipeline } from './util.mjs' import { symbols } from './constants.mjs' -const tests = fileURLToPath(join(import.meta.url, '../../../fixtures/wpt')) +const tests = join(fileURLToPath(import.meta.url), '../../../fixtures/wpt') // https://web-platform-tests.org/tools/wptserve/docs/stash.html class Stash extends Map { diff --git a/test/wpt/start-cacheStorage.mjs b/test/wpt/start-cacheStorage.mjs index fe818dddc14..bb5d2c20060 100644 --- a/test/wpt/start-cacheStorage.mjs +++ b/test/wpt/start-cacheStorage.mjs @@ -4,7 +4,7 @@ import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' -const serverPath = fileURLToPath(join(import.meta.url, '../server/server.mjs')) +const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-eventsource.mjs b/test/wpt/start-eventsource.mjs index f2a92562108..213aa4e3d8a 100644 --- a/test/wpt/start-eventsource.mjs +++ b/test/wpt/start-eventsource.mjs @@ -4,7 +4,7 @@ import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' -const serverPath = fileURLToPath(join(import.meta.url, '../server/server.mjs')) +const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-fetch.mjs b/test/wpt/start-fetch.mjs index 0e2168e610c..520dd2a8876 100644 --- a/test/wpt/start-fetch.mjs +++ b/test/wpt/start-fetch.mjs @@ -6,7 +6,7 @@ import { on } from 'events' const { WPT_REPORT } = process.env -const serverPath = fileURLToPath(join(import.meta.url, '../server/server.mjs')) +const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-mimesniff.mjs b/test/wpt/start-mimesniff.mjs index ebe4e783e3b..d28782fe926 100644 --- a/test/wpt/start-mimesniff.mjs +++ b/test/wpt/start-mimesniff.mjs @@ -6,7 +6,7 @@ import { on } from 'events' 
const { WPT_REPORT } = process.env -const serverPath = fileURLToPath(join(import.meta.url, '../server/server.mjs')) +const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-websockets.mjs b/test/wpt/start-websockets.mjs index ec42af0eeb1..2a909dde3c2 100644 --- a/test/wpt/start-websockets.mjs +++ b/test/wpt/start-websockets.mjs @@ -22,7 +22,7 @@ if (process.env.CI) { // process.exit(0) } -const serverPath = fileURLToPath(join(import.meta.url, '../server/websocket.mjs')) +const serverPath = join(fileURLToPath(import.meta.url), '../server/websocket.mjs') const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] From de1e4b8a39d102bb34155c3fdec3f18806b93d9c Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Thu, 13 Mar 2025 11:23:02 +0100 Subject: [PATCH 54/57] [v6.x] fix wpts on windows (#4093) * [v6.x] fix wpts on windows Signed-off-by: Matteo Collina * fixup Signed-off-by: Matteo Collina --------- Signed-off-by: Matteo Collina --- test/wpt/start-FileAPI.mjs | 3 +-- test/wpt/start-cacheStorage.mjs | 3 +-- test/wpt/start-eventsource.mjs | 3 +-- test/wpt/start-fetch.mjs | 3 +-- test/wpt/start-mimesniff.mjs | 3 +-- test/wpt/start-websockets.mjs | 3 +-- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/test/wpt/start-FileAPI.mjs b/test/wpt/start-FileAPI.mjs index 012acd28a7a..2f5219361bd 100644 --- a/test/wpt/start-FileAPI.mjs +++ b/test/wpt/start-FileAPI.mjs @@ -1,10 +1,9 @@ import { WPTRunner } from './runner/runner.mjs' -import { join } from 'path' import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' -const serverPath = fileURLToPath(join(import.meta.url, '../server/server.mjs')) +const serverPath = fileURLToPath(new URL('./server/server.mjs', import.meta.url)) const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-cacheStorage.mjs b/test/wpt/start-cacheStorage.mjs index bb5d2c20060..9b742846287 100644 --- a/test/wpt/start-cacheStorage.mjs +++ b/test/wpt/start-cacheStorage.mjs @@ -1,10 +1,9 @@ import { WPTRunner } from './runner/runner.mjs' -import { join } from 'path' import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' -const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') +const serverPath = fileURLToPath(new URL('./server/server.mjs', import.meta.url)) const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-eventsource.mjs b/test/wpt/start-eventsource.mjs index 213aa4e3d8a..8b96693a060 100644 --- a/test/wpt/start-eventsource.mjs +++ b/test/wpt/start-eventsource.mjs @@ -1,10 +1,9 @@ import { WPTRunner } from './runner/runner.mjs' -import { join } from 'path' import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' -const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') +const serverPath = fileURLToPath(new URL('./server/server.mjs', import.meta.url)) const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-fetch.mjs b/test/wpt/start-fetch.mjs index 520dd2a8876..b77270eb09b 100644 --- a/test/wpt/start-fetch.mjs +++ b/test/wpt/start-fetch.mjs @@ -1,12 +1,11 @@ import { WPTRunner } from './runner/runner.mjs' -import { join } from 'path' import { fileURLToPath } from 'url' import { fork } from 
'child_process' import { on } from 'events' const { WPT_REPORT } = process.env -const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') +const serverPath = fileURLToPath(new URL('./server/server.mjs', import.meta.url)) const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-mimesniff.mjs b/test/wpt/start-mimesniff.mjs index d28782fe926..c40bbb5b6ea 100644 --- a/test/wpt/start-mimesniff.mjs +++ b/test/wpt/start-mimesniff.mjs @@ -1,12 +1,11 @@ import { WPTRunner } from './runner/runner.mjs' -import { join } from 'path' import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' const { WPT_REPORT } = process.env -const serverPath = join(fileURLToPath(import.meta.url), '../server/server.mjs') +const serverPath = fileURLToPath(new URL('./server/server.mjs', import.meta.url)) const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] diff --git a/test/wpt/start-websockets.mjs b/test/wpt/start-websockets.mjs index 2a909dde3c2..00be77e19ab 100644 --- a/test/wpt/start-websockets.mjs +++ b/test/wpt/start-websockets.mjs @@ -1,5 +1,4 @@ import { WPTRunner } from './runner/runner.mjs' -import { join } from 'path' import { fileURLToPath } from 'url' import { fork } from 'child_process' import { on } from 'events' @@ -22,7 +21,7 @@ if (process.env.CI) { // process.exit(0) } -const serverPath = join(fileURLToPath(import.meta.url), '../server/websocket.mjs') +const serverPath = fileURLToPath(new URL('./server/websocket.mjs', import.meta.url)) const child = fork(serverPath, [], { stdio: ['pipe', 'pipe', 'pipe', 'ipc'] From b63d939953fe20cfd6718e8eed437da983ac7b12 Mon Sep 17 00:00:00 2001 From: Matteo Collina Date: Thu, 13 Mar 2025 11:33:24 +0100 Subject: [PATCH 55/57] Bumped v6.21.2 Signed-off-by: Matteo Collina --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index a465327b1bc..d7fab14768a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.21.1", + "version": "6.21.2", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": { From dbbe0a2d5004cd7b6016e52736f59ce37bdb1556 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 12 May 2025 10:27:44 -0700 Subject: [PATCH 56/57] append crlf to formdata body (#3625) (#4210) * append crlf to formdata body * fixup! v18 (cherry picked from commit fdeccc23b510bda706f5b62921a72ecdc593bda4) Co-authored-by: Khafra --- lib/web/fetch/body.js | 5 ++++- test/fetch/issue-3624.js | 29 +++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 test/fetch/issue-3624.js diff --git a/lib/web/fetch/body.js b/lib/web/fetch/body.js index aa6e2973532..7237749838a 100644 --- a/lib/web/fetch/body.js +++ b/lib/web/fetch/body.js @@ -162,7 +162,10 @@ function extractBody (object, keepalive = false) { } } - const chunk = textEncoder.encode(`--${boundary}--`) + // CRLF is appended to the body to function with legacy servers and match other implementations. 
+ // https://github.com/curl/curl/blob/3434c6b46e682452973972e8313613dfa58cd690/lib/mime.c#L1029-L1030 + // https://github.com/form-data/form-data/issues/63 + const chunk = textEncoder.encode(`--${boundary}--\r\n`) blobParts.push(chunk) length += chunk.byteLength if (hasUnknownSizeValue) { diff --git a/test/fetch/issue-3624.js b/test/fetch/issue-3624.js new file mode 100644 index 00000000000..37e722d7e4e --- /dev/null +++ b/test/fetch/issue-3624.js @@ -0,0 +1,29 @@ +'use strict' + +const assert = require('node:assert') +const { File } = require('node:buffer') +const { test } = require('node:test') +const { once } = require('node:events') +const { createServer } = require('node:http') +const { fetch, FormData } = require('../..') + +// https://github.com/nodejs/undici/issues/3624 +test('crlf is appended to formdata body (issue #3624)', async (t) => { + const server = createServer((req, res) => { + req.pipe(res) + }).listen(0) + + t.after(server.close.bind(server)) + await once(server, 'listening') + + const fd = new FormData() + fd.set('a', 'b') + fd.set('c', new File(['d'], 'd.txt.exe'), 'd.txt.exe') + + const response = await fetch(`http://localhost:${server.address().port}`, { + body: fd, + method: 'POST' + }) + + assert((await response.text()).endsWith('\r\n')) +}) From e5252167e5a55020f0620871b9793ae7b4a57866 Mon Sep 17 00:00:00 2001 From: github-actions Date: Mon, 12 May 2025 17:51:13 +0000 Subject: [PATCH 57/57] Bumped v6.21.3 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d7fab14768a..ac96260157e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "undici", - "version": "6.21.2", + "version": "6.21.3", "description": "An HTTP/1.1 client, written from scratch for Node.js", "homepage": "https://undici.nodejs.org", "bugs": {